[dpdk.git] drivers/net/hns3/hns3_dcb.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2019 Hisilicon Limited.
3  */
4
5 #include <errno.h>
6 #include <inttypes.h>
7 #include <stdbool.h>
8 #include <string.h>
9 #include <unistd.h>
10 #include <rte_io.h>
11 #include <rte_common.h>
12 #include <rte_ethdev.h>
13
14 #include "hns3_logs.h"
15 #include "hns3_regs.h"
16 #include "hns3_ethdev.h"
17 #include "hns3_dcb.h"
18
19 #define HNS3_SHAPER_BS_U_DEF    5
20 #define HNS3_SHAPER_BS_S_DEF    20
21 #define BW_MAX_PERCENT          100
22
23 /*
24  * hns3_shaper_para_calc: calculate ir parameter for the shaper
25  * @ir: rate to be configured, in Mbps
26  * @shaper_level: the shaper level, e.g. port, pg, priority, queue set
27  * @shaper_para: shaper parameter of IR shaper
28  *
29  * the formula:
30  *
31  *              IR_b * (2 ^ IR_u) * 8
32  * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
33  *              Tick * (2 ^ IR_s)
34  *
35  * @return: 0 on successful calculation, negative value on failure
36  */
37 static int
38 hns3_shaper_para_calc(struct hns3_hw *hw, uint32_t ir, uint8_t shaper_level,
39                       struct hns3_shaper_parameter *shaper_para)
40 {
41 #define SHAPER_DEFAULT_IR_B     126
42 #define DIVISOR_CLK             (1000 * 8)
43 #define DIVISOR_IR_B_126        (126 * DIVISOR_CLK)
44
45         const uint16_t tick_array[HNS3_SHAPER_LVL_CNT] = {
46                 6 * 256,    /* Priority level */
47                 6 * 32,     /* Priority group level */
48                 6 * 8,      /* Port level */
49                 6 * 256     /* Qset level */
50         };
51         uint8_t ir_u_calc = 0;
52         uint8_t ir_s_calc = 0;
53         uint32_t denominator;
54         uint32_t ir_calc;
55         uint32_t tick;
56
57         /* Calc tick */
58         if (shaper_level >= HNS3_SHAPER_LVL_CNT) {
59                 hns3_err(hw,
60                          "shaper_level(%d) is greater than HNS3_SHAPER_LVL_CNT(%d)",
61                          shaper_level, HNS3_SHAPER_LVL_CNT);
62                 return -EINVAL;
63         }
64
65         if (ir > hw->max_tm_rate) {
66                 hns3_err(hw, "rate(%d) exceeds the max rate(%d) "
67                          "supported by the driver.", ir, hw->max_tm_rate);
68                 return -EINVAL;
69         }
70
71         tick = tick_array[shaper_level];
72
73         /*
74          * Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
75          * the formula is changed to:
76          *              126 * 1 * 8
77          * ir_calc = ---------------- * 1000
78          *              tick * 1
79          */
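        /*
         * Worked example (illustrative, not part of the original source):
         * for the port level, tick = 6 * 8 = 48 and DIVISOR_IR_B_126 =
         * 126 * 8000 = 1008000, so ir_calc = (1008000 + 23) / 48 = 21000,
         * i.e. the default ir_b = 126 with ir_u = ir_s = 0 corresponds to
         * 21000 Mbps.
         */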
80         ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;
81
82         if (ir_calc == ir) {
83                 shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
84         } else if (ir_calc > ir) {
85                 /* Increasing the denominator to select ir_s value */
86                 do {
87                         ir_s_calc++;
88                         ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
89                 } while (ir_calc > ir);
90
91                 if (ir_calc == ir)
92                         shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
93                 else
94                         shaper_para->ir_b = (ir * tick * (1 << ir_s_calc) +
95                                  (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
96         } else {
97                 /*
98                  * Increase the numerator to select the ir_u value. ir_u_calc
99                  * reaches its maximum when ir_calc is at its minimum and ir is
100                  * at its maximum; ir_calc is at its minimum when tick takes its
101                  * maximum value. Even then, ir_u_calc can only be increased up
102                  * to eight after the while loop, when ir is equal to
103                  * hw->max_tm_rate.
104                  */
105                 uint32_t numerator;
106                 do {
107                         ir_u_calc++;
108                         numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
109                         ir_calc = (numerator + (tick >> 1)) / tick;
110                 } while (ir_calc < ir);
111
112                 if (ir_calc == ir) {
113                         shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
114                 } else {
115                         --ir_u_calc;
116
117                         /*
118                          * The maximum value of ir_u_calc in this branch is
119                  * seven in all cases. Thus, the denominator cannot be
120                  * zero here.
121                          */
122                         denominator = DIVISOR_CLK * (1 << ir_u_calc);
123                         shaper_para->ir_b =
124                                 (ir * tick + (denominator >> 1)) / denominator;
125                 }
126         }
127
128         shaper_para->ir_u = ir_u_calc;
129         shaper_para->ir_s = ir_s_calc;
130
131         return 0;
132 }
133
134 static int
135 hns3_fill_pri_array(struct hns3_hw *hw, uint8_t *pri, uint8_t pri_id)
136 {
137 #define HNS3_HALF_BYTE_BIT_OFFSET 4
138         uint8_t tc = hw->dcb_info.prio_tc[pri_id];
139
140         if (tc >= hw->dcb_info.num_tc)
141                 return -EINVAL;
142
143         /*
144          * The priority register has four bytes; the first byte holds
145          * priority0 and priority1, with the upper 4 bits for priority1
146          * and the lower 4 bits for priority0, as below:
147          * first byte:  | pri_1 | pri_0 |
148          * second byte: | pri_3 | pri_2 |
149          * third byte:  | pri_5 | pri_4 |
150          * fourth byte: | pri_7 | pri_6 |
151          */
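        /*
         * Illustration (not part of the original source): for pri_id = 5 and
         * tc = 3, pri_id >> 1 selects the third byte (pri[2]) and the shift
         * is (5 & 1) * 4 = 4, so pri[2] |= 3 << 4 puts TC 3 in the upper
         * nibble, matching the "| pri_5 | pri_4 |" layout above.
         */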
152         pri[pri_id >> 1] |= tc << ((pri_id & 1) * HNS3_HALF_BYTE_BIT_OFFSET);
153
154         return 0;
155 }
156
157 static int
158 hns3_up_to_tc_map(struct hns3_hw *hw)
159 {
160         struct hns3_cmd_desc desc;
161         uint8_t *pri = (uint8_t *)desc.data;
162         uint8_t pri_id;
163         int ret;
164
165         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PRI_TO_TC_MAPPING, false);
166
167         for (pri_id = 0; pri_id < HNS3_MAX_USER_PRIO; pri_id++) {
168                 ret = hns3_fill_pri_array(hw, pri, pri_id);
169                 if (ret)
170                         return ret;
171         }
172
173         return hns3_cmd_send(hw, &desc, 1);
174 }
175
176 static int
177 hns3_pg_to_pri_map_cfg(struct hns3_hw *hw, uint8_t pg_id, uint8_t pri_bit_map)
178 {
179         struct hns3_pg_to_pri_link_cmd *map;
180         struct hns3_cmd_desc desc;
181
182         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_TO_PRI_LINK, false);
183
184         map = (struct hns3_pg_to_pri_link_cmd *)desc.data;
185
186         map->pg_id = pg_id;
187         map->pri_bit_map = pri_bit_map;
188
189         return hns3_cmd_send(hw, &desc, 1);
190 }
191
192 static int
193 hns3_pg_to_pri_map(struct hns3_hw *hw)
194 {
195         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
196         struct hns3_pf *pf = &hns->pf;
197         struct hns3_pg_info *pg_info;
198         int ret, i;
199
200         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
201                 return -EINVAL;
202
203         for (i = 0; i < hw->dcb_info.num_pg; i++) {
204                 /* Cfg pg to priority mapping */
205                 pg_info = &hw->dcb_info.pg_info[i];
206                 ret = hns3_pg_to_pri_map_cfg(hw, i, pg_info->tc_bit_map);
207                 if (ret)
208                         return ret;
209         }
210
211         return 0;
212 }
213
214 static int
215 hns3_qs_to_pri_map_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t pri)
216 {
217         struct hns3_qs_to_pri_link_cmd *map;
218         struct hns3_cmd_desc desc;
219
220         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_TO_PRI_LINK, false);
221
222         map = (struct hns3_qs_to_pri_link_cmd *)desc.data;
223
224         map->qs_id = rte_cpu_to_le_16(qs_id);
225         map->priority = pri;
226         map->link_vld = HNS3_DCB_QS_PRI_LINK_VLD_MSK;
227
228         return hns3_cmd_send(hw, &desc, 1);
229 }
230
231 static int
232 hns3_dcb_qs_weight_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t dwrr)
233 {
234         struct hns3_qs_weight_cmd *weight;
235         struct hns3_cmd_desc desc;
236
237         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_WEIGHT, false);
238
239         weight = (struct hns3_qs_weight_cmd *)desc.data;
240
241         weight->qs_id = rte_cpu_to_le_16(qs_id);
242         weight->dwrr = dwrr;
243
244         return hns3_cmd_send(hw, &desc, 1);
245 }
246
247 static int
248 hns3_dcb_ets_tc_dwrr_cfg(struct hns3_hw *hw)
249 {
250 #define DEFAULT_TC_WEIGHT       1
251 #define DEFAULT_TC_OFFSET       14
252         struct hns3_ets_tc_weight_cmd *ets_weight;
253         struct hns3_cmd_desc desc;
254         uint8_t i;
255
256         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_ETS_TC_WEIGHT, false);
257         ets_weight = (struct hns3_ets_tc_weight_cmd *)desc.data;
258
259         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
260                 struct hns3_pg_info *pg_info;
261
262                 ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;
263
264                 if (!(hw->hw_tc_map & BIT(i)))
265                         continue;
266
267                 pg_info = &hw->dcb_info.pg_info[hw->dcb_info.tc_info[i].pgid];
268                 ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
269         }
270
271         ets_weight->weight_offset = DEFAULT_TC_OFFSET;
272
273         return hns3_cmd_send(hw, &desc, 1);
274 }
275
276 static int
277 hns3_dcb_pri_weight_cfg(struct hns3_hw *hw, uint8_t pri_id, uint8_t dwrr)
278 {
279         struct hns3_priority_weight_cmd *weight;
280         struct hns3_cmd_desc desc;
281
282         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PRI_WEIGHT, false);
283
284         weight = (struct hns3_priority_weight_cmd *)desc.data;
285
286         weight->pri_id = pri_id;
287         weight->dwrr = dwrr;
288
289         return hns3_cmd_send(hw, &desc, 1);
290 }
291
292 static int
293 hns3_dcb_pg_weight_cfg(struct hns3_hw *hw, uint8_t pg_id, uint8_t dwrr)
294 {
295         struct hns3_pg_weight_cmd *weight;
296         struct hns3_cmd_desc desc;
297
298         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_WEIGHT, false);
299
300         weight = (struct hns3_pg_weight_cmd *)desc.data;
301
302         weight->pg_id = pg_id;
303         weight->dwrr = dwrr;
304
305         return hns3_cmd_send(hw, &desc, 1);
306 }
307 static int
308 hns3_dcb_pg_schd_mode_cfg(struct hns3_hw *hw, uint8_t pg_id)
309 {
310         struct hns3_cmd_desc desc;
311
312         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_SCH_MODE_CFG, false);
313
314         if (hw->dcb_info.pg_info[pg_id].pg_sch_mode == HNS3_SCH_MODE_DWRR)
315                 desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
316         else
317                 desc.data[1] = 0;
318
319         desc.data[0] = rte_cpu_to_le_32(pg_id);
320
321         return hns3_cmd_send(hw, &desc, 1);
322 }
323
324 static uint32_t
325 hns3_dcb_get_shapping_para(uint8_t ir_b, uint8_t ir_u, uint8_t ir_s,
326                            uint8_t bs_b, uint8_t bs_s)
327 {
328         uint32_t shapping_para = 0;
329
330         hns3_dcb_set_field(shapping_para, IR_B, ir_b);
331         hns3_dcb_set_field(shapping_para, IR_U, ir_u);
332         hns3_dcb_set_field(shapping_para, IR_S, ir_s);
333         hns3_dcb_set_field(shapping_para, BS_B, bs_b);
334         hns3_dcb_set_field(shapping_para, BS_S, bs_s);
335
336         return shapping_para;
337 }
338
339 static int
340 hns3_dcb_port_shaper_cfg(struct hns3_hw *hw)
341 {
342         struct hns3_port_shapping_cmd *shap_cfg_cmd;
343         struct hns3_shaper_parameter shaper_parameter;
344         uint32_t shapping_para;
345         uint32_t ir_u, ir_b, ir_s;
346         struct hns3_cmd_desc desc;
347         int ret;
348
349         ret = hns3_shaper_para_calc(hw, hw->mac.link_speed,
350                                     HNS3_SHAPER_LVL_PORT, &shaper_parameter);
351         if (ret) {
352                 hns3_err(hw, "calculate shaper parameter failed: %d", ret);
353                 return ret;
354         }
355
356         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PORT_SHAPPING, false);
357         shap_cfg_cmd = (struct hns3_port_shapping_cmd *)desc.data;
358
359         ir_b = shaper_parameter.ir_b;
360         ir_u = shaper_parameter.ir_u;
361         ir_s = shaper_parameter.ir_s;
362         shapping_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
363                                                    HNS3_SHAPER_BS_U_DEF,
364                                                    HNS3_SHAPER_BS_S_DEF);
365
366         shap_cfg_cmd->port_shapping_para = rte_cpu_to_le_32(shapping_para);
367
368         /*
369          * Configure the port_rate and set bit HNS3_TM_RATE_VLD_B of flag
370          * field in hns3_port_shapping_cmd to require firmware to recalculate
371          * shapping parameters. Whether the parameters are actually
372          * recalculated depends on the firmware version, so the driver still
373          * calculates them and configures them to firmware for compatibility.
374          */
375         shap_cfg_cmd->port_rate = rte_cpu_to_le_32(hw->mac.link_speed);
376         hns3_set_bit(shap_cfg_cmd->flag, HNS3_TM_RATE_VLD_B, 1);
377
378         return hns3_cmd_send(hw, &desc, 1);
379 }
380
381 static int
382 hns3_dcb_pg_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,
383                          uint8_t pg_id, uint32_t shapping_para, uint32_t rate)
384 {
385         struct hns3_pg_shapping_cmd *shap_cfg_cmd;
386         enum hns3_opcode_type opcode;
387         struct hns3_cmd_desc desc;
388
389         opcode = bucket ? HNS3_OPC_TM_PG_P_SHAPPING :
390                  HNS3_OPC_TM_PG_C_SHAPPING;
391         hns3_cmd_setup_basic_desc(&desc, opcode, false);
392
393         shap_cfg_cmd = (struct hns3_pg_shapping_cmd *)desc.data;
394
395         shap_cfg_cmd->pg_id = pg_id;
396
397         shap_cfg_cmd->pg_shapping_para = rte_cpu_to_le_32(shapping_para);
398
399         /*
400          * Configure the pg_rate and set bit HNS3_TM_RATE_VLD_B of flag field in
401          * hns3_pg_shapping_cmd to require firmware to recalculate shapping
402          * parameters. Whether the parameters are actually recalculated
403          * depends on the firmware version, so the driver still calculates
404          * them and configures them to firmware for compatibility.
405          */
406         shap_cfg_cmd->pg_rate = rte_cpu_to_le_32(rate);
407         hns3_set_bit(shap_cfg_cmd->flag, HNS3_TM_RATE_VLD_B, 1);
408
409         return hns3_cmd_send(hw, &desc, 1);
410 }
411
412 static int
413 hns3_dcb_pg_shaper_cfg(struct hns3_hw *hw)
414 {
415         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
416         struct hns3_shaper_parameter shaper_parameter;
417         struct hns3_pf *pf = &hns->pf;
418         uint32_t ir_u, ir_b, ir_s;
419         uint32_t shaper_para;
420         uint32_t rate;
421         uint8_t i;
422         int ret;
423
424         /* Cfg pg schd */
425         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
426                 return -EINVAL;
427
428         /* Pg to pri */
429         for (i = 0; i < hw->dcb_info.num_pg; i++) {
430                 rate = hw->dcb_info.pg_info[i].bw_limit;
431
432                 /* Calc shaper para */
433                 ret = hns3_shaper_para_calc(hw, rate, HNS3_SHAPER_LVL_PG,
434                                             &shaper_parameter);
435                 if (ret) {
436                         hns3_err(hw, "calculate shaper parameter failed: %d",
437                                  ret);
438                         return ret;
439                 }
440
441                 shaper_para = hns3_dcb_get_shapping_para(0, 0, 0,
442                                                          HNS3_SHAPER_BS_U_DEF,
443                                                          HNS3_SHAPER_BS_S_DEF);
444
445                 ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, i,
446                                                shaper_para, rate);
447                 if (ret) {
448                         hns3_err(hw,
449                                  "config PG CIR shaper parameter failed: %d",
450                                  ret);
451                         return ret;
452                 }
453
454                 ir_b = shaper_parameter.ir_b;
455                 ir_u = shaper_parameter.ir_u;
456                 ir_s = shaper_parameter.ir_s;
457                 shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
458                                                          HNS3_SHAPER_BS_U_DEF,
459                                                          HNS3_SHAPER_BS_S_DEF);
460
461                 ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, i,
462                                                shaper_para, rate);
463                 if (ret) {
464                         hns3_err(hw,
465                                  "config PG PIR shaper parameter failed: %d",
466                                  ret);
467                         return ret;
468                 }
469         }
470
471         return 0;
472 }
473
474 static int
475 hns3_dcb_qs_schd_mode_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t mode)
476 {
477         struct hns3_cmd_desc desc;
478
479         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_SCH_MODE_CFG, false);
480
481         if (mode == HNS3_SCH_MODE_DWRR)
482                 desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
483         else
484                 desc.data[1] = 0;
485
486         desc.data[0] = rte_cpu_to_le_32(qs_id);
487
488         return hns3_cmd_send(hw, &desc, 1);
489 }
490
491 static int
492 hns3_dcb_pri_schd_mode_cfg(struct hns3_hw *hw, uint8_t pri_id)
493 {
494         struct hns3_cmd_desc desc;
495
496         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PRI_SCH_MODE_CFG, false);
497
498         if (hw->dcb_info.tc_info[pri_id].tc_sch_mode == HNS3_SCH_MODE_DWRR)
499                 desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
500         else
501                 desc.data[1] = 0;
502
503         desc.data[0] = rte_cpu_to_le_32(pri_id);
504
505         return hns3_cmd_send(hw, &desc, 1);
506 }
507
508 static int
509 hns3_dcb_pri_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,
510                           uint8_t pri_id, uint32_t shapping_para, uint32_t rate)
511 {
512         struct hns3_pri_shapping_cmd *shap_cfg_cmd;
513         enum hns3_opcode_type opcode;
514         struct hns3_cmd_desc desc;
515
516         opcode = bucket ? HNS3_OPC_TM_PRI_P_SHAPPING :
517                  HNS3_OPC_TM_PRI_C_SHAPPING;
518
519         hns3_cmd_setup_basic_desc(&desc, opcode, false);
520
521         shap_cfg_cmd = (struct hns3_pri_shapping_cmd *)desc.data;
522
523         shap_cfg_cmd->pri_id = pri_id;
524
525         shap_cfg_cmd->pri_shapping_para = rte_cpu_to_le_32(shapping_para);
526
527         /*
528          * Configure the pri_rate and set bit HNS3_TM_RATE_VLD_B of flag
529          * field in hns3_pri_shapping_cmd to require firmware to recalculate
530          * shapping parameters. Whether the parameters are actually
531          * recalculated depends on the firmware version, so the driver still
532          * calculates them and configures them to firmware for compatibility.
533          */
534         shap_cfg_cmd->pri_rate = rte_cpu_to_le_32(rate);
535         hns3_set_bit(shap_cfg_cmd->flag, HNS3_TM_RATE_VLD_B, 1);
536
537         return hns3_cmd_send(hw, &desc, 1);
538 }
539
540 static int
541 hns3_dcb_pri_tc_base_shaper_cfg(struct hns3_hw *hw)
542 {
543         struct hns3_shaper_parameter shaper_parameter;
544         uint32_t ir_u, ir_b, ir_s;
545         uint32_t shaper_para;
546         uint32_t rate;
547         int ret, i;
548
549         for (i = 0; i < hw->dcb_info.num_tc; i++) {
550                 rate = hw->dcb_info.tc_info[i].bw_limit;
551                 ret = hns3_shaper_para_calc(hw, rate, HNS3_SHAPER_LVL_PRI,
552                                             &shaper_parameter);
553                 if (ret) {
554                         hns3_err(hw, "calculate shaper parameter failed: %d",
555                                  ret);
556                         return ret;
557                 }
558
559                 shaper_para = hns3_dcb_get_shapping_para(0, 0, 0,
560                                                          HNS3_SHAPER_BS_U_DEF,
561                                                          HNS3_SHAPER_BS_S_DEF);
562
563                 ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, i,
564                                                 shaper_para, rate);
565                 if (ret) {
566                         hns3_err(hw,
567                                  "config priority CIR shaper parameter failed: %d",
568                                  ret);
569                         return ret;
570                 }
571
572                 ir_b = shaper_parameter.ir_b;
573                 ir_u = shaper_parameter.ir_u;
574                 ir_s = shaper_parameter.ir_s;
575                 shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
576                                                          HNS3_SHAPER_BS_U_DEF,
577                                                          HNS3_SHAPER_BS_S_DEF);
578
579                 ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, i,
580                                                 shaper_para, rate);
581                 if (ret) {
582                         hns3_err(hw,
583                                  "config priority PIR shaper parameter failed: %d",
584                                  ret);
585                         return ret;
586                 }
587         }
588
589         return 0;
590 }
591
592
593 static int
594 hns3_dcb_pri_shaper_cfg(struct hns3_hw *hw)
595 {
596         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
597         struct hns3_pf *pf = &hns->pf;
598         int ret;
599
600         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
601                 return -EINVAL;
602
603         ret = hns3_dcb_pri_tc_base_shaper_cfg(hw);
604         if (ret)
605                 hns3_err(hw, "config port shaper failed: %d", ret);
606
607         return ret;
608 }
609
610 static int
611 hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q)
612 {
613         struct hns3_rss_conf *rss_cfg = &hw->rss_info;
614         uint16_t rx_qnum_per_tc;
615         uint16_t used_rx_queues;
616         int i;
617
618         rx_qnum_per_tc = nb_rx_q / hw->num_tc;
619         if (rx_qnum_per_tc > hw->rss_size_max) {
620                 hns3_err(hw, "rx queue number per tc (%u) is greater than "
621                          "the value (%u) supported by hardware.",
622                          rx_qnum_per_tc, hw->rss_size_max);
623                 return -EINVAL;
624         }
625
626         used_rx_queues = hw->num_tc * rx_qnum_per_tc;
627         if (used_rx_queues != nb_rx_q) {
628                 hns3_err(hw, "rx queue number (%u) configured must be an "
629                          "integral multiple of valid tc number (%u).",
630                          nb_rx_q, hw->num_tc);
631                 return -EINVAL;
632         }
633         hw->alloc_rss_size = rx_qnum_per_tc;
634         hw->used_rx_queues = used_rx_queues;
635
636         /*
637          * When the rss size is changed, we need to update the rss redirection
638          * table maintained by the driver. Besides, during the entire reset
639          * process, we need to ensure that the rss table information is not
640          * overwritten and is configured directly to the hardware in the
641          * RESET_STAGE_RESTORE stage of the reset process.
642          */
643         if (rte_atomic16_read(&hw->reset.resetting) == 0) {
644                 for (i = 0; i < HNS3_RSS_IND_TBL_SIZE; i++)
645                         rss_cfg->rss_indirection_tbl[i] =
646                                                         i % hw->alloc_rss_size;
647         }
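        /*
         * Illustration (not part of the original source): with
         * alloc_rss_size = 4, the redirection table above is filled with
         * 0, 1, 2, 3, 0, 1, 2, 3, ... so the entries cycle over the queues
         * of one TC.
         */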
648
649         return 0;
650 }
651
652 static int
653 hns3_tc_queue_mapping_cfg(struct hns3_hw *hw, uint16_t nb_tx_q)
654 {
655         struct hns3_tc_queue_info *tc_queue;
656         uint16_t used_tx_queues;
657         uint16_t tx_qnum_per_tc;
658         uint8_t i;
659
660         tx_qnum_per_tc = nb_tx_q / hw->num_tc;
661         used_tx_queues = hw->num_tc * tx_qnum_per_tc;
662         if (used_tx_queues != nb_tx_q) {
663                 hns3_err(hw, "tx queue number (%u) configured must be an "
664                          "integral multiple of valid tc number (%u).",
665                          nb_tx_q, hw->num_tc);
666                 return -EINVAL;
667         }
668
669         hw->used_tx_queues = used_tx_queues;
670         hw->tx_qnum_per_tc = tx_qnum_per_tc;
671         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
672                 tc_queue = &hw->tc_queue[i];
673                 if (hw->hw_tc_map & BIT(i) && i < hw->num_tc) {
674                         tc_queue->enable = true;
675                         tc_queue->tqp_offset = i * hw->tx_qnum_per_tc;
676                         tc_queue->tqp_count = hw->tx_qnum_per_tc;
677                         tc_queue->tc = i;
678                 } else {
679                         /* Set to the default queue if the TC is disabled */
680                         tc_queue->enable = false;
681                         tc_queue->tqp_offset = 0;
682                         tc_queue->tqp_count = 0;
683                         tc_queue->tc = 0;
684                 }
685         }
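        /*
         * Illustration (not part of the original source): with num_tc = 4,
         * hw_tc_map = 0xF and tx_qnum_per_tc = 4, TC2 gets tqp_offset = 8
         * and tqp_count = 4, i.e. Tx queues 8~11, while disabled TCs keep
         * offset and count 0.
         */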
686
687         return 0;
688 }
689
690 int
691 hns3_queue_to_tc_mapping(struct hns3_hw *hw, uint16_t nb_rx_q, uint16_t nb_tx_q)
692 {
693         int ret;
694
695         ret = hns3_set_rss_size(hw, nb_rx_q);
696         if (ret)
697                 return ret;
698
699         return hns3_tc_queue_mapping_cfg(hw, nb_tx_q);
700 }
701
702 static int
703 hns3_dcb_update_tc_queue_mapping(struct hns3_hw *hw, uint16_t nb_rx_q,
704                                  uint16_t nb_tx_q)
705 {
706         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
707         struct hns3_pf *pf = &hns->pf;
708         int ret;
709
710         hw->num_tc = hw->dcb_info.num_tc;
711         ret = hns3_queue_to_tc_mapping(hw, nb_rx_q, nb_tx_q);
712         if (ret)
713                 return ret;
714
715         if (!hns->is_vf)
716                 memcpy(pf->prio_tc, hw->dcb_info.prio_tc, HNS3_MAX_USER_PRIO);
717
718         return 0;
719 }
720
721 int
722 hns3_dcb_info_init(struct hns3_hw *hw)
723 {
724         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
725         struct hns3_pf *pf = &hns->pf;
726         int i, k;
727
728         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
729             hw->dcb_info.num_pg != 1)
730                 return -EINVAL;
731
732         /* Initializing PG information */
733         memset(hw->dcb_info.pg_info, 0,
734                sizeof(struct hns3_pg_info) * HNS3_PG_NUM);
735         for (i = 0; i < hw->dcb_info.num_pg; i++) {
736                 hw->dcb_info.pg_dwrr[i] = i ? 0 : BW_MAX_PERCENT;
737                 hw->dcb_info.pg_info[i].pg_id = i;
738                 hw->dcb_info.pg_info[i].pg_sch_mode = HNS3_SCH_MODE_DWRR;
739                 hw->dcb_info.pg_info[i].bw_limit = hw->max_tm_rate;
740
741                 if (i != 0)
742                         continue;
743
744                 hw->dcb_info.pg_info[i].tc_bit_map = hw->hw_tc_map;
745                 for (k = 0; k < hw->dcb_info.num_tc; k++)
746                         hw->dcb_info.pg_info[i].tc_dwrr[k] = BW_MAX_PERCENT;
747         }
748
749         /* All UPs mapping to TC0 */
750         for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
751                 hw->dcb_info.prio_tc[i] = 0;
752
753         /* Initializing tc information */
754         memset(hw->dcb_info.tc_info, 0,
755                sizeof(struct hns3_tc_info) * HNS3_MAX_TC_NUM);
756         for (i = 0; i < hw->dcb_info.num_tc; i++) {
757                 hw->dcb_info.tc_info[i].tc_id = i;
758                 hw->dcb_info.tc_info[i].tc_sch_mode = HNS3_SCH_MODE_DWRR;
759                 hw->dcb_info.tc_info[i].pgid = 0;
760                 hw->dcb_info.tc_info[i].bw_limit =
761                         hw->dcb_info.pg_info[0].bw_limit;
762         }
763
764         return 0;
765 }
766
767 static int
768 hns3_dcb_lvl2_schd_mode_cfg(struct hns3_hw *hw)
769 {
770         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
771         struct hns3_pf *pf = &hns->pf;
772         int ret, i;
773
774         /* Only configured in TC-based scheduler mode */
775         if (pf->tx_sch_mode == HNS3_FLAG_VNET_BASE_SCH_MODE)
776                 return -EINVAL;
777
778         for (i = 0; i < hw->dcb_info.num_pg; i++) {
779                 ret = hns3_dcb_pg_schd_mode_cfg(hw, i);
780                 if (ret)
781                         return ret;
782         }
783
784         return 0;
785 }
786
787 static int
788 hns3_dcb_lvl34_schd_mode_cfg(struct hns3_hw *hw)
789 {
790         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
791         struct hns3_pf *pf = &hns->pf;
792         uint8_t i;
793         int ret;
794
795         if (pf->tx_sch_mode == HNS3_FLAG_TC_BASE_SCH_MODE) {
796                 for (i = 0; i < hw->dcb_info.num_tc; i++) {
797                         ret = hns3_dcb_pri_schd_mode_cfg(hw, i);
798                         if (ret)
799                                 return ret;
800
801                         ret = hns3_dcb_qs_schd_mode_cfg(hw, i,
802                                                         HNS3_SCH_MODE_DWRR);
803                         if (ret)
804                                 return ret;
805                 }
806         }
807
808         return 0;
809 }
810
811 static int
812 hns3_dcb_schd_mode_cfg(struct hns3_hw *hw)
813 {
814         int ret;
815
816         ret = hns3_dcb_lvl2_schd_mode_cfg(hw);
817         if (ret) {
818                 hns3_err(hw, "config lvl2_schd_mode failed: %d", ret);
819                 return ret;
820         }
821
822         ret = hns3_dcb_lvl34_schd_mode_cfg(hw);
823         if (ret)
824                 hns3_err(hw, "config lvl34_schd_mode failed: %d", ret);
825
826         return ret;
827 }
828
829 static int
830 hns3_dcb_pri_tc_base_dwrr_cfg(struct hns3_hw *hw)
831 {
832         struct hns3_pg_info *pg_info;
833         uint8_t dwrr;
834         int ret, i;
835
836         for (i = 0; i < hw->dcb_info.num_tc; i++) {
837                 pg_info = &hw->dcb_info.pg_info[hw->dcb_info.tc_info[i].pgid];
838                 dwrr = pg_info->tc_dwrr[i];
839
840                 ret = hns3_dcb_pri_weight_cfg(hw, i, dwrr);
841                 if (ret) {
842                         hns3_err(hw,
843                                "fail to send priority weight cmd: %d, ret = %d",
844                                i, ret);
845                         return ret;
846                 }
847
848                 ret = hns3_dcb_qs_weight_cfg(hw, i, BW_MAX_PERCENT);
849                 if (ret) {
850                         hns3_err(hw, "fail to send qs_weight cmd: %d, ret = %d",
851                                  i, ret);
852                         return ret;
853                 }
854         }
855
856         return 0;
857 }
858
859 static int
860 hns3_dcb_pri_dwrr_cfg(struct hns3_hw *hw)
861 {
862         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
863         struct hns3_pf *pf = &hns->pf;
864         uint32_t version;
865         int ret;
866
867         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
868                 return -EINVAL;
869
870         ret = hns3_dcb_pri_tc_base_dwrr_cfg(hw);
871         if (ret)
872                 return ret;
873
874         if (!hns3_dev_dcb_supported(hw))
875                 return 0;
876
877         ret = hns3_dcb_ets_tc_dwrr_cfg(hw);
878         if (ret == -EOPNOTSUPP) {
879                 version = hw->fw_version;
880                 hns3_warn(hw,
881                           "fw %lu.%lu.%lu.%lu doesn't support ets tc weight cmd",
882                           hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
883                                          HNS3_FW_VERSION_BYTE3_S),
884                           hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
885                                          HNS3_FW_VERSION_BYTE2_S),
886                           hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
887                                          HNS3_FW_VERSION_BYTE1_S),
888                           hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
889                                          HNS3_FW_VERSION_BYTE0_S));
890                 ret = 0;
891         }
892
893         return ret;
894 }
895
896 static int
897 hns3_dcb_pg_dwrr_cfg(struct hns3_hw *hw)
898 {
899         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
900         struct hns3_pf *pf = &hns->pf;
901         int ret, i;
902
903         /* Cfg pg schd */
904         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
905                 return -EINVAL;
906
907         /* Cfg pg to prio */
908         for (i = 0; i < hw->dcb_info.num_pg; i++) {
909                 /* Cfg dwrr */
910                 ret = hns3_dcb_pg_weight_cfg(hw, i, hw->dcb_info.pg_dwrr[i]);
911                 if (ret)
912                         return ret;
913         }
914
915         return 0;
916 }
917
918 static int
919 hns3_dcb_dwrr_cfg(struct hns3_hw *hw)
920 {
921         int ret;
922
923         ret = hns3_dcb_pg_dwrr_cfg(hw);
924         if (ret) {
925                 hns3_err(hw, "config pg_dwrr failed: %d", ret);
926                 return ret;
927         }
928
929         ret = hns3_dcb_pri_dwrr_cfg(hw);
930         if (ret)
931                 hns3_err(hw, "config pri_dwrr failed: %d", ret);
932
933         return ret;
934 }
935
936 static int
937 hns3_dcb_shaper_cfg(struct hns3_hw *hw)
938 {
939         int ret;
940
941         ret = hns3_dcb_port_shaper_cfg(hw);
942         if (ret) {
943                 hns3_err(hw, "config port shaper failed: %d", ret);
944                 return ret;
945         }
946
947         ret = hns3_dcb_pg_shaper_cfg(hw);
948         if (ret) {
949                 hns3_err(hw, "config pg shaper failed: %d", ret);
950                 return ret;
951         }
952
953         return hns3_dcb_pri_shaper_cfg(hw);
954 }
955
956 static int
957 hns3_q_to_qs_map_cfg(struct hns3_hw *hw, uint16_t q_id, uint16_t qs_id)
958 {
959         struct hns3_nq_to_qs_link_cmd *map;
960         struct hns3_cmd_desc desc;
961         uint16_t tmp_qs_id = 0;
962         uint16_t qs_id_l;
963         uint16_t qs_id_h;
964
965         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_NQ_TO_QS_LINK, false);
966
967         map = (struct hns3_nq_to_qs_link_cmd *)desc.data;
968
969         map->nq_id = rte_cpu_to_le_16(q_id);
970
971         /*
972          * Network engine with revision_id 0x21 uses bits 0~9 of qs_id to
973          * configure qset_id. So we need to convert qs_id to the following
974          * format to support qset_id > 1024.
975          * qs_id: | 15 | 14 ~ 10 |  9 ~ 0   |
976          *            /         / \         \
977          *           /         /   \         \
978          * qset_id: | 15 ~ 11 |  10 |  9 ~ 0  |
979          *          | qs_id_h | vld | qs_id_l |
980          */
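        /*
         * Illustration (not part of the original source, assuming the masks
         * follow the diagram above): for qs_id = 1025 (0x401), qs_id_l = 1
         * and qs_id_h = 1, so tmp_qs_id = (1 << 11) | 1 = 0x801, and the
         * value written is 0x801 | HNS3_DCB_Q_QS_LINK_VLD_MSK.
         */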
981         qs_id_l = hns3_get_field(qs_id, HNS3_DCB_QS_ID_L_MSK,
982                                  HNS3_DCB_QS_ID_L_S);
983         qs_id_h = hns3_get_field(qs_id, HNS3_DCB_QS_ID_H_MSK,
984                                  HNS3_DCB_QS_ID_H_S);
985         hns3_set_field(tmp_qs_id, HNS3_DCB_QS_ID_L_MSK, HNS3_DCB_QS_ID_L_S,
986                        qs_id_l);
987         hns3_set_field(tmp_qs_id, HNS3_DCB_QS_ID_H_EXT_MSK,
988                        HNS3_DCB_QS_ID_H_EXT_S, qs_id_h);
989         map->qset_id = rte_cpu_to_le_16(tmp_qs_id | HNS3_DCB_Q_QS_LINK_VLD_MSK);
990
991         return hns3_cmd_send(hw, &desc, 1);
992 }
993
994 static int
995 hns3_q_to_qs_map(struct hns3_hw *hw)
996 {
997         struct hns3_tc_queue_info *tc_queue;
998         uint16_t q_id;
999         uint32_t i, j;
1000         int ret;
1001
1002         for (i = 0; i < hw->num_tc; i++) {
1003                 tc_queue = &hw->tc_queue[i];
1004                 for (j = 0; j < tc_queue->tqp_count; j++) {
1005                         q_id = tc_queue->tqp_offset + j;
1006                         ret = hns3_q_to_qs_map_cfg(hw, q_id, i);
1007                         if (ret)
1008                                 return ret;
1009                 }
1010         }
1011
1012         return 0;
1013 }
1014
1015 static int
1016 hns3_pri_q_qs_cfg(struct hns3_hw *hw)
1017 {
1018         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1019         struct hns3_pf *pf = &hns->pf;
1020         uint32_t i;
1021         int ret;
1022
1023         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
1024                 return -EINVAL;
1025
1026         /* Cfg qs -> pri mapping */
1027         for (i = 0; i < hw->num_tc; i++) {
1028                 ret = hns3_qs_to_pri_map_cfg(hw, i, i);
1029                 if (ret) {
1030                         hns3_err(hw, "qs_to_pri mapping fail: %d", ret);
1031                         return ret;
1032                 }
1033         }
1034
1035         /* Cfg q -> qs mapping */
1036         ret = hns3_q_to_qs_map(hw);
1037         if (ret)
1038                 hns3_err(hw, "nq_to_qs mapping fail: %d", ret);
1039
1040         return ret;
1041 }
1042
1043 static int
1044 hns3_dcb_map_cfg(struct hns3_hw *hw)
1045 {
1046         int ret;
1047
1048         ret = hns3_up_to_tc_map(hw);
1049         if (ret) {
1050                 hns3_err(hw, "up_to_tc mapping fail: %d", ret);
1051                 return ret;
1052         }
1053
1054         ret = hns3_pg_to_pri_map(hw);
1055         if (ret) {
1056                 hns3_err(hw, "pri_to_pg mapping fail: %d", ret);
1057                 return ret;
1058         }
1059
1060         return hns3_pri_q_qs_cfg(hw);
1061 }
1062
1063 static int
1064 hns3_dcb_schd_setup_hw(struct hns3_hw *hw)
1065 {
1066         int ret;
1067
1068         /* Cfg dcb mapping  */
1069         ret = hns3_dcb_map_cfg(hw);
1070         if (ret)
1071                 return ret;
1072
1073         /* Cfg dcb shaper */
1074         ret = hns3_dcb_shaper_cfg(hw);
1075         if (ret)
1076                 return ret;
1077
1078         /* Cfg dwrr */
1079         ret = hns3_dcb_dwrr_cfg(hw);
1080         if (ret)
1081                 return ret;
1082
1083         /* Cfg schd mode for each level schd */
1084         return hns3_dcb_schd_mode_cfg(hw);
1085 }
1086
1087 static int
1088 hns3_pause_param_cfg(struct hns3_hw *hw, const uint8_t *addr,
1089                      uint8_t pause_trans_gap, uint16_t pause_trans_time)
1090 {
1091         struct hns3_cfg_pause_param_cmd *pause_param;
1092         struct hns3_cmd_desc desc;
1093
1094         pause_param = (struct hns3_cfg_pause_param_cmd *)desc.data;
1095
1096         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PARA, false);
1097
1098         memcpy(pause_param->mac_addr, addr, RTE_ETHER_ADDR_LEN);
1099         memcpy(pause_param->mac_addr_extra, addr, RTE_ETHER_ADDR_LEN);
1100         pause_param->pause_trans_gap = pause_trans_gap;
1101         pause_param->pause_trans_time = rte_cpu_to_le_16(pause_trans_time);
1102
1103         return hns3_cmd_send(hw, &desc, 1);
1104 }
1105
1106 int
1107 hns3_pause_addr_cfg(struct hns3_hw *hw, const uint8_t *mac_addr)
1108 {
1109         struct hns3_cfg_pause_param_cmd *pause_param;
1110         struct hns3_cmd_desc desc;
1111         uint16_t trans_time;
1112         uint8_t trans_gap;
1113         int ret;
1114
1115         pause_param = (struct hns3_cfg_pause_param_cmd *)desc.data;
1116
1117         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PARA, true);
1118
1119         ret = hns3_cmd_send(hw, &desc, 1);
1120         if (ret)
1121                 return ret;
1122
1123         trans_gap = pause_param->pause_trans_gap;
1124         trans_time = rte_le_to_cpu_16(pause_param->pause_trans_time);
1125
1126         return hns3_pause_param_cfg(hw, mac_addr, trans_gap, trans_time);
1127 }
1128
1129 static int
1130 hns3_pause_param_setup_hw(struct hns3_hw *hw, uint16_t pause_time)
1131 {
1132 #define PAUSE_TIME_DIV_BY       2
1133 #define PAUSE_TIME_MIN_VALUE    0x4
1134
1135         struct hns3_mac *mac = &hw->mac;
1136         uint8_t pause_trans_gap;
1137
1138         /*
1139          * Pause transmit gap must be less than "pause_time / 2", otherwise
1140          * the behavior of MAC is undefined.
1141          */
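        /*
         * Illustration (not part of the original source): assuming a
         * pause_time of 10 falls in the second branch below (i.e. it is
         * between PAUSE_TIME_MIN_VALUE and twice the default gap), the
         * transmit gap becomes 10 / 2 - 1 = 4, which satisfies
         * gap < pause_time / 2.
         */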
1142         if (pause_time > PAUSE_TIME_DIV_BY * HNS3_DEFAULT_PAUSE_TRANS_GAP)
1143                 pause_trans_gap = HNS3_DEFAULT_PAUSE_TRANS_GAP;
1144         else if (pause_time >= PAUSE_TIME_MIN_VALUE &&
1145                  pause_time <= PAUSE_TIME_DIV_BY * HNS3_DEFAULT_PAUSE_TRANS_GAP)
1146                 pause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1;
1147         else {
1148                 hns3_warn(hw, "pause_time(%d) is adjusted to 4", pause_time);
1149                 pause_time = PAUSE_TIME_MIN_VALUE;
1150                 pause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1;
1151         }
1152
1153         return hns3_pause_param_cfg(hw, mac->mac_addr,
1154                                     pause_trans_gap, pause_time);
1155 }
1156
1157 static int
1158 hns3_mac_pause_en_cfg(struct hns3_hw *hw, bool tx, bool rx)
1159 {
1160         struct hns3_cmd_desc desc;
1161
1162         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PAUSE_EN, false);
1163
1164         desc.data[0] = rte_cpu_to_le_32((tx ? HNS3_TX_MAC_PAUSE_EN_MSK : 0) |
1165                 (rx ? HNS3_RX_MAC_PAUSE_EN_MSK : 0));
1166
1167         return hns3_cmd_send(hw, &desc, 1);
1168 }
1169
1170 static int
1171 hns3_pfc_pause_en_cfg(struct hns3_hw *hw, uint8_t pfc_bitmap, bool tx, bool rx)
1172 {
1173         struct hns3_cmd_desc desc;
1174         struct hns3_pfc_en_cmd *pfc = (struct hns3_pfc_en_cmd *)desc.data;
1175
1176         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PFC_PAUSE_EN, false);
1177
1178         pfc->tx_rx_en_bitmap = (uint8_t)((tx ? HNS3_TX_MAC_PAUSE_EN_MSK : 0) |
1179                                         (rx ? HNS3_RX_MAC_PAUSE_EN_MSK : 0));
1180
1181         pfc->pri_en_bitmap = pfc_bitmap;
1182
1183         return hns3_cmd_send(hw, &desc, 1);
1184 }
1185
1186 static int
1187 hns3_qs_bp_cfg(struct hns3_hw *hw, uint8_t tc, uint8_t grp_id, uint32_t bit_map)
1188 {
1189         struct hns3_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
1190         struct hns3_cmd_desc desc;
1191
1192         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_BP_TO_QSET_MAPPING, false);
1193
1194         bp_to_qs_map_cmd = (struct hns3_bp_to_qs_map_cmd *)desc.data;
1195
1196         bp_to_qs_map_cmd->tc_id = tc;
1197         bp_to_qs_map_cmd->qs_group_id = grp_id;
1198         bp_to_qs_map_cmd->qs_bit_map = rte_cpu_to_le_32(bit_map);
1199
1200         return hns3_cmd_send(hw, &desc, 1);
1201 }
1202
1203 static void
1204 hns3_get_rx_tx_en_status(struct hns3_hw *hw, bool *tx_en, bool *rx_en)
1205 {
1206         switch (hw->current_mode) {
1207         case HNS3_FC_NONE:
1208                 *tx_en = false;
1209                 *rx_en = false;
1210                 break;
1211         case HNS3_FC_RX_PAUSE:
1212                 *tx_en = false;
1213                 *rx_en = true;
1214                 break;
1215         case HNS3_FC_TX_PAUSE:
1216                 *tx_en = true;
1217                 *rx_en = false;
1218                 break;
1219         case HNS3_FC_FULL:
1220                 *tx_en = true;
1221                 *rx_en = true;
1222                 break;
1223         default:
1224                 *tx_en = false;
1225                 *rx_en = false;
1226                 break;
1227         }
1228 }
1229
1230 static int
1231 hns3_mac_pause_setup_hw(struct hns3_hw *hw)
1232 {
1233         bool tx_en, rx_en;
1234
1235         if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)
1236                 hns3_get_rx_tx_en_status(hw, &tx_en, &rx_en);
1237         else {
1238                 tx_en = false;
1239                 rx_en = false;
1240         }
1241
1242         return hns3_mac_pause_en_cfg(hw, tx_en, rx_en);
1243 }
1244
1245 static int
1246 hns3_pfc_setup_hw(struct hns3_hw *hw)
1247 {
1248         bool tx_en, rx_en;
1249
1250         if (hw->current_fc_status == HNS3_FC_STATUS_PFC)
1251                 hns3_get_rx_tx_en_status(hw, &tx_en, &rx_en);
1252         else {
1253                 tx_en = false;
1254                 rx_en = false;
1255         }
1256
1257         return hns3_pfc_pause_en_cfg(hw, hw->dcb_info.pfc_en, tx_en, rx_en);
1258 }
1259
1260 /*
1261  * Each TC has 1024 queue sets for backpressure; they are divided into
1262  * 32 groups, and each group contains 32 queue sets, which can be
1263  * represented by a uint32_t bitmap.
1264  */
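/*
 * Illustration (not part of the original source, assuming HNS3_BP_GRP_ID
 * selects the upper bits of the qset index and HNS3_BP_SUB_GRP_ID the low
 * five bits): with one qset per TC, tc = 3 gives grp = 0 and sub_grp = 3,
 * so group 0 is written with qs_bitmap = BIT(3) and the other 31 groups
 * with 0.
 */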
1265 static int
1266 hns3_bp_setup_hw(struct hns3_hw *hw, uint8_t tc)
1267 {
1268         uint32_t qs_bitmap;
1269         int ret;
1270         int i;
1271
1272         for (i = 0; i < HNS3_BP_GRP_NUM; i++) {
1273                 uint8_t grp, sub_grp;
1274                 qs_bitmap = 0;
1275
1276                 grp = hns3_get_field(tc, HNS3_BP_GRP_ID_M, HNS3_BP_GRP_ID_S);
1277                 sub_grp = hns3_get_field(tc, HNS3_BP_SUB_GRP_ID_M,
1278                                          HNS3_BP_SUB_GRP_ID_S);
1279                 if (i == grp)
1280                         qs_bitmap |= (1 << sub_grp);
1281
1282                 ret = hns3_qs_bp_cfg(hw, tc, i, qs_bitmap);
1283                 if (ret)
1284                         return ret;
1285         }
1286
1287         return 0;
1288 }
1289
1290 static int
1291 hns3_dcb_bp_setup(struct hns3_hw *hw)
1292 {
1293         int ret, i;
1294
1295         for (i = 0; i < hw->dcb_info.num_tc; i++) {
1296                 ret = hns3_bp_setup_hw(hw, i);
1297                 if (ret)
1298                         return ret;
1299         }
1300
1301         return 0;
1302 }
1303
1304 static int
1305 hns3_dcb_pause_setup_hw(struct hns3_hw *hw)
1306 {
1307         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1308         struct hns3_pf *pf = &hns->pf;
1309         int ret;
1310
1311         ret = hns3_pause_param_setup_hw(hw, pf->pause_time);
1312         if (ret) {
1313                 hns3_err(hw, "Fail to set pause parameter. ret = %d", ret);
1314                 return ret;
1315         }
1316
1317         ret = hns3_mac_pause_setup_hw(hw);
1318         if (ret) {
1319                 hns3_err(hw, "Fail to setup MAC pause. ret = %d", ret);
1320                 return ret;
1321         }
1322
1323         /* Only DCB-supported dev supports qset back pressure and pfc cmd */
1324         if (!hns3_dev_dcb_supported(hw))
1325                 return 0;
1326
1327         ret = hns3_pfc_setup_hw(hw);
1328         if (ret) {
1329                 hns3_err(hw, "config pfc failed! ret = %d", ret);
1330                 return ret;
1331         }
1332
1333         return hns3_dcb_bp_setup(hw);
1334 }
1335
1336 static uint8_t
1337 hns3_dcb_undrop_tc_map(struct hns3_hw *hw, uint8_t pfc_en)
1338 {
1339         uint8_t pfc_map = 0;
1340         uint8_t *prio_tc;
1341         uint8_t i, j;
1342
1343         prio_tc = hw->dcb_info.prio_tc;
1344         for (i = 0; i < hw->dcb_info.num_tc; i++) {
1345                 for (j = 0; j < HNS3_MAX_USER_PRIO; j++) {
1346                         if (prio_tc[j] == i && pfc_en & BIT(j)) {
1347                                 pfc_map |= BIT(i);
1348                                 break;
1349                         }
1350                 }
1351         }
1352
1353         return pfc_map;
1354 }
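
/*
 * Illustration for hns3_dcb_undrop_tc_map() above (not part of the original
 * source): with num_tc = 2, prio_tc = {0, 0, 1, 1, 0, 0, 0, 0} and
 * pfc_en = BIT(2) | BIT(3), the PFC-enabled priorities 2 and 3 both map to
 * TC1, so the function returns BIT(1).
 */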
1355
1356 static void
1357 hns3_dcb_cfg_validate(struct hns3_adapter *hns, uint8_t *tc, bool *changed)
1358 {
1359         struct rte_eth_dcb_rx_conf *dcb_rx_conf;
1360         struct hns3_hw *hw = &hns->hw;
1361         uint8_t max_tc = 0;
1362         uint8_t pfc_en;
1363         int i;
1364
1365         dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
1366         for (i = 0; i < HNS3_MAX_USER_PRIO; i++) {
1367                 if (dcb_rx_conf->dcb_tc[i] != hw->dcb_info.prio_tc[i])
1368                         *changed = true;
1369
1370                 if (dcb_rx_conf->dcb_tc[i] > max_tc)
1371                         max_tc = dcb_rx_conf->dcb_tc[i];
1372         }
1373         *tc = max_tc + 1;
1374         if (*tc != hw->dcb_info.num_tc)
1375                 *changed = true;
1376
1377         /*
1378          * We ensure that dcb information can be reconfigured
1379          * after the hns3_priority_flow_ctrl_set function is called.
1380          */
1381         if (hw->current_mode != HNS3_FC_FULL)
1382                 *changed = true;
1383         pfc_en = RTE_LEN2MASK((uint8_t)dcb_rx_conf->nb_tcs, uint8_t);
1384         if (hw->dcb_info.pfc_en != pfc_en)
1385                 *changed = true;
1386 }
1387
1388 static int
1389 hns3_dcb_info_cfg(struct hns3_adapter *hns)
1390 {
1391         struct rte_eth_dcb_rx_conf *dcb_rx_conf;
1392         struct hns3_pf *pf = &hns->pf;
1393         struct hns3_hw *hw = &hns->hw;
1394         uint8_t tc_bw, bw_rest;
1395         uint8_t i, j;
1396         int ret;
1397
1398         dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
1399         pf->local_max_tc = (uint8_t)dcb_rx_conf->nb_tcs;
1400         pf->pfc_max = (uint8_t)dcb_rx_conf->nb_tcs;
1401
1402         /* Config pg0 */
1403         memset(hw->dcb_info.pg_info, 0,
1404                sizeof(struct hns3_pg_info) * HNS3_PG_NUM);
1405         hw->dcb_info.pg_dwrr[0] = BW_MAX_PERCENT;
1406         hw->dcb_info.pg_info[0].pg_id = 0;
1407         hw->dcb_info.pg_info[0].pg_sch_mode = HNS3_SCH_MODE_DWRR;
1408         hw->dcb_info.pg_info[0].bw_limit = hw->max_tm_rate;
1409         hw->dcb_info.pg_info[0].tc_bit_map = hw->hw_tc_map;
1410
1411         /* Each valid tc has the same bw by default */
1412         tc_bw = BW_MAX_PERCENT / hw->dcb_info.num_tc;
1413         for (i = 0; i < hw->dcb_info.num_tc; i++)
1414                 hw->dcb_info.pg_info[0].tc_dwrr[i] = tc_bw;
1415         /* To ensure the sum of tc_dwrr is equal to 100 */
1416         bw_rest = BW_MAX_PERCENT % hw->dcb_info.num_tc;
1417         for (j = 0; j < bw_rest; j++)
1418                 hw->dcb_info.pg_info[0].tc_dwrr[j]++;
1419         for (; i < dcb_rx_conf->nb_tcs; i++)
1420                 hw->dcb_info.pg_info[0].tc_dwrr[i] = 0;
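        /*
         * Illustration (not part of the original source): with num_tc = 3,
         * tc_bw = 33 and bw_rest = 1, so tc_dwrr becomes {34, 33, 33} and
         * the weights still sum to BW_MAX_PERCENT.
         */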
1421
1422         /* All tcs map to pg0 */
1423         memset(hw->dcb_info.tc_info, 0,
1424                sizeof(struct hns3_tc_info) * HNS3_MAX_TC_NUM);
1425         for (i = 0; i < hw->dcb_info.num_tc; i++) {
1426                 hw->dcb_info.tc_info[i].tc_id = i;
1427                 hw->dcb_info.tc_info[i].tc_sch_mode = HNS3_SCH_MODE_DWRR;
1428                 hw->dcb_info.tc_info[i].pgid = 0;
1429                 hw->dcb_info.tc_info[i].bw_limit =
1430                                         hw->dcb_info.pg_info[0].bw_limit;
1431         }
1432
1433         for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
1434                 hw->dcb_info.prio_tc[i] = dcb_rx_conf->dcb_tc[i];
1435
1436         ret = hns3_dcb_update_tc_queue_mapping(hw, hw->data->nb_rx_queues,
1437                                                hw->data->nb_tx_queues);
1438         if (ret)
1439                 hns3_err(hw, "update tc queue mapping failed, ret = %d.", ret);
1440
1441         return ret;
1442 }
1443
1444 static int
1445 hns3_dcb_info_update(struct hns3_adapter *hns, uint8_t num_tc)
1446 {
1447         struct hns3_pf *pf = &hns->pf;
1448         struct hns3_hw *hw = &hns->hw;
1449         uint16_t nb_rx_q = hw->data->nb_rx_queues;
1450         uint16_t nb_tx_q = hw->data->nb_tx_queues;
1451         uint8_t bit_map = 0;
1452         uint8_t i;
1453
1454         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
1455             hw->dcb_info.num_pg != 1)
1456                 return -EINVAL;
1457
1458         if (nb_rx_q < num_tc) {
1459                 hns3_err(hw, "number of Rx queues(%d) is less than tcs(%d).",
1460                          nb_rx_q, num_tc);
1461                 return -EINVAL;
1462         }
1463
1464         if (nb_tx_q < num_tc) {
1465                 hns3_err(hw, "number of Tx queues(%d) is less than tcs(%d).",
1466                          nb_tx_q, num_tc);
1467                 return -EINVAL;
1468         }
1469
1470         /* Non-contiguous TCs are currently not supported */
1471         hw->dcb_info.num_tc = num_tc;
1472         for (i = 0; i < hw->dcb_info.num_tc; i++)
1473                 bit_map |= BIT(i);
1474
1475         if (!bit_map) {
1476                 bit_map = 1;
1477                 hw->dcb_info.num_tc = 1;
1478         }
1479         hw->hw_tc_map = bit_map;
1480
1481         return hns3_dcb_info_cfg(hns);
1482 }
1483
1484 static int
1485 hns3_dcb_hw_configure(struct hns3_adapter *hns)
1486 {
1487         struct rte_eth_dcb_rx_conf *dcb_rx_conf;
1488         struct hns3_pf *pf = &hns->pf;
1489         struct hns3_hw *hw = &hns->hw;
1490         enum hns3_fc_status fc_status = hw->current_fc_status;
1491         enum hns3_fc_mode current_mode = hw->current_mode;
1492         uint8_t hw_pfc_map = hw->dcb_info.hw_pfc_map;
1493         int ret, status;
1494
1495         if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
1496             pf->tx_sch_mode != HNS3_FLAG_VNET_BASE_SCH_MODE)
1497                 return -ENOTSUP;
1498
1499         ret = hns3_dcb_schd_setup_hw(hw);
1500         if (ret) {
1501                 hns3_err(hw, "dcb schedule configure failed! ret = %d", ret);
1502                 return ret;
1503         }
1504
1505         if (hw->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
1506                 dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
1507                 if (dcb_rx_conf->nb_tcs == 0)
1508                         hw->dcb_info.pfc_en = 1; /* tc0 only */
1509                 else
1510                         hw->dcb_info.pfc_en =
1511                         RTE_LEN2MASK((uint8_t)dcb_rx_conf->nb_tcs, uint8_t);
1512
1513                 hw->dcb_info.hw_pfc_map =
1514                                 hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en);
1515
1516                 ret = hns3_buffer_alloc(hw);
1517                 if (ret)
1518                         return ret;
1519
1520                 hw->current_fc_status = HNS3_FC_STATUS_PFC;
1521                 hw->current_mode = HNS3_FC_FULL;
1522                 ret = hns3_dcb_pause_setup_hw(hw);
1523                 if (ret) {
1524                         hns3_err(hw, "setup pfc failed! ret = %d", ret);
1525                         goto pfc_setup_fail;
1526                 }
1527         } else {
1528                 /*
1529                  * Although dcb_capability_en lacks the ETH_DCB_PFC_SUPPORT
1530                  * flag, the DCB information, such as the number of TCs, is
1531                  * still configured. Therefore, the packet buffer allocation
1532                  * needs to be refreshed.
1533                  */
1534                 ret = hns3_buffer_alloc(hw);
1535                 if (ret)
1536                         return ret;
1537         }
1538
1539         return 0;
1540
1541 pfc_setup_fail:
1542         hw->current_mode = current_mode;
1543         hw->current_fc_status = fc_status;
1544         hw->dcb_info.hw_pfc_map = hw_pfc_map;
1545         status = hns3_buffer_alloc(hw);
1546         if (status)
1547                 hns3_err(hw, "recover packet buffer fail! status = %d", status);
1548
1549         return ret;
1550 }
1551
1552 /*
1553  * hns3_dcb_configure - setup dcb related config
1554  * @hns: pointer to hns3 adapter
1555  * Returns 0 on success, negative value on failure.
1556  */
1557 int
1558 hns3_dcb_configure(struct hns3_adapter *hns)
1559 {
1560         struct hns3_hw *hw = &hns->hw;
1561         bool map_changed = false;
1562         uint8_t num_tc = 0;
1563         int ret;
1564
1565         hns3_dcb_cfg_validate(hns, &num_tc, &map_changed);
1566         if (map_changed || rte_atomic16_read(&hw->reset.resetting)) {
1567                 ret = hns3_dcb_info_update(hns, num_tc);
1568                 if (ret) {
1569                         hns3_err(hw, "dcb info update failed: %d", ret);
1570                         return ret;
1571                 }
1572
1573                 ret = hns3_dcb_hw_configure(hns);
1574                 if (ret) {
1575                         hns3_err(hw, "dcb hw configure failed: %d", ret);
1576                         return ret;
1577                 }
1578         }
1579
1580         return 0;
1581 }
1582
1583 int
1584 hns3_dcb_init_hw(struct hns3_hw *hw)
1585 {
1586         int ret;
1587
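        /* Program the DCB scheduler first, then the pause/PFC parameters. */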
1588         ret = hns3_dcb_schd_setup_hw(hw);
1589         if (ret) {
1590                 hns3_err(hw, "dcb schedule setup failed: %d", ret);
1591                 return ret;
1592         }
1593
1594         ret = hns3_dcb_pause_setup_hw(hw);
1595         if (ret)
1596                 hns3_err(hw, "PAUSE setup failed: %d", ret);
1597
1598         return ret;
1599 }
1600
1601 int
1602 hns3_dcb_init(struct hns3_hw *hw)
1603 {
1604         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1605         struct hns3_pf *pf = &hns->pf;
1606         uint16_t default_tqp_num;
1607         int ret;
1608
1609         PMD_INIT_FUNC_TRACE();
1610
1611         /*
1612          * Based on the 'adapter_state' flag, the following branch is only
1613          * executed to initialize the default DCB configuration during the
1614          * driver initialization process. Because the driver saves the
1615          * DCB-related information before a reset is triggered, the re-init
1616          * stage of the reset process must not enter this branch; otherwise
1617          * that information would be overwritten.
1618          */
1619         if (hw->adapter_state == HNS3_NIC_UNINITIALIZED) {
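                /*
                 * Default to no flow control and use the default pause
                 * transmission time.
                 */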
1620                 hw->requested_mode = HNS3_FC_NONE;
1621                 hw->current_mode = hw->requested_mode;
1622                 pf->pause_time = HNS3_DEFAULT_PAUSE_TRANS_TIME;
1623                 hw->current_fc_status = HNS3_FC_STATUS_NONE;
1624
1625                 ret = hns3_dcb_info_init(hw);
1626                 if (ret) {
1627                         hns3_err(hw, "dcb info init failed, ret = %d.", ret);
1628                         return ret;
1629                 }
1630
1631                 /*
1632                  * The number of queues configured by default cannot exceed
1633                  * the maximum number of queues for a single TC.
1634                  */
1635                 default_tqp_num = RTE_MIN(hw->rss_size_max,
1636                                           hw->tqps_num / hw->dcb_info.num_tc);
1637                 ret = hns3_dcb_update_tc_queue_mapping(hw, default_tqp_num,
1638                                                        default_tqp_num);
1639                 if (ret) {
1640                         hns3_err(hw,
1641                                  "update tc queue mapping failed, ret = %d.",
1642                                  ret);
1643                         return ret;
1644                 }
1645         }
1646
1647         /*
1648          * The following function configures the DCB hardware during both
1649          * the driver initialization process and the reset process. When a
1650          * reset occurs after initialization has finished, the driver
1651          * directly restores the DCB hardware configuration from the
1652          * DCB-related information maintained in software.
1653          */
1654         ret = hns3_dcb_init_hw(hw);
1655         if (ret) {
1656                 hns3_err(hw, "dcb init hardware failed, ret = %d.", ret);
1657                 return ret;
1658         }
1659
1660         return 0;
1661 }
1662
1663 static int
1664 hns3_update_queue_map_configure(struct hns3_adapter *hns)
1665 {
1666         struct hns3_hw *hw = &hns->hw;
1667         uint16_t nb_rx_q = hw->data->nb_rx_queues;
1668         uint16_t nb_tx_q = hw->data->nb_tx_queues;
1669         int ret;
1670
1671         ret = hns3_dcb_update_tc_queue_mapping(hw, nb_rx_q, nb_tx_q);
1672         if (ret) {
1673                 hns3_err(hw, "failed to update tc queue mapping, ret = %d.",
1674                          ret);
1675                 return ret;
1676         }
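        /*
         * Re-map each queue (nq) to its queue set (qs) so that the hardware
         * follows the updated TC/queue mapping.
         */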
1677         ret = hns3_q_to_qs_map(hw);
1678         if (ret)
1679                 hns3_err(hw, "failed to map nq to qs, ret = %d.", ret);
1680
1681         return ret;
1682 }
1683
1684 int
1685 hns3_dcb_cfg_update(struct hns3_adapter *hns)
1686 {
1687         struct hns3_hw *hw = &hns->hw;
1688         enum rte_eth_rx_mq_mode mq_mode = hw->data->dev_conf.rxmode.mq_mode;
1689         int ret;
1690
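        /*
         * The full DCB configuration is applied only when DCB is enabled in
         * the Rx multi-queue mode.
         */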
1691         if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) {
1692                 ret = hns3_dcb_configure(hns);
1693                 if (ret)
1694                         hns3_err(hw, "Failed to config dcb: %d", ret);
1695         } else {
1696                 /*
1697                  * Update the queue mapping without PFC configuration,
1698                  * because the queues may have been reconfigured by the user.
1699                  */
1700                 ret = hns3_update_queue_map_configure(hns);
1701                 if (ret)
1702                         hns3_err(hw,
1703                                  "Failed to update queue mapping configure: %d",
1704                                  ret);
1705         }
1706
1707         return ret;
1708 }
1709
1710 /*
1711  * hns3_dcb_pfc_enable - Enable priority flow control
1712  * @dev: pointer to ethernet device
1713  *
1714  * Configures the PFC settings for one priority.
1715  */
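/*
 * A minimal application-side sketch (field values are illustrative only) of
 * how this path is normally reached via the generic ethdev API:
 *
 *     struct rte_eth_pfc_conf pfc_conf = {
 *             .fc.mode = RTE_FC_FULL,
 *             .fc.pause_time = 0xffff,
 *             .priority = 0,
 *     };
 *     ret = rte_eth_dev_priority_flow_ctrl_set(port_id, &pfc_conf);
 */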
1716 int
1717 hns3_dcb_pfc_enable(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
1718 {
1719         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1720         struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
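        /*
         * Back up the current settings so that they can be restored if
         * enabling PFC fails.
         */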
1721         enum hns3_fc_status fc_status = hw->current_fc_status;
1722         enum hns3_fc_mode current_mode = hw->current_mode;
1723         uint8_t hw_pfc_map = hw->dcb_info.hw_pfc_map;
1724         uint8_t pfc_en = hw->dcb_info.pfc_en;
1725         uint8_t priority = pfc_conf->priority;
1726         uint16_t pause_time = pf->pause_time;
1727         int ret, status;
1728
1729         pf->pause_time = pfc_conf->fc.pause_time;
1730         hw->current_mode = hw->requested_mode;
1731         hw->current_fc_status = HNS3_FC_STATUS_PFC;
1732         hw->dcb_info.pfc_en |= BIT(priority);
1733         hw->dcb_info.hw_pfc_map =
1734                         hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en);
1735         ret = hns3_buffer_alloc(hw);
1736         if (ret)
1737                 goto pfc_setup_fail;
1738
1739         /*
1740          * The flow control mode of all UPs will be changed based on the
1741          * current_mode requested by the user.
1742          */
1743         ret = hns3_dcb_pause_setup_hw(hw);
1744         if (ret) {
1745                 hns3_err(hw, "enable pfc failed! ret = %d", ret);
1746                 goto pfc_setup_fail;
1747         }
1748
1749         return 0;
1750
1751 pfc_setup_fail:
1752         hw->current_mode = current_mode;
1753         hw->current_fc_status = fc_status;
1754         pf->pause_time = pause_time;
1755         hw->dcb_info.pfc_en = pfc_en;
1756         hw->dcb_info.hw_pfc_map = hw_pfc_map;
1757         status = hns3_buffer_alloc(hw);
1758         if (status)
1759                 hns3_err(hw, "recover packet buffer failed: %d", status);
1760
1761         return ret;
1762 }
1763
1764 /*
1765  * hns3_fc_enable - Enable MAC pause
1766  * @dev: pointer to ethernet device
1767  *
1768  * Configures the MAC pause settings.
1769  */
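/*
 * A minimal application-side sketch (field values are illustrative only) of
 * how this path is normally reached via the generic ethdev flow control API:
 *
 *     struct rte_eth_fc_conf fc_conf = {
 *             .mode = RTE_FC_FULL,
 *             .pause_time = 0xffff,
 *     };
 *     ret = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
 */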
1770 int
1771 hns3_fc_enable(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1772 {
1773         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1774         struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
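        /*
         * Back up the current settings so that they can be restored if
         * enabling MAC pause fails.
         */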
1775         enum hns3_fc_status fc_status = hw->current_fc_status;
1776         enum hns3_fc_mode current_mode = hw->current_mode;
1777         uint16_t pause_time = pf->pause_time;
1778         int ret;
1779
1780         pf->pause_time = fc_conf->pause_time;
1781         hw->current_mode = hw->requested_mode;
1782
1783         /*
1784          * When the flow control mode is configured as HNS3_FC_NONE,
1785          * current_fc_status is set to HNS3_FC_STATUS_NONE.
1786          */
1787         if (hw->current_mode == HNS3_FC_NONE)
1788                 hw->current_fc_status = HNS3_FC_STATUS_NONE;
1789         else
1790                 hw->current_fc_status = HNS3_FC_STATUS_MAC_PAUSE;
1791
1792         ret = hns3_dcb_pause_setup_hw(hw);
1793         if (ret) {
1794                 hns3_err(hw, "enable MAC Pause failed! ret = %d", ret);
1795                 goto setup_fc_fail;
1796         }
1797
1798         return 0;
1799
1800 setup_fc_fail:
1801         hw->current_mode = current_mode;
1802         hw->current_fc_status = fc_status;
1803         pf->pause_time = pause_time;
1804
1805         return ret;
1806 }