1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2019 Hisilicon Limited.
11 #include <rte_common.h>
12 #include <rte_ethdev.h>
14 #include "hns3_logs.h"
15 #include "hns3_regs.h"
16 #include "hns3_ethdev.h"
19 #define HNS3_SHAPER_BS_U_DEF 5
20 #define HNS3_SHAPER_BS_S_DEF 20
21 #define BW_MAX_PERCENT 100
22 #define HNS3_ETHER_MAX_RATE 100000
25 * hns3_shaper_para_calc: calculate ir parameter for the shaper
26 * @ir: Rate to be configured, its unit is Mbps
27 * @shaper_level: the shaper level. eg: port, pg, priority, queueset
28 * @shaper_para: shaper parameter of IR shaper
32 * IR_b * (2 ^ IR_u) * 8
33 * IR(Mbps) = ------------------------- * CLOCK(1000Mbps)
36 * @return: 0: calculate successful, negative: fail
39 hns3_shaper_para_calc(struct hns3_hw *hw, uint32_t ir, uint8_t shaper_level,
40 struct hns3_shaper_parameter *shaper_para)
42 #define SHAPER_DEFAULT_IR_B 126
43 #define DIVISOR_CLK (1000 * 8)
44 #define DIVISOR_IR_B_126 (126 * DIVISOR_CLK)
46 const uint16_t tick_array[HNS3_SHAPER_LVL_CNT] = {
47 6 * 256, /* Prioriy level */
48 6 * 32, /* Prioriy group level */
49 6 * 8, /* Port level */
50 6 * 256 /* Qset level */
52 uint8_t ir_u_calc = 0;
53 uint8_t ir_s_calc = 0;
59 if (shaper_level >= HNS3_SHAPER_LVL_CNT) {
61 "shaper_level(%d) is greater than HNS3_SHAPER_LVL_CNT(%d)",
62 shaper_level, HNS3_SHAPER_LVL_CNT);
66 if (ir > HNS3_ETHER_MAX_RATE) {
67 hns3_err(hw, "rate(%d) exceeds the rate driver supported "
68 "HNS3_ETHER_MAX_RATE(%d)", ir, HNS3_ETHER_MAX_RATE);
72 tick = tick_array[shaper_level];
75 * Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
76 * the formula is changed to:
78 * ir_calc = ---------------- * 1000
81 ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;
84 shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
85 } else if (ir_calc > ir) {
86 /* Increasing the denominator to select ir_s value */
89 ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
90 } while (ir_calc > ir);
93 shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
95 shaper_para->ir_b = (ir * tick * (1 << ir_s_calc) +
96 (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
99 * Increasing the numerator to select ir_u value. ir_u_calc will
100 * get maximum value when ir_calc is minimum and ir is maximum.
101 * ir_calc gets minimum value when tick is the maximum value.
102 * At the same time, value of ir_u_calc can only be increased up
103 * to eight after the while loop if the value of ir is equal
104 * to HNS3_ETHER_MAX_RATE.
109 numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
110 ir_calc = (numerator + (tick >> 1)) / tick;
111 } while (ir_calc < ir);
114 shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
119 * The maximum value of ir_u_calc in this branch is
120 * seven in all cases. Thus, value of denominator can
123 denominator = DIVISOR_CLK * (1 << ir_u_calc);
125 (ir * tick + (denominator >> 1)) / denominator;
129 shaper_para->ir_u = ir_u_calc;
130 shaper_para->ir_s = ir_s_calc;
136 hns3_fill_pri_array(struct hns3_hw *hw, uint8_t *pri, uint8_t pri_id)
138 #define HNS3_HALF_BYTE_BIT_OFFSET 4
139 uint8_t tc = hw->dcb_info.prio_tc[pri_id];
141 if (tc >= hw->dcb_info.num_tc)
145 * The register for priority has four bytes, the first bytes includes
146 * priority0 and priority1, the higher 4bit stands for priority1
147 * while the lower 4bit stands for priority0, as below:
148 * first byte: | pri_1 | pri_0 |
149 * second byte: | pri_3 | pri_2 |
150 * third byte: | pri_5 | pri_4 |
151 * fourth byte: | pri_7 | pri_6 |
153 pri[pri_id >> 1] |= tc << ((pri_id & 1) * HNS3_HALF_BYTE_BIT_OFFSET);
159 hns3_up_to_tc_map(struct hns3_hw *hw)
161 struct hns3_cmd_desc desc;
162 uint8_t *pri = (uint8_t *)desc.data;
166 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PRI_TO_TC_MAPPING, false);
168 for (pri_id = 0; pri_id < HNS3_MAX_USER_PRIO; pri_id++) {
169 ret = hns3_fill_pri_array(hw, pri, pri_id);
174 return hns3_cmd_send(hw, &desc, 1);
178 hns3_pg_to_pri_map_cfg(struct hns3_hw *hw, uint8_t pg_id, uint8_t pri_bit_map)
180 struct hns3_pg_to_pri_link_cmd *map;
181 struct hns3_cmd_desc desc;
183 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_TO_PRI_LINK, false);
185 map = (struct hns3_pg_to_pri_link_cmd *)desc.data;
188 map->pri_bit_map = pri_bit_map;
190 return hns3_cmd_send(hw, &desc, 1);
194 hns3_pg_to_pri_map(struct hns3_hw *hw)
196 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
197 struct hns3_pf *pf = &hns->pf;
198 struct hns3_pg_info *pg_info;
201 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
204 for (i = 0; i < hw->dcb_info.num_pg; i++) {
205 /* Cfg pg to priority mapping */
206 pg_info = &hw->dcb_info.pg_info[i];
207 ret = hns3_pg_to_pri_map_cfg(hw, i, pg_info->tc_bit_map);
216 hns3_qs_to_pri_map_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t pri)
218 struct hns3_qs_to_pri_link_cmd *map;
219 struct hns3_cmd_desc desc;
221 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_TO_PRI_LINK, false);
223 map = (struct hns3_qs_to_pri_link_cmd *)desc.data;
225 map->qs_id = rte_cpu_to_le_16(qs_id);
227 map->link_vld = HNS3_DCB_QS_PRI_LINK_VLD_MSK;
229 return hns3_cmd_send(hw, &desc, 1);
233 hns3_dcb_qs_weight_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t dwrr)
235 struct hns3_qs_weight_cmd *weight;
236 struct hns3_cmd_desc desc;
238 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_WEIGHT, false);
240 weight = (struct hns3_qs_weight_cmd *)desc.data;
242 weight->qs_id = rte_cpu_to_le_16(qs_id);
245 return hns3_cmd_send(hw, &desc, 1);
249 hns3_dcb_ets_tc_dwrr_cfg(struct hns3_hw *hw)
251 #define DEFAULT_TC_WEIGHT 1
252 #define DEFAULT_TC_OFFSET 14
253 struct hns3_ets_tc_weight_cmd *ets_weight;
254 struct hns3_cmd_desc desc;
257 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_ETS_TC_WEIGHT, false);
258 ets_weight = (struct hns3_ets_tc_weight_cmd *)desc.data;
260 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
261 struct hns3_pg_info *pg_info;
263 ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;
265 if (!(hw->hw_tc_map & BIT(i)))
268 pg_info = &hw->dcb_info.pg_info[hw->dcb_info.tc_info[i].pgid];
269 ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
272 ets_weight->weight_offset = DEFAULT_TC_OFFSET;
274 return hns3_cmd_send(hw, &desc, 1);
278 hns3_dcb_pri_weight_cfg(struct hns3_hw *hw, uint8_t pri_id, uint8_t dwrr)
280 struct hns3_priority_weight_cmd *weight;
281 struct hns3_cmd_desc desc;
283 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PRI_WEIGHT, false);
285 weight = (struct hns3_priority_weight_cmd *)desc.data;
287 weight->pri_id = pri_id;
290 return hns3_cmd_send(hw, &desc, 1);
294 hns3_dcb_pg_weight_cfg(struct hns3_hw *hw, uint8_t pg_id, uint8_t dwrr)
296 struct hns3_pg_weight_cmd *weight;
297 struct hns3_cmd_desc desc;
299 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_WEIGHT, false);
301 weight = (struct hns3_pg_weight_cmd *)desc.data;
303 weight->pg_id = pg_id;
306 return hns3_cmd_send(hw, &desc, 1);
309 hns3_dcb_pg_schd_mode_cfg(struct hns3_hw *hw, uint8_t pg_id)
311 struct hns3_cmd_desc desc;
313 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_SCH_MODE_CFG, false);
315 if (hw->dcb_info.pg_info[pg_id].pg_sch_mode == HNS3_SCH_MODE_DWRR)
316 desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
320 desc.data[0] = rte_cpu_to_le_32(pg_id);
322 return hns3_cmd_send(hw, &desc, 1);
326 hns3_dcb_get_shapping_para(uint8_t ir_b, uint8_t ir_u, uint8_t ir_s,
327 uint8_t bs_b, uint8_t bs_s)
329 uint32_t shapping_para = 0;
331 hns3_dcb_set_field(shapping_para, IR_B, ir_b);
332 hns3_dcb_set_field(shapping_para, IR_U, ir_u);
333 hns3_dcb_set_field(shapping_para, IR_S, ir_s);
334 hns3_dcb_set_field(shapping_para, BS_B, bs_b);
335 hns3_dcb_set_field(shapping_para, BS_S, bs_s);
337 return shapping_para;
341 hns3_dcb_port_shaper_cfg(struct hns3_hw *hw)
343 struct hns3_port_shapping_cmd *shap_cfg_cmd;
344 struct hns3_shaper_parameter shaper_parameter;
345 uint32_t shapping_para;
346 uint32_t ir_u, ir_b, ir_s;
347 struct hns3_cmd_desc desc;
350 ret = hns3_shaper_para_calc(hw, hw->mac.link_speed,
351 HNS3_SHAPER_LVL_PORT, &shaper_parameter);
353 hns3_err(hw, "calculate shaper parameter failed: %d", ret);
357 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PORT_SHAPPING, false);
358 shap_cfg_cmd = (struct hns3_port_shapping_cmd *)desc.data;
360 ir_b = shaper_parameter.ir_b;
361 ir_u = shaper_parameter.ir_u;
362 ir_s = shaper_parameter.ir_s;
363 shapping_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
364 HNS3_SHAPER_BS_U_DEF,
365 HNS3_SHAPER_BS_S_DEF);
367 shap_cfg_cmd->port_shapping_para = rte_cpu_to_le_32(shapping_para);
369 return hns3_cmd_send(hw, &desc, 1);
373 hns3_dcb_pg_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,
374 uint8_t pg_id, uint32_t shapping_para)
376 struct hns3_pg_shapping_cmd *shap_cfg_cmd;
377 enum hns3_opcode_type opcode;
378 struct hns3_cmd_desc desc;
380 opcode = bucket ? HNS3_OPC_TM_PG_P_SHAPPING :
381 HNS3_OPC_TM_PG_C_SHAPPING;
382 hns3_cmd_setup_basic_desc(&desc, opcode, false);
384 shap_cfg_cmd = (struct hns3_pg_shapping_cmd *)desc.data;
386 shap_cfg_cmd->pg_id = pg_id;
388 shap_cfg_cmd->pg_shapping_para = rte_cpu_to_le_32(shapping_para);
390 return hns3_cmd_send(hw, &desc, 1);
394 hns3_dcb_pg_shaper_cfg(struct hns3_hw *hw)
396 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
397 struct hns3_shaper_parameter shaper_parameter;
398 struct hns3_pf *pf = &hns->pf;
399 uint32_t ir_u, ir_b, ir_s;
400 uint32_t shaper_para;
405 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
409 for (i = 0; i < hw->dcb_info.num_pg; i++) {
410 /* Calc shaper para */
411 ret = hns3_shaper_para_calc(hw,
412 hw->dcb_info.pg_info[i].bw_limit,
416 hns3_err(hw, "calculate shaper parameter failed: %d",
421 shaper_para = hns3_dcb_get_shapping_para(0, 0, 0,
422 HNS3_SHAPER_BS_U_DEF,
423 HNS3_SHAPER_BS_S_DEF);
425 ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, i,
429 "config PG CIR shaper parameter failed: %d",
434 ir_b = shaper_parameter.ir_b;
435 ir_u = shaper_parameter.ir_u;
436 ir_s = shaper_parameter.ir_s;
437 shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
438 HNS3_SHAPER_BS_U_DEF,
439 HNS3_SHAPER_BS_S_DEF);
441 ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, i,
445 "config PG PIR shaper parameter failed: %d",
455 hns3_dcb_qs_schd_mode_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t mode)
457 struct hns3_cmd_desc desc;
459 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_SCH_MODE_CFG, false);
461 if (mode == HNS3_SCH_MODE_DWRR)
462 desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
466 desc.data[0] = rte_cpu_to_le_32(qs_id);
468 return hns3_cmd_send(hw, &desc, 1);
472 hns3_dcb_pri_schd_mode_cfg(struct hns3_hw *hw, uint8_t pri_id)
474 struct hns3_cmd_desc desc;
476 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PRI_SCH_MODE_CFG, false);
478 if (hw->dcb_info.tc_info[pri_id].tc_sch_mode == HNS3_SCH_MODE_DWRR)
479 desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
483 desc.data[0] = rte_cpu_to_le_32(pri_id);
485 return hns3_cmd_send(hw, &desc, 1);
489 hns3_dcb_pri_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,
490 uint8_t pri_id, uint32_t shapping_para)
492 struct hns3_pri_shapping_cmd *shap_cfg_cmd;
493 enum hns3_opcode_type opcode;
494 struct hns3_cmd_desc desc;
496 opcode = bucket ? HNS3_OPC_TM_PRI_P_SHAPPING :
497 HNS3_OPC_TM_PRI_C_SHAPPING;
499 hns3_cmd_setup_basic_desc(&desc, opcode, false);
501 shap_cfg_cmd = (struct hns3_pri_shapping_cmd *)desc.data;
503 shap_cfg_cmd->pri_id = pri_id;
505 shap_cfg_cmd->pri_shapping_para = rte_cpu_to_le_32(shapping_para);
507 return hns3_cmd_send(hw, &desc, 1);
511 hns3_dcb_pri_tc_base_shaper_cfg(struct hns3_hw *hw)
513 struct hns3_shaper_parameter shaper_parameter;
514 uint32_t ir_u, ir_b, ir_s;
515 uint32_t shaper_para;
518 for (i = 0; i < hw->dcb_info.num_tc; i++) {
519 ret = hns3_shaper_para_calc(hw,
520 hw->dcb_info.tc_info[i].bw_limit,
524 hns3_err(hw, "calculate shaper parameter failed: %d",
529 shaper_para = hns3_dcb_get_shapping_para(0, 0, 0,
530 HNS3_SHAPER_BS_U_DEF,
531 HNS3_SHAPER_BS_S_DEF);
533 ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, i,
537 "config priority CIR shaper parameter failed: %d",
542 ir_b = shaper_parameter.ir_b;
543 ir_u = shaper_parameter.ir_u;
544 ir_s = shaper_parameter.ir_s;
545 shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
546 HNS3_SHAPER_BS_U_DEF,
547 HNS3_SHAPER_BS_S_DEF);
549 ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, i,
553 "config priority PIR shaper parameter failed: %d",
564 hns3_dcb_pri_shaper_cfg(struct hns3_hw *hw)
566 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
567 struct hns3_pf *pf = &hns->pf;
570 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
573 ret = hns3_dcb_pri_tc_base_shaper_cfg(hw);
575 hns3_err(hw, "config port shaper failed: %d", ret);
581 hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q)
583 struct hns3_rss_conf *rss_cfg = &hw->rss_info;
584 uint16_t rx_qnum_per_tc;
587 rx_qnum_per_tc = nb_rx_q / hw->num_tc;
588 rx_qnum_per_tc = RTE_MIN(hw->rss_size_max, rx_qnum_per_tc);
589 if (hw->alloc_rss_size != rx_qnum_per_tc) {
590 hns3_info(hw, "rss size changes from %u to %u",
591 hw->alloc_rss_size, rx_qnum_per_tc);
592 hw->alloc_rss_size = rx_qnum_per_tc;
594 hw->used_rx_queues = hw->num_tc * hw->alloc_rss_size;
597 * When rss size is changed, we need to update rss redirection table
598 * maintained by driver. Besides, during the entire reset process, we
599 * need to ensure that the rss table information are not overwritten
600 * and configured directly to the hardware in the RESET_STAGE_RESTORE
601 * stage of the reset process.
603 if (rte_atomic16_read(&hw->reset.resetting) == 0) {
604 for (i = 0; i < HNS3_RSS_IND_TBL_SIZE; i++)
605 rss_cfg->rss_indirection_tbl[i] =
606 i % hw->alloc_rss_size;
611 hns3_tc_queue_mapping_cfg(struct hns3_hw *hw, uint16_t nb_queue)
613 struct hns3_tc_queue_info *tc_queue;
616 hw->tx_qnum_per_tc = nb_queue / hw->num_tc;
617 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
618 tc_queue = &hw->tc_queue[i];
619 if (hw->hw_tc_map & BIT(i) && i < hw->num_tc) {
620 tc_queue->enable = true;
621 tc_queue->tqp_offset = i * hw->tx_qnum_per_tc;
622 tc_queue->tqp_count = hw->tx_qnum_per_tc;
625 /* Set to default queue if TC is disable */
626 tc_queue->enable = false;
627 tc_queue->tqp_offset = 0;
628 tc_queue->tqp_count = 0;
632 hw->used_tx_queues = hw->num_tc * hw->tx_qnum_per_tc;
636 hns3_dcb_update_tc_queue_mapping(struct hns3_hw *hw, uint16_t nb_rx_q,
639 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
640 struct hns3_pf *pf = &hns->pf;
642 hw->num_tc = hw->dcb_info.num_tc;
643 hns3_set_rss_size(hw, nb_rx_q);
644 hns3_tc_queue_mapping_cfg(hw, nb_tx_q);
647 memcpy(pf->prio_tc, hw->dcb_info.prio_tc, HNS3_MAX_USER_PRIO);
651 hns3_dcb_info_init(struct hns3_hw *hw)
653 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
654 struct hns3_pf *pf = &hns->pf;
657 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
658 hw->dcb_info.num_pg != 1)
661 /* Initializing PG information */
662 memset(hw->dcb_info.pg_info, 0,
663 sizeof(struct hns3_pg_info) * HNS3_PG_NUM);
664 for (i = 0; i < hw->dcb_info.num_pg; i++) {
665 hw->dcb_info.pg_dwrr[i] = i ? 0 : BW_MAX_PERCENT;
666 hw->dcb_info.pg_info[i].pg_id = i;
667 hw->dcb_info.pg_info[i].pg_sch_mode = HNS3_SCH_MODE_DWRR;
668 hw->dcb_info.pg_info[i].bw_limit = HNS3_ETHER_MAX_RATE;
673 hw->dcb_info.pg_info[i].tc_bit_map = hw->hw_tc_map;
674 for (k = 0; k < hw->dcb_info.num_tc; k++)
675 hw->dcb_info.pg_info[i].tc_dwrr[k] = BW_MAX_PERCENT;
678 /* All UPs mapping to TC0 */
679 for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
680 hw->dcb_info.prio_tc[i] = 0;
682 /* Initializing tc information */
683 memset(hw->dcb_info.tc_info, 0,
684 sizeof(struct hns3_tc_info) * HNS3_MAX_TC_NUM);
685 for (i = 0; i < hw->dcb_info.num_tc; i++) {
686 hw->dcb_info.tc_info[i].tc_id = i;
687 hw->dcb_info.tc_info[i].tc_sch_mode = HNS3_SCH_MODE_DWRR;
688 hw->dcb_info.tc_info[i].pgid = 0;
689 hw->dcb_info.tc_info[i].bw_limit =
690 hw->dcb_info.pg_info[0].bw_limit;
697 hns3_dcb_lvl2_schd_mode_cfg(struct hns3_hw *hw)
699 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
700 struct hns3_pf *pf = &hns->pf;
703 /* Only being config on TC-Based scheduler mode */
704 if (pf->tx_sch_mode == HNS3_FLAG_VNET_BASE_SCH_MODE)
707 for (i = 0; i < hw->dcb_info.num_pg; i++) {
708 ret = hns3_dcb_pg_schd_mode_cfg(hw, i);
717 hns3_dcb_lvl34_schd_mode_cfg(struct hns3_hw *hw)
719 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
720 struct hns3_pf *pf = &hns->pf;
724 if (pf->tx_sch_mode == HNS3_FLAG_TC_BASE_SCH_MODE) {
725 for (i = 0; i < hw->dcb_info.num_tc; i++) {
726 ret = hns3_dcb_pri_schd_mode_cfg(hw, i);
730 ret = hns3_dcb_qs_schd_mode_cfg(hw, i,
741 hns3_dcb_schd_mode_cfg(struct hns3_hw *hw)
745 ret = hns3_dcb_lvl2_schd_mode_cfg(hw);
747 hns3_err(hw, "config lvl2_schd_mode failed: %d", ret);
751 ret = hns3_dcb_lvl34_schd_mode_cfg(hw);
753 hns3_err(hw, "config lvl34_schd_mode failed: %d", ret);
759 hns3_dcb_pri_tc_base_dwrr_cfg(struct hns3_hw *hw)
761 struct hns3_pg_info *pg_info;
765 for (i = 0; i < hw->dcb_info.num_tc; i++) {
766 pg_info = &hw->dcb_info.pg_info[hw->dcb_info.tc_info[i].pgid];
767 dwrr = pg_info->tc_dwrr[i];
769 ret = hns3_dcb_pri_weight_cfg(hw, i, dwrr);
772 "fail to send priority weight cmd: %d, ret = %d",
777 ret = hns3_dcb_qs_weight_cfg(hw, i, BW_MAX_PERCENT);
779 hns3_err(hw, "fail to send qs_weight cmd: %d, ret = %d",
789 hns3_dcb_pri_dwrr_cfg(struct hns3_hw *hw)
791 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
792 struct hns3_pf *pf = &hns->pf;
796 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
799 ret = hns3_dcb_pri_tc_base_dwrr_cfg(hw);
803 if (!hns3_dev_dcb_supported(hw))
806 ret = hns3_dcb_ets_tc_dwrr_cfg(hw);
807 if (ret == -EOPNOTSUPP) {
808 version = hw->fw_version;
810 "fw %lu.%lu.%lu.%lu doesn't support ets tc weight cmd",
811 hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
812 HNS3_FW_VERSION_BYTE3_S),
813 hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
814 HNS3_FW_VERSION_BYTE2_S),
815 hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
816 HNS3_FW_VERSION_BYTE1_S),
817 hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
818 HNS3_FW_VERSION_BYTE0_S));
826 hns3_dcb_pg_dwrr_cfg(struct hns3_hw *hw)
828 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
829 struct hns3_pf *pf = &hns->pf;
833 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
837 for (i = 0; i < hw->dcb_info.num_pg; i++) {
839 ret = hns3_dcb_pg_weight_cfg(hw, i, hw->dcb_info.pg_dwrr[i]);
848 hns3_dcb_dwrr_cfg(struct hns3_hw *hw)
852 ret = hns3_dcb_pg_dwrr_cfg(hw);
854 hns3_err(hw, "config pg_dwrr failed: %d", ret);
858 ret = hns3_dcb_pri_dwrr_cfg(hw);
860 hns3_err(hw, "config pri_dwrr failed: %d", ret);
866 hns3_dcb_shaper_cfg(struct hns3_hw *hw)
870 ret = hns3_dcb_port_shaper_cfg(hw);
872 hns3_err(hw, "config port shaper failed: %d", ret);
876 ret = hns3_dcb_pg_shaper_cfg(hw);
878 hns3_err(hw, "config pg shaper failed: %d", ret);
882 return hns3_dcb_pri_shaper_cfg(hw);
886 hns3_q_to_qs_map_cfg(struct hns3_hw *hw, uint16_t q_id, uint16_t qs_id)
888 struct hns3_nq_to_qs_link_cmd *map;
889 struct hns3_cmd_desc desc;
891 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_NQ_TO_QS_LINK, false);
893 map = (struct hns3_nq_to_qs_link_cmd *)desc.data;
895 map->nq_id = rte_cpu_to_le_16(q_id);
896 map->qset_id = rte_cpu_to_le_16(qs_id | HNS3_DCB_Q_QS_LINK_VLD_MSK);
898 return hns3_cmd_send(hw, &desc, 1);
902 hns3_q_to_qs_map(struct hns3_hw *hw)
904 struct hns3_tc_queue_info *tc_queue;
909 for (i = 0; i < hw->num_tc; i++) {
910 tc_queue = &hw->tc_queue[i];
911 for (j = 0; j < tc_queue->tqp_count; j++) {
912 q_id = tc_queue->tqp_offset + j;
913 ret = hns3_q_to_qs_map_cfg(hw, q_id, i);
923 hns3_pri_q_qs_cfg(struct hns3_hw *hw)
925 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
926 struct hns3_pf *pf = &hns->pf;
930 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
933 /* Cfg qs -> pri mapping */
934 for (i = 0; i < hw->num_tc; i++) {
935 ret = hns3_qs_to_pri_map_cfg(hw, i, i);
937 hns3_err(hw, "qs_to_pri mapping fail: %d", ret);
942 /* Cfg q -> qs mapping */
943 ret = hns3_q_to_qs_map(hw);
945 hns3_err(hw, "nq_to_qs mapping fail: %d", ret);
951 hns3_dcb_map_cfg(struct hns3_hw *hw)
955 ret = hns3_up_to_tc_map(hw);
957 hns3_err(hw, "up_to_tc mapping fail: %d", ret);
961 ret = hns3_pg_to_pri_map(hw);
963 hns3_err(hw, "pri_to_pg mapping fail: %d", ret);
967 return hns3_pri_q_qs_cfg(hw);
971 hns3_dcb_schd_setup_hw(struct hns3_hw *hw)
975 /* Cfg dcb mapping */
976 ret = hns3_dcb_map_cfg(hw);
981 ret = hns3_dcb_shaper_cfg(hw);
986 ret = hns3_dcb_dwrr_cfg(hw);
990 /* Cfg schd mode for each level schd */
991 return hns3_dcb_schd_mode_cfg(hw);
995 hns3_pause_param_cfg(struct hns3_hw *hw, const uint8_t *addr,
996 uint8_t pause_trans_gap, uint16_t pause_trans_time)
998 struct hns3_cfg_pause_param_cmd *pause_param;
999 struct hns3_cmd_desc desc;
1001 pause_param = (struct hns3_cfg_pause_param_cmd *)desc.data;
1003 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PARA, false);
1005 memcpy(pause_param->mac_addr, addr, RTE_ETHER_ADDR_LEN);
1006 memcpy(pause_param->mac_addr_extra, addr, RTE_ETHER_ADDR_LEN);
1007 pause_param->pause_trans_gap = pause_trans_gap;
1008 pause_param->pause_trans_time = rte_cpu_to_le_16(pause_trans_time);
1010 return hns3_cmd_send(hw, &desc, 1);
1014 hns3_pause_addr_cfg(struct hns3_hw *hw, const uint8_t *mac_addr)
1016 struct hns3_cfg_pause_param_cmd *pause_param;
1017 struct hns3_cmd_desc desc;
1018 uint16_t trans_time;
1022 pause_param = (struct hns3_cfg_pause_param_cmd *)desc.data;
1024 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PARA, true);
1026 ret = hns3_cmd_send(hw, &desc, 1);
1030 trans_gap = pause_param->pause_trans_gap;
1031 trans_time = rte_le_to_cpu_16(pause_param->pause_trans_time);
1033 return hns3_pause_param_cfg(hw, mac_addr, trans_gap, trans_time);
1037 hns3_pause_param_setup_hw(struct hns3_hw *hw, uint16_t pause_time)
1039 #define PAUSE_TIME_DIV_BY 2
1040 #define PAUSE_TIME_MIN_VALUE 0x4
1042 struct hns3_mac *mac = &hw->mac;
1043 uint8_t pause_trans_gap;
1046 * Pause transmit gap must be less than "pause_time / 2", otherwise
1047 * the behavior of MAC is undefined.
1049 if (pause_time > PAUSE_TIME_DIV_BY * HNS3_DEFAULT_PAUSE_TRANS_GAP)
1050 pause_trans_gap = HNS3_DEFAULT_PAUSE_TRANS_GAP;
1051 else if (pause_time >= PAUSE_TIME_MIN_VALUE &&
1052 pause_time <= PAUSE_TIME_DIV_BY * HNS3_DEFAULT_PAUSE_TRANS_GAP)
1053 pause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1;
1055 hns3_warn(hw, "pause_time(%d) is adjusted to 4", pause_time);
1056 pause_time = PAUSE_TIME_MIN_VALUE;
1057 pause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1;
1060 return hns3_pause_param_cfg(hw, mac->mac_addr,
1061 pause_trans_gap, pause_time);
1065 hns3_mac_pause_en_cfg(struct hns3_hw *hw, bool tx, bool rx)
1067 struct hns3_cmd_desc desc;
1069 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PAUSE_EN, false);
1071 desc.data[0] = rte_cpu_to_le_32((tx ? HNS3_TX_MAC_PAUSE_EN_MSK : 0) |
1072 (rx ? HNS3_RX_MAC_PAUSE_EN_MSK : 0));
1074 return hns3_cmd_send(hw, &desc, 1);
1078 hns3_pfc_pause_en_cfg(struct hns3_hw *hw, uint8_t pfc_bitmap, bool tx, bool rx)
1080 struct hns3_cmd_desc desc;
1081 struct hns3_pfc_en_cmd *pfc = (struct hns3_pfc_en_cmd *)desc.data;
1083 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PFC_PAUSE_EN, false);
1085 pfc->tx_rx_en_bitmap = (uint8_t)((tx ? HNS3_TX_MAC_PAUSE_EN_MSK : 0) |
1086 (rx ? HNS3_RX_MAC_PAUSE_EN_MSK : 0));
1088 pfc->pri_en_bitmap = pfc_bitmap;
1090 return hns3_cmd_send(hw, &desc, 1);
1094 hns3_qs_bp_cfg(struct hns3_hw *hw, uint8_t tc, uint8_t grp_id, uint32_t bit_map)
1096 struct hns3_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
1097 struct hns3_cmd_desc desc;
1099 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_BP_TO_QSET_MAPPING, false);
1101 bp_to_qs_map_cmd = (struct hns3_bp_to_qs_map_cmd *)desc.data;
1103 bp_to_qs_map_cmd->tc_id = tc;
1104 bp_to_qs_map_cmd->qs_group_id = grp_id;
1105 bp_to_qs_map_cmd->qs_bit_map = rte_cpu_to_le_32(bit_map);
1107 return hns3_cmd_send(hw, &desc, 1);
1111 hns3_get_rx_tx_en_status(struct hns3_hw *hw, bool *tx_en, bool *rx_en)
1113 switch (hw->current_mode) {
1118 case HNS3_FC_RX_PAUSE:
1122 case HNS3_FC_TX_PAUSE:
1138 hns3_mac_pause_setup_hw(struct hns3_hw *hw)
1142 if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)
1143 hns3_get_rx_tx_en_status(hw, &tx_en, &rx_en);
1149 return hns3_mac_pause_en_cfg(hw, tx_en, rx_en);
1153 hns3_pfc_setup_hw(struct hns3_hw *hw)
1157 if (hw->current_fc_status == HNS3_FC_STATUS_PFC)
1158 hns3_get_rx_tx_en_status(hw, &tx_en, &rx_en);
1164 return hns3_pfc_pause_en_cfg(hw, hw->dcb_info.pfc_en, tx_en, rx_en);
1168 * Each TC has 1024 queue sets for backpressure; they are divided into
1169 * 32 groups, each group containing 32 queue sets, which can be
1170 * represented by a uint32_t bitmap.
1173 hns3_bp_setup_hw(struct hns3_hw *hw, uint8_t tc)
1179 for (i = 0; i < HNS3_BP_GRP_NUM; i++) {
1180 uint8_t grp, sub_grp;
1183 grp = hns3_get_field(tc, HNS3_BP_GRP_ID_M, HNS3_BP_GRP_ID_S);
1184 sub_grp = hns3_get_field(tc, HNS3_BP_SUB_GRP_ID_M,
1185 HNS3_BP_SUB_GRP_ID_S);
1187 qs_bitmap |= (1 << sub_grp);
1189 ret = hns3_qs_bp_cfg(hw, tc, i, qs_bitmap);
1198 hns3_dcb_bp_setup(struct hns3_hw *hw)
1202 for (i = 0; i < hw->dcb_info.num_tc; i++) {
1203 ret = hns3_bp_setup_hw(hw, i);
1212 hns3_dcb_pause_setup_hw(struct hns3_hw *hw)
1214 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1215 struct hns3_pf *pf = &hns->pf;
1218 ret = hns3_pause_param_setup_hw(hw, pf->pause_time);
1220 hns3_err(hw, "Fail to set pause parameter. ret = %d", ret);
1224 ret = hns3_mac_pause_setup_hw(hw);
1226 hns3_err(hw, "Fail to setup MAC pause. ret = %d", ret);
1230 /* Only DCB-supported dev supports qset back pressure and pfc cmd */
1231 if (!hns3_dev_dcb_supported(hw))
1234 ret = hns3_pfc_setup_hw(hw);
1236 hns3_err(hw, "config pfc failed! ret = %d", ret);
1240 return hns3_dcb_bp_setup(hw);
1244 hns3_dcb_undrop_tc_map(struct hns3_hw *hw, uint8_t pfc_en)
1246 uint8_t pfc_map = 0;
1250 prio_tc = hw->dcb_info.prio_tc;
1251 for (i = 0; i < hw->dcb_info.num_tc; i++) {
1252 for (j = 0; j < HNS3_MAX_USER_PRIO; j++) {
1253 if (prio_tc[j] == i && pfc_en & BIT(j)) {
1264 hns3_dcb_cfg_validate(struct hns3_adapter *hns, uint8_t *tc, bool *changed)
1266 struct rte_eth_dcb_rx_conf *dcb_rx_conf;
1267 struct hns3_hw *hw = &hns->hw;
1272 dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
1273 for (i = 0; i < HNS3_MAX_USER_PRIO; i++) {
1274 if (dcb_rx_conf->dcb_tc[i] != hw->dcb_info.prio_tc[i])
1277 if (dcb_rx_conf->dcb_tc[i] > max_tc)
1278 max_tc = dcb_rx_conf->dcb_tc[i];
1281 if (*tc != hw->dcb_info.num_tc)
1285 * We ensure that dcb information can be reconfigured
1286 * after the hns3_priority_flow_ctrl_set function called.
1288 if (hw->current_mode != HNS3_FC_FULL)
1290 pfc_en = RTE_LEN2MASK((uint8_t)dcb_rx_conf->nb_tcs, uint8_t);
1291 if (hw->dcb_info.pfc_en != pfc_en)
1296 hns3_dcb_info_cfg(struct hns3_adapter *hns)
1298 struct rte_eth_dcb_rx_conf *dcb_rx_conf;
1299 struct hns3_pf *pf = &hns->pf;
1300 struct hns3_hw *hw = &hns->hw;
1301 uint8_t tc_bw, bw_rest;
1304 dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
1305 pf->local_max_tc = (uint8_t)dcb_rx_conf->nb_tcs;
1306 pf->pfc_max = (uint8_t)dcb_rx_conf->nb_tcs;
1309 memset(hw->dcb_info.pg_info, 0,
1310 sizeof(struct hns3_pg_info) * HNS3_PG_NUM);
1311 hw->dcb_info.pg_dwrr[0] = BW_MAX_PERCENT;
1312 hw->dcb_info.pg_info[0].pg_id = 0;
1313 hw->dcb_info.pg_info[0].pg_sch_mode = HNS3_SCH_MODE_DWRR;
1314 hw->dcb_info.pg_info[0].bw_limit = HNS3_ETHER_MAX_RATE;
1315 hw->dcb_info.pg_info[0].tc_bit_map = hw->hw_tc_map;
1317 /* Each tc has same bw for valid tc by default */
1318 tc_bw = BW_MAX_PERCENT / hw->dcb_info.num_tc;
1319 for (i = 0; i < hw->dcb_info.num_tc; i++)
1320 hw->dcb_info.pg_info[0].tc_dwrr[i] = tc_bw;
1321 /* To ensure the sum of tc_dwrr is equal to 100 */
1322 bw_rest = BW_MAX_PERCENT % hw->dcb_info.num_tc;
1323 for (j = 0; j < bw_rest; j++)
1324 hw->dcb_info.pg_info[0].tc_dwrr[j]++;
1325 for (; i < dcb_rx_conf->nb_tcs; i++)
1326 hw->dcb_info.pg_info[0].tc_dwrr[i] = 0;
1328 /* All tcs map to pg0 */
1329 memset(hw->dcb_info.tc_info, 0,
1330 sizeof(struct hns3_tc_info) * HNS3_MAX_TC_NUM);
1331 for (i = 0; i < hw->dcb_info.num_tc; i++) {
1332 hw->dcb_info.tc_info[i].tc_id = i;
1333 hw->dcb_info.tc_info[i].tc_sch_mode = HNS3_SCH_MODE_DWRR;
1334 hw->dcb_info.tc_info[i].pgid = 0;
1335 hw->dcb_info.tc_info[i].bw_limit =
1336 hw->dcb_info.pg_info[0].bw_limit;
1339 for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
1340 hw->dcb_info.prio_tc[i] = dcb_rx_conf->dcb_tc[i];
1342 hns3_dcb_update_tc_queue_mapping(hw, hw->data->nb_rx_queues,
1343 hw->data->nb_tx_queues);
1347 hns3_dcb_info_update(struct hns3_adapter *hns, uint8_t num_tc)
1349 struct hns3_pf *pf = &hns->pf;
1350 struct hns3_hw *hw = &hns->hw;
1351 uint16_t nb_rx_q = hw->data->nb_rx_queues;
1352 uint16_t nb_tx_q = hw->data->nb_tx_queues;
1353 uint8_t bit_map = 0;
1356 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
1357 hw->dcb_info.num_pg != 1)
1360 if (nb_rx_q < num_tc) {
1361 hns3_err(hw, "number of Rx queues(%d) is less than tcs(%d).",
1366 if (nb_tx_q < num_tc) {
1367 hns3_err(hw, "number of Tx queues(%d) is less than tcs(%d).",
1372 /* Currently not support uncontinuous tc */
1373 hw->dcb_info.num_tc = num_tc;
1374 for (i = 0; i < hw->dcb_info.num_tc; i++)
1379 hw->dcb_info.num_tc = 1;
1381 hw->hw_tc_map = bit_map;
1382 hns3_dcb_info_cfg(hns);
1388 hns3_dcb_hw_configure(struct hns3_adapter *hns)
1390 struct rte_eth_dcb_rx_conf *dcb_rx_conf;
1391 struct hns3_pf *pf = &hns->pf;
1392 struct hns3_hw *hw = &hns->hw;
1393 enum hns3_fc_status fc_status = hw->current_fc_status;
1394 enum hns3_fc_mode current_mode = hw->current_mode;
1395 uint8_t hw_pfc_map = hw->dcb_info.hw_pfc_map;
1398 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
1399 pf->tx_sch_mode != HNS3_FLAG_VNET_BASE_SCH_MODE)
1402 ret = hns3_dcb_schd_setup_hw(hw);
1404 hns3_err(hw, "dcb schdule configure failed! ret = %d", ret);
1408 if (hw->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
1409 dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
1410 if (dcb_rx_conf->nb_tcs == 0)
1411 hw->dcb_info.pfc_en = 1; /* tc0 only */
1413 hw->dcb_info.pfc_en =
1414 RTE_LEN2MASK((uint8_t)dcb_rx_conf->nb_tcs, uint8_t);
1416 hw->dcb_info.hw_pfc_map =
1417 hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en);
1419 ret = hns3_buffer_alloc(hw);
1423 hw->current_fc_status = HNS3_FC_STATUS_PFC;
1424 hw->current_mode = HNS3_FC_FULL;
1425 ret = hns3_dcb_pause_setup_hw(hw);
1427 hns3_err(hw, "setup pfc failed! ret = %d", ret);
1428 goto pfc_setup_fail;
1432 * Although dcb_capability_en is lack of ETH_DCB_PFC_SUPPORT
1433 * flag, the DCB information is configured, such as tc numbers.
1434 * Therefore, refreshing the allocation of packet buffer is
1437 ret = hns3_buffer_alloc(hw);
1445 hw->current_mode = current_mode;
1446 hw->current_fc_status = fc_status;
1447 hw->dcb_info.hw_pfc_map = hw_pfc_map;
1448 status = hns3_buffer_alloc(hw);
1450 hns3_err(hw, "recover packet buffer fail! status = %d", status);
1456 * hns3_dcb_configure - setup dcb related config
1457 * @hns: pointer to hns3 adapter
1458 * Returns 0 on success, negative value on failure.
1461 hns3_dcb_configure(struct hns3_adapter *hns)
1463 struct hns3_hw *hw = &hns->hw;
1464 bool map_changed = false;
1468 hns3_dcb_cfg_validate(hns, &num_tc, &map_changed);
1469 if (map_changed || rte_atomic16_read(&hw->reset.resetting)) {
1470 ret = hns3_dcb_info_update(hns, num_tc);
1472 hns3_err(hw, "dcb info update failed: %d", ret);
1476 ret = hns3_dcb_hw_configure(hns);
1478 hns3_err(hw, "dcb sw configure failed: %d", ret);
1487 hns3_dcb_init_hw(struct hns3_hw *hw)
1491 ret = hns3_dcb_schd_setup_hw(hw);
1493 hns3_err(hw, "dcb schedule setup failed: %d", ret);
1497 ret = hns3_dcb_pause_setup_hw(hw);
1499 hns3_err(hw, "PAUSE setup failed: %d", ret);
1505 hns3_dcb_init(struct hns3_hw *hw)
1507 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1508 struct hns3_pf *pf = &hns->pf;
1511 PMD_INIT_FUNC_TRACE();
1514 * According to the 'adapter_state' identifier, the following branch
1515 * is only executed to initialize default configurations of dcb during
1516 * the initializing driver process. Due to driver saving dcb-related
1517 * information before reset triggered, the reinit dev stage of the
1518 * reset process can not access to the branch, or those information
1521 if (hw->adapter_state == HNS3_NIC_UNINITIALIZED) {
1522 hw->requested_mode = HNS3_FC_NONE;
1523 hw->current_mode = hw->requested_mode;
1524 pf->pause_time = HNS3_DEFAULT_PAUSE_TRANS_TIME;
1525 hw->current_fc_status = HNS3_FC_STATUS_NONE;
1527 ret = hns3_dcb_info_init(hw);
1529 hns3_err(hw, "dcb info init failed: %d", ret);
1532 hns3_dcb_update_tc_queue_mapping(hw, hw->tqps_num,
1537 * DCB hardware will be configured by following the function during
1538 * the initializing driver process and the reset process. However,
1539 * driver will restore directly configurations of dcb hardware based
1540 * on dcb-related information soft maintained when driver
1541 * initialization has finished and reset is coming.
1543 ret = hns3_dcb_init_hw(hw);
1545 hns3_err(hw, "dcb init hardware failed: %d", ret);
1553 hns3_update_queue_map_configure(struct hns3_adapter *hns)
1555 struct hns3_hw *hw = &hns->hw;
1556 uint16_t nb_rx_q = hw->data->nb_rx_queues;
1557 uint16_t nb_tx_q = hw->data->nb_tx_queues;
1560 hns3_dcb_update_tc_queue_mapping(hw, nb_rx_q, nb_tx_q);
1561 ret = hns3_q_to_qs_map(hw);
1563 hns3_err(hw, "failed to map nq to qs! ret = %d", ret);
1569 hns3_dcb_cfg_update(struct hns3_adapter *hns)
1571 struct hns3_hw *hw = &hns->hw;
1572 enum rte_eth_rx_mq_mode mq_mode = hw->data->dev_conf.rxmode.mq_mode;
1575 if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) {
1576 ret = hns3_dcb_configure(hns);
1578 hns3_err(hw, "Failed to config dcb: %d", ret);
1581 * Update queue map without PFC configuration,
1582 * due to queues reconfigured by user.
1584 ret = hns3_update_queue_map_configure(hns);
1587 "Failed to update queue mapping configure: %d",
1595 * hns3_dcb_pfc_enable - Enable priority flow control
1596 * @dev: pointer to ethernet device
1598 * Configures the PFC settings for one priority.
1601 hns3_dcb_pfc_enable(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
1603 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1604 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1605 enum hns3_fc_status fc_status = hw->current_fc_status;
1606 enum hns3_fc_mode current_mode = hw->current_mode;
1607 uint8_t hw_pfc_map = hw->dcb_info.hw_pfc_map;
1608 uint8_t pfc_en = hw->dcb_info.pfc_en;
1609 uint8_t priority = pfc_conf->priority;
1610 uint16_t pause_time = pf->pause_time;
1613 pf->pause_time = pfc_conf->fc.pause_time;
1614 hw->current_mode = hw->requested_mode;
1615 hw->current_fc_status = HNS3_FC_STATUS_PFC;
1616 hw->dcb_info.pfc_en |= BIT(priority);
1617 hw->dcb_info.hw_pfc_map =
1618 hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en);
1619 ret = hns3_buffer_alloc(hw);
1621 goto pfc_setup_fail;
1624 * The flow control mode of all UPs will be changed based on
1625 * current_mode coming from user.
1627 ret = hns3_dcb_pause_setup_hw(hw);
1629 hns3_err(hw, "enable pfc failed! ret = %d", ret);
1630 goto pfc_setup_fail;
1636 hw->current_mode = current_mode;
1637 hw->current_fc_status = fc_status;
1638 pf->pause_time = pause_time;
1639 hw->dcb_info.pfc_en = pfc_en;
1640 hw->dcb_info.hw_pfc_map = hw_pfc_map;
1641 status = hns3_buffer_alloc(hw);
1643 hns3_err(hw, "recover packet buffer fail: %d", status);
1649 * hns3_fc_enable - Enable MAC pause
1650 * @dev: pointer to ethernet device
1652 * Configures the MAC pause settings.
1655 hns3_fc_enable(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1657 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1658 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1659 enum hns3_fc_status fc_status = hw->current_fc_status;
1660 enum hns3_fc_mode current_mode = hw->current_mode;
1661 uint16_t pause_time = pf->pause_time;
1664 pf->pause_time = fc_conf->pause_time;
1665 hw->current_mode = hw->requested_mode;
1668 * In fact, current_fc_status is HNS3_FC_STATUS_NONE when mode
1669 * of flow control is configured to be HNS3_FC_NONE.
1671 if (hw->current_mode == HNS3_FC_NONE)
1672 hw->current_fc_status = HNS3_FC_STATUS_NONE;
1674 hw->current_fc_status = HNS3_FC_STATUS_MAC_PAUSE;
1676 ret = hns3_dcb_pause_setup_hw(hw);
1678 hns3_err(hw, "enable MAC Pause failed! ret = %d", ret);
1685 hw->current_mode = current_mode;
1686 hw->current_fc_status = fc_status;
1687 pf->pause_time = pause_time;