1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2019 Hisilicon Limited.
6 #include <rte_ethdev.h>
9 #include "hns3_ethdev.h"
12 #define HNS3_SHAPER_BS_U_DEF 5
13 #define HNS3_SHAPER_BS_S_DEF 20
14 #define BW_MAX_PERCENT 100
17 * hns3_shaper_para_calc: calculate ir parameter for the shaper
18 * @ir: Rate to be config, its unit is Mbps
19 * @shaper_level: the shaper level. eg: port, pg, priority, queueset
20 * @shaper_para: shaper parameter of IR shaper
24 * IR_b * (2 ^ IR_u) * 8
25 * IR(Mbps) = ------------------------- * CLOCK(1000Mbps)
28 * @return: 0: calculation successful, negative: fail
/*
 * Convert the rate 'ir' (Mbps) into the hardware IR shaper triple
 * (ir_b, ir_u, ir_s) stored in *shaper_para.  The per-level clock tick
 * comes from tick_array[], indexed by shaper_level.  Rejects an
 * out-of-range level or a rate above hw->max_tm_rate.
 */
31 hns3_shaper_para_calc(struct hns3_hw *hw, uint32_t ir, uint8_t shaper_level,
32 struct hns3_shaper_parameter *shaper_para)
34 #define SHAPER_DEFAULT_IR_B 126
35 #define DIVISOR_CLK (1000 * 8)
36 #define DIVISOR_IR_B_126 (126 * DIVISOR_CLK)
38 const uint16_t tick_array[HNS3_SHAPER_LVL_CNT] = {
39 6 * 256, /* Priority level */
40 6 * 32, /* Priority group level */
41 6 * 8, /* Port level */
42 6 * 256 /* Qset level */
44 uint8_t ir_u_calc = 0;
45 uint8_t ir_s_calc = 0;
/* Validate the shaper level before using it as an array index. */
51 if (shaper_level >= HNS3_SHAPER_LVL_CNT) {
53 "shaper_level(%u) is greater than HNS3_SHAPER_LVL_CNT(%d)",
54 shaper_level, HNS3_SHAPER_LVL_CNT);
58 if (ir > hw->max_tm_rate) {
59 hns3_err(hw, "rate(%u) exceeds the max rate(%u) driver "
60 "supported.", ir, hw->max_tm_rate);
64 tick = tick_array[shaper_level];
67 * Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
68 * the formula is changed to:
70 * ir_calc = ---------------- * 1000
/* Round up: (tick >> 1) - 1 biases the integer division. */
73 ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;
76 shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
77 } else if (ir_calc > ir) {
78 /* Increasing the denominator to select ir_s value */
81 ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
82 } while (ir_calc > ir);
85 shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
/* Round-to-nearest via the (DIVISOR_CLK >> 1) bias. */
87 shaper_para->ir_b = (ir * tick * (1 << ir_s_calc) +
88 (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
91 * Increasing the numerator to select ir_u value. ir_u_calc will
92 * get maximum value when ir_calc is minimum and ir is maximum.
93 * ir_calc gets minimum value when tick is the maximum value.
94 * At the same time, value of ir_u_calc can only be increased up
95 * to eight after the while loop if the value of ir is equal
101 numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
102 ir_calc = (numerator + (tick >> 1)) / tick;
103 } while (ir_calc < ir);
106 shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
111 * The maximum value of ir_u_calc in this branch is
112 * seven in all cases. Thus, value of denominator can
115 denominator = DIVISOR_CLK * (1 << ir_u_calc);
117 (ir * tick + (denominator >> 1)) / denominator;
121 shaper_para->ir_u = ir_u_calc;
122 shaper_para->ir_s = ir_s_calc;
/*
 * Pack the TC mapped by user priority 'pri_id' into the 'pri' byte
 * array used by the priority-to-TC mapping command: two priorities per
 * byte, 4 bits each.  Fails if the mapped TC is not a valid enabled TC.
 */
128 hns3_fill_pri_array(struct hns3_hw *hw, uint8_t *pri, uint8_t pri_id)
130 #define HNS3_HALF_BYTE_BIT_OFFSET 4
131 uint8_t tc = hw->dcb_info.prio_tc[pri_id];
133 if (tc >= hw->dcb_info.num_tc)
137 * The register for priority has four bytes, the first byte includes
138 * priority0 and priority1, the higher 4bit stands for priority1
139 * while the lower 4bit stands for priority0, as below:
140 * first byte: | pri_1 | pri_0 |
141 * second byte: | pri_3 | pri_2 |
142 * third byte: | pri_5 | pri_4 |
143 * fourth byte: | pri_7 | pri_6 |
145 pri[pri_id >> 1] |= tc << ((pri_id & 1) * HNS3_HALF_BYTE_BIT_OFFSET);
/*
 * Build and send the HNS3_OPC_PRI_TO_TC_MAPPING command covering all
 * HNS3_MAX_USER_PRIO user priorities (packed by hns3_fill_pri_array).
 */
151 hns3_up_to_tc_map(struct hns3_hw *hw)
153 struct hns3_cmd_desc desc;
154 uint8_t *pri = (uint8_t *)desc.data;
158 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PRI_TO_TC_MAPPING, false);
160 for (pri_id = 0; pri_id < HNS3_MAX_USER_PRIO; pri_id++) {
161 ret = hns3_fill_pri_array(hw, pri, pri_id);
166 return hns3_cmd_send(hw, &desc, 1);
/*
 * Send one HNS3_OPC_TM_PG_TO_PRI_LINK command linking priority group
 * 'pg_id' to the priorities in 'pri_bit_map'.
 */
170 hns3_pg_to_pri_map_cfg(struct hns3_hw *hw, uint8_t pg_id, uint8_t pri_bit_map)
172 struct hns3_pg_to_pri_link_cmd *map;
173 struct hns3_cmd_desc desc;
175 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_TO_PRI_LINK, false);
177 map = (struct hns3_pg_to_pri_link_cmd *)desc.data;
180 map->pri_bit_map = pri_bit_map;
182 return hns3_cmd_send(hw, &desc, 1);
/*
 * Configure the PG-to-priority mapping for every priority group.
 * Only meaningful in TC-based scheduler mode; otherwise a no-op.
 */
186 hns3_pg_to_pri_map(struct hns3_hw *hw)
188 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
189 struct hns3_pf *pf = &hns->pf;
190 struct hns3_pg_info *pg_info;
193 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
196 for (i = 0; i < hw->dcb_info.num_pg; i++) {
197 /* Cfg pg to priority mapping */
198 pg_info = &hw->dcb_info.pg_info[i];
199 ret = hns3_pg_to_pri_map_cfg(hw, i, pg_info->tc_bit_map);
/*
 * Link queue set 'qs_id' to priority 'pri' with the link-valid bit set
 * (HNS3_OPC_TM_QS_TO_PRI_LINK command).
 */
208 hns3_qs_to_pri_map_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t pri)
210 struct hns3_qs_to_pri_link_cmd *map;
211 struct hns3_cmd_desc desc;
213 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_TO_PRI_LINK, false);
215 map = (struct hns3_qs_to_pri_link_cmd *)desc.data;
217 map->qs_id = rte_cpu_to_le_16(qs_id);
219 map->link_vld = HNS3_DCB_QS_PRI_LINK_VLD_MSK;
221 return hns3_cmd_send(hw, &desc, 1);
/* Set the DWRR weight of queue set 'qs_id' (HNS3_OPC_TM_QS_WEIGHT). */
225 hns3_dcb_qs_weight_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t dwrr)
227 struct hns3_qs_weight_cmd *weight;
228 struct hns3_cmd_desc desc;
230 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_WEIGHT, false);
232 weight = (struct hns3_qs_weight_cmd *)desc.data;
234 weight->qs_id = rte_cpu_to_le_16(qs_id);
237 return hns3_cmd_send(hw, &desc, 1);
/*
 * Program the per-TC ETS weights (HNS3_OPC_ETS_TC_WEIGHT).  Disabled
 * TCs (not in hw->hw_tc_map) keep DEFAULT_TC_WEIGHT; enabled TCs use
 * the DWRR weight from their priority group.
 */
241 hns3_dcb_ets_tc_dwrr_cfg(struct hns3_hw *hw)
243 #define DEFAULT_TC_WEIGHT 1
244 #define DEFAULT_TC_OFFSET 14
245 struct hns3_ets_tc_weight_cmd *ets_weight;
246 struct hns3_cmd_desc desc;
249 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_ETS_TC_WEIGHT, false);
250 ets_weight = (struct hns3_ets_tc_weight_cmd *)desc.data;
252 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
253 struct hns3_pg_info *pg_info;
255 ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;
257 if (!(hw->hw_tc_map & BIT(i)))
260 pg_info = &hw->dcb_info.pg_info[hw->dcb_info.tc_info[i].pgid];
261 ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
264 ets_weight->weight_offset = DEFAULT_TC_OFFSET;
266 return hns3_cmd_send(hw, &desc, 1);
/* Set the DWRR weight of priority 'pri_id' (HNS3_OPC_TM_PRI_WEIGHT). */
270 hns3_dcb_pri_weight_cfg(struct hns3_hw *hw, uint8_t pri_id, uint8_t dwrr)
272 struct hns3_priority_weight_cmd *weight;
273 struct hns3_cmd_desc desc;
275 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PRI_WEIGHT, false);
277 weight = (struct hns3_priority_weight_cmd *)desc.data;
279 weight->pri_id = pri_id;
282 return hns3_cmd_send(hw, &desc, 1);
/* Set the DWRR weight of priority group 'pg_id' (HNS3_OPC_TM_PG_WEIGHT). */
286 hns3_dcb_pg_weight_cfg(struct hns3_hw *hw, uint8_t pg_id, uint8_t dwrr)
288 struct hns3_pg_weight_cmd *weight;
289 struct hns3_cmd_desc desc;
291 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_WEIGHT, false);
293 weight = (struct hns3_pg_weight_cmd *)desc.data;
295 weight->pg_id = pg_id;
298 return hns3_cmd_send(hw, &desc, 1);
/*
 * Select the scheduling mode (DWRR vs. the alternative) for priority
 * group 'pg_id' based on hw->dcb_info.pg_info[pg_id].pg_sch_mode.
 */
301 hns3_dcb_pg_schd_mode_cfg(struct hns3_hw *hw, uint8_t pg_id)
303 struct hns3_cmd_desc desc;
305 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_SCH_MODE_CFG, false);
307 if (hw->dcb_info.pg_info[pg_id].pg_sch_mode == HNS3_SCH_MODE_DWRR)
308 desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
312 desc.data[0] = rte_cpu_to_le_32(pg_id);
314 return hns3_cmd_send(hw, &desc, 1);
/*
 * Pack the five shaper fields (IR_B/IR_U/IR_S/BS_B/BS_S) into the
 * 32-bit register layout expected by the shaping commands.
 */
318 hns3_dcb_get_shapping_para(uint8_t ir_b, uint8_t ir_u, uint8_t ir_s,
319 uint8_t bs_b, uint8_t bs_s)
321 uint32_t shapping_para = 0;
323 hns3_dcb_set_field(shapping_para, IR_B, ir_b);
324 hns3_dcb_set_field(shapping_para, IR_U, ir_u);
325 hns3_dcb_set_field(shapping_para, IR_S, ir_s);
326 hns3_dcb_set_field(shapping_para, BS_B, bs_b);
327 hns3_dcb_set_field(shapping_para, BS_S, bs_s);
329 return shapping_para;
/*
 * Configure the port-level shaper from the current MAC link speed:
 * compute the IR triple, pack it with the default bucket sizes, and
 * send HNS3_OPC_TM_PORT_SHAPPING together with the raw rate so newer
 * firmware can recalculate the parameters itself.
 */
333 hns3_dcb_port_shaper_cfg(struct hns3_hw *hw)
335 struct hns3_port_shapping_cmd *shap_cfg_cmd;
336 struct hns3_shaper_parameter shaper_parameter;
337 uint32_t shapping_para;
338 uint32_t ir_u, ir_b, ir_s;
339 struct hns3_cmd_desc desc;
342 ret = hns3_shaper_para_calc(hw, hw->mac.link_speed,
343 HNS3_SHAPER_LVL_PORT, &shaper_parameter);
345 hns3_err(hw, "calculate shaper parameter failed: %d", ret);
349 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PORT_SHAPPING, false);
350 shap_cfg_cmd = (struct hns3_port_shapping_cmd *)desc.data;
352 ir_b = shaper_parameter.ir_b;
353 ir_u = shaper_parameter.ir_u;
354 ir_s = shaper_parameter.ir_s;
355 shapping_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
356 HNS3_SHAPER_BS_U_DEF,
357 HNS3_SHAPER_BS_S_DEF);
359 shap_cfg_cmd->port_shapping_para = rte_cpu_to_le_32(shapping_para);
362 * Configure the port_rate and set bit HNS3_TM_RATE_VLD_B of flag
363 * field in hns3_port_shapping_cmd to require firmware to recalculate
364 * shapping parameters. And whether the parameters are recalculated
365 * depends on the firmware version. But driver still needs to
366 * calculate it and configure to firmware for better compatibility.
368 shap_cfg_cmd->port_rate = rte_cpu_to_le_32(hw->mac.link_speed);
369 hns3_set_bit(shap_cfg_cmd->flag, HNS3_TM_RATE_VLD_B, 1);
371 return hns3_cmd_send(hw, &desc, 1);
/*
 * Send the PG shaping command for one bucket: the 'bucket' flag picks
 * the P (peak) vs. C (committed) opcode.  'rate' is also sent so that
 * firmware which understands HNS3_TM_RATE_VLD_B can recalculate.
 */
375 hns3_dcb_pg_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,
376 uint8_t pg_id, uint32_t shapping_para, uint32_t rate)
378 struct hns3_pg_shapping_cmd *shap_cfg_cmd;
379 enum hns3_opcode_type opcode;
380 struct hns3_cmd_desc desc;
382 opcode = bucket ? HNS3_OPC_TM_PG_P_SHAPPING :
383 HNS3_OPC_TM_PG_C_SHAPPING;
384 hns3_cmd_setup_basic_desc(&desc, opcode, false);
386 shap_cfg_cmd = (struct hns3_pg_shapping_cmd *)desc.data;
388 shap_cfg_cmd->pg_id = pg_id;
390 shap_cfg_cmd->pg_shapping_para = rte_cpu_to_le_32(shapping_para);
393 * Configure the pg_rate and set bit HNS3_TM_RATE_VLD_B of flag field in
394 * hns3_pg_shapping_cmd to require firmware to recalculate shapping
395 * parameters. And whether parameters are recalculated depends on
396 * the firmware version. But driver still needs to calculate it and
397 * configure to firmware for better compatibility.
399 shap_cfg_cmd->pg_rate = rte_cpu_to_le_32(rate);
400 hns3_set_bit(shap_cfg_cmd->flag, HNS3_TM_RATE_VLD_B, 1);
402 return hns3_cmd_send(hw, &desc, 1);
/*
 * Configure CIR and PIR shapers for every priority group from its
 * bw_limit.  The CIR bucket is programmed with a zero IR triple; the
 * PIR bucket carries the calculated parameters.  TC-based scheduler
 * mode only.
 */
406 hns3_dcb_pg_shaper_cfg(struct hns3_hw *hw)
408 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
409 struct hns3_shaper_parameter shaper_parameter;
410 struct hns3_pf *pf = &hns->pf;
411 uint32_t ir_u, ir_b, ir_s;
412 uint32_t shaper_para;
418 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
422 for (i = 0; i < hw->dcb_info.num_pg; i++) {
423 rate = hw->dcb_info.pg_info[i].bw_limit;
425 /* Calc shaper para */
426 ret = hns3_shaper_para_calc(hw, rate, HNS3_SHAPER_LVL_PG,
429 hns3_err(hw, "calculate shaper parameter failed: %d",
434 shaper_para = hns3_dcb_get_shapping_para(0, 0, 0,
435 HNS3_SHAPER_BS_U_DEF,
436 HNS3_SHAPER_BS_S_DEF);
438 ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, i,
442 "config PG CIR shaper parameter failed: %d",
447 ir_b = shaper_parameter.ir_b;
448 ir_u = shaper_parameter.ir_u;
449 ir_s = shaper_parameter.ir_s;
450 shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
451 HNS3_SHAPER_BS_U_DEF,
452 HNS3_SHAPER_BS_S_DEF);
454 ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, i,
458 "config PG PIR shaper parameter failed: %d",
/* Select DWRR (or the alternative) scheduling mode for queue set 'qs_id'. */
468 hns3_dcb_qs_schd_mode_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t mode)
470 struct hns3_cmd_desc desc;
472 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_SCH_MODE_CFG, false);
474 if (mode == HNS3_SCH_MODE_DWRR)
475 desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
479 desc.data[0] = rte_cpu_to_le_32(qs_id);
481 return hns3_cmd_send(hw, &desc, 1);
/*
 * Select the scheduling mode for priority 'pri_id' based on the TC's
 * tc_sch_mode.
 */
485 hns3_dcb_pri_schd_mode_cfg(struct hns3_hw *hw, uint8_t pri_id)
487 struct hns3_cmd_desc desc;
489 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PRI_SCH_MODE_CFG, false);
491 if (hw->dcb_info.tc_info[pri_id].tc_sch_mode == HNS3_SCH_MODE_DWRR)
492 desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
496 desc.data[0] = rte_cpu_to_le_32(pri_id);
498 return hns3_cmd_send(hw, &desc, 1);
/*
 * Send the priority-level shaping command for one bucket (P vs. C
 * selected by 'bucket'), carrying both packed parameters and raw rate.
 */
502 hns3_dcb_pri_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,
503 uint8_t pri_id, uint32_t shapping_para, uint32_t rate)
505 struct hns3_pri_shapping_cmd *shap_cfg_cmd;
506 enum hns3_opcode_type opcode;
507 struct hns3_cmd_desc desc;
509 opcode = bucket ? HNS3_OPC_TM_PRI_P_SHAPPING :
510 HNS3_OPC_TM_PRI_C_SHAPPING;
512 hns3_cmd_setup_basic_desc(&desc, opcode, false);
514 shap_cfg_cmd = (struct hns3_pri_shapping_cmd *)desc.data;
516 shap_cfg_cmd->pri_id = pri_id;
518 shap_cfg_cmd->pri_shapping_para = rte_cpu_to_le_32(shapping_para);
521 * Configure the pri_rate and set bit HNS3_TM_RATE_VLD_B of flag
522 * field in hns3_pri_shapping_cmd to require firmware to recalculate
523 * shapping parameters. And whether the parameters are recalculated
524 * depends on the firmware version. But driver still needs to
525 * calculate it and configure to firmware for better compatibility.
527 shap_cfg_cmd->pri_rate = rte_cpu_to_le_32(rate);
528 hns3_set_bit(shap_cfg_cmd->flag, HNS3_TM_RATE_VLD_B, 1);
530 return hns3_cmd_send(hw, &desc, 1);
/*
 * Configure CIR and PIR shapers for every enabled TC from its
 * bw_limit (mirrors hns3_dcb_pg_shaper_cfg at priority level).
 */
534 hns3_dcb_pri_tc_base_shaper_cfg(struct hns3_hw *hw)
536 struct hns3_shaper_parameter shaper_parameter;
537 uint32_t ir_u, ir_b, ir_s;
538 uint32_t shaper_para;
542 for (i = 0; i < hw->dcb_info.num_tc; i++) {
543 rate = hw->dcb_info.tc_info[i].bw_limit;
544 ret = hns3_shaper_para_calc(hw, rate, HNS3_SHAPER_LVL_PRI,
547 hns3_err(hw, "calculate shaper parameter failed: %d",
552 shaper_para = hns3_dcb_get_shapping_para(0, 0, 0,
553 HNS3_SHAPER_BS_U_DEF,
554 HNS3_SHAPER_BS_S_DEF);
556 ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, i,
560 "config priority CIR shaper parameter failed: %d",
565 ir_b = shaper_parameter.ir_b;
566 ir_u = shaper_parameter.ir_u;
567 ir_s = shaper_parameter.ir_s;
568 shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
569 HNS3_SHAPER_BS_U_DEF,
570 HNS3_SHAPER_BS_S_DEF);
572 ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, i,
576 "config priority PIR shaper parameter failed: %d",
/*
 * Priority-level shaper entry point; delegates to the TC-based helper
 * when in TC-based scheduler mode.
 */
587 hns3_dcb_pri_shaper_cfg(struct hns3_hw *hw)
589 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
590 struct hns3_pf *pf = &hns->pf;
593 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
596 ret = hns3_dcb_pri_tc_base_shaper_cfg(hw);
598 hns3_err(hw, "config port shaper failed: %d", ret);
/*
 * Derive the per-TC RSS size from the requested Rx queue count.
 * Validates that the per-TC count fits hw->rss_size_max and that
 * nb_rx_q is an exact multiple of num_tc, then records alloc_rss_size
 * and used_rx_queues and refreshes the driver RSS indirection table —
 * unless a reset is in progress, where the saved table must survive
 * to be restored in the RESET_STAGE_RESTORE stage.
 */
604 hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q)
606 struct hns3_rss_conf *rss_cfg = &hw->rss_info;
607 uint16_t rx_qnum_per_tc;
608 uint16_t used_rx_queues;
611 rx_qnum_per_tc = nb_rx_q / hw->num_tc;
612 if (rx_qnum_per_tc > hw->rss_size_max) {
613 hns3_err(hw, "rx queue number of per tc (%u) is greater than "
614 "value (%u) hardware supported.",
615 rx_qnum_per_tc, hw->rss_size_max);
619 used_rx_queues = hw->num_tc * rx_qnum_per_tc;
620 if (used_rx_queues != nb_rx_q) {
621 hns3_err(hw, "rx queue number (%u) configured must be an "
622 "integral multiple of valid tc number (%u).",
623 nb_rx_q, hw->num_tc);
626 hw->alloc_rss_size = rx_qnum_per_tc;
627 hw->used_rx_queues = used_rx_queues;
630 * When rss size is changed, we need to update rss redirection table
631 * maintained by driver. Besides, during the entire reset process, we
632 * need to ensure that the rss table information are not overwritten
633 * and configured directly to the hardware in the RESET_STAGE_RESTORE
634 * stage of the reset process.
636 if (rte_atomic16_read(&hw->reset.resetting) == 0) {
637 for (i = 0; i < HNS3_RSS_IND_TBL_SIZE; i++)
638 rss_cfg->rss_indirection_tbl[i] =
639 i % hw->alloc_rss_size;
/*
 * Split nb_tx_q Tx queues evenly across the enabled TCs and fill in
 * hw->tc_queue[]: enabled TCs get contiguous tqp ranges, disabled TCs
 * are zeroed.  nb_tx_q must be an exact multiple of num_tc.
 */
646 hns3_tc_queue_mapping_cfg(struct hns3_hw *hw, uint16_t nb_tx_q)
648 struct hns3_tc_queue_info *tc_queue;
649 uint16_t used_tx_queues;
650 uint16_t tx_qnum_per_tc;
653 tx_qnum_per_tc = nb_tx_q / hw->num_tc;
654 used_tx_queues = hw->num_tc * tx_qnum_per_tc;
655 if (used_tx_queues != nb_tx_q) {
656 hns3_err(hw, "tx queue number (%u) configured must be an "
657 "integral multiple of valid tc number (%u).",
658 nb_tx_q, hw->num_tc);
662 hw->used_tx_queues = used_tx_queues;
663 hw->tx_qnum_per_tc = tx_qnum_per_tc;
664 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
665 tc_queue = &hw->tc_queue[i];
666 if (hw->hw_tc_map & BIT(i) && i < hw->num_tc) {
667 tc_queue->enable = true;
668 tc_queue->tqp_offset = i * hw->tx_qnum_per_tc;
669 tc_queue->tqp_count = hw->tx_qnum_per_tc;
672 /* Set to default queue if TC is disable */
673 tc_queue->enable = false;
674 tc_queue->tqp_offset = 0;
675 tc_queue->tqp_count = 0;
/* Apply Rx (RSS size) and Tx (per-TC queue) mappings in sequence. */
684 hns3_queue_to_tc_mapping(struct hns3_hw *hw, uint16_t nb_rx_q, uint16_t nb_tx_q)
688 ret = hns3_set_rss_size(hw, nb_rx_q);
692 return hns3_tc_queue_mapping_cfg(hw, nb_tx_q);
/*
 * Refresh hw->num_tc from the DCB info, remap queues to TCs, and
 * mirror the priority-to-TC table into the PF copy.
 */
696 hns3_dcb_update_tc_queue_mapping(struct hns3_hw *hw, uint16_t nb_rx_q,
699 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
700 struct hns3_pf *pf = &hns->pf;
703 hw->num_tc = hw->dcb_info.num_tc;
704 ret = hns3_queue_to_tc_mapping(hw, nb_rx_q, nb_tx_q);
709 memcpy(pf->prio_tc, hw->dcb_info.prio_tc, HNS3_MAX_USER_PRIO);
/*
 * Initialize default DCB state: PG0-centric PG info with full
 * bandwidth, all user priorities mapped to TC0, and per-TC info
 * inheriting PG0's bandwidth limit.  Requires num_pg == 1 unless in
 * TC-based scheduler mode.
 */
715 hns3_dcb_info_init(struct hns3_hw *hw)
717 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
718 struct hns3_pf *pf = &hns->pf;
721 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
722 hw->dcb_info.num_pg != 1)
725 /* Initializing PG information */
726 memset(hw->dcb_info.pg_info, 0,
727 sizeof(struct hns3_pg_info) * HNS3_PG_NUM);
728 for (i = 0; i < hw->dcb_info.num_pg; i++) {
/* Only PG0 gets the full DWRR bandwidth; other PGs get 0. */
729 hw->dcb_info.pg_dwrr[i] = i ? 0 : BW_MAX_PERCENT;
730 hw->dcb_info.pg_info[i].pg_id = i;
731 hw->dcb_info.pg_info[i].pg_sch_mode = HNS3_SCH_MODE_DWRR;
732 hw->dcb_info.pg_info[i].bw_limit = hw->max_tm_rate;
737 hw->dcb_info.pg_info[i].tc_bit_map = hw->hw_tc_map;
738 for (k = 0; k < hw->dcb_info.num_tc; k++)
739 hw->dcb_info.pg_info[i].tc_dwrr[k] = BW_MAX_PERCENT;
742 /* All UPs mapping to TC0 */
743 for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
744 hw->dcb_info.prio_tc[i] = 0;
746 /* Initializing tc information */
747 memset(hw->dcb_info.tc_info, 0,
748 sizeof(struct hns3_tc_info) * HNS3_MAX_TC_NUM);
749 for (i = 0; i < hw->dcb_info.num_tc; i++) {
750 hw->dcb_info.tc_info[i].tc_id = i;
751 hw->dcb_info.tc_info[i].tc_sch_mode = HNS3_SCH_MODE_DWRR;
752 hw->dcb_info.tc_info[i].pgid = 0;
753 hw->dcb_info.tc_info[i].bw_limit =
754 hw->dcb_info.pg_info[0].bw_limit;
/*
 * Configure scheduling mode for every priority group (level 2).
 * Skipped under VNET-based scheduler mode.
 */
761 hns3_dcb_lvl2_schd_mode_cfg(struct hns3_hw *hw)
763 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
764 struct hns3_pf *pf = &hns->pf;
767 /* Only being config on TC-Based scheduler mode */
768 if (pf->tx_sch_mode == HNS3_FLAG_VNET_BASE_SCH_MODE)
771 for (i = 0; i < hw->dcb_info.num_pg; i++) {
772 ret = hns3_dcb_pg_schd_mode_cfg(hw, i);
/*
 * Configure scheduling mode for priority (level 3) and queue-set
 * (level 4) entities of each TC, in TC-based scheduler mode.
 */
783 hns3_dcb_lvl34_schd_mode_cfg(struct hns3_hw *hw)
785 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
786 struct hns3_pf *pf = &hns->pf;
788 if (pf->tx_sch_mode == HNS3_FLAG_TC_BASE_SCH_MODE) {
789 for (i = 0; i < hw->dcb_info.num_tc; i++) {
790 ret = hns3_dcb_pri_schd_mode_cfg(hw, i);
794 ret = hns3_dcb_qs_schd_mode_cfg(hw, i,
/* Configure scheduling mode for all levels: PG first, then pri/qset. */
805 hns3_dcb_schd_mode_cfg(struct hns3_hw *hw)
809 ret = hns3_dcb_lvl2_schd_mode_cfg(hw);
811 hns3_err(hw, "config lvl2_schd_mode failed: %d", ret);
815 ret = hns3_dcb_lvl34_schd_mode_cfg(hw);
817 hns3_err(hw, "config lvl34_schd_mode failed: %d", ret);
/*
 * Program per-TC DWRR weights: the priority weight comes from the
 * TC's PG tc_dwrr entry, and each queue set gets the full weight.
 */
823 hns3_dcb_pri_tc_base_dwrr_cfg(struct hns3_hw *hw)
825 struct hns3_pg_info *pg_info;
829 for (i = 0; i < hw->dcb_info.num_tc; i++) {
830 pg_info = &hw->dcb_info.pg_info[hw->dcb_info.tc_info[i].pgid];
831 dwrr = pg_info->tc_dwrr[i];
833 ret = hns3_dcb_pri_weight_cfg(hw, i, dwrr);
836 "fail to send priority weight cmd: %d, ret = %d",
841 ret = hns3_dcb_qs_weight_cfg(hw, i, BW_MAX_PERCENT);
843 hns3_err(hw, "fail to send qs_weight cmd: %d, ret = %d",
/*
 * Priority-level DWRR entry point.  After the TC-based weights, also
 * send the ETS TC weight command on DCB-capable devices; an
 * -EOPNOTSUPP reply is tolerated (old firmware) and only logged with
 * the decoded firmware version.
 */
853 hns3_dcb_pri_dwrr_cfg(struct hns3_hw *hw)
855 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
856 struct hns3_pf *pf = &hns->pf;
860 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
863 ret = hns3_dcb_pri_tc_base_dwrr_cfg(hw);
867 if (!hns3_dev_dcb_supported(hw))
870 ret = hns3_dcb_ets_tc_dwrr_cfg(hw);
871 if (ret == -EOPNOTSUPP) {
872 version = hw->fw_version;
874 "fw %lu.%lu.%lu.%lu doesn't support ets tc weight cmd",
875 hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
876 HNS3_FW_VERSION_BYTE3_S),
877 hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
878 HNS3_FW_VERSION_BYTE2_S),
879 hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
880 HNS3_FW_VERSION_BYTE1_S),
881 hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
882 HNS3_FW_VERSION_BYTE0_S));
/*
 * Program the DWRR weight of every priority group from
 * hw->dcb_info.pg_dwrr[] (TC-based scheduler mode only).
 */
890 hns3_dcb_pg_dwrr_cfg(struct hns3_hw *hw)
892 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
893 struct hns3_pf *pf = &hns->pf;
897 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
901 for (i = 0; i < hw->dcb_info.num_pg; i++) {
903 ret = hns3_dcb_pg_weight_cfg(hw, i, hw->dcb_info.pg_dwrr[i]);
/* Configure DWRR weights at all levels: PG first, then priority. */
912 hns3_dcb_dwrr_cfg(struct hns3_hw *hw)
916 ret = hns3_dcb_pg_dwrr_cfg(hw);
918 hns3_err(hw, "config pg_dwrr failed: %d", ret);
922 ret = hns3_dcb_pri_dwrr_cfg(hw);
924 hns3_err(hw, "config pri_dwrr failed: %d", ret);
/* Configure shapers at all levels: port, then PG, then priority. */
930 hns3_dcb_shaper_cfg(struct hns3_hw *hw)
934 ret = hns3_dcb_port_shaper_cfg(hw);
936 hns3_err(hw, "config port shaper failed: %d", ret);
940 ret = hns3_dcb_pg_shaper_cfg(hw);
942 hns3_err(hw, "config pg shaper failed: %d", ret);
946 return hns3_dcb_pri_shaper_cfg(hw);
/*
 * Link Tx queue 'q_id' to queue set 'qs_id'.  The qs_id is re-encoded
 * into the split high/low register layout (see comment below) so that
 * qset ids above 1024 can be expressed, then sent with the link-valid
 * bit set.
 */
950 hns3_q_to_qs_map_cfg(struct hns3_hw *hw, uint16_t q_id, uint16_t qs_id)
952 struct hns3_nq_to_qs_link_cmd *map;
953 struct hns3_cmd_desc desc;
954 uint16_t tmp_qs_id = 0;
958 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_NQ_TO_QS_LINK, false);
960 map = (struct hns3_nq_to_qs_link_cmd *)desc.data;
962 map->nq_id = rte_cpu_to_le_16(q_id);
965 * Network engine with revision_id 0x21 uses 0~9 bit of qs_id to
966 * configure qset_id. So we need to convert qs_id to the follow
967 * format to support qset_id > 1024.
968 * qs_id: | 15 | 14 ~ 10 | 9 ~ 0 |
971 * qset_id: | 15 ~ 11 | 10 | 9 ~ 0 |
972 * | qs_id_h | vld | qs_id_l |
974 qs_id_l = hns3_get_field(qs_id, HNS3_DCB_QS_ID_L_MSK,
976 qs_id_h = hns3_get_field(qs_id, HNS3_DCB_QS_ID_H_MSK,
978 hns3_set_field(tmp_qs_id, HNS3_DCB_QS_ID_L_MSK, HNS3_DCB_QS_ID_L_S,
980 hns3_set_field(tmp_qs_id, HNS3_DCB_QS_ID_H_EXT_MSK,
981 HNS3_DCB_QS_ID_H_EXT_S, qs_id_h);
982 map->qset_id = rte_cpu_to_le_16(tmp_qs_id | HNS3_DCB_Q_QS_LINK_VLD_MSK);
984 return hns3_cmd_send(hw, &desc, 1);
/*
 * Map every Tx queue of every TC to that TC's queue set (queue set id
 * equals the TC index here).
 */
988 hns3_q_to_qs_map(struct hns3_hw *hw)
990 struct hns3_tc_queue_info *tc_queue;
995 for (i = 0; i < hw->num_tc; i++) {
996 tc_queue = &hw->tc_queue[i];
997 for (j = 0; j < tc_queue->tqp_count; j++) {
998 q_id = tc_queue->tqp_offset + j;
999 ret = hns3_q_to_qs_map_cfg(hw, q_id, i);
/*
 * Configure queue-set-to-priority and queue-to-queue-set mappings
 * (TC-based scheduler mode only; qset id == priority id == TC index).
 */
1009 hns3_pri_q_qs_cfg(struct hns3_hw *hw)
1011 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1012 struct hns3_pf *pf = &hns->pf;
1016 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
1019 /* Cfg qs -> pri mapping */
1020 for (i = 0; i < hw->num_tc; i++) {
1021 ret = hns3_qs_to_pri_map_cfg(hw, i, i);
1023 hns3_err(hw, "qs_to_pri mapping fail: %d", ret);
1028 /* Cfg q -> qs mapping */
1029 ret = hns3_q_to_qs_map(hw);
1031 hns3_err(hw, "nq_to_qs mapping fail: %d", ret);
/* Configure all DCB mappings: UP->TC, PG->pri, then pri/q/qs links. */
1037 hns3_dcb_map_cfg(struct hns3_hw *hw)
1041 ret = hns3_up_to_tc_map(hw);
1043 hns3_err(hw, "up_to_tc mapping fail: %d", ret);
1047 ret = hns3_pg_to_pri_map(hw);
1049 hns3_err(hw, "pri_to_pg mapping fail: %d", ret);
1053 return hns3_pri_q_qs_cfg(hw);
/*
 * Full scheduler bring-up: mappings, shapers, DWRR weights, then
 * per-level scheduling modes, in that order.
 */
1057 hns3_dcb_schd_setup_hw(struct hns3_hw *hw)
1061 /* Cfg dcb mapping */
1062 ret = hns3_dcb_map_cfg(hw);
1066 /* Cfg dcb shaper */
1067 ret = hns3_dcb_shaper_cfg(hw);
1072 ret = hns3_dcb_dwrr_cfg(hw);
1076 /* Cfg schd mode for each level schd */
1077 return hns3_dcb_schd_mode_cfg(hw);
/*
 * Send the MAC pause parameter command: source MAC address (written
 * twice per the command layout), transmit gap and transmit time.
 */
1081 hns3_pause_param_cfg(struct hns3_hw *hw, const uint8_t *addr,
1082 uint8_t pause_trans_gap, uint16_t pause_trans_time)
1084 struct hns3_cfg_pause_param_cmd *pause_param;
1085 struct hns3_cmd_desc desc;
1087 pause_param = (struct hns3_cfg_pause_param_cmd *)desc.data;
1089 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PARA, false);
1091 memcpy(pause_param->mac_addr, addr, RTE_ETHER_ADDR_LEN);
1092 memcpy(pause_param->mac_addr_extra, addr, RTE_ETHER_ADDR_LEN);
1093 pause_param->pause_trans_gap = pause_trans_gap;
1094 pause_param->pause_trans_time = rte_cpu_to_le_16(pause_trans_time);
1096 return hns3_cmd_send(hw, &desc, 1);
/*
 * Update only the MAC address used in pause frames: read the current
 * gap/time from firmware, then re-send them with the new address.
 */
1100 hns3_pause_addr_cfg(struct hns3_hw *hw, const uint8_t *mac_addr)
1102 struct hns3_cfg_pause_param_cmd *pause_param;
1103 struct hns3_cmd_desc desc;
1104 uint16_t trans_time;
1108 pause_param = (struct hns3_cfg_pause_param_cmd *)desc.data;
1110 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PARA, true);
1112 ret = hns3_cmd_send(hw, &desc, 1);
1116 trans_gap = pause_param->pause_trans_gap;
1117 trans_time = rte_le_to_cpu_16(pause_param->pause_trans_time);
1119 return hns3_pause_param_cfg(hw, mac_addr, trans_gap, trans_time);
/*
 * Derive the pause transmit gap from the requested pause_time (gap
 * must stay below pause_time / 2 for defined MAC behavior), clamping
 * a too-small pause_time up to PAUSE_TIME_MIN_VALUE, then program it.
 */
1123 hns3_pause_param_setup_hw(struct hns3_hw *hw, uint16_t pause_time)
1125 #define PAUSE_TIME_DIV_BY 2
1126 #define PAUSE_TIME_MIN_VALUE 0x4
1128 struct hns3_mac *mac = &hw->mac;
1129 uint8_t pause_trans_gap;
1132 * Pause transmit gap must be less than "pause_time / 2", otherwise
1133 * the behavior of MAC is undefined.
1135 if (pause_time > PAUSE_TIME_DIV_BY * HNS3_DEFAULT_PAUSE_TRANS_GAP)
1136 pause_trans_gap = HNS3_DEFAULT_PAUSE_TRANS_GAP;
1137 else if (pause_time >= PAUSE_TIME_MIN_VALUE &&
1138 pause_time <= PAUSE_TIME_DIV_BY * HNS3_DEFAULT_PAUSE_TRANS_GAP)
1139 pause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1;
1141 hns3_warn(hw, "pause_time(%u) is adjusted to 4", pause_time);
1142 pause_time = PAUSE_TIME_MIN_VALUE;
1143 pause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1;
1146 return hns3_pause_param_cfg(hw, mac->mac_addr,
1147 pause_trans_gap, pause_time);
/* Enable/disable MAC-level pause independently for Tx and Rx. */
1151 hns3_mac_pause_en_cfg(struct hns3_hw *hw, bool tx, bool rx)
1153 struct hns3_cmd_desc desc;
1155 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PAUSE_EN, false);
1157 desc.data[0] = rte_cpu_to_le_32((tx ? HNS3_TX_MAC_PAUSE_EN_MSK : 0) |
1158 (rx ? HNS3_RX_MAC_PAUSE_EN_MSK : 0));
1160 return hns3_cmd_send(hw, &desc, 1);
/*
 * Enable/disable PFC pause for Tx/Rx together with the per-priority
 * enable bitmap.
 */
1164 hns3_pfc_pause_en_cfg(struct hns3_hw *hw, uint8_t pfc_bitmap, bool tx, bool rx)
1166 struct hns3_cmd_desc desc;
1167 struct hns3_pfc_en_cmd *pfc = (struct hns3_pfc_en_cmd *)desc.data;
1169 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PFC_PAUSE_EN, false);
1171 pfc->tx_rx_en_bitmap = (uint8_t)((tx ? HNS3_TX_MAC_PAUSE_EN_MSK : 0) |
1172 (rx ? HNS3_RX_MAC_PAUSE_EN_MSK : 0));
1174 pfc->pri_en_bitmap = pfc_bitmap;
1176 return hns3_cmd_send(hw, &desc, 1);
/*
 * Send one backpressure mapping command: which queue sets (bitmap
 * within group 'grp_id') backpress TC 'tc'.
 */
1180 hns3_qs_bp_cfg(struct hns3_hw *hw, uint8_t tc, uint8_t grp_id, uint32_t bit_map)
1182 struct hns3_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
1183 struct hns3_cmd_desc desc;
1185 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_BP_TO_QSET_MAPPING, false);
1187 bp_to_qs_map_cmd = (struct hns3_bp_to_qs_map_cmd *)desc.data;
1189 bp_to_qs_map_cmd->tc_id = tc;
1190 bp_to_qs_map_cmd->qs_group_id = grp_id;
1191 bp_to_qs_map_cmd->qs_bit_map = rte_cpu_to_le_32(bit_map);
1193 return hns3_cmd_send(hw, &desc, 1);
/*
 * Translate hw->current_mode (flow-control mode) into separate Tx/Rx
 * pause enable flags for the caller.
 */
1197 hns3_get_rx_tx_en_status(struct hns3_hw *hw, bool *tx_en, bool *rx_en)
1199 switch (hw->current_mode) {
1204 case HNS3_FC_RX_PAUSE:
1208 case HNS3_FC_TX_PAUSE:
/*
 * Apply MAC pause enables: derived from the FC mode when MAC pause is
 * the current flow-control status, otherwise left at the defaults.
 */
1224 hns3_mac_pause_setup_hw(struct hns3_hw *hw)
1228 if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)
1229 hns3_get_rx_tx_en_status(hw, &tx_en, &rx_en);
1235 return hns3_mac_pause_en_cfg(hw, tx_en, rx_en);
/*
 * Apply PFC enables with the configured per-priority bitmap; Tx/Rx
 * flags derive from the FC mode only when PFC is the current status.
 */
1239 hns3_pfc_setup_hw(struct hns3_hw *hw)
1243 if (hw->current_fc_status == HNS3_FC_STATUS_PFC)
1244 hns3_get_rx_tx_en_status(hw, &tx_en, &rx_en);
1250 return hns3_pfc_pause_en_cfg(hw, hw->dcb_info.pfc_en, tx_en, rx_en);
1254 * Each TC has 1024 queue sets for backpressure; they are divided into
1255 * 32 groups, and each group contains 32 queue sets, which can be
1256 * represented by a uint32_t bitmap.
/*
 * Program the backpressure qset bitmap of TC 'tc' across all
 * HNS3_BP_GRP_NUM groups, setting the sub-group bit derived from the
 * TC index.
 */
1259 hns3_bp_setup_hw(struct hns3_hw *hw, uint8_t tc)
1265 for (i = 0; i < HNS3_BP_GRP_NUM; i++) {
1266 uint8_t grp, sub_grp;
1269 grp = hns3_get_field(tc, HNS3_BP_GRP_ID_M, HNS3_BP_GRP_ID_S);
1270 sub_grp = hns3_get_field(tc, HNS3_BP_SUB_GRP_ID_M,
1271 HNS3_BP_SUB_GRP_ID_S);
1273 qs_bitmap |= (1 << sub_grp);
1275 ret = hns3_qs_bp_cfg(hw, tc, i, qs_bitmap);
/* Set up backpressure for every enabled TC. */
1284 hns3_dcb_bp_setup(struct hns3_hw *hw)
1288 for (i = 0; i < hw->dcb_info.num_tc; i++) {
1289 ret = hns3_bp_setup_hw(hw, i);
/*
 * Pause/flow-control bring-up: pause parameters, MAC pause, then on
 * DCB-capable devices PFC and qset backpressure.
 */
1298 hns3_dcb_pause_setup_hw(struct hns3_hw *hw)
1300 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1301 struct hns3_pf *pf = &hns->pf;
1304 ret = hns3_pause_param_setup_hw(hw, pf->pause_time);
1306 hns3_err(hw, "Fail to set pause parameter. ret = %d", ret);
1310 ret = hns3_mac_pause_setup_hw(hw);
1312 hns3_err(hw, "Fail to setup MAC pause. ret = %d", ret);
1316 /* Only DCB-supported dev supports qset back pressure and pfc cmd */
1317 if (!hns3_dev_dcb_supported(hw))
1320 ret = hns3_pfc_setup_hw(hw);
1322 hns3_err(hw, "config pfc failed! ret = %d", ret);
1326 return hns3_dcb_bp_setup(hw);
/*
 * Build the bitmap of no-drop TCs: a TC is no-drop when at least one
 * user priority mapped to it has PFC enabled in 'pfc_en'.
 */
1330 hns3_dcb_undrop_tc_map(struct hns3_hw *hw, uint8_t pfc_en)
1332 uint8_t pfc_map = 0;
1336 prio_tc = hw->dcb_info.prio_tc;
1337 for (i = 0; i < hw->dcb_info.num_tc; i++) {
1338 for (j = 0; j < HNS3_MAX_USER_PRIO; j++) {
1339 if (prio_tc[j] == i && pfc_en & BIT(j)) {
/*
 * Compare the requested DCB configuration against the current state,
 * reporting the requested TC count via *tc and whether anything
 * (prio->tc table, TC count, FC mode, PFC bitmap, queue counts)
 * differs via *changed.
 */
1350 hns3_dcb_cfg_validate(struct hns3_adapter *hns, uint8_t *tc, bool *changed)
1352 struct rte_eth_dcb_rx_conf *dcb_rx_conf;
1353 struct hns3_hw *hw = &hns->hw;
1354 uint16_t nb_rx_q = hw->data->nb_rx_queues;
1355 uint16_t nb_tx_q = hw->data->nb_tx_queues;
1360 dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
1361 for (i = 0; i < HNS3_MAX_USER_PRIO; i++) {
1362 if (dcb_rx_conf->dcb_tc[i] != hw->dcb_info.prio_tc[i])
1365 if (dcb_rx_conf->dcb_tc[i] > max_tc)
1366 max_tc = dcb_rx_conf->dcb_tc[i];
1369 if (*tc != hw->dcb_info.num_tc)
1373 * We ensure that dcb information can be reconfigured
1374 * after the hns3_priority_flow_ctrl_set function called.
1376 if (hw->current_mode != HNS3_FC_FULL)
1378 pfc_en = RTE_LEN2MASK((uint8_t)dcb_rx_conf->nb_tcs, uint8_t);
1379 if (hw->dcb_info.pfc_en != pfc_en)
1382 /* tx/rx queue number is reconfigured. */
1383 if (nb_rx_q != hw->used_rx_queues || nb_tx_q != hw->used_tx_queues)
/*
 * Rebuild the DCB info from the user's dcb_rx_conf: single PG (PG0)
 * with full bandwidth, DWRR weights split evenly across valid TCs
 * (remainder distributed so the weights sum to 100), prio->tc table
 * copied from the config, then queue/TC mapping refreshed.
 */
1388 hns3_dcb_info_cfg(struct hns3_adapter *hns)
1390 struct rte_eth_dcb_rx_conf *dcb_rx_conf;
1391 struct hns3_pf *pf = &hns->pf;
1392 struct hns3_hw *hw = &hns->hw;
1393 uint8_t tc_bw, bw_rest;
1397 dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
1398 pf->local_max_tc = (uint8_t)dcb_rx_conf->nb_tcs;
1399 pf->pfc_max = (uint8_t)dcb_rx_conf->nb_tcs;
1402 memset(hw->dcb_info.pg_info, 0,
1403 sizeof(struct hns3_pg_info) * HNS3_PG_NUM);
1404 hw->dcb_info.pg_dwrr[0] = BW_MAX_PERCENT;
1405 hw->dcb_info.pg_info[0].pg_id = 0;
1406 hw->dcb_info.pg_info[0].pg_sch_mode = HNS3_SCH_MODE_DWRR;
1407 hw->dcb_info.pg_info[0].bw_limit = hw->max_tm_rate;
1408 hw->dcb_info.pg_info[0].tc_bit_map = hw->hw_tc_map;
1410 /* Each tc has same bw for valid tc by default */
1411 tc_bw = BW_MAX_PERCENT / hw->dcb_info.num_tc;
1412 for (i = 0; i < hw->dcb_info.num_tc; i++)
1413 hw->dcb_info.pg_info[0].tc_dwrr[i] = tc_bw;
1414 /* To ensure the sum of tc_dwrr is equal to 100 */
1415 bw_rest = BW_MAX_PERCENT % hw->dcb_info.num_tc;
1416 for (j = 0; j < bw_rest; j++)
1417 hw->dcb_info.pg_info[0].tc_dwrr[j]++;
1418 for (; i < dcb_rx_conf->nb_tcs; i++)
1419 hw->dcb_info.pg_info[0].tc_dwrr[i] = 0;
1421 /* All tcs map to pg0 */
1422 memset(hw->dcb_info.tc_info, 0,
1423 sizeof(struct hns3_tc_info) * HNS3_MAX_TC_NUM);
1424 for (i = 0; i < hw->dcb_info.num_tc; i++) {
1425 hw->dcb_info.tc_info[i].tc_id = i;
1426 hw->dcb_info.tc_info[i].tc_sch_mode = HNS3_SCH_MODE_DWRR;
1427 hw->dcb_info.tc_info[i].pgid = 0;
1428 hw->dcb_info.tc_info[i].bw_limit =
1429 hw->dcb_info.pg_info[0].bw_limit;
1432 for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
1433 hw->dcb_info.prio_tc[i] = dcb_rx_conf->dcb_tc[i];
1435 ret = hns3_dcb_update_tc_queue_mapping(hw, hw->data->nb_rx_queues,
1436 hw->data->nb_tx_queues);
1438 hns3_err(hw, "update tc queue mapping failed, ret = %d.", ret);
/*
 * Validate and apply a new TC count: there must be at least num_tc Rx
 * and Tx queues; the enabled-TC bitmap is rebuilt (contiguous TCs
 * only), falling back to a single TC when needed, then the full DCB
 * info is reconfigured.
 */
1444 hns3_dcb_info_update(struct hns3_adapter *hns, uint8_t num_tc)
1446 struct hns3_pf *pf = &hns->pf;
1447 struct hns3_hw *hw = &hns->hw;
1448 uint16_t nb_rx_q = hw->data->nb_rx_queues;
1449 uint16_t nb_tx_q = hw->data->nb_tx_queues;
1450 uint8_t bit_map = 0;
1453 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
1454 hw->dcb_info.num_pg != 1)
1457 if (nb_rx_q < num_tc) {
1458 hns3_err(hw, "number of Rx queues(%u) is less than tcs(%u).",
1463 if (nb_tx_q < num_tc) {
1464 hns3_err(hw, "number of Tx queues(%u) is less than tcs(%u).",
1469 /* Currently not support uncontinuous tc */
1470 hw->dcb_info.num_tc = num_tc;
1471 for (i = 0; i < hw->dcb_info.num_tc; i++)
1476 hw->dcb_info.num_tc = 1;
1478 hw->hw_tc_map = bit_map;
1480 return hns3_dcb_info_cfg(hns);
1484 hns3_dcb_hw_configure(struct hns3_adapter *hns)
1486 struct rte_eth_dcb_rx_conf *dcb_rx_conf;
1487 struct hns3_pf *pf = &hns->pf;
1488 struct hns3_hw *hw = &hns->hw;
1489 enum hns3_fc_status fc_status = hw->current_fc_status;
1490 enum hns3_fc_mode current_mode = hw->current_mode;
1491 uint8_t hw_pfc_map = hw->dcb_info.hw_pfc_map;
1494 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
1495 pf->tx_sch_mode != HNS3_FLAG_VNET_BASE_SCH_MODE)
1498 ret = hns3_dcb_schd_setup_hw(hw);
1500 hns3_err(hw, "dcb schdule configure failed! ret = %d", ret);
1504 if (hw->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
1505 dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
1506 if (dcb_rx_conf->nb_tcs == 0)
1507 hw->dcb_info.pfc_en = 1; /* tc0 only */
1509 hw->dcb_info.pfc_en =
1510 RTE_LEN2MASK((uint8_t)dcb_rx_conf->nb_tcs, uint8_t);
1512 hw->dcb_info.hw_pfc_map =
1513 hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en);
1515 ret = hns3_buffer_alloc(hw);
1519 hw->current_fc_status = HNS3_FC_STATUS_PFC;
1520 hw->current_mode = HNS3_FC_FULL;
1521 ret = hns3_dcb_pause_setup_hw(hw);
1523 hns3_err(hw, "setup pfc failed! ret = %d", ret);
1524 goto pfc_setup_fail;
1528 * Although dcb_capability_en is lack of ETH_DCB_PFC_SUPPORT
1529 * flag, the DCB information is configured, such as tc numbers.
1530 * Therefore, refreshing the allocation of packet buffer is
1533 ret = hns3_buffer_alloc(hw);
1541 hw->current_mode = current_mode;
1542 hw->current_fc_status = fc_status;
1543 hw->dcb_info.hw_pfc_map = hw_pfc_map;
1544 status = hns3_buffer_alloc(hw);
1546 hns3_err(hw, "recover packet buffer fail! status = %d", status);
1552 * hns3_dcb_configure - setup dcb related config
1553 * @hns: pointer to hns3 adapter
1554 * Returns 0 on success, negative value on failure.
1557 hns3_dcb_configure(struct hns3_adapter *hns)
1559 struct hns3_hw *hw = &hns->hw;
1560 bool map_changed = false;
1564 hns3_dcb_cfg_validate(hns, &num_tc, &map_changed);
1565 if (map_changed || rte_atomic16_read(&hw->reset.resetting)) {
1566 ret = hns3_dcb_info_update(hns, num_tc);
1568 hns3_err(hw, "dcb info update failed: %d", ret);
1572 ret = hns3_dcb_hw_configure(hns);
1574 hns3_err(hw, "dcb sw configure failed: %d", ret);
/*
 * hns3_dcb_init_hw - program DCB scheduling and pause settings to hardware
 * @hw: pointer to struct hns3_hw
 * Returns 0 on success, negative errno on failure.
 */
static int
hns3_dcb_init_hw(struct hns3_hw *hw)
{
	int ret;

	ret = hns3_dcb_schd_setup_hw(hw);
	if (ret) {
		hns3_err(hw, "dcb schedule setup failed: %d", ret);
		return ret;
	}

	ret = hns3_dcb_pause_setup_hw(hw);
	if (ret)
		hns3_err(hw, "PAUSE setup failed: %d", ret);

	return ret;
}
1601 hns3_dcb_init(struct hns3_hw *hw)
1603 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1604 struct hns3_pf *pf = &hns->pf;
1605 uint16_t default_tqp_num;
1608 PMD_INIT_FUNC_TRACE();
1611 * According to the 'adapter_state' identifier, the following branch
1612 * is only executed to initialize default configurations of dcb during
1613 * the initializing driver process. Due to driver saving dcb-related
1614 * information before reset triggered, the reinit dev stage of the
1615 * reset process can not access to the branch, or those information
1618 if (hw->adapter_state == HNS3_NIC_UNINITIALIZED) {
1619 hw->requested_mode = HNS3_FC_NONE;
1620 hw->current_mode = hw->requested_mode;
1621 pf->pause_time = HNS3_DEFAULT_PAUSE_TRANS_TIME;
1622 hw->current_fc_status = HNS3_FC_STATUS_NONE;
1624 ret = hns3_dcb_info_init(hw);
1626 hns3_err(hw, "dcb info init failed, ret = %d.", ret);
1631 * The number of queues configured by default cannot exceed
1632 * the maximum number of queues for a single TC.
1634 default_tqp_num = RTE_MIN(hw->rss_size_max,
1635 hw->tqps_num / hw->dcb_info.num_tc);
1636 ret = hns3_dcb_update_tc_queue_mapping(hw, default_tqp_num,
1640 "update tc queue mapping failed, ret = %d.",
1647 * DCB hardware will be configured by following the function during
1648 * the initializing driver process and the reset process. However,
1649 * driver will restore directly configurations of dcb hardware based
1650 * on dcb-related information soft maintained when driver
1651 * initialization has finished and reset is coming.
1653 ret = hns3_dcb_init_hw(hw);
1655 hns3_err(hw, "dcb init hardware failed, ret = %d.", ret);
1663 hns3_update_queue_map_configure(struct hns3_adapter *hns)
1665 struct hns3_hw *hw = &hns->hw;
1666 uint16_t nb_rx_q = hw->data->nb_rx_queues;
1667 uint16_t nb_tx_q = hw->data->nb_tx_queues;
1670 ret = hns3_dcb_update_tc_queue_mapping(hw, nb_rx_q, nb_tx_q);
1672 hns3_err(hw, "failed to update tc queue mapping, ret = %d.",
1676 ret = hns3_q_to_qs_map(hw);
1678 hns3_err(hw, "failed to map nq to qs, ret = %d.", ret);
1684 hns3_dcb_cfg_update(struct hns3_adapter *hns)
1686 struct hns3_hw *hw = &hns->hw;
1687 enum rte_eth_rx_mq_mode mq_mode = hw->data->dev_conf.rxmode.mq_mode;
1690 if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) {
1691 ret = hns3_dcb_configure(hns);
1693 hns3_err(hw, "Failed to config dcb: %d", ret);
1696 * Update queue map without PFC configuration,
1697 * due to queues reconfigured by user.
1699 ret = hns3_update_queue_map_configure(hns);
1702 "Failed to update queue mapping configure: %d",
1710 * hns3_dcb_pfc_enable - Enable priority flow control
1711 * @dev: pointer to ethernet device
1713 * Configures the pfc settings for one porority.
1716 hns3_dcb_pfc_enable(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
1718 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1719 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1720 enum hns3_fc_status fc_status = hw->current_fc_status;
1721 enum hns3_fc_mode current_mode = hw->current_mode;
1722 uint8_t hw_pfc_map = hw->dcb_info.hw_pfc_map;
1723 uint8_t pfc_en = hw->dcb_info.pfc_en;
1724 uint8_t priority = pfc_conf->priority;
1725 uint16_t pause_time = pf->pause_time;
1728 pf->pause_time = pfc_conf->fc.pause_time;
1729 hw->current_mode = hw->requested_mode;
1730 hw->current_fc_status = HNS3_FC_STATUS_PFC;
1731 hw->dcb_info.pfc_en |= BIT(priority);
1732 hw->dcb_info.hw_pfc_map =
1733 hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en);
1734 ret = hns3_buffer_alloc(hw);
1736 goto pfc_setup_fail;
1739 * The flow control mode of all UPs will be changed based on
1740 * current_mode coming from user.
1742 ret = hns3_dcb_pause_setup_hw(hw);
1744 hns3_err(hw, "enable pfc failed! ret = %d", ret);
1745 goto pfc_setup_fail;
1751 hw->current_mode = current_mode;
1752 hw->current_fc_status = fc_status;
1753 pf->pause_time = pause_time;
1754 hw->dcb_info.pfc_en = pfc_en;
1755 hw->dcb_info.hw_pfc_map = hw_pfc_map;
1756 status = hns3_buffer_alloc(hw);
1758 hns3_err(hw, "recover packet buffer fail: %d", status);
1764 * hns3_fc_enable - Enable MAC pause
1765 * @dev: pointer to ethernet device
1767 * Configures the MAC pause settings.
1770 hns3_fc_enable(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1772 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1773 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1774 enum hns3_fc_status fc_status = hw->current_fc_status;
1775 enum hns3_fc_mode current_mode = hw->current_mode;
1776 uint16_t pause_time = pf->pause_time;
1779 pf->pause_time = fc_conf->pause_time;
1780 hw->current_mode = hw->requested_mode;
1783 * In fact, current_fc_status is HNS3_FC_STATUS_NONE when mode
1784 * of flow control is configured to be HNS3_FC_NONE.
1786 if (hw->current_mode == HNS3_FC_NONE)
1787 hw->current_fc_status = HNS3_FC_STATUS_NONE;
1789 hw->current_fc_status = HNS3_FC_STATUS_MAC_PAUSE;
1791 ret = hns3_dcb_pause_setup_hw(hw);
1793 hns3_err(hw, "enable MAC Pause failed! ret = %d", ret);
1800 hw->current_mode = current_mode;
1801 hw->current_fc_status = fc_status;
1802 pf->pause_time = pause_time;