1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2021 HiSilicon Limited.
6 #include <rte_ethdev.h>
9 #include "hns3_ethdev.h"
12 #define HNS3_SHAPER_BS_U_DEF 5
13 #define HNS3_SHAPER_BS_S_DEF 20
14 #define BW_MAX_PERCENT 100
17 * hns3_shaper_para_calc: calculate ir parameter for the shaper
18 * @ir: Rate to be config, its unit is Mbps
19 * @shaper_level: the shaper level. eg: port, pg, priority, queueset
20 * @shaper_para: shaper parameter of IR shaper
24 * IR_b * (2 ^ IR_u) * 8
25 * IR(Mbps) = ------------------------- * CLOCK(1000Mbps)
28 * @return: 0: calculation successful, negative: fail
/*
 * Convert a rate in Mbps into the hardware IR shaper fields (ir_b/ir_u/ir_s)
 * for the given shaper level; the result is written into *shaper_para.
 */
31 hns3_shaper_para_calc(struct hns3_hw *hw, uint32_t ir, uint8_t shaper_level,
32 struct hns3_shaper_parameter *shaper_para)
34 #define SHAPER_DEFAULT_IR_B 126
35 #define DIVISOR_CLK (1000 * 8)
36 #define DIVISOR_IR_B_126 (126 * DIVISOR_CLK)
/* Per-level tick divisors indexed by shaper_level. */
38 const uint16_t tick_array[HNS3_SHAPER_LVL_CNT] = {
39 6 * 256, /* Priority level */
40 6 * 32, /* Priority group level */
41 6 * 8, /* Port level */
42 6 * 256 /* Qset level */
44 uint8_t ir_u_calc = 0;
45 uint8_t ir_s_calc = 0;
/* Validate the shaper level and the requested rate before calculating. */
51 if (shaper_level >= HNS3_SHAPER_LVL_CNT) {
53 "shaper_level(%u) is greater than HNS3_SHAPER_LVL_CNT(%d)",
54 shaper_level, HNS3_SHAPER_LVL_CNT);
58 if (ir > hw->max_tm_rate) {
59 hns3_err(hw, "rate(%u) exceeds the max rate(%u) driver "
60 "supported.", ir, hw->max_tm_rate);
64 tick = tick_array[shaper_level];
67 * Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
68 * the formula is changed to:
70 * ir_calc = ---------------- * 1000
/* Division by tick rounded up to the nearest integer. */
73 ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;
76 shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
77 } else if (ir_calc > ir) {
78 /* Increasing the denominator to select ir_s value */
79 while (ir_calc >= ir && ir) {
81 ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
/* Recompute ir_b with round-to-nearest against DIVISOR_CLK. */
84 shaper_para->ir_b = (ir * tick * (1 << ir_s_calc) +
85 (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
88 * Increasing the numerator to select ir_u value. ir_u_calc will
89 * get maximum value when ir_calc is minimum and ir is maximum.
90 * ir_calc gets minimum value when tick is the maximum value.
91 * At the same time, value of ir_u_calc can only be increased up
92 * to eight after the while loop if the value of ir is equal
98 numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
99 ir_calc = (numerator + (tick >> 1)) / tick;
100 } while (ir_calc < ir);
103 shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
108 * The maximum value of ir_u_calc in this branch is
109 * seven in all cases. Thus, value of denominator can
112 denominator = DIVISOR_CLK * (1 << ir_u_calc);
114 (ir * tick + (denominator >> 1)) / denominator;
/* Publish the selected scaling exponents along with ir_b above. */
118 shaper_para->ir_u = ir_u_calc;
119 shaper_para->ir_s = ir_s_calc;
/*
 * Pack the TC mapped to user priority 'pri_id' into the proper 4-bit
 * nibble of the pri[] array that mirrors the priority-to-TC register.
 */
125 hns3_fill_pri_array(struct hns3_hw *hw, uint8_t *pri, uint8_t pri_id)
127 #define HNS3_HALF_BYTE_BIT_OFFSET 4
128 uint8_t tc = hw->dcb_info.prio_tc[pri_id];
/* A priority may only map to a currently enabled TC. */
130 if (tc >= hw->dcb_info.num_tc)
134 * The register for priority has four bytes, the first bytes includes
135 * priority0 and priority1, the higher 4bit stands for priority1
136 * while the lower 4bit stands for priority0, as below:
137 * first byte: | pri_1 | pri_0 |
138 * second byte: | pri_3 | pri_2 |
139 * third byte: | pri_5 | pri_4 |
140 * fourth byte: | pri_7 | pri_6 |
142 pri[pri_id >> 1] |= tc << ((pri_id & 1) * HNS3_HALF_BYTE_BIT_OFFSET);
/*
 * Build the user-priority to TC mapping for all priorities and send it
 * to firmware in a single PRI_TO_TC_MAPPING command.
 */
148 hns3_up_to_tc_map(struct hns3_hw *hw)
150 struct hns3_cmd_desc desc;
/* The command payload is interpreted as an array of packed nibbles. */
151 uint8_t *pri = (uint8_t *)desc.data;
155 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PRI_TO_TC_MAPPING, false);
157 for (pri_id = 0; pri_id < HNS3_MAX_USER_PRIO; pri_id++) {
158 ret = hns3_fill_pri_array(hw, pri, pri_id);
163 return hns3_cmd_send(hw, &desc, 1);
/*
 * Send one PG-to-priority link command: attach the priorities in
 * 'pri_bit_map' to priority group 'pg_id'.
 */
167 hns3_pg_to_pri_map_cfg(struct hns3_hw *hw, uint8_t pg_id, uint8_t pri_bit_map)
169 struct hns3_pg_to_pri_link_cmd *map;
170 struct hns3_cmd_desc desc;
172 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_TO_PRI_LINK, false);
174 map = (struct hns3_pg_to_pri_link_cmd *)desc.data;
177 map->pri_bit_map = pri_bit_map;
179 return hns3_cmd_send(hw, &desc, 1);
/*
 * Configure PG-to-priority mapping for every priority group.
 * Only applies in TC-based scheduling mode.
 */
183 hns3_pg_to_pri_map(struct hns3_hw *hw)
185 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
186 struct hns3_pf *pf = &hns->pf;
187 struct hns3_pg_info *pg_info;
190 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
193 for (i = 0; i < hw->dcb_info.num_pg; i++) {
194 /* Cfg pg to priority mapping */
195 pg_info = &hw->dcb_info.pg_info[i];
196 ret = hns3_pg_to_pri_map_cfg(hw, i, pg_info->tc_bit_map);
/*
 * Link queue set 'qs_id' to priority 'pri' via the QS_TO_PRI_LINK
 * firmware command, marking the link as valid.
 */
205 hns3_qs_to_pri_map_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t pri)
207 struct hns3_qs_to_pri_link_cmd *map;
208 struct hns3_cmd_desc desc;
210 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_TO_PRI_LINK, false);
212 map = (struct hns3_qs_to_pri_link_cmd *)desc.data;
/* Command fields are little-endian on the wire. */
214 map->qs_id = rte_cpu_to_le_16(qs_id);
216 map->link_vld = HNS3_DCB_QS_PRI_LINK_VLD_MSK;
218 return hns3_cmd_send(hw, &desc, 1);
/* Set the DWRR weight of queue set 'qs_id' to 'dwrr' via firmware command. */
222 hns3_dcb_qs_weight_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t dwrr)
224 struct hns3_qs_weight_cmd *weight;
225 struct hns3_cmd_desc desc;
227 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_WEIGHT, false);
229 weight = (struct hns3_qs_weight_cmd *)desc.data;
231 weight->qs_id = rte_cpu_to_le_16(qs_id);
234 return hns3_cmd_send(hw, &desc, 1);
/*
 * Program the ETS per-TC DWRR weights in one command: enabled TCs use
 * their PG's tc_dwrr value, disabled TCs fall back to a default weight.
 */
238 hns3_dcb_ets_tc_dwrr_cfg(struct hns3_hw *hw)
240 #define DEFAULT_TC_WEIGHT 1
241 #define DEFAULT_TC_OFFSET 14
242 struct hns3_ets_tc_weight_cmd *ets_weight;
243 struct hns3_cmd_desc desc;
246 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_ETS_TC_WEIGHT, false);
247 ets_weight = (struct hns3_ets_tc_weight_cmd *)desc.data;
249 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
250 struct hns3_pg_info *pg_info;
/* Default first; overwritten below if the TC is enabled in hw_tc_map. */
252 ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;
254 if (!(hw->hw_tc_map & BIT(i)))
257 pg_info = &hw->dcb_info.pg_info[hw->dcb_info.tc_info[i].pgid];
258 ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
261 ets_weight->weight_offset = DEFAULT_TC_OFFSET;
263 return hns3_cmd_send(hw, &desc, 1);
/* Set the DWRR weight of priority 'pri_id' via firmware command. */
267 hns3_dcb_pri_weight_cfg(struct hns3_hw *hw, uint8_t pri_id, uint8_t dwrr)
269 struct hns3_priority_weight_cmd *weight;
270 struct hns3_cmd_desc desc;
272 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PRI_WEIGHT, false);
274 weight = (struct hns3_priority_weight_cmd *)desc.data;
276 weight->pri_id = pri_id;
279 return hns3_cmd_send(hw, &desc, 1);
/* Set the DWRR weight of priority group 'pg_id' via firmware command. */
283 hns3_dcb_pg_weight_cfg(struct hns3_hw *hw, uint8_t pg_id, uint8_t dwrr)
285 struct hns3_pg_weight_cmd *weight;
286 struct hns3_cmd_desc desc;
288 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_WEIGHT, false);
290 weight = (struct hns3_pg_weight_cmd *)desc.data;
292 weight->pg_id = pg_id;
295 return hns3_cmd_send(hw, &desc, 1);
/*
 * Configure the scheduling mode (DWRR or the alternative, per the
 * stored pg_sch_mode) of priority group 'pg_id'.
 */
298 hns3_dcb_pg_schd_mode_cfg(struct hns3_hw *hw, uint8_t pg_id)
300 struct hns3_cmd_desc desc;
302 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_SCH_MODE_CFG, false);
304 if (hw->dcb_info.pg_info[pg_id].pg_sch_mode == HNS3_SCH_MODE_DWRR)
305 desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
309 desc.data[0] = rte_cpu_to_le_32(pg_id);
311 return hns3_cmd_send(hw, &desc, 1);
/*
 * Pack the five shaper fields (IR_B/IR_U/IR_S/BS_B/BS_S) into the
 * 32-bit register layout used by the shaping commands.
 */
315 hns3_dcb_get_shapping_para(uint8_t ir_b, uint8_t ir_u, uint8_t ir_s,
316 uint8_t bs_b, uint8_t bs_s)
318 uint32_t shapping_para = 0;
320 /* If ir_b is zero it means IR is 0Mbps, return zero of shapping_para */
322 return shapping_para;
324 hns3_dcb_set_field(shapping_para, IR_B, ir_b);
325 hns3_dcb_set_field(shapping_para, IR_U, ir_u);
326 hns3_dcb_set_field(shapping_para, IR_S, ir_s);
327 hns3_dcb_set_field(shapping_para, BS_B, bs_b);
328 hns3_dcb_set_field(shapping_para, BS_S, bs_s);
330 return shapping_para;
/*
 * Calculate the IR shaper parameters for 'speed' (Mbps) at port level
 * and program the port shaping register via firmware command.
 */
334 hns3_dcb_port_shaper_cfg(struct hns3_hw *hw, uint32_t speed)
336 struct hns3_port_shapping_cmd *shap_cfg_cmd;
337 struct hns3_shaper_parameter shaper_parameter;
338 uint32_t shapping_para;
339 uint32_t ir_u, ir_b, ir_s;
340 struct hns3_cmd_desc desc;
343 ret = hns3_shaper_para_calc(hw, speed,
344 HNS3_SHAPER_LVL_PORT, &shaper_parameter);
346 hns3_err(hw, "calculate shaper parameter failed: %d", ret);
350 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PORT_SHAPPING, false);
351 shap_cfg_cmd = (struct hns3_port_shapping_cmd *)desc.data;
353 ir_b = shaper_parameter.ir_b;
354 ir_u = shaper_parameter.ir_u;
355 ir_s = shaper_parameter.ir_s;
/* Bucket size fields use the driver-wide defaults. */
356 shapping_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
357 HNS3_SHAPER_BS_U_DEF,
358 HNS3_SHAPER_BS_S_DEF);
360 shap_cfg_cmd->port_shapping_para = rte_cpu_to_le_32(shapping_para);
363 * Configure the port_rate and set bit HNS3_TM_RATE_VLD_B of flag
364 * field in hns3_port_shapping_cmd to require firmware to recalculate
365 * shapping parameters. And whether the parameters are recalculated
366 * depends on the firmware version. But driver still needs to
367 * calculate it and configure to firmware for better compatibility.
369 shap_cfg_cmd->port_rate = rte_cpu_to_le_32(speed);
370 hns3_set_bit(shap_cfg_cmd->flag, HNS3_TM_RATE_VLD_B, 1);
372 return hns3_cmd_send(hw, &desc, 1);
/* Thin wrapper: reconfigure the port shaper for 'speed' and log on failure. */
376 hns3_port_shaper_update(struct hns3_hw *hw, uint32_t speed)
380 ret = hns3_dcb_port_shaper_cfg(hw, speed);
382 hns3_err(hw, "configure port shappering failed: ret = %d", ret);
/*
 * Program one PG shaping bucket (CIR when bucket==0, PIR otherwise)
 * for 'pg_id' with the pre-packed 'shapping_para' and raw 'rate'.
 */
388 hns3_dcb_pg_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,
389 uint8_t pg_id, uint32_t shapping_para, uint32_t rate)
391 struct hns3_pg_shapping_cmd *shap_cfg_cmd;
392 enum hns3_opcode_type opcode;
393 struct hns3_cmd_desc desc;
/* Non-zero bucket selects the P (peak) opcode, zero the C (committed) one. */
395 opcode = bucket ? HNS3_OPC_TM_PG_P_SHAPPING :
396 HNS3_OPC_TM_PG_C_SHAPPING;
397 hns3_cmd_setup_basic_desc(&desc, opcode, false);
399 shap_cfg_cmd = (struct hns3_pg_shapping_cmd *)desc.data;
401 shap_cfg_cmd->pg_id = pg_id;
403 shap_cfg_cmd->pg_shapping_para = rte_cpu_to_le_32(shapping_para);
406 * Configure the pg_rate and set bit HNS3_TM_RATE_VLD_B of flag field in
407 * hns3_pg_shapping_cmd to require firmware to recalculate shapping
408 * parameters. And whether parameters are recalculated depends on
409 * the firmware version. But driver still needs to calculate it and
410 * configure to firmware for better compatibility.
412 shap_cfg_cmd->pg_rate = rte_cpu_to_le_32(rate);
413 hns3_set_bit(shap_cfg_cmd->flag, HNS3_TM_RATE_VLD_B, 1);
415 return hns3_cmd_send(hw, &desc, 1);
/*
 * Configure both shaping buckets of priority group 'pg_id' for 'rate':
 * CIR bucket gets zeroed IR fields, PIR bucket gets the calculated IR.
 */
419 hns3_pg_shaper_rate_cfg(struct hns3_hw *hw, uint8_t pg_id, uint32_t rate)
421 struct hns3_shaper_parameter shaper_parameter;
422 uint32_t ir_u, ir_b, ir_s;
423 uint32_t shaper_para;
426 /* Calc shaper para */
427 ret = hns3_shaper_para_calc(hw, rate, HNS3_SHAPER_LVL_PG,
430 hns3_err(hw, "calculate shaper parameter fail, ret = %d.",
/* CIR bucket: IR fields zero, only default bucket sizes. */
435 shaper_para = hns3_dcb_get_shapping_para(0, 0, 0,
436 HNS3_SHAPER_BS_U_DEF,
437 HNS3_SHAPER_BS_S_DEF);
439 ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, pg_id,
442 hns3_err(hw, "config PG CIR shaper parameter fail, ret = %d.",
447 ir_b = shaper_parameter.ir_b;
448 ir_u = shaper_parameter.ir_u;
449 ir_s = shaper_parameter.ir_s;
/* PIR bucket: carries the calculated IR parameters. */
450 shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
451 HNS3_SHAPER_BS_U_DEF,
452 HNS3_SHAPER_BS_S_DEF);
454 ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, pg_id,
457 hns3_err(hw, "config PG PIR shaper parameter fail, ret = %d.",
/*
 * Apply each PG's bw_limit as its shaper rate.
 * Only applies in TC-based scheduling mode.
 */
466 hns3_dcb_pg_shaper_cfg(struct hns3_hw *hw)
468 struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
474 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
478 for (i = 0; i < hw->dcb_info.num_pg; i++) {
479 rate = hw->dcb_info.pg_info[i].bw_limit;
480 ret = hns3_pg_shaper_rate_cfg(hw, i, rate);
/* Configure the scheduling mode of queue set 'qs_id' (DWRR or not). */
489 hns3_dcb_qs_schd_mode_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t mode)
491 struct hns3_cmd_desc desc;
493 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_SCH_MODE_CFG, false);
495 if (mode == HNS3_SCH_MODE_DWRR)
496 desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
500 desc.data[0] = rte_cpu_to_le_32(qs_id);
502 return hns3_cmd_send(hw, &desc, 1);
/* Configure the scheduling mode of priority 'pri_id' from tc_sch_mode. */
506 hns3_dcb_pri_schd_mode_cfg(struct hns3_hw *hw, uint8_t pri_id)
508 struct hns3_cmd_desc desc;
510 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PRI_SCH_MODE_CFG, false);
512 if (hw->dcb_info.tc_info[pri_id].tc_sch_mode == HNS3_SCH_MODE_DWRR)
513 desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
517 desc.data[0] = rte_cpu_to_le_32(pri_id);
519 return hns3_cmd_send(hw, &desc, 1);
/*
 * Program one priority shaping bucket (CIR when bucket==0, PIR
 * otherwise) for 'pri_id' with the packed 'shapping_para' and raw rate.
 */
523 hns3_dcb_pri_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,
524 uint8_t pri_id, uint32_t shapping_para, uint32_t rate)
526 struct hns3_pri_shapping_cmd *shap_cfg_cmd;
527 enum hns3_opcode_type opcode;
528 struct hns3_cmd_desc desc;
/* Non-zero bucket selects the P (peak) opcode, zero the C (committed) one. */
530 opcode = bucket ? HNS3_OPC_TM_PRI_P_SHAPPING :
531 HNS3_OPC_TM_PRI_C_SHAPPING;
533 hns3_cmd_setup_basic_desc(&desc, opcode, false);
535 shap_cfg_cmd = (struct hns3_pri_shapping_cmd *)desc.data;
537 shap_cfg_cmd->pri_id = pri_id;
539 shap_cfg_cmd->pri_shapping_para = rte_cpu_to_le_32(shapping_para);
542 * Configure the pri_rate and set bit HNS3_TM_RATE_VLD_B of flag
543 * field in hns3_pri_shapping_cmd to require firmware to recalculate
544 * shapping parameters. And whether the parameters are recalculated
545 * depends on the firmware version. But driver still needs to
546 * calculate it and configure to firmware for better compatibility.
548 shap_cfg_cmd->pri_rate = rte_cpu_to_le_32(rate);
549 hns3_set_bit(shap_cfg_cmd->flag, HNS3_TM_RATE_VLD_B, 1);
551 return hns3_cmd_send(hw, &desc, 1);
/*
 * Configure both shaping buckets of TC/priority 'tc_no' for 'rate':
 * CIR bucket with zeroed IR fields, PIR bucket with the calculated IR.
 */
555 hns3_pri_shaper_rate_cfg(struct hns3_hw *hw, uint8_t tc_no, uint32_t rate)
557 struct hns3_shaper_parameter shaper_parameter;
558 uint32_t ir_u, ir_b, ir_s;
559 uint32_t shaper_para;
562 ret = hns3_shaper_para_calc(hw, rate, HNS3_SHAPER_LVL_PRI,
565 hns3_err(hw, "calculate shaper parameter failed: %d.",
/* CIR bucket: IR fields zero, only default bucket sizes. */
570 shaper_para = hns3_dcb_get_shapping_para(0, 0, 0,
571 HNS3_SHAPER_BS_U_DEF,
572 HNS3_SHAPER_BS_S_DEF);
574 ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, tc_no,
578 "config priority CIR shaper parameter failed: %d.",
583 ir_b = shaper_parameter.ir_b;
584 ir_u = shaper_parameter.ir_u;
585 ir_s = shaper_parameter.ir_s;
/* PIR bucket: carries the calculated IR parameters. */
586 shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
587 HNS3_SHAPER_BS_U_DEF,
588 HNS3_SHAPER_BS_S_DEF);
590 ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, tc_no,
594 "config priority PIR shaper parameter failed: %d.",
/*
 * Apply each enabled TC's bw_limit as its priority-level shaper rate.
 * Only applies in TC-based scheduling mode.
 */
603 hns3_dcb_pri_shaper_cfg(struct hns3_hw *hw)
605 struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
610 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
613 for (i = 0; i < hw->dcb_info.num_tc; i++) {
614 rate = hw->dcb_info.tc_info[i].bw_limit;
615 ret = hns3_pri_shaper_rate_cfg(hw, i, rate);
617 hns3_err(hw, "config pri shaper failed: %d.", ret);
/*
 * Derive per-TC Rx queue count from 'nb_rx_q', validate it against the
 * hardware RSS limit and the requirement that nb_rx_q be a multiple of
 * num_tc, then refresh the software RSS indirection table (unless a
 * reset is in progress).
 */
626 hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q)
628 struct hns3_rss_conf *rss_cfg = &hw->rss_info;
629 uint16_t rx_qnum_per_tc;
630 uint16_t used_rx_queues;
633 rx_qnum_per_tc = nb_rx_q / hw->num_tc;
634 if (rx_qnum_per_tc > hw->rss_size_max) {
635 hns3_err(hw, "rx queue number of per tc (%u) is greater than "
636 "value (%u) hardware supported.",
637 rx_qnum_per_tc, hw->rss_size_max);
641 used_rx_queues = hw->num_tc * rx_qnum_per_tc;
642 if (used_rx_queues != nb_rx_q) {
643 hns3_err(hw, "rx queue number (%u) configured must be an "
644 "integral multiple of valid tc number (%u).",
645 nb_rx_q, hw->num_tc);
648 hw->alloc_rss_size = rx_qnum_per_tc;
649 hw->used_rx_queues = used_rx_queues;
652 * When rss size is changed, we need to update rss redirection table
653 * maintained by driver. Besides, during the entire reset process, we
654 * need to ensure that the rss table information are not overwritten
655 * and configured directly to the hardware in the RESET_STAGE_RESTORE
656 * stage of the reset process.
658 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
/* Spread table entries round-robin across the per-TC queue range. */
659 for (i = 0; i < hw->rss_ind_tbl_size; i++)
660 rss_cfg->rss_indirection_tbl[i] =
661 i % hw->alloc_rss_size;
/*
 * Derive per-TC Tx queue count from 'nb_tx_q' (must be a multiple of
 * num_tc) and fill hw->tc_queue[] with each TC's queue offset/count;
 * disabled TCs are cleared to offset 0, count 0.
 */
668 hns3_tc_queue_mapping_cfg(struct hns3_hw *hw, uint16_t nb_tx_q)
670 struct hns3_tc_queue_info *tc_queue;
671 uint16_t used_tx_queues;
672 uint16_t tx_qnum_per_tc;
675 tx_qnum_per_tc = nb_tx_q / hw->num_tc;
676 used_tx_queues = hw->num_tc * tx_qnum_per_tc;
677 if (used_tx_queues != nb_tx_q) {
678 hns3_err(hw, "tx queue number (%u) configured must be an "
679 "integral multiple of valid tc number (%u).",
680 nb_tx_q, hw->num_tc);
684 hw->used_tx_queues = used_tx_queues;
685 hw->tx_qnum_per_tc = tx_qnum_per_tc;
686 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
687 tc_queue = &hw->tc_queue[i];
/* A TC is active only if it is both mapped in hw_tc_map and below num_tc. */
688 if (hw->hw_tc_map & BIT(i) && i < hw->num_tc) {
689 tc_queue->enable = true;
690 tc_queue->tqp_offset = i * hw->tx_qnum_per_tc;
691 tc_queue->tqp_count = hw->tx_qnum_per_tc;
694 /* Set to default queue if TC is disable */
695 tc_queue->enable = false;
696 tc_queue->tqp_offset = 0;
697 tc_queue->tqp_count = 0;
/*
 * Return the TC whose queue range [tqp_offset, tqp_offset + tqp_count)
 * contains Tx queue 'txq_no'; falls back to TC0 when no TC matches.
 */
706 hns3_txq_mapped_tc_get(struct hns3_hw *hw, uint16_t txq_no)
708 struct hns3_tc_queue_info *tc_queue;
711 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
712 tc_queue = &hw->tc_queue[i];
713 if (!tc_queue->enable)
716 if (txq_no >= tc_queue->tqp_offset &&
717 txq_no < tc_queue->tqp_offset + tc_queue->tqp_count)
721 /* return TC0 in default case */
/*
 * Validate that there is at least one Rx and one Tx queue per TC, then
 * configure the Rx (RSS size) and Tx (TC queue) mappings.
 */
726 hns3_queue_to_tc_mapping(struct hns3_hw *hw, uint16_t nb_rx_q, uint16_t nb_tx_q)
730 if (nb_rx_q < hw->num_tc) {
731 hns3_err(hw, "number of Rx queues(%u) is less than number of TC(%u).",
732 nb_rx_q, hw->num_tc);
736 if (nb_tx_q < hw->num_tc) {
737 hns3_err(hw, "number of Tx queues(%u) is less than number of TC(%u).",
738 nb_tx_q, hw->num_tc);
742 ret = hns3_set_rss_size(hw, nb_rx_q);
746 return hns3_tc_queue_mapping_cfg(hw, nb_tx_q);
/*
 * Refresh hw->num_tc from dcb_info, redo the queue-to-TC mapping and
 * mirror the priority-to-TC table into the PF structure.
 */
750 hns3_dcb_update_tc_queue_mapping(struct hns3_hw *hw, uint16_t nb_rx_q,
753 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
754 struct hns3_pf *pf = &hns->pf;
757 hw->num_tc = hw->dcb_info.num_tc;
758 ret = hns3_queue_to_tc_mapping(hw, nb_rx_q, nb_tx_q);
763 memcpy(pf->prio_tc, hw->dcb_info.prio_tc, HNS3_MAX_USER_PRIO);
/*
 * Initialize the software DCB database to its default state: PG0 owns
 * all bandwidth, every user priority maps to TC0, and each TC inherits
 * PG0's bandwidth limit with DWRR scheduling.
 */
769 hns3_dcb_info_init(struct hns3_hw *hw)
771 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
772 struct hns3_pf *pf = &hns->pf;
/* Non-TC scheduling modes only support a single priority group. */
775 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
776 hw->dcb_info.num_pg != 1)
779 /* Initializing PG information */
780 memset(hw->dcb_info.pg_info, 0,
781 sizeof(struct hns3_pg_info) * HNS3_PG_NUM);
782 for (i = 0; i < hw->dcb_info.num_pg; i++) {
/* PG0 gets 100% DWRR weight, the rest get 0. */
783 hw->dcb_info.pg_dwrr[i] = i ? 0 : BW_MAX_PERCENT;
784 hw->dcb_info.pg_info[i].pg_id = i;
785 hw->dcb_info.pg_info[i].pg_sch_mode = HNS3_SCH_MODE_DWRR;
786 hw->dcb_info.pg_info[i].bw_limit = hw->max_tm_rate;
791 hw->dcb_info.pg_info[i].tc_bit_map = hw->hw_tc_map;
792 for (k = 0; k < hw->dcb_info.num_tc; k++)
793 hw->dcb_info.pg_info[i].tc_dwrr[k] = BW_MAX_PERCENT;
796 /* All UPs mapping to TC0 */
797 for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
798 hw->dcb_info.prio_tc[i] = 0;
800 /* Initializing tc information */
801 memset(hw->dcb_info.tc_info, 0,
802 sizeof(struct hns3_tc_info) * HNS3_MAX_TC_NUM);
803 for (i = 0; i < hw->dcb_info.num_tc; i++) {
804 hw->dcb_info.tc_info[i].tc_id = i;
805 hw->dcb_info.tc_info[i].tc_sch_mode = HNS3_SCH_MODE_DWRR;
806 hw->dcb_info.tc_info[i].pgid = 0;
807 hw->dcb_info.tc_info[i].bw_limit =
808 hw->dcb_info.pg_info[0].bw_limit;
/* Configure the level-2 (priority group) scheduling mode for every PG. */
815 hns3_dcb_lvl2_schd_mode_cfg(struct hns3_hw *hw)
817 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
818 struct hns3_pf *pf = &hns->pf;
821 /* Only being config on TC-Based scheduler mode */
822 if (pf->tx_sch_mode == HNS3_FLAG_VNET_BASE_SCH_MODE)
825 for (i = 0; i < hw->dcb_info.num_pg; i++) {
826 ret = hns3_dcb_pg_schd_mode_cfg(hw, i);
/*
 * Configure level-3 (priority) and level-4 (queue set) scheduling
 * modes for every enabled TC in TC-based scheduling mode.
 */
835 hns3_dcb_lvl34_schd_mode_cfg(struct hns3_hw *hw)
837 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
838 struct hns3_pf *pf = &hns->pf;
842 if (pf->tx_sch_mode == HNS3_FLAG_TC_BASE_SCH_MODE) {
843 for (i = 0; i < hw->dcb_info.num_tc; i++) {
844 ret = hns3_dcb_pri_schd_mode_cfg(hw, i);
848 ret = hns3_dcb_qs_schd_mode_cfg(hw, i,
/* Configure scheduling modes for all levels: PG first, then priority/QS. */
859 hns3_dcb_schd_mode_cfg(struct hns3_hw *hw)
863 ret = hns3_dcb_lvl2_schd_mode_cfg(hw);
865 hns3_err(hw, "config lvl2_schd_mode failed: %d", ret);
869 ret = hns3_dcb_lvl34_schd_mode_cfg(hw);
871 hns3_err(hw, "config lvl34_schd_mode failed: %d", ret);
/*
 * For each TC: program the priority DWRR weight from its PG's tc_dwrr
 * value and give the matching queue set full (100%) weight.
 */
877 hns3_dcb_pri_tc_base_dwrr_cfg(struct hns3_hw *hw)
879 struct hns3_pg_info *pg_info;
883 for (i = 0; i < hw->dcb_info.num_tc; i++) {
884 pg_info = &hw->dcb_info.pg_info[hw->dcb_info.tc_info[i].pgid];
885 dwrr = pg_info->tc_dwrr[i];
887 ret = hns3_dcb_pri_weight_cfg(hw, i, dwrr);
890 "fail to send priority weight cmd: %d, ret = %d",
895 ret = hns3_dcb_qs_weight_cfg(hw, i, BW_MAX_PERCENT);
897 hns3_err(hw, "fail to send qs_weight cmd: %d, ret = %d",
/*
 * Configure priority-level DWRR weights, then (on DCB-capable devices)
 * the ETS TC weights; an -EOPNOTSUPP from the ETS command is tolerated
 * and logged with the firmware version.
 */
907 hns3_dcb_pri_dwrr_cfg(struct hns3_hw *hw)
909 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
910 struct hns3_pf *pf = &hns->pf;
914 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
917 ret = hns3_dcb_pri_tc_base_dwrr_cfg(hw);
/* ETS TC weight command is only valid on devices with DCB support. */
921 if (!hns3_dev_get_support(hw, DCB))
924 ret = hns3_dcb_ets_tc_dwrr_cfg(hw);
925 if (ret == -EOPNOTSUPP) {
926 version = hw->fw_version;
928 "fw %lu.%lu.%lu.%lu doesn't support ets tc weight cmd",
929 hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
930 HNS3_FW_VERSION_BYTE3_S),
931 hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
932 HNS3_FW_VERSION_BYTE2_S),
933 hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
934 HNS3_FW_VERSION_BYTE1_S),
935 hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
936 HNS3_FW_VERSION_BYTE0_S));
/* Program each PG's DWRR weight from the stored pg_dwrr[] table. */
944 hns3_dcb_pg_dwrr_cfg(struct hns3_hw *hw)
946 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
947 struct hns3_pf *pf = &hns->pf;
951 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
955 for (i = 0; i < hw->dcb_info.num_pg; i++) {
957 ret = hns3_dcb_pg_weight_cfg(hw, i, hw->dcb_info.pg_dwrr[i]);
/* Configure DWRR weights at all levels: PG first, then priority/QS. */
966 hns3_dcb_dwrr_cfg(struct hns3_hw *hw)
970 ret = hns3_dcb_pg_dwrr_cfg(hw);
972 hns3_err(hw, "config pg_dwrr failed: %d", ret);
976 ret = hns3_dcb_pri_dwrr_cfg(hw);
978 hns3_err(hw, "config pri_dwrr failed: %d", ret);
/*
 * Configure shapers at all levels: port (at current link speed),
 * priority groups, then priorities.
 */
984 hns3_dcb_shaper_cfg(struct hns3_hw *hw)
988 ret = hns3_dcb_port_shaper_cfg(hw, hw->mac.link_speed);
990 hns3_err(hw, "config port shaper failed: %d", ret);
994 ret = hns3_dcb_pg_shaper_cfg(hw);
996 hns3_err(hw, "config pg shaper failed: %d", ret);
1000 return hns3_dcb_pri_shaper_cfg(hw);
/*
 * Link Nic queue 'q_id' to queue set 'qs_id'. The qs_id is re-encoded
 * into the split high/low register layout so qset ids above 1024 work
 * on hardware that only decodes 10 bits in the legacy position.
 */
1004 hns3_q_to_qs_map_cfg(struct hns3_hw *hw, uint16_t q_id, uint16_t qs_id)
1006 struct hns3_nq_to_qs_link_cmd *map;
1007 struct hns3_cmd_desc desc;
1008 uint16_t tmp_qs_id = 0;
1012 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_NQ_TO_QS_LINK, false);
1014 map = (struct hns3_nq_to_qs_link_cmd *)desc.data;
1016 map->nq_id = rte_cpu_to_le_16(q_id);
1019 * Network engine with revision_id 0x21 uses 0~9 bit of qs_id to
1020 * configure qset_id. So we need to convert qs_id to the follow
1021 * format to support qset_id > 1024.
1022 * qs_id: | 15 | 14 ~ 10 | 9 ~ 0 |
1025 * qset_id: | 15 ~ 11 | 10 | 9 ~ 0 |
1026 * | qs_id_h | vld | qs_id_l |
1028 qs_id_l = hns3_get_field(qs_id, HNS3_DCB_QS_ID_L_MSK,
1029 HNS3_DCB_QS_ID_L_S);
1030 qs_id_h = hns3_get_field(qs_id, HNS3_DCB_QS_ID_H_MSK,
1031 HNS3_DCB_QS_ID_H_S);
1032 hns3_set_field(tmp_qs_id, HNS3_DCB_QS_ID_L_MSK, HNS3_DCB_QS_ID_L_S,
1034 hns3_set_field(tmp_qs_id, HNS3_DCB_QS_ID_H_EXT_MSK,
1035 HNS3_DCB_QS_ID_H_EXT_S, qs_id_h);
/* OR in the valid bit alongside the re-packed qset id. */
1036 map->qset_id = rte_cpu_to_le_16(tmp_qs_id | HNS3_DCB_Q_QS_LINK_VLD_MSK);
1038 return hns3_cmd_send(hw, &desc, 1);
/* Map every Tx queue of every enabled TC to that TC's queue set. */
1042 hns3_q_to_qs_map(struct hns3_hw *hw)
1044 struct hns3_tc_queue_info *tc_queue;
1049 for (i = 0; i < hw->num_tc; i++) {
1050 tc_queue = &hw->tc_queue[i];
1051 for (j = 0; j < tc_queue->tqp_count; j++) {
1052 q_id = tc_queue->tqp_offset + j;
1053 ret = hns3_q_to_qs_map_cfg(hw, q_id, i);
/*
 * Establish the full queue hierarchy: queue set i -> priority i for
 * each TC, then Nic queues -> queue sets.
 */
1063 hns3_pri_q_qs_cfg(struct hns3_hw *hw)
1065 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1066 struct hns3_pf *pf = &hns->pf;
1070 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
1073 /* Cfg qs -> pri mapping */
1074 for (i = 0; i < hw->num_tc; i++) {
1075 ret = hns3_qs_to_pri_map_cfg(hw, i, i);
1077 hns3_err(hw, "qs_to_pri mapping fail: %d", ret);
1082 /* Cfg q -> qs mapping */
1083 ret = hns3_q_to_qs_map(hw);
1085 hns3_err(hw, "nq_to_qs mapping fail: %d", ret);
/* Configure all DCB mappings: UP->TC, PG->priority, then priority/queue/QS. */
1091 hns3_dcb_map_cfg(struct hns3_hw *hw)
1095 ret = hns3_up_to_tc_map(hw);
1097 hns3_err(hw, "up_to_tc mapping fail: %d", ret);
1101 ret = hns3_pg_to_pri_map(hw);
1103 hns3_err(hw, "pri_to_pg mapping fail: %d", ret);
1107 return hns3_pri_q_qs_cfg(hw);
/*
 * Full scheduler bring-up: mappings, shapers, DWRR weights, and
 * per-level scheduling modes, in that order.
 */
1111 hns3_dcb_schd_setup_hw(struct hns3_hw *hw)
1115 /* Cfg dcb mapping */
1116 ret = hns3_dcb_map_cfg(hw);
1120 /* Cfg dcb shaper */
1121 ret = hns3_dcb_shaper_cfg(hw);
1126 ret = hns3_dcb_dwrr_cfg(hw);
1130 /* Cfg schd mode for each level schd */
1131 return hns3_dcb_schd_mode_cfg(hw);
/*
 * Send the MAC pause parameters (source MAC address, transmit gap and
 * transmit time) to firmware.
 */
1135 hns3_pause_param_cfg(struct hns3_hw *hw, const uint8_t *addr,
1136 uint8_t pause_trans_gap, uint16_t pause_trans_time)
1138 struct hns3_cfg_pause_param_cmd *pause_param;
1139 struct hns3_cmd_desc desc;
1141 pause_param = (struct hns3_cfg_pause_param_cmd *)desc.data;
1143 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PARA, false);
/* The command carries the MAC address in two fields; fill both. */
1145 memcpy(pause_param->mac_addr, addr, RTE_ETHER_ADDR_LEN);
1146 memcpy(pause_param->mac_addr_extra, addr, RTE_ETHER_ADDR_LEN);
1147 pause_param->pause_trans_gap = pause_trans_gap;
1148 pause_param->pause_trans_time = rte_cpu_to_le_16(pause_trans_time);
1150 return hns3_cmd_send(hw, &desc, 1);
/*
 * Update only the MAC address used for pause frames: read the current
 * gap/time settings back from firmware, then rewrite them together
 * with the new address.
 */
1154 hns3_pause_addr_cfg(struct hns3_hw *hw, const uint8_t *mac_addr)
1156 struct hns3_cfg_pause_param_cmd *pause_param;
1157 struct hns3_cmd_desc desc;
1158 uint16_t trans_time;
1162 pause_param = (struct hns3_cfg_pause_param_cmd *)desc.data;
/* 'true' marks this as a read command. */
1164 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PARA, true);
1166 ret = hns3_cmd_send(hw, &desc, 1);
1170 trans_gap = pause_param->pause_trans_gap;
1171 trans_time = rte_le_to_cpu_16(pause_param->pause_trans_time);
1173 return hns3_pause_param_cfg(hw, mac_addr, trans_gap, trans_time);
/*
 * Derive a pause transmit gap from 'pause_time' (clamped so the gap
 * stays below pause_time/2) and program both into firmware.
 */
1177 hns3_pause_param_setup_hw(struct hns3_hw *hw, uint16_t pause_time)
1179 #define PAUSE_TIME_DIV_BY 2
1180 #define PAUSE_TIME_MIN_VALUE 0x4
1182 struct hns3_mac *mac = &hw->mac;
1183 uint8_t pause_trans_gap;
1186 * Pause transmit gap must be less than "pause_time / 2", otherwise
1187 * the behavior of MAC is undefined.
1189 if (pause_time > PAUSE_TIME_DIV_BY * HNS3_DEFAULT_PAUSE_TRANS_GAP)
1190 pause_trans_gap = HNS3_DEFAULT_PAUSE_TRANS_GAP;
1191 else if (pause_time >= PAUSE_TIME_MIN_VALUE &&
1192 pause_time <= PAUSE_TIME_DIV_BY * HNS3_DEFAULT_PAUSE_TRANS_GAP)
1193 pause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1;
/* Below the minimum: force pause_time up to 4 and warn. */
1195 hns3_warn(hw, "pause_time(%u) is adjusted to 4", pause_time);
1196 pause_time = PAUSE_TIME_MIN_VALUE;
1197 pause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1;
1200 return hns3_pause_param_cfg(hw, mac->mac_addr,
1201 pause_trans_gap, pause_time);
/* Enable/disable MAC-level pause independently for Tx and Rx directions. */
1205 hns3_mac_pause_en_cfg(struct hns3_hw *hw, bool tx, bool rx)
1207 struct hns3_cmd_desc desc;
1209 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PAUSE_EN, false);
1211 desc.data[0] = rte_cpu_to_le_32((tx ? HNS3_TX_MAC_PAUSE_EN_MSK : 0) |
1212 (rx ? HNS3_RX_MAC_PAUSE_EN_MSK : 0));
1214 return hns3_cmd_send(hw, &desc, 1);
/*
 * Enable/disable PFC per direction and select which priorities
 * participate via 'pfc_bitmap'.
 */
1218 hns3_pfc_pause_en_cfg(struct hns3_hw *hw, uint8_t pfc_bitmap, bool tx, bool rx)
1220 struct hns3_cmd_desc desc;
1221 struct hns3_pfc_en_cmd *pfc = (struct hns3_pfc_en_cmd *)desc.data;
1223 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PFC_PAUSE_EN, false);
1225 pfc->tx_rx_en_bitmap = (uint8_t)((tx ? HNS3_TX_MAC_PAUSE_EN_MSK : 0) |
1226 (rx ? HNS3_RX_MAC_PAUSE_EN_MSK : 0));
1228 pfc->pri_en_bitmap = pfc_bitmap;
1230 return hns3_cmd_send(hw, &desc, 1);
/*
 * Program one backpressure group for TC 'tc': 'bit_map' selects which
 * queue sets inside group 'grp_id' are backpressured by the TC.
 */
1234 hns3_qs_bp_cfg(struct hns3_hw *hw, uint8_t tc, uint8_t grp_id, uint32_t bit_map)
1236 struct hns3_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
1237 struct hns3_cmd_desc desc;
1239 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_BP_TO_QSET_MAPPING, false);
1241 bp_to_qs_map_cmd = (struct hns3_bp_to_qs_map_cmd *)desc.data;
1243 bp_to_qs_map_cmd->tc_id = tc;
1244 bp_to_qs_map_cmd->qs_group_id = grp_id;
1245 bp_to_qs_map_cmd->qs_bit_map = rte_cpu_to_le_32(bit_map);
1247 return hns3_cmd_send(hw, &desc, 1);
/*
 * Translate the requested flow-control mode into per-direction enable
 * flags written through *tx_en / *rx_en.
 */
1251 hns3_get_rx_tx_en_status(struct hns3_hw *hw, bool *tx_en, bool *rx_en)
1253 switch (hw->requested_fc_mode) {
1258 case HNS3_FC_RX_PAUSE:
1262 case HNS3_FC_TX_PAUSE:
/*
 * Apply MAC pause enables; directions come from the requested FC mode
 * only while MAC pause is the active flow-control status.
 */
1278 hns3_mac_pause_setup_hw(struct hns3_hw *hw)
1282 if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)
1283 hns3_get_rx_tx_en_status(hw, &tx_en, &rx_en);
1289 return hns3_mac_pause_en_cfg(hw, tx_en, rx_en);
/*
 * Apply PFC enables for the configured priority bitmap; directions
 * come from the requested FC mode only while PFC is active.
 */
1293 hns3_pfc_setup_hw(struct hns3_hw *hw)
1297 if (hw->current_fc_status == HNS3_FC_STATUS_PFC)
1298 hns3_get_rx_tx_en_status(hw, &tx_en, &rx_en);
1304 return hns3_pfc_pause_en_cfg(hw, hw->dcb_info.pfc_en, tx_en, rx_en);
1308 * Each TC has 1024 queue sets for backpressure, divided into
1309 * 32 groups; each group contains 32 queue sets, which can be
1310 * represented by uint32_t bitmap.
/*
 * Configure all backpressure groups for TC 'tc': the group holding the
 * TC's own sub-group gets the matching bit set in its bitmap.
 */
1313 hns3_bp_setup_hw(struct hns3_hw *hw, uint8_t tc)
1319 for (i = 0; i < HNS3_BP_GRP_NUM; i++) {
1320 uint8_t grp, sub_grp;
/* Split the TC number into group id and bit position inside the group. */
1323 grp = hns3_get_field(tc, HNS3_BP_GRP_ID_M, HNS3_BP_GRP_ID_S);
1324 sub_grp = hns3_get_field(tc, HNS3_BP_SUB_GRP_ID_M,
1325 HNS3_BP_SUB_GRP_ID_S);
1327 qs_bitmap |= (1 << sub_grp);
1329 ret = hns3_qs_bp_cfg(hw, tc, i, qs_bitmap);
/* Configure backpressure for every enabled TC. */
1338 hns3_dcb_bp_setup(struct hns3_hw *hw)
1342 for (i = 0; i < hw->dcb_info.num_tc; i++) {
1343 ret = hns3_bp_setup_hw(hw, i);
/*
 * Full flow-control bring-up: pause parameters, MAC pause enables,
 * then (DCB-capable devices only) PFC and queue-set backpressure.
 */
1352 hns3_dcb_pause_setup_hw(struct hns3_hw *hw)
1354 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1355 struct hns3_pf *pf = &hns->pf;
1358 ret = hns3_pause_param_setup_hw(hw, pf->pause_time);
1360 hns3_err(hw, "Fail to set pause parameter. ret = %d", ret);
1364 ret = hns3_mac_pause_setup_hw(hw);
1366 hns3_err(hw, "Fail to setup MAC pause. ret = %d", ret);
1370 /* Only DCB-supported dev supports qset back pressure and pfc cmd */
1371 if (!hns3_dev_get_support(hw, DCB))
1374 ret = hns3_pfc_setup_hw(hw);
1376 hns3_err(hw, "config pfc failed! ret = %d", ret);
1380 return hns3_dcb_bp_setup(hw);
/*
 * Build a per-TC bitmap of TCs that must not drop: a TC is included
 * when at least one of its mapped user priorities has PFC enabled.
 */
1384 hns3_dcb_undrop_tc_map(struct hns3_hw *hw, uint8_t pfc_en)
1386 uint8_t pfc_map = 0;
1390 prio_tc = hw->dcb_info.prio_tc;
1391 for (i = 0; i < hw->dcb_info.num_tc; i++) {
1392 for (j = 0; j < HNS3_MAX_USER_PRIO; j++) {
1393 if (prio_tc[j] == i && pfc_en & BIT(j)) {
/*
 * Derive the number of TCs from the application's DCB Rx configuration:
 * the highest TC id referenced by any user priority, plus one.
 */
1404 hns3_dcb_parse_num_tc(struct hns3_adapter *hns)
1406 struct rte_eth_dcb_rx_conf *dcb_rx_conf;
1407 struct hns3_hw *hw = &hns->hw;
1408 uint8_t max_tc_id = 0;
1411 dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
1412 for (i = 0; i < HNS3_MAX_USER_PRIO; i++) {
1413 if (dcb_rx_conf->dcb_tc[i] > max_tc_id)
1414 max_tc_id = dcb_rx_conf->dcb_tc[i];
1417 /* Number of TC is equal to max_tc_id plus 1. */
1418 return max_tc_id + 1;
/*
 * Rebuild the DCB database from the application's DCB Rx config: all
 * TCs under PG0, equal DWRR weights summing to 100, priority-to-TC map
 * copied from dcb_tc[], then refresh the queue-to-TC mapping.
 */
1422 hns3_dcb_info_cfg(struct hns3_adapter *hns)
1424 struct rte_eth_dcb_rx_conf *dcb_rx_conf;
1425 struct hns3_pf *pf = &hns->pf;
1426 struct hns3_hw *hw = &hns->hw;
1427 uint8_t tc_bw, bw_rest;
1431 dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
1432 pf->local_max_tc = (uint8_t)dcb_rx_conf->nb_tcs;
1433 pf->pfc_max = (uint8_t)dcb_rx_conf->nb_tcs;
/* Single-PG layout: PG0 owns all TCs and the full rate. */
1436 memset(hw->dcb_info.pg_info, 0,
1437 sizeof(struct hns3_pg_info) * HNS3_PG_NUM);
1438 hw->dcb_info.pg_dwrr[0] = BW_MAX_PERCENT;
1439 hw->dcb_info.pg_info[0].pg_id = 0;
1440 hw->dcb_info.pg_info[0].pg_sch_mode = HNS3_SCH_MODE_DWRR;
1441 hw->dcb_info.pg_info[0].bw_limit = hw->max_tm_rate;
1442 hw->dcb_info.pg_info[0].tc_bit_map = hw->hw_tc_map;
1444 /* Each tc has same bw for valid tc by default */
1445 tc_bw = BW_MAX_PERCENT / hw->dcb_info.num_tc;
1446 for (i = 0; i < hw->dcb_info.num_tc; i++)
1447 hw->dcb_info.pg_info[0].tc_dwrr[i] = tc_bw;
1448 /* To ensure the sum of tc_dwrr is equal to 100 */
1449 bw_rest = BW_MAX_PERCENT % hw->dcb_info.num_tc;
1450 for (j = 0; j < bw_rest; j++)
1451 hw->dcb_info.pg_info[0].tc_dwrr[j]++;
/* Remaining configured-but-unused TCs get zero weight. */
1452 for (; i < dcb_rx_conf->nb_tcs; i++)
1453 hw->dcb_info.pg_info[0].tc_dwrr[i] = 0;
1455 /* All tcs map to pg0 */
1456 memset(hw->dcb_info.tc_info, 0,
1457 sizeof(struct hns3_tc_info) * HNS3_MAX_TC_NUM);
1458 for (i = 0; i < hw->dcb_info.num_tc; i++) {
1459 hw->dcb_info.tc_info[i].tc_id = i;
1460 hw->dcb_info.tc_info[i].tc_sch_mode = HNS3_SCH_MODE_DWRR;
1461 hw->dcb_info.tc_info[i].pgid = 0;
1462 hw->dcb_info.tc_info[i].bw_limit =
1463 hw->dcb_info.pg_info[0].bw_limit;
1466 for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
1467 hw->dcb_info.prio_tc[i] = dcb_rx_conf->dcb_tc[i];
1469 ret = hns3_dcb_update_tc_queue_mapping(hw, hw->data->nb_rx_queues,
1470 hw->data->nb_tx_queues);
1472 hns3_err(hw, "update tc queue mapping failed, ret = %d.", ret);
/*
 * hns3_dcb_info_update - validate @num_tc against the configured Rx/Tx
 * queue counts, record the enabled-TC bitmap in hw->hw_tc_map and
 * rebuild the soft DCB configuration via hns3_dcb_info_cfg().
 * @hns: hns3 adapter.
 * @num_tc: number of traffic classes requested by the application.
 * Returns 0 on success, a negative value on failure.
 */
1478 hns3_dcb_info_update(struct hns3_adapter *hns, uint8_t num_tc)
1480 struct hns3_pf *pf = &hns->pf;
1481 struct hns3_hw *hw = &hns->hw;
1482 uint16_t nb_rx_q = hw->data->nb_rx_queues;
1483 uint16_t nb_tx_q = hw->data->nb_tx_queues;
1484 uint8_t bit_map = 0;
/* TC-based scheduling only works with a single priority group. */
1487 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
1488 hw->dcb_info.num_pg != 1)
/* Every TC needs at least one Rx queue and one Tx queue. */
1491 if (nb_rx_q < num_tc) {
1492 hns3_err(hw, "number of Rx queues(%u) is less than tcs(%u).",
1497 if (nb_tx_q < num_tc) {
1498 hns3_err(hw, "number of Tx queues(%u) is less than tcs(%u).",
1503 /* Discontiguous TCs are currently not supported. */
1504 hw->dcb_info.num_tc = num_tc;
1505 for (i = 0; i < hw->dcb_info.num_tc; i++)
/*
 * NOTE(review): lines elided here — bit_map is presumably built over
 * TCs 0..num_tc-1, with a fallback to one TC when empty; confirm
 * against the full source.
 */
1510 hw->dcb_info.num_tc = 1;
1512 hw->hw_tc_map = bit_map;
1514 return hns3_dcb_info_cfg(hns);
/*
 * hns3_dcb_hw_configure - program the DCB scheduler, PFC enable map,
 * packet buffers and pause parameters into hardware. On failure the
 * flow-control software state captured on entry is restored so soft
 * state never diverges from hardware.
 * @hns: hns3 adapter.
 * Returns 0 on success, a negative value on failure.
 */
1518 hns3_dcb_hw_configure(struct hns3_adapter *hns)
1520 struct rte_eth_dcb_rx_conf *dcb_rx_conf;
1521 struct hns3_pf *pf = &hns->pf;
1522 struct hns3_hw *hw = &hns->hw;
/* Snapshot the current FC state for rollback on any error below. */
1523 enum hns3_fc_status fc_status = hw->current_fc_status;
1524 enum hns3_fc_mode requested_fc_mode = hw->requested_fc_mode;
1525 uint8_t hw_pfc_map = hw->dcb_info.hw_pfc_map;
1526 uint8_t pfc_en = hw->dcb_info.pfc_en;
/* Only TC-based and VNET-based Tx scheduling modes are supported. */
1529 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
1530 pf->tx_sch_mode != HNS3_FLAG_VNET_BASE_SCH_MODE)
1533 ret = hns3_dcb_schd_setup_hw(hw);
1535 hns3_err(hw, "dcb schdule configure failed! ret = %d", ret);
/*
 * When the application requested PFC, enable it for the configured
 * TCs (or TC0 only if nb_tcs is 0) and force full flow control.
 */
1539 if (hw->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
1540 dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
1541 if (dcb_rx_conf->nb_tcs == 0)
1542 hw->dcb_info.pfc_en = 1; /* tc0 only */
1544 hw->dcb_info.pfc_en =
1545 RTE_LEN2MASK((uint8_t)dcb_rx_conf->nb_tcs, uint8_t);
1547 hw->dcb_info.hw_pfc_map =
1548 hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en);
1550 hw->current_fc_status = HNS3_FC_STATUS_PFC;
1551 hw->requested_fc_mode = HNS3_FC_FULL;
/* PFC not requested: clear all flow-control state. */
1553 hw->current_fc_status = HNS3_FC_STATUS_NONE;
1554 hw->requested_fc_mode = HNS3_FC_NONE;
1555 hw->dcb_info.pfc_en = 0;
1556 hw->dcb_info.hw_pfc_map = 0;
/* Re-allocate Rx packet buffers to match the new no-drop TC map. */
1559 ret = hns3_buffer_alloc(hw);
1563 ret = hns3_dcb_pause_setup_hw(hw);
1565 hns3_err(hw, "setup pfc failed! ret = %d", ret);
/* Error path: restore the software FC state snapshotted above. */
1572 hw->requested_fc_mode = requested_fc_mode;
1573 hw->current_fc_status = fc_status;
1574 hw->dcb_info.pfc_en = pfc_en;
1575 hw->dcb_info.hw_pfc_map = hw_pfc_map;
1581 * hns3_dcb_configure - setup dcb related config
1582 * @hns: pointer to hns3 adapter
1583 * Returns 0 on success, negative value on failure.
1586 hns3_dcb_configure(struct hns3_adapter *hns)
1588 struct hns3_hw *hw = &hns->hw;
/*
 * Derive the TC count from the user configuration, refresh the soft
 * DCB state, then push the result to hardware.
 */
1592 num_tc = hns3_dcb_parse_num_tc(hns);
1593 ret = hns3_dcb_info_update(hns, num_tc);
1595 hns3_err(hw, "dcb info update failed: %d", ret);
1599 ret = hns3_dcb_hw_configure(hns);
1601 hns3_err(hw, "dcb sw configure failed: %d", ret);
/*
 * hns3_dcb_init_hw - program the current DCB scheduler configuration
 * and the PAUSE (flow-control) settings into hardware.
 * @hw: hns3 hardware information.
 * Returns 0 on success, a negative value on failure.
 */
1609 hns3_dcb_init_hw(struct hns3_hw *hw)
1613 ret = hns3_dcb_schd_setup_hw(hw);
1615 hns3_err(hw, "dcb schedule setup failed: %d", ret);
1619 ret = hns3_dcb_pause_setup_hw(hw);
1621 hns3_err(hw, "PAUSE setup failed: %d", ret);
/*
 * hns3_dcb_init - initialize DCB software defaults (first probe only)
 * and program the DCB hardware. Called both at driver init and during
 * reset recovery; the default-setup branch runs only at init.
 * @hw: hns3 hardware information.
 * Returns 0 on success, a negative value on failure.
 */
1627 hns3_dcb_init(struct hns3_hw *hw)
1629 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1630 struct hns3_pf *pf = &hns->pf;
1631 uint16_t default_tqp_num;
1634 PMD_INIT_FUNC_TRACE();
/*
 * According to the 'adapter_state' identifier, the following branch
 * is only executed to initialize default configurations of dcb during
 * the initializing driver process. Due to driver saving dcb-related
 * information before reset triggered, the reinit dev stage of the
 * reset process can not access to the branch, or those information
 * would be lost.
 */
1644 if (hw->adapter_state == HNS3_NIC_UNINITIALIZED) {
1645 hw->requested_fc_mode = HNS3_FC_NONE;
1646 pf->pause_time = HNS3_DEFAULT_PAUSE_TRANS_TIME;
1647 hw->current_fc_status = HNS3_FC_STATUS_NONE;
1649 ret = hns3_dcb_info_init(hw);
1651 hns3_err(hw, "dcb info init failed, ret = %d.", ret);
/*
 * The number of queues configured by default cannot exceed
 * the maximum number of queues for a single TC.
 */
1659 default_tqp_num = RTE_MIN(hw->rss_size_max,
1660 hw->tqps_num / hw->dcb_info.num_tc);
1661 ret = hns3_dcb_update_tc_queue_mapping(hw, default_tqp_num,
1665 "update tc queue mapping failed, ret = %d.",
/*
 * DCB hardware will be configured by following the function during
 * the initializing driver process and the reset process. However,
 * driver will restore directly configurations of dcb hardware based
 * on dcb-related information soft maintained when driver
 * initialization has finished and reset is coming.
 */
1678 ret = hns3_dcb_init_hw(hw);
1680 hns3_err(hw, "dcb init hardware failed, ret = %d.", ret);
/*
 * hns3_update_queue_map_configure - refresh the TC-to-queue mapping
 * after a queue-count change and re-map NQs to QSs in hardware.
 * Skipped when DCB multi-queue mode is requested (the DCB configure
 * path handles the mapping then — early return on hidden line).
 * @hns: hns3 adapter.
 * Returns 0 on success, a negative value on failure.
 */
1688 hns3_update_queue_map_configure(struct hns3_adapter *hns)
1690 struct hns3_hw *hw = &hns->hw;
1691 enum rte_eth_rx_mq_mode mq_mode = hw->data->dev_conf.rxmode.mq_mode;
1692 uint16_t nb_rx_q = hw->data->nb_rx_queues;
1693 uint16_t nb_tx_q = hw->data->nb_tx_queues;
1696 if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
1699 ret = hns3_dcb_update_tc_queue_mapping(hw, nb_rx_q, nb_tx_q);
1701 hns3_err(hw, "failed to update tc queue mapping, ret = %d.",
1705 ret = hns3_q_to_qs_map(hw);
1707 hns3_err(hw, "failed to map nq to qs, ret = %d.", ret);
/*
 * hns3_get_fc_mode - translate a generic rte_eth_fc_mode into the
 * driver's hns3_fc_mode and store it in hw->requested_fc_mode.
 * Unknown modes fall back to HNS3_FC_NONE with a warning.
 * @hw: hns3 hardware information.
 * @mode: flow-control mode requested by the application.
 */
1713 hns3_get_fc_mode(struct hns3_hw *hw, enum rte_eth_fc_mode mode)
1716 case RTE_ETH_FC_NONE:
1717 hw->requested_fc_mode = HNS3_FC_NONE;
1719 case RTE_ETH_FC_RX_PAUSE:
1720 hw->requested_fc_mode = HNS3_FC_RX_PAUSE;
1722 case RTE_ETH_FC_TX_PAUSE:
1723 hw->requested_fc_mode = HNS3_FC_TX_PAUSE;
1725 case RTE_ETH_FC_FULL:
1726 hw->requested_fc_mode = HNS3_FC_FULL;
/* Default case: unrecognised mode, degrade safely to "no FC". */
1729 hw->requested_fc_mode = HNS3_FC_NONE;
1730 hns3_warn(hw, "fc_mode(%u) exceeds member scope and is "
1731 "configured to RTE_ETH_FC_NONE", mode);
1737 * hns3_dcb_pfc_enable - Enable priority flow control
1738 * @dev: pointer to ethernet device
1740 * Configures the pfc settings for one priority.
1743 hns3_dcb_pfc_enable(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
1745 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1746 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
/* Snapshot the current FC state so a failure can roll it back. */
1747 enum hns3_fc_status fc_status = hw->current_fc_status;
1748 enum hns3_fc_mode old_fc_mode = hw->requested_fc_mode;
1749 uint8_t hw_pfc_map = hw->dcb_info.hw_pfc_map;
1750 uint8_t pfc_en = hw->dcb_info.pfc_en;
1751 uint8_t priority = pfc_conf->priority;
1752 uint16_t pause_time = pf->pause_time;
/* Add this priority to the PFC enable mask and refresh the hw map. */
1755 hw->dcb_info.pfc_en |= BIT(priority);
1756 hw->dcb_info.hw_pfc_map =
1757 hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en);
/* Re-allocate Rx packet buffers to match the new no-drop TC map. */
1758 ret = hns3_buffer_alloc(hw);
1760 hns3_err(hw, "update packet buffer failed, ret = %d", ret);
1761 goto buffer_alloc_fail;
/* Adopt the caller's pause time and requested FC mode. */
1764 pf->pause_time = pfc_conf->fc.pause_time;
1765 hns3_get_fc_mode(hw, pfc_conf->fc.mode);
1766 if (hw->requested_fc_mode == HNS3_FC_NONE)
1767 hw->current_fc_status = HNS3_FC_STATUS_NONE;
1769 hw->current_fc_status = HNS3_FC_STATUS_PFC;
/*
 * The flow control mode of all UPs will be changed based on
 * requested_fc_mode coming from user.
 */
1775 ret = hns3_dcb_pause_setup_hw(hw);
1777 hns3_err(hw, "enable pfc failed! ret = %d", ret);
1778 goto pfc_setup_fail;
/* Error paths: restore every piece of state snapshotted on entry. */
1784 hw->requested_fc_mode = old_fc_mode;
1785 hw->current_fc_status = fc_status;
1786 pf->pause_time = pause_time;
1788 hw->dcb_info.pfc_en = pfc_en;
1789 hw->dcb_info.hw_pfc_map = hw_pfc_map;
1795 * hns3_fc_enable - Enable MAC pause
1796 * @dev: pointer to ethernet device
1798 * Configures the MAC pause settings.
1801 hns3_fc_enable(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1803 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1804 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1805 enum hns3_fc_mode old_fc_mode = hw->requested_fc_mode;
1806 enum hns3_fc_status fc_status = hw->current_fc_status;
1807 uint16_t pause_time = pf->pause_time;
1810 pf->pause_time = fc_conf->pause_time;
1811 hns3_get_fc_mode(hw, fc_conf->mode);
1814 * In fact, current_fc_status is HNS3_FC_STATUS_NONE when mode
1815 * of flow control is configured to be HNS3_FC_NONE.
1817 if (hw->requested_fc_mode == HNS3_FC_NONE)
1818 hw->current_fc_status = HNS3_FC_STATUS_NONE;
1820 hw->current_fc_status = HNS3_FC_STATUS_MAC_PAUSE;
1822 ret = hns3_dcb_pause_setup_hw(hw);
1824 hns3_err(hw, "enable MAC Pause failed! ret = %d", ret);
1831 hw->requested_fc_mode = old_fc_mode;
1832 hw->current_fc_status = fc_status;
1833 pf->pause_time = pause_time;