1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2019 Hisilicon Limited.
11 #include <rte_common.h>
12 #include <rte_ethdev.h>
14 #include "hns3_logs.h"
15 #include "hns3_regs.h"
16 #include "hns3_ethdev.h"
19 #define HNS3_SHAPER_BS_U_DEF 5
20 #define HNS3_SHAPER_BS_S_DEF 20
21 #define BW_MAX_PERCENT 100
24 * hns3_shaper_para_calc: calculate ir parameter for the shaper
25 * @ir: Rate to be config, its unit is Mbps
26 * @shaper_level: the shaper level. eg: port, pg, priority, queueset
27 * @shaper_para: shaper parameter of IR shaper
31 * IR_b * (2 ^ IR_u) * 8
32 * IR(Mbps) = ------------------------- * CLOCK(1000Mbps)
35 * @return: 0: calculate successful, negative: fail
/*
 * Compute the IR shaper parameters (ir_b/ir_u/ir_s) for a rate @ir (Mbps)
 * at the given shaper level; fails with a logged error on an invalid
 * level or a rate above HNS3_ETHER_MAX_RATE.
 */
38 hns3_shaper_para_calc(struct hns3_hw *hw, uint32_t ir, uint8_t shaper_level,
39 struct hns3_shaper_parameter *shaper_para)
41 #define SHAPER_DEFAULT_IR_B 126
42 #define DIVISOR_CLK (1000 * 8)
43 #define DIVISOR_IR_B_126 (126 * DIVISOR_CLK)
/* Tick granularity per shaper level, indexed by the shaper_level argument. */
45 const uint16_t tick_array[HNS3_SHAPER_LVL_CNT] = {
46 6 * 256, /* Priority level */
47 6 * 32, /* Priority group level */
48 6 * 8, /* Port level */
49 6 * 256 /* Qset level */
51 uint8_t ir_u_calc = 0;
52 uint8_t ir_s_calc = 0;
/* Validate the requested level and rate before any calculation. */
58 if (shaper_level >= HNS3_SHAPER_LVL_CNT) {
60 "shaper_level(%d) is greater than HNS3_SHAPER_LVL_CNT(%d)",
61 shaper_level, HNS3_SHAPER_LVL_CNT);
65 if (ir > HNS3_ETHER_MAX_RATE) {
66 hns3_err(hw, "rate(%d) exceeds the rate driver supported "
67 "HNS3_ETHER_MAX_RATE(%d)", ir, HNS3_ETHER_MAX_RATE);
71 tick = tick_array[shaper_level];
74 * Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
75 * the formula is changed to:
77 * ir_calc = ---------------- * 1000
80 ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;
/* ir matches the default parameters exactly. */
83 shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
84 } else if (ir_calc > ir) {
85 /* Increasing the denominator to select ir_s value */
88 ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
89 } while (ir_calc > ir);
92 shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
94 shaper_para->ir_b = (ir * tick * (1 << ir_s_calc) +
95 (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
98 * Increasing the numerator to select ir_u value. ir_u_calc will
99 * get maximum value when ir_calc is minimum and ir is maximum.
100 * ir_calc gets minimum value when tick is the maximum value.
101 * At the same time, value of ir_u_calc can only be increased up
102 * to eight after the while loop if the value of ir is equal
103 * to HNS3_ETHER_MAX_RATE.
108 numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
109 ir_calc = (numerator + (tick >> 1)) / tick;
110 } while (ir_calc < ir);
113 shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
118 * The maximum value of ir_u_calc in this branch is
119 * seven in all cases. Thus, value of denominator can
122 denominator = DIVISOR_CLK * (1 << ir_u_calc);
124 (ir * tick + (denominator >> 1)) / denominator;
/* Publish the selected exponent fields to the caller. */
128 shaper_para->ir_u = ir_u_calc;
129 shaper_para->ir_s = ir_s_calc;
/*
 * Record the TC of user priority @pri_id into the packed pri[] array:
 * two priorities per byte, low nibble = even priority, high nibble = odd.
 */
135 hns3_fill_pri_array(struct hns3_hw *hw, uint8_t *pri, uint8_t pri_id)
137 #define HNS3_HALF_BYTE_BIT_OFFSET 4
138 uint8_t tc = hw->dcb_info.prio_tc[pri_id];
/* A priority may only map to a currently valid TC. */
140 if (tc >= hw->dcb_info.num_tc)
144 * The register for priority has four bytes, the first bytes includes
145 * priority0 and priority1, the higher 4bit stands for priority1
146 * while the lower 4bit stands for priority0, as below:
147 * first byte: | pri_1 | pri_0 |
148 * second byte: | pri_3 | pri_2 |
149 * third byte: | pri_5 | pri_4 |
150 * fourth byte: | pri_7 | pri_6 |
152 pri[pri_id >> 1] |= tc << ((pri_id & 1) * HNS3_HALF_BYTE_BIT_OFFSET);
/*
 * Program the user-priority -> TC mapping into hardware via the
 * PRI_TO_TC_MAPPING command, packing all priorities into desc.data.
 */
158 hns3_up_to_tc_map(struct hns3_hw *hw)
160 struct hns3_cmd_desc desc;
161 uint8_t *pri = (uint8_t *)desc.data;
165 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PRI_TO_TC_MAPPING, false);
167 for (pri_id = 0; pri_id < HNS3_MAX_USER_PRIO; pri_id++) {
168 ret = hns3_fill_pri_array(hw, pri, pri_id);
173 return hns3_cmd_send(hw, &desc, 1);
/* Link priority group @pg_id to the priorities set in @pri_bit_map. */
177 hns3_pg_to_pri_map_cfg(struct hns3_hw *hw, uint8_t pg_id, uint8_t pri_bit_map)
179 struct hns3_pg_to_pri_link_cmd *map;
180 struct hns3_cmd_desc desc;
182 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_TO_PRI_LINK, false);
184 map = (struct hns3_pg_to_pri_link_cmd *)desc.data;
187 map->pri_bit_map = pri_bit_map;
189 return hns3_cmd_send(hw, &desc, 1);
/* Configure the PG -> priority mapping for every PG (TC-based mode only). */
193 hns3_pg_to_pri_map(struct hns3_hw *hw)
195 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
196 struct hns3_pf *pf = &hns->pf;
197 struct hns3_pg_info *pg_info;
200 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
203 for (i = 0; i < hw->dcb_info.num_pg; i++) {
204 /* Cfg pg to priority mapping */
205 pg_info = &hw->dcb_info.pg_info[i];
206 ret = hns3_pg_to_pri_map_cfg(hw, i, pg_info->tc_bit_map);
/* Link queue set @qs_id to priority @pri and mark the link valid. */
215 hns3_qs_to_pri_map_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t pri)
217 struct hns3_qs_to_pri_link_cmd *map;
218 struct hns3_cmd_desc desc;
220 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_TO_PRI_LINK, false);
222 map = (struct hns3_qs_to_pri_link_cmd *)desc.data;
224 map->qs_id = rte_cpu_to_le_16(qs_id);
226 map->link_vld = HNS3_DCB_QS_PRI_LINK_VLD_MSK;
228 return hns3_cmd_send(hw, &desc, 1);
/* Set the DWRR weight of queue set @qs_id. */
232 hns3_dcb_qs_weight_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t dwrr)
234 struct hns3_qs_weight_cmd *weight;
235 struct hns3_cmd_desc desc;
237 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_WEIGHT, false);
239 weight = (struct hns3_qs_weight_cmd *)desc.data;
241 weight->qs_id = rte_cpu_to_le_16(qs_id);
244 return hns3_cmd_send(hw, &desc, 1);
/*
 * Program the per-TC ETS weight table: enabled TCs take the DWRR weight
 * stored in their owning PG, disabled TCs get DEFAULT_TC_WEIGHT.
 */
248 hns3_dcb_ets_tc_dwrr_cfg(struct hns3_hw *hw)
250 #define DEFAULT_TC_WEIGHT 1
251 #define DEFAULT_TC_OFFSET 14
252 struct hns3_ets_tc_weight_cmd *ets_weight;
253 struct hns3_cmd_desc desc;
256 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_ETS_TC_WEIGHT, false);
257 ets_weight = (struct hns3_ets_tc_weight_cmd *)desc.data;
259 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
260 struct hns3_pg_info *pg_info;
262 ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;
/* Skip TCs that are not enabled in the hardware TC map. */
264 if (!(hw->hw_tc_map & BIT(i)))
267 pg_info = &hw->dcb_info.pg_info[hw->dcb_info.tc_info[i].pgid];
268 ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
271 ets_weight->weight_offset = DEFAULT_TC_OFFSET;
273 return hns3_cmd_send(hw, &desc, 1);
/* Set the DWRR weight of priority @pri_id. */
277 hns3_dcb_pri_weight_cfg(struct hns3_hw *hw, uint8_t pri_id, uint8_t dwrr)
279 struct hns3_priority_weight_cmd *weight;
280 struct hns3_cmd_desc desc;
282 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PRI_WEIGHT, false);
284 weight = (struct hns3_priority_weight_cmd *)desc.data;
286 weight->pri_id = pri_id;
289 return hns3_cmd_send(hw, &desc, 1);
/* Set the DWRR weight of priority group @pg_id. */
293 hns3_dcb_pg_weight_cfg(struct hns3_hw *hw, uint8_t pg_id, uint8_t dwrr)
295 struct hns3_pg_weight_cmd *weight;
296 struct hns3_cmd_desc desc;
298 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_WEIGHT, false);
300 weight = (struct hns3_pg_weight_cmd *)desc.data;
302 weight->pg_id = pg_id;
305 return hns3_cmd_send(hw, &desc, 1);
/* Select DWRR (vs. strict-priority) scheduling mode for PG @pg_id. */
308 hns3_dcb_pg_schd_mode_cfg(struct hns3_hw *hw, uint8_t pg_id)
310 struct hns3_cmd_desc desc;
312 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_SCH_MODE_CFG, false);
314 if (hw->dcb_info.pg_info[pg_id].pg_sch_mode == HNS3_SCH_MODE_DWRR)
315 desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
319 desc.data[0] = rte_cpu_to_le_32(pg_id);
321 return hns3_cmd_send(hw, &desc, 1);
/* Pack the IR and bucket-size fields into one 32-bit shaper register value. */
325 hns3_dcb_get_shapping_para(uint8_t ir_b, uint8_t ir_u, uint8_t ir_s,
326 uint8_t bs_b, uint8_t bs_s)
328 uint32_t shapping_para = 0;
330 hns3_dcb_set_field(shapping_para, IR_B, ir_b);
331 hns3_dcb_set_field(shapping_para, IR_U, ir_u);
332 hns3_dcb_set_field(shapping_para, IR_S, ir_s);
333 hns3_dcb_set_field(shapping_para, BS_B, bs_b);
334 hns3_dcb_set_field(shapping_para, BS_S, bs_s);
336 return shapping_para;
/*
 * Configure the port-level shaper from the current MAC link speed,
 * using the default bucket sizes.
 */
340 hns3_dcb_port_shaper_cfg(struct hns3_hw *hw)
342 struct hns3_port_shapping_cmd *shap_cfg_cmd;
343 struct hns3_shaper_parameter shaper_parameter;
344 uint32_t shapping_para;
345 uint32_t ir_u, ir_b, ir_s;
346 struct hns3_cmd_desc desc;
349 ret = hns3_shaper_para_calc(hw, hw->mac.link_speed,
350 HNS3_SHAPER_LVL_PORT, &shaper_parameter);
352 hns3_err(hw, "calculate shaper parameter failed: %d", ret);
356 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PORT_SHAPPING, false);
357 shap_cfg_cmd = (struct hns3_port_shapping_cmd *)desc.data;
359 ir_b = shaper_parameter.ir_b;
360 ir_u = shaper_parameter.ir_u;
361 ir_s = shaper_parameter.ir_s;
362 shapping_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
363 HNS3_SHAPER_BS_U_DEF,
364 HNS3_SHAPER_BS_S_DEF);
366 shap_cfg_cmd->port_shapping_para = rte_cpu_to_le_32(shapping_para);
368 return hns3_cmd_send(hw, &desc, 1);
/*
 * Write a shaper value for PG @pg_id to the CIR or PIR bucket selected
 * by @bucket (non-zero bucket selects PIR).
 */
372 hns3_dcb_pg_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,
373 uint8_t pg_id, uint32_t shapping_para)
375 struct hns3_pg_shapping_cmd *shap_cfg_cmd;
376 enum hns3_opcode_type opcode;
377 struct hns3_cmd_desc desc;
379 opcode = bucket ? HNS3_OPC_TM_PG_P_SHAPPING :
380 HNS3_OPC_TM_PG_C_SHAPPING;
381 hns3_cmd_setup_basic_desc(&desc, opcode, false);
383 shap_cfg_cmd = (struct hns3_pg_shapping_cmd *)desc.data;
385 shap_cfg_cmd->pg_id = pg_id;
387 shap_cfg_cmd->pg_shapping_para = rte_cpu_to_le_32(shapping_para);
389 return hns3_cmd_send(hw, &desc, 1);
/*
 * Configure CIR and PIR shapers of every priority group from its
 * bw_limit (TC-based scheduling mode only).
 */
393 hns3_dcb_pg_shaper_cfg(struct hns3_hw *hw)
395 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
396 struct hns3_shaper_parameter shaper_parameter;
397 struct hns3_pf *pf = &hns->pf;
398 uint32_t ir_u, ir_b, ir_s;
399 uint32_t shaper_para;
404 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
408 for (i = 0; i < hw->dcb_info.num_pg; i++) {
409 /* Calc shaper para */
410 ret = hns3_shaper_para_calc(hw,
411 hw->dcb_info.pg_info[i].bw_limit,
415 hns3_err(hw, "calculate shaper parameter failed: %d",
/* CIR bucket is written with zeroed IR fields, default bucket sizes. */
420 shaper_para = hns3_dcb_get_shapping_para(0, 0, 0,
421 HNS3_SHAPER_BS_U_DEF,
422 HNS3_SHAPER_BS_S_DEF);
424 ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, i,
428 "config PG CIR shaper parameter failed: %d",
/* PIR bucket carries the calculated rate parameters. */
433 ir_b = shaper_parameter.ir_b;
434 ir_u = shaper_parameter.ir_u;
435 ir_s = shaper_parameter.ir_s;
436 shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
437 HNS3_SHAPER_BS_U_DEF,
438 HNS3_SHAPER_BS_S_DEF);
440 ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, i,
444 "config PG PIR shaper parameter failed: %d",
/* Select DWRR (vs. strict-priority) mode for queue set @qs_id. */
454 hns3_dcb_qs_schd_mode_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t mode)
456 struct hns3_cmd_desc desc;
458 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_SCH_MODE_CFG, false);
460 if (mode == HNS3_SCH_MODE_DWRR)
461 desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
465 desc.data[0] = rte_cpu_to_le_32(qs_id);
467 return hns3_cmd_send(hw, &desc, 1);
/* Select DWRR (vs. strict-priority) mode for priority @pri_id. */
471 hns3_dcb_pri_schd_mode_cfg(struct hns3_hw *hw, uint8_t pri_id)
473 struct hns3_cmd_desc desc;
475 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PRI_SCH_MODE_CFG, false);
477 if (hw->dcb_info.tc_info[pri_id].tc_sch_mode == HNS3_SCH_MODE_DWRR)
478 desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
482 desc.data[0] = rte_cpu_to_le_32(pri_id);
484 return hns3_cmd_send(hw, &desc, 1);
/*
 * Write a shaper value for priority @pri_id to the CIR or PIR bucket
 * selected by @bucket (non-zero bucket selects PIR).
 */
488 hns3_dcb_pri_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,
489 uint8_t pri_id, uint32_t shapping_para)
491 struct hns3_pri_shapping_cmd *shap_cfg_cmd;
492 enum hns3_opcode_type opcode;
493 struct hns3_cmd_desc desc;
495 opcode = bucket ? HNS3_OPC_TM_PRI_P_SHAPPING :
496 HNS3_OPC_TM_PRI_C_SHAPPING;
498 hns3_cmd_setup_basic_desc(&desc, opcode, false);
500 shap_cfg_cmd = (struct hns3_pri_shapping_cmd *)desc.data;
502 shap_cfg_cmd->pri_id = pri_id;
504 shap_cfg_cmd->pri_shapping_para = rte_cpu_to_le_32(shapping_para);
506 return hns3_cmd_send(hw, &desc, 1);
/*
 * Configure CIR and PIR shapers for every TC's priority from the TC's
 * bw_limit, mirroring the PG shaper flow.
 */
510 hns3_dcb_pri_tc_base_shaper_cfg(struct hns3_hw *hw)
512 struct hns3_shaper_parameter shaper_parameter;
513 uint32_t ir_u, ir_b, ir_s;
514 uint32_t shaper_para;
517 for (i = 0; i < hw->dcb_info.num_tc; i++) {
518 ret = hns3_shaper_para_calc(hw,
519 hw->dcb_info.tc_info[i].bw_limit,
523 hns3_err(hw, "calculate shaper parameter failed: %d",
528 shaper_para = hns3_dcb_get_shapping_para(0, 0, 0,
529 HNS3_SHAPER_BS_U_DEF,
530 HNS3_SHAPER_BS_S_DEF);
532 ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, i,
536 "config priority CIR shaper parameter failed: %d",
541 ir_b = shaper_parameter.ir_b;
542 ir_u = shaper_parameter.ir_u;
543 ir_s = shaper_parameter.ir_s;
544 shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
545 HNS3_SHAPER_BS_U_DEF,
546 HNS3_SHAPER_BS_S_DEF);
548 ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, i,
552 "config priority PIR shaper parameter failed: %d",
/* Entry point for priority-level shaper setup; TC-based mode only. */
563 hns3_dcb_pri_shaper_cfg(struct hns3_hw *hw)
565 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
566 struct hns3_pf *pf = &hns->pf;
569 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
572 ret = hns3_dcb_pri_tc_base_shaper_cfg(hw);
574 hns3_err(hw, "config port shaper failed: %d", ret);
/*
 * Derive the per-TC RSS size from the requested Rx queue count and,
 * when no reset is in progress, refresh the driver-maintained RSS
 * redirection table accordingly.
 */
580 hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q)
582 struct hns3_rss_conf *rss_cfg = &hw->rss_info;
583 uint16_t rx_qnum_per_tc;
584 uint16_t used_rx_queues;
587 rx_qnum_per_tc = nb_rx_q / hw->num_tc;
588 if (rx_qnum_per_tc > hw->rss_size_max) {
589 hns3_err(hw, "rx queue number of per tc (%u) is greater than "
590 "value (%u) hardware supported.",
591 rx_qnum_per_tc, hw->rss_size_max);
/* Rx queue count must be an integral multiple of num_tc. */
595 used_rx_queues = hw->num_tc * rx_qnum_per_tc;
596 if (used_rx_queues != nb_rx_q) {
597 hns3_err(hw, "rx queue number (%u) configured must be an "
598 "integral multiple of valid tc number (%u).",
599 nb_rx_q, hw->num_tc);
602 hw->alloc_rss_size = rx_qnum_per_tc;
603 hw->used_rx_queues = used_rx_queues;
606 * When rss size is changed, we need to update rss redirection table
607 * maintained by driver. Besides, during the entire reset process, we
608 * need to ensure that the rss table information are not overwritten
609 * and configured directly to the hardware in the RESET_STAGE_RESTORE
610 * stage of the reset process.
612 if (rte_atomic16_read(&hw->reset.resetting) == 0) {
613 for (i = 0; i < HNS3_RSS_IND_TBL_SIZE; i++)
614 rss_cfg->rss_indirection_tbl[i] =
615 i % hw->alloc_rss_size;
/*
 * Map Tx queues to TCs: equal split across valid TCs, then fill each
 * TC's queue offset/count, disabling TCs outside the hardware map.
 */
622 hns3_tc_queue_mapping_cfg(struct hns3_hw *hw, uint16_t nb_tx_q)
624 struct hns3_tc_queue_info *tc_queue;
625 uint16_t used_tx_queues;
626 uint16_t tx_qnum_per_tc;
629 tx_qnum_per_tc = nb_tx_q / hw->num_tc;
630 used_tx_queues = hw->num_tc * tx_qnum_per_tc;
631 if (used_tx_queues != nb_tx_q) {
632 hns3_err(hw, "tx queue number (%u) configured must be an "
633 "integral multiple of valid tc number (%u).",
634 nb_tx_q, hw->num_tc);
638 hw->used_tx_queues = used_tx_queues;
639 hw->tx_qnum_per_tc = tx_qnum_per_tc;
640 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
641 tc_queue = &hw->tc_queue[i];
642 if (hw->hw_tc_map & BIT(i) && i < hw->num_tc) {
643 tc_queue->enable = true;
644 tc_queue->tqp_offset = i * hw->tx_qnum_per_tc;
645 tc_queue->tqp_count = hw->tx_qnum_per_tc;
648 /* Set to default queue if TC is disable */
649 tc_queue->enable = false;
650 tc_queue->tqp_offset = 0;
651 tc_queue->tqp_count = 0;
/* Apply both the Rx (RSS) and Tx queue-to-TC mappings. */
660 hns3_queue_to_tc_mapping(struct hns3_hw *hw, uint16_t nb_rx_q, uint16_t nb_tx_q)
664 ret = hns3_set_rss_size(hw, nb_rx_q);
668 return hns3_tc_queue_mapping_cfg(hw, nb_tx_q);
/* Refresh the queue mapping after a TC-count change; also syncs prio_tc. */
672 hns3_dcb_update_tc_queue_mapping(struct hns3_hw *hw, uint16_t nb_rx_q,
675 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
676 struct hns3_pf *pf = &hns->pf;
679 hw->num_tc = hw->dcb_info.num_tc;
680 ret = hns3_queue_to_tc_mapping(hw, nb_rx_q, nb_tx_q);
685 memcpy(pf->prio_tc, hw->dcb_info.prio_tc, HNS3_MAX_USER_PRIO);
/*
 * Initialize default DCB bookkeeping: PG0 takes the full bandwidth and
 * owns all TCs, all user priorities map to TC0, DWRR scheduling is used
 * at every level.
 */
691 hns3_dcb_info_init(struct hns3_hw *hw)
693 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
694 struct hns3_pf *pf = &hns->pf;
697 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
698 hw->dcb_info.num_pg != 1)
701 /* Initializing PG information */
702 memset(hw->dcb_info.pg_info, 0,
703 sizeof(struct hns3_pg_info) * HNS3_PG_NUM);
704 for (i = 0; i < hw->dcb_info.num_pg; i++) {
705 hw->dcb_info.pg_dwrr[i] = i ? 0 : BW_MAX_PERCENT;
706 hw->dcb_info.pg_info[i].pg_id = i;
707 hw->dcb_info.pg_info[i].pg_sch_mode = HNS3_SCH_MODE_DWRR;
708 hw->dcb_info.pg_info[i].bw_limit = HNS3_ETHER_MAX_RATE;
713 hw->dcb_info.pg_info[i].tc_bit_map = hw->hw_tc_map;
714 for (k = 0; k < hw->dcb_info.num_tc; k++)
715 hw->dcb_info.pg_info[i].tc_dwrr[k] = BW_MAX_PERCENT;
718 /* All UPs mapping to TC0 */
719 for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
720 hw->dcb_info.prio_tc[i] = 0;
722 /* Initializing tc information */
723 memset(hw->dcb_info.tc_info, 0,
724 sizeof(struct hns3_tc_info) * HNS3_MAX_TC_NUM);
725 for (i = 0; i < hw->dcb_info.num_tc; i++) {
726 hw->dcb_info.tc_info[i].tc_id = i;
727 hw->dcb_info.tc_info[i].tc_sch_mode = HNS3_SCH_MODE_DWRR;
728 hw->dcb_info.tc_info[i].pgid = 0;
729 hw->dcb_info.tc_info[i].bw_limit =
730 hw->dcb_info.pg_info[0].bw_limit;
/* Configure the scheduling mode of every PG (level 2 of the TM tree). */
737 hns3_dcb_lvl2_schd_mode_cfg(struct hns3_hw *hw)
739 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
740 struct hns3_pf *pf = &hns->pf;
743 /* Only being config on TC-Based scheduler mode */
744 if (pf->tx_sch_mode == HNS3_FLAG_VNET_BASE_SCH_MODE)
747 for (i = 0; i < hw->dcb_info.num_pg; i++) {
748 ret = hns3_dcb_pg_schd_mode_cfg(hw, i);
/* Configure scheduling modes of priorities and queue sets (levels 3/4). */
757 hns3_dcb_lvl34_schd_mode_cfg(struct hns3_hw *hw)
759 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
760 struct hns3_pf *pf = &hns->pf;
764 if (pf->tx_sch_mode == HNS3_FLAG_TC_BASE_SCH_MODE) {
765 for (i = 0; i < hw->dcb_info.num_tc; i++) {
766 ret = hns3_dcb_pri_schd_mode_cfg(hw, i);
770 ret = hns3_dcb_qs_schd_mode_cfg(hw, i,
/* Configure the scheduling mode for every TM level. */
781 hns3_dcb_schd_mode_cfg(struct hns3_hw *hw)
785 ret = hns3_dcb_lvl2_schd_mode_cfg(hw);
787 hns3_err(hw, "config lvl2_schd_mode failed: %d", ret);
791 ret = hns3_dcb_lvl34_schd_mode_cfg(hw);
793 hns3_err(hw, "config lvl34_schd_mode failed: %d", ret);
/*
 * Program per-TC priority weights from the owning PG's tc_dwrr table
 * and give each TC's queue set the maximum weight.
 */
799 hns3_dcb_pri_tc_base_dwrr_cfg(struct hns3_hw *hw)
801 struct hns3_pg_info *pg_info;
805 for (i = 0; i < hw->dcb_info.num_tc; i++) {
806 pg_info = &hw->dcb_info.pg_info[hw->dcb_info.tc_info[i].pgid];
807 dwrr = pg_info->tc_dwrr[i];
809 ret = hns3_dcb_pri_weight_cfg(hw, i, dwrr);
812 "fail to send priority weight cmd: %d, ret = %d",
817 ret = hns3_dcb_qs_weight_cfg(hw, i, BW_MAX_PERCENT);
819 hns3_err(hw, "fail to send qs_weight cmd: %d, ret = %d",
/*
 * Configure priority-level DWRR weights; on DCB-capable devices also
 * push the ETS TC weight table, tolerating firmware that does not
 * implement that command (-EOPNOTSUPP is only logged).
 */
829 hns3_dcb_pri_dwrr_cfg(struct hns3_hw *hw)
831 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
832 struct hns3_pf *pf = &hns->pf;
836 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
839 ret = hns3_dcb_pri_tc_base_dwrr_cfg(hw);
843 if (!hns3_dev_dcb_supported(hw))
846 ret = hns3_dcb_ets_tc_dwrr_cfg(hw);
847 if (ret == -EOPNOTSUPP) {
848 version = hw->fw_version;
850 "fw %lu.%lu.%lu.%lu doesn't support ets tc weight cmd",
851 hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
852 HNS3_FW_VERSION_BYTE3_S),
853 hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
854 HNS3_FW_VERSION_BYTE2_S),
855 hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
856 HNS3_FW_VERSION_BYTE1_S),
857 hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
858 HNS3_FW_VERSION_BYTE0_S));
/* Program DWRR weights for every PG (TC-based mode only). */
866 hns3_dcb_pg_dwrr_cfg(struct hns3_hw *hw)
868 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
869 struct hns3_pf *pf = &hns->pf;
873 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
877 for (i = 0; i < hw->dcb_info.num_pg; i++) {
879 ret = hns3_dcb_pg_weight_cfg(hw, i, hw->dcb_info.pg_dwrr[i]);
/* Configure DWRR weights at PG level, then at priority level. */
888 hns3_dcb_dwrr_cfg(struct hns3_hw *hw)
892 ret = hns3_dcb_pg_dwrr_cfg(hw);
894 hns3_err(hw, "config pg_dwrr failed: %d", ret);
898 ret = hns3_dcb_pri_dwrr_cfg(hw);
900 hns3_err(hw, "config pri_dwrr failed: %d", ret);
/* Configure shapers at port, PG and priority level, in that order. */
906 hns3_dcb_shaper_cfg(struct hns3_hw *hw)
910 ret = hns3_dcb_port_shaper_cfg(hw);
912 hns3_err(hw, "config port shaper failed: %d", ret);
916 ret = hns3_dcb_pg_shaper_cfg(hw);
918 hns3_err(hw, "config pg shaper failed: %d", ret);
922 return hns3_dcb_pri_shaper_cfg(hw);
/*
 * Link Tx queue @q_id to queue set @qs_id, re-encoding the qset id for
 * revision 0x21 hardware which only consumes 10 bits of qset_id
 * directly (high bits are carried in a separate extension field).
 */
926 hns3_q_to_qs_map_cfg(struct hns3_hw *hw, uint16_t q_id, uint16_t qs_id)
928 struct hns3_nq_to_qs_link_cmd *map;
929 struct hns3_cmd_desc desc;
930 uint16_t tmp_qs_id = 0;
934 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_NQ_TO_QS_LINK, false);
936 map = (struct hns3_nq_to_qs_link_cmd *)desc.data;
938 map->nq_id = rte_cpu_to_le_16(q_id);
941 * Network engine with revision_id 0x21 uses 0~9 bit of qs_id to
942 * configure qset_id. So we need to convert qs_id to the follow
943 * format to support qset_id > 1024.
944 * qs_id: | 15 | 14 ~ 10 | 9 ~ 0 |
947 * qset_id: | 15 ~ 11 | 10 | 9 ~ 0 |
948 * | qs_id_h | vld | qs_id_l |
950 qs_id_l = hns3_get_field(qs_id, HNS3_DCB_QS_ID_L_MSK,
952 qs_id_h = hns3_get_field(qs_id, HNS3_DCB_QS_ID_H_MSK,
954 hns3_set_field(tmp_qs_id, HNS3_DCB_QS_ID_L_MSK, HNS3_DCB_QS_ID_L_S,
956 hns3_set_field(tmp_qs_id, HNS3_DCB_QS_ID_H_EXT_MSK,
957 HNS3_DCB_QS_ID_H_EXT_S, qs_id_h);
958 map->qset_id = rte_cpu_to_le_16(tmp_qs_id | HNS3_DCB_Q_QS_LINK_VLD_MSK);
960 return hns3_cmd_send(hw, &desc, 1);
/* Map every queue of every valid TC to that TC's queue set. */
964 hns3_q_to_qs_map(struct hns3_hw *hw)
966 struct hns3_tc_queue_info *tc_queue;
971 for (i = 0; i < hw->num_tc; i++) {
972 tc_queue = &hw->tc_queue[i];
973 for (j = 0; j < tc_queue->tqp_count; j++) {
974 q_id = tc_queue->tqp_offset + j;
975 ret = hns3_q_to_qs_map_cfg(hw, q_id, i);
/* Configure qset->priority and queue->qset mappings (TC-based mode only). */
985 hns3_pri_q_qs_cfg(struct hns3_hw *hw)
987 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
988 struct hns3_pf *pf = &hns->pf;
992 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
995 /* Cfg qs -> pri mapping */
996 for (i = 0; i < hw->num_tc; i++) {
997 ret = hns3_qs_to_pri_map_cfg(hw, i, i);
999 hns3_err(hw, "qs_to_pri mapping fail: %d", ret);
1004 /* Cfg q -> qs mapping */
1005 ret = hns3_q_to_qs_map(hw);
1007 hns3_err(hw, "nq_to_qs mapping fail: %d", ret);
/* Configure all DCB mapping tables: UP->TC, PG->priority, queue->qset. */
1013 hns3_dcb_map_cfg(struct hns3_hw *hw)
1017 ret = hns3_up_to_tc_map(hw);
1019 hns3_err(hw, "up_to_tc mapping fail: %d", ret);
1023 ret = hns3_pg_to_pri_map(hw);
1025 hns3_err(hw, "pri_to_pg mapping fail: %d", ret);
1029 return hns3_pri_q_qs_cfg(hw);
/* Full TM scheduler setup: mappings, shapers, DWRR weights, schd modes. */
1033 hns3_dcb_schd_setup_hw(struct hns3_hw *hw)
1037 /* Cfg dcb mapping */
1038 ret = hns3_dcb_map_cfg(hw);
1042 /* Cfg dcb shaper */
1043 ret = hns3_dcb_shaper_cfg(hw);
1048 ret = hns3_dcb_dwrr_cfg(hw);
1052 /* Cfg schd mode for each level schd */
1053 return hns3_dcb_schd_mode_cfg(hw);
/*
 * Program MAC pause parameters: station address (written to both the
 * primary and extra address slots), transmit gap and transmit time.
 */
1057 hns3_pause_param_cfg(struct hns3_hw *hw, const uint8_t *addr,
1058 uint8_t pause_trans_gap, uint16_t pause_trans_time)
1060 struct hns3_cfg_pause_param_cmd *pause_param;
1061 struct hns3_cmd_desc desc;
1063 pause_param = (struct hns3_cfg_pause_param_cmd *)desc.data;
1065 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PARA, false);
1067 memcpy(pause_param->mac_addr, addr, RTE_ETHER_ADDR_LEN);
1068 memcpy(pause_param->mac_addr_extra, addr, RTE_ETHER_ADDR_LEN);
1069 pause_param->pause_trans_gap = pause_trans_gap;
1070 pause_param->pause_trans_time = rte_cpu_to_le_16(pause_trans_time);
1072 return hns3_cmd_send(hw, &desc, 1);
/*
 * Update only the MAC address used in pause frames, preserving the
 * gap/time values read back from hardware.
 */
1076 hns3_pause_addr_cfg(struct hns3_hw *hw, const uint8_t *mac_addr)
1078 struct hns3_cfg_pause_param_cmd *pause_param;
1079 struct hns3_cmd_desc desc;
1080 uint16_t trans_time;
1084 pause_param = (struct hns3_cfg_pause_param_cmd *)desc.data;
1086 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PARA, true);
1088 ret = hns3_cmd_send(hw, &desc, 1);
1092 trans_gap = pause_param->pause_trans_gap;
1093 trans_time = rte_le_to_cpu_16(pause_param->pause_trans_time);
1095 return hns3_pause_param_cfg(hw, mac_addr, trans_gap, trans_time);
/*
 * Derive a legal pause transmit gap (< pause_time / 2) from the
 * requested pause time, clamping too-small times up to the minimum,
 * then program both into hardware.
 */
1099 hns3_pause_param_setup_hw(struct hns3_hw *hw, uint16_t pause_time)
1101 #define PAUSE_TIME_DIV_BY 2
1102 #define PAUSE_TIME_MIN_VALUE 0x4
1104 struct hns3_mac *mac = &hw->mac;
1105 uint8_t pause_trans_gap;
1108 * Pause transmit gap must be less than "pause_time / 2", otherwise
1109 * the behavior of MAC is undefined.
1111 if (pause_time > PAUSE_TIME_DIV_BY * HNS3_DEFAULT_PAUSE_TRANS_GAP)
1112 pause_trans_gap = HNS3_DEFAULT_PAUSE_TRANS_GAP;
1113 else if (pause_time >= PAUSE_TIME_MIN_VALUE &&
1114 pause_time <= PAUSE_TIME_DIV_BY * HNS3_DEFAULT_PAUSE_TRANS_GAP)
1115 pause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1;
1117 hns3_warn(hw, "pause_time(%d) is adjusted to 4", pause_time)
1118 pause_time = PAUSE_TIME_MIN_VALUE;
1119 pause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1;
1122 return hns3_pause_param_cfg(hw, mac->mac_addr,
1123 pause_trans_gap, pause_time);
/* Enable/disable MAC-level (link) pause in the Tx and Rx directions. */
1127 hns3_mac_pause_en_cfg(struct hns3_hw *hw, bool tx, bool rx)
1129 struct hns3_cmd_desc desc;
1131 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PAUSE_EN, false);
1133 desc.data[0] = rte_cpu_to_le_32((tx ? HNS3_TX_MAC_PAUSE_EN_MSK : 0) |
1134 (rx ? HNS3_RX_MAC_PAUSE_EN_MSK : 0));
1136 return hns3_cmd_send(hw, &desc, 1);
/* Enable/disable PFC for the priorities set in @pfc_bitmap. */
1140 hns3_pfc_pause_en_cfg(struct hns3_hw *hw, uint8_t pfc_bitmap, bool tx, bool rx)
1142 struct hns3_cmd_desc desc;
1143 struct hns3_pfc_en_cmd *pfc = (struct hns3_pfc_en_cmd *)desc.data;
1145 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PFC_PAUSE_EN, false);
1147 pfc->tx_rx_en_bitmap = (uint8_t)((tx ? HNS3_TX_MAC_PAUSE_EN_MSK : 0) |
1148 (rx ? HNS3_RX_MAC_PAUSE_EN_MSK : 0));
1150 pfc->pri_en_bitmap = pfc_bitmap;
1152 return hns3_cmd_send(hw, &desc, 1);
/* Write one back-pressure qset bitmap group for TC @tc. */
1156 hns3_qs_bp_cfg(struct hns3_hw *hw, uint8_t tc, uint8_t grp_id, uint32_t bit_map)
1158 struct hns3_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
1159 struct hns3_cmd_desc desc;
1161 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_BP_TO_QSET_MAPPING, false);
1163 bp_to_qs_map_cmd = (struct hns3_bp_to_qs_map_cmd *)desc.data;
1165 bp_to_qs_map_cmd->tc_id = tc;
1166 bp_to_qs_map_cmd->qs_group_id = grp_id;
1167 bp_to_qs_map_cmd->qs_bit_map = rte_cpu_to_le_32(bit_map);
1169 return hns3_cmd_send(hw, &desc, 1);
/* Translate the current flow-control mode into Tx/Rx enable flags. */
1173 hns3_get_rx_tx_en_status(struct hns3_hw *hw, bool *tx_en, bool *rx_en)
1175 switch (hw->current_mode) {
1180 case HNS3_FC_RX_PAUSE:
1184 case HNS3_FC_TX_PAUSE:
/* Apply MAC (link-level) pause settings for the current FC status. */
1200 hns3_mac_pause_setup_hw(struct hns3_hw *hw)
1204 if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)
1205 hns3_get_rx_tx_en_status(hw, &tx_en, &rx_en);
1211 return hns3_mac_pause_en_cfg(hw, tx_en, rx_en);
/* Apply PFC settings for the current FC status and pfc_en bitmap. */
1215 hns3_pfc_setup_hw(struct hns3_hw *hw)
1219 if (hw->current_fc_status == HNS3_FC_STATUS_PFC)
1220 hns3_get_rx_tx_en_status(hw, &tx_en, &rx_en);
1226 return hns3_pfc_pause_en_cfg(hw, hw->dcb_info.pfc_en, tx_en, rx_en);
1230 * Each Tc has a 1024 queue sets to backpress, it divides to
1231 * 32 group, each group contains 32 queue sets, which can be
1232 * represented by uint32_t bitmap.
1235 hns3_bp_setup_hw(struct hns3_hw *hw, uint8_t tc)
1241 for (i = 0; i < HNS3_BP_GRP_NUM; i++) {
1242 uint8_t grp, sub_grp;
1245 grp = hns3_get_field(tc, HNS3_BP_GRP_ID_M, HNS3_BP_GRP_ID_S);
1246 sub_grp = hns3_get_field(tc, HNS3_BP_SUB_GRP_ID_M,
1247 HNS3_BP_SUB_GRP_ID_S);
1249 qs_bitmap |= (1 << sub_grp);
1251 ret = hns3_qs_bp_cfg(hw, tc, i, qs_bitmap);
/* Configure back-pressure for every valid TC. */
1260 hns3_dcb_bp_setup(struct hns3_hw *hw)
1264 for (i = 0; i < hw->dcb_info.num_tc; i++) {
1265 ret = hns3_bp_setup_hw(hw, i);
/*
 * Top-level pause setup: pause parameters, MAC pause, then (on
 * DCB-capable devices only) PFC and per-TC back-pressure.
 */
1274 hns3_dcb_pause_setup_hw(struct hns3_hw *hw)
1276 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1277 struct hns3_pf *pf = &hns->pf;
1280 ret = hns3_pause_param_setup_hw(hw, pf->pause_time);
1282 hns3_err(hw, "Fail to set pause parameter. ret = %d", ret);
1286 ret = hns3_mac_pause_setup_hw(hw);
1288 hns3_err(hw, "Fail to setup MAC pause. ret = %d", ret);
1292 /* Only DCB-supported dev supports qset back pressure and pfc cmd */
1293 if (!hns3_dev_dcb_supported(hw))
1296 ret = hns3_pfc_setup_hw(hw);
1298 hns3_err(hw, "config pfc failed! ret = %d", ret);
1302 return hns3_dcb_bp_setup(hw);
/* Build the bitmap of TCs that carry a PFC-enabled user priority. */
1306 hns3_dcb_undrop_tc_map(struct hns3_hw *hw, uint8_t pfc_en)
1308 uint8_t pfc_map = 0;
1312 prio_tc = hw->dcb_info.prio_tc;
1313 for (i = 0; i < hw->dcb_info.num_tc; i++) {
1314 for (j = 0; j < HNS3_MAX_USER_PRIO; j++) {
1315 if (prio_tc[j] == i && pfc_en & BIT(j)) {
/*
 * Compare the requested DCB config (dcb_rx_conf) with the current
 * driver state; reports the requested TC count through *tc and whether
 * anything differs through *changed.
 */
1326 hns3_dcb_cfg_validate(struct hns3_adapter *hns, uint8_t *tc, bool *changed)
1328 struct rte_eth_dcb_rx_conf *dcb_rx_conf;
1329 struct hns3_hw *hw = &hns->hw;
1334 dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
1335 for (i = 0; i < HNS3_MAX_USER_PRIO; i++) {
1336 if (dcb_rx_conf->dcb_tc[i] != hw->dcb_info.prio_tc[i])
1339 if (dcb_rx_conf->dcb_tc[i] > max_tc)
1340 max_tc = dcb_rx_conf->dcb_tc[i];
1343 if (*tc != hw->dcb_info.num_tc)
1347 * We ensure that dcb information can be reconfigured
1348 * after the hns3_priority_flow_ctrl_set function called.
1350 if (hw->current_mode != HNS3_FC_FULL)
1352 pfc_en = RTE_LEN2MASK((uint8_t)dcb_rx_conf->nb_tcs, uint8_t);
1353 if (hw->dcb_info.pfc_en != pfc_en)
/*
 * Rebuild driver DCB state from dcb_rx_conf: single PG, evenly split
 * TC weights (remainder spread over the first TCs), then refresh the
 * queue-to-TC mapping.
 */
1358 hns3_dcb_info_cfg(struct hns3_adapter *hns)
1360 struct rte_eth_dcb_rx_conf *dcb_rx_conf;
1361 struct hns3_pf *pf = &hns->pf;
1362 struct hns3_hw *hw = &hns->hw;
1363 uint8_t tc_bw, bw_rest;
1367 dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
1368 pf->local_max_tc = (uint8_t)dcb_rx_conf->nb_tcs;
1369 pf->pfc_max = (uint8_t)dcb_rx_conf->nb_tcs;
1372 memset(hw->dcb_info.pg_info, 0,
1373 sizeof(struct hns3_pg_info) * HNS3_PG_NUM);
1374 hw->dcb_info.pg_dwrr[0] = BW_MAX_PERCENT;
1375 hw->dcb_info.pg_info[0].pg_id = 0;
1376 hw->dcb_info.pg_info[0].pg_sch_mode = HNS3_SCH_MODE_DWRR;
1377 hw->dcb_info.pg_info[0].bw_limit = HNS3_ETHER_MAX_RATE;
1378 hw->dcb_info.pg_info[0].tc_bit_map = hw->hw_tc_map;
1380 /* Each tc has same bw for valid tc by default */
1381 tc_bw = BW_MAX_PERCENT / hw->dcb_info.num_tc;
1382 for (i = 0; i < hw->dcb_info.num_tc; i++)
1383 hw->dcb_info.pg_info[0].tc_dwrr[i] = tc_bw;
1384 /* To ensure the sum of tc_dwrr is equal to 100 */
1385 bw_rest = BW_MAX_PERCENT % hw->dcb_info.num_tc;
1386 for (j = 0; j < bw_rest; j++)
1387 hw->dcb_info.pg_info[0].tc_dwrr[j]++;
1388 for (; i < dcb_rx_conf->nb_tcs; i++)
1389 hw->dcb_info.pg_info[0].tc_dwrr[i] = 0;
1391 /* All tcs map to pg0 */
1392 memset(hw->dcb_info.tc_info, 0,
1393 sizeof(struct hns3_tc_info) * HNS3_MAX_TC_NUM);
1394 for (i = 0; i < hw->dcb_info.num_tc; i++) {
1395 hw->dcb_info.tc_info[i].tc_id = i;
1396 hw->dcb_info.tc_info[i].tc_sch_mode = HNS3_SCH_MODE_DWRR;
1397 hw->dcb_info.tc_info[i].pgid = 0;
1398 hw->dcb_info.tc_info[i].bw_limit =
1399 hw->dcb_info.pg_info[0].bw_limit;
1402 for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
1403 hw->dcb_info.prio_tc[i] = dcb_rx_conf->dcb_tc[i];
1405 ret = hns3_dcb_update_tc_queue_mapping(hw, hw->data->nb_rx_queues,
1406 hw->data->nb_tx_queues);
1408 hns3_err(hw, "update tc queue mapping failed, ret = %d.", ret);
/*
 * Validate queue counts against @num_tc, update num_tc/hw_tc_map and
 * re-derive the rest of the DCB configuration.
 */
1414 hns3_dcb_info_update(struct hns3_adapter *hns, uint8_t num_tc)
1416 struct hns3_pf *pf = &hns->pf;
1417 struct hns3_hw *hw = &hns->hw;
1418 uint16_t nb_rx_q = hw->data->nb_rx_queues;
1419 uint16_t nb_tx_q = hw->data->nb_tx_queues;
1420 uint8_t bit_map = 0;
1423 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
1424 hw->dcb_info.num_pg != 1)
1427 if (nb_rx_q < num_tc) {
1428 hns3_err(hw, "number of Rx queues(%d) is less than tcs(%d).",
1433 if (nb_tx_q < num_tc) {
1434 hns3_err(hw, "number of Tx queues(%d) is less than tcs(%d).",
1439 /* Currently not support uncontinuous tc */
1440 hw->dcb_info.num_tc = num_tc;
1441 for (i = 0; i < hw->dcb_info.num_tc; i++)
1446 hw->dcb_info.num_tc = 1;
1448 hw->hw_tc_map = bit_map;
1450 return hns3_dcb_info_cfg(hns);
1454 hns3_dcb_hw_configure(struct hns3_adapter *hns)
1456 struct rte_eth_dcb_rx_conf *dcb_rx_conf;
1457 struct hns3_pf *pf = &hns->pf;
1458 struct hns3_hw *hw = &hns->hw;
1459 enum hns3_fc_status fc_status = hw->current_fc_status;
1460 enum hns3_fc_mode current_mode = hw->current_mode;
1461 uint8_t hw_pfc_map = hw->dcb_info.hw_pfc_map;
1464 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
1465 pf->tx_sch_mode != HNS3_FLAG_VNET_BASE_SCH_MODE)
1468 ret = hns3_dcb_schd_setup_hw(hw);
1470 hns3_err(hw, "dcb schdule configure failed! ret = %d", ret);
1474 if (hw->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
1475 dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
1476 if (dcb_rx_conf->nb_tcs == 0)
1477 hw->dcb_info.pfc_en = 1; /* tc0 only */
1479 hw->dcb_info.pfc_en =
1480 RTE_LEN2MASK((uint8_t)dcb_rx_conf->nb_tcs, uint8_t);
1482 hw->dcb_info.hw_pfc_map =
1483 hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en);
1485 ret = hns3_buffer_alloc(hw);
1489 hw->current_fc_status = HNS3_FC_STATUS_PFC;
1490 hw->current_mode = HNS3_FC_FULL;
1491 ret = hns3_dcb_pause_setup_hw(hw);
1493 hns3_err(hw, "setup pfc failed! ret = %d", ret);
1494 goto pfc_setup_fail;
1498 * Although dcb_capability_en is lack of ETH_DCB_PFC_SUPPORT
1499 * flag, the DCB information is configured, such as tc numbers.
1500 * Therefore, refreshing the allocation of packet buffer is
1503 ret = hns3_buffer_alloc(hw);
1511 hw->current_mode = current_mode;
1512 hw->current_fc_status = fc_status;
1513 hw->dcb_info.hw_pfc_map = hw_pfc_map;
1514 status = hns3_buffer_alloc(hw);
1516 hns3_err(hw, "recover packet buffer fail! status = %d", status);
1522 * hns3_dcb_configure - setup dcb related config
1523 * @hns: pointer to hns3 adapter
1524 * Returns 0 on success, negative value on failure.
/*
 * NOTE(review): extract has elided lines (error checks/returns);
 * comments describe only the visible statements.
 */
1527 hns3_dcb_configure(struct hns3_adapter *hns)
1529 struct hns3_hw *hw = &hns->hw;
1530 bool map_changed = false;
/* Validate the requested config; reports whether the TC map changed. */
1534 hns3_dcb_cfg_validate(hns, &num_tc, &map_changed);
/* Reconfigure when the map changed or a reset is in progress. */
1535 if (map_changed || rte_atomic16_read(&hw->reset.resetting)) {
1536 ret = hns3_dcb_info_update(hns, num_tc);
1538 hns3_err(hw, "dcb info update failed: %d", ret);
1542 ret = hns3_dcb_hw_configure(hns);
1544 hns3_err(hw, "dcb sw configure failed: %d", ret);
/*
 * hns3_dcb_init_hw - program the DCB scheduler and PAUSE settings into
 * hardware. NOTE(review): error-check/return lines are elided in this extract.
 */
1553 hns3_dcb_init_hw(struct hns3_hw *hw)
1557 ret = hns3_dcb_schd_setup_hw(hw);
1559 hns3_err(hw, "dcb schedule setup failed: %d", ret);
1563 ret = hns3_dcb_pause_setup_hw(hw);
1565 hns3_err(hw, "PAUSE setup failed: %d", ret);
/*
 * hns3_dcb_init - initialize DCB software state and hardware during driver
 * init and during reset recovery.
 * NOTE(review): this extract has elided lines; comments cover visible code.
 */
1571 hns3_dcb_init(struct hns3_hw *hw)
1573 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1574 struct hns3_pf *pf = &hns->pf;
1575 uint16_t default_tqp_num;
1578 PMD_INIT_FUNC_TRACE();
1581 * Based on the 'adapter_state' flag, the following branch is only
1582 * executed to set up the default DCB configuration while the driver
1583 * is initializing. Because the driver saves the DCB-related
1584 * information before a reset is triggered, the re-init stage of the
1585 * reset process must not enter this branch, otherwise that information
1588 if (hw->adapter_state == HNS3_NIC_UNINITIALIZED) {
/* Defaults: no flow control, default pause transmit time. */
1589 hw->requested_mode = HNS3_FC_NONE;
1590 hw->current_mode = hw->requested_mode;
1591 pf->pause_time = HNS3_DEFAULT_PAUSE_TRANS_TIME;
1592 hw->current_fc_status = HNS3_FC_STATUS_NONE;
1594 ret = hns3_dcb_info_init(hw);
1596 hns3_err(hw, "dcb info init failed, ret = %d.", ret);
1601 * The number of queues configured by default cannot exceed
1602 * the maximum number of queues for a single TC.
1604 default_tqp_num = RTE_MIN(hw->rss_size_max,
1605 hw->tqps_num / hw->dcb_info.num_tc);
1606 ret = hns3_dcb_update_tc_queue_mapping(hw, default_tqp_num,
1610 "update tc queue mapping failed, ret = %d.",
1617 * The DCB hardware is configured by the following call during both
1618 * the driver-initialization process and the reset process. The
1619 * driver restores the DCB hardware configuration directly from the
1620 * software-maintained DCB-related information once driver
1621 * initialization has finished and a reset arrives.
1623 ret = hns3_dcb_init_hw(hw);
1625 hns3_err(hw, "dcb init hardware failed, ret = %d.", ret);
/*
 * hns3_update_queue_map_configure - refresh the TC <-> queue mapping from
 * the current ethdev Rx/Tx queue counts, then re-map NQ to QS in hardware.
 * NOTE(review): error-check/return lines are elided in this extract.
 */
1633 hns3_update_queue_map_configure(struct hns3_adapter *hns)
1635 struct hns3_hw *hw = &hns->hw;
1636 uint16_t nb_rx_q = hw->data->nb_rx_queues;
1637 uint16_t nb_tx_q = hw->data->nb_tx_queues;
1640 ret = hns3_dcb_update_tc_queue_mapping(hw, nb_rx_q, nb_tx_q);
1642 hns3_err(hw, "failed to update tc queue mapping, ret = %d.",
1646 ret = hns3_q_to_qs_map(hw);
1648 hns3_err(hw, "failed to map nq to qs, ret = %d.", ret);
/*
 * hns3_dcb_cfg_update - dispatch on the Rx multi-queue mode: run the full
 * DCB configuration when DCB is enabled, otherwise only refresh the queue
 * mapping. NOTE(review): extract has elided lines (else branch, returns).
 */
1654 hns3_dcb_cfg_update(struct hns3_adapter *hns)
1656 struct hns3_hw *hw = &hns->hw;
1657 enum rte_eth_rx_mq_mode mq_mode = hw->data->dev_conf.rxmode.mq_mode;
1660 if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) {
1661 ret = hns3_dcb_configure(hns);
1663 hns3_err(hw, "Failed to config dcb: %d", ret);
1666 * Update queue map without PFC configuration,
1667 * due to queues reconfigured by user.
1669 ret = hns3_update_queue_map_configure(hns);
1672 "Failed to update queue mapping configure: %d",
1680 * hns3_dcb_pfc_enable - Enable priority flow control
1681 * @dev: pointer to ethernet device
1683 * Configures the PFC settings for one priority.
/*
 * NOTE(review): this extract has elided lines (error checks, returns,
 * the pfc_setup_fail label); comments describe only the visible code.
 */
1686 hns3_dcb_pfc_enable(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
1688 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1689 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
/* Snapshot current state so a failure can restore it below. */
1690 enum hns3_fc_status fc_status = hw->current_fc_status;
1691 enum hns3_fc_mode current_mode = hw->current_mode;
1692 uint8_t hw_pfc_map = hw->dcb_info.hw_pfc_map;
1693 uint8_t pfc_en = hw->dcb_info.pfc_en;
1694 uint8_t priority = pfc_conf->priority;
1695 uint16_t pause_time = pf->pause_time;
/* Apply the user-requested pause time and flow-control mode. */
1698 pf->pause_time = pfc_conf->fc.pause_time;
1699 hw->current_mode = hw->requested_mode;
1700 hw->current_fc_status = HNS3_FC_STATUS_PFC;
/* Add this priority to the PFC-enabled bit mask. */
1701 hw->dcb_info.pfc_en |= BIT(priority);
1702 hw->dcb_info.hw_pfc_map =
1703 hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en);
/* Re-allocate the packet buffer to match the new undrop TC map. */
1704 ret = hns3_buffer_alloc(hw);
1706 goto pfc_setup_fail;
1709 * The flow control mode of all UPs will be changed based on
1710 * current_mode coming from user.
1712 ret = hns3_dcb_pause_setup_hw(hw);
1714 hns3_err(hw, "enable pfc failed! ret = %d", ret);
1715 goto pfc_setup_fail;
/*
 * Failure path: restore the saved state and re-run buffer allocation
 * to bring the packet buffer back in sync with the restored map.
 */
1721 hw->current_mode = current_mode;
1722 hw->current_fc_status = fc_status;
1723 pf->pause_time = pause_time;
1724 hw->dcb_info.pfc_en = pfc_en;
1725 hw->dcb_info.hw_pfc_map = hw_pfc_map;
1726 status = hns3_buffer_alloc(hw);
1728 hns3_err(hw, "recover packet buffer fail: %d", status);
1734 * hns3_fc_enable - Enable MAC pause
1735 * @dev: pointer to ethernet device
1737 * Configures the MAC pause settings.
1740 hns3_fc_enable(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1742 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1743 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1744 enum hns3_fc_status fc_status = hw->current_fc_status;
1745 enum hns3_fc_mode current_mode = hw->current_mode;
1746 uint16_t pause_time = pf->pause_time;
1749 pf->pause_time = fc_conf->pause_time;
1750 hw->current_mode = hw->requested_mode;
1753 * In fact, current_fc_status is HNS3_FC_STATUS_NONE when mode
1754 * of flow control is configured to be HNS3_FC_NONE.
1756 if (hw->current_mode == HNS3_FC_NONE)
1757 hw->current_fc_status = HNS3_FC_STATUS_NONE;
1759 hw->current_fc_status = HNS3_FC_STATUS_MAC_PAUSE;
1761 ret = hns3_dcb_pause_setup_hw(hw);
1763 hns3_err(hw, "enable MAC Pause failed! ret = %d", ret);
1770 hw->current_mode = current_mode;
1771 hw->current_fc_status = fc_status;
1772 pf->pause_time = pause_time;