/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018-2019 Hisilicon Limited.
+ * Copyright(c) 2018-2021 HiSilicon Limited.
*/
#include <rte_io.h>
return shapping_para;
}
-int
-hns3_dcb_port_shaper_cfg(struct hns3_hw *hw)
+static int
+hns3_dcb_port_shaper_cfg(struct hns3_hw *hw, uint32_t speed)
{
struct hns3_port_shapping_cmd *shap_cfg_cmd;
struct hns3_shaper_parameter shaper_parameter;
struct hns3_cmd_desc desc;
int ret;
- ret = hns3_shaper_para_calc(hw, hw->mac.link_speed,
+ ret = hns3_shaper_para_calc(hw, speed,
HNS3_SHAPER_LVL_PORT, &shaper_parameter);
if (ret) {
hns3_err(hw, "calculate shaper parameter failed: %d", ret);
* depends on the firmware version. But driver still needs to
* calculate it and configure to firmware for better compatibility.
*/
- shap_cfg_cmd->port_rate = rte_cpu_to_le_32(hw->mac.link_speed);
+ shap_cfg_cmd->port_rate = rte_cpu_to_le_32(speed);
hns3_set_bit(shap_cfg_cmd->flag, HNS3_TM_RATE_VLD_B, 1);
return hns3_cmd_send(hw, &desc, 1);
}
+/*
+ * Public wrapper that reprograms the port-level shaper for @speed and logs
+ * on failure. @speed is presumably the link speed in the same unit as
+ * hw->mac.link_speed (the value the DCB init path passes to
+ * hns3_dcb_port_shaper_cfg()) -- confirm against the caller.
+ * Returns 0 on success, else the error code from hns3_dcb_port_shaper_cfg().
+ * NOTE(review): "shappering" in the log message looks like a typo for
+ * "shaping"/"shaper" -- consider fixing before merge.
+ */
+int
+hns3_port_shaper_update(struct hns3_hw *hw, uint32_t speed)
+{
+	int ret;
+
+	ret = hns3_dcb_port_shaper_cfg(hw, speed);
+	if (ret)
+		hns3_err(hw, "configure port shappering failed: ret = %d", ret);
+
+	return ret;
+}
+
+
static int
hns3_dcb_pg_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,
uint8_t pg_id, uint32_t shapping_para, uint32_t rate)
return hns3_cmd_send(hw, &desc, 1);
}
-int
+static int
hns3_pg_shaper_rate_cfg(struct hns3_hw *hw, uint8_t pg_id, uint32_t rate)
{
struct hns3_shaper_parameter shaper_parameter;
return hns3_cmd_send(hw, &desc, 1);
}
-int
+static int
hns3_pri_shaper_rate_cfg(struct hns3_hw *hw, uint8_t tc_no, uint32_t rate)
{
struct hns3_shaper_parameter shaper_parameter;
* stage of the reset process.
*/
if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
- for (i = 0; i < HNS3_RSS_IND_TBL_SIZE; i++)
+ for (i = 0; i < hw->rss_ind_tbl_size; i++)
rss_cfg->rss_indirection_tbl[i] =
i % hw->alloc_rss_size;
}
{
int ret;
+ if (nb_rx_q < hw->num_tc) {
+ hns3_err(hw, "number of Rx queues(%u) is less than number of TC(%u).",
+ nb_rx_q, hw->num_tc);
+ return -EINVAL;
+ }
+
+ if (nb_tx_q < hw->num_tc) {
+ hns3_err(hw, "number of Tx queues(%u) is less than number of TC(%u).",
+ nb_tx_q, hw->num_tc);
+ return -EINVAL;
+ }
+
ret = hns3_set_rss_size(hw, nb_rx_q);
if (ret)
return ret;
if (ret)
return ret;
- if (!hns3_dev_dcb_supported(hw))
+ if (!hns3_dev_get_support(hw, DCB))
return 0;
ret = hns3_dcb_ets_tc_dwrr_cfg(hw);
{
int ret;
- ret = hns3_dcb_port_shaper_cfg(hw);
+ ret = hns3_dcb_port_shaper_cfg(hw, hw->mac.link_speed);
if (ret) {
hns3_err(hw, "config port shaper failed: %d", ret);
return ret;
static void
hns3_get_rx_tx_en_status(struct hns3_hw *hw, bool *tx_en, bool *rx_en)
{
- switch (hw->current_mode) {
+ switch (hw->requested_fc_mode) {
case HNS3_FC_NONE:
*tx_en = false;
*rx_en = false;
}
/* Only DCB-supported dev supports qset back pressure and pfc cmd */
- if (!hns3_dev_dcb_supported(hw))
+ if (!hns3_dev_get_support(hw, DCB))
return 0;
ret = hns3_pfc_setup_hw(hw);
return pfc_map;
}
-static void
-hns3_dcb_cfg_validate(struct hns3_adapter *hns, uint8_t *tc, bool *changed)
+/*
+ * Derive the number of TCs from the user-supplied DCB Rx config: scan
+ * dcb_tc[] over all user priorities and return the highest referenced TC id
+ * plus one. Replaces hns3_dcb_cfg_validate(); the old "changed" detection is
+ * dropped because the caller now reconfigures unconditionally.
+ */
+static uint8_t
+hns3_dcb_parse_num_tc(struct hns3_adapter *hns)
{
	struct rte_eth_dcb_rx_conf *dcb_rx_conf;
	struct hns3_hw *hw = &hns->hw;
-	uint16_t nb_rx_q = hw->data->nb_rx_queues;
-	uint16_t nb_tx_q = hw->data->nb_tx_queues;
-	uint8_t max_tc = 0;
-	uint8_t pfc_en;
+	uint8_t max_tc_id = 0;
	int i;
	dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
	for (i = 0; i < HNS3_MAX_USER_PRIO; i++) {
-		if (dcb_rx_conf->dcb_tc[i] != hw->dcb_info.prio_tc[i])
-			*changed = true;
-
-		if (dcb_rx_conf->dcb_tc[i] > max_tc)
-			max_tc = dcb_rx_conf->dcb_tc[i];
+		if (dcb_rx_conf->dcb_tc[i] > max_tc_id)
+			max_tc_id = dcb_rx_conf->dcb_tc[i];
	}
-	*tc = max_tc + 1;
-	if (*tc != hw->dcb_info.num_tc)
-		*changed = true;
-	/*
-	 * We ensure that dcb information can be reconfigured
-	 * after the hns3_priority_flow_ctrl_set function called.
-	 */
-	if (hw->current_mode != HNS3_FC_FULL)
-		*changed = true;
-	pfc_en = RTE_LEN2MASK((uint8_t)dcb_rx_conf->nb_tcs, uint8_t);
-	if (hw->dcb_info.pfc_en != pfc_en)
-		*changed = true;
-
-	/* tx/rx queue number is reconfigured. */
-	if (nb_rx_q != hw->used_rx_queues || nb_tx_q != hw->used_tx_queues)
-		*changed = true;
+	/* Number of TC is equal to max_tc_id plus 1. */
+	return max_tc_id + 1;
}
static int
struct hns3_pf *pf = &hns->pf;
struct hns3_hw *hw = &hns->hw;
enum hns3_fc_status fc_status = hw->current_fc_status;
- enum hns3_fc_mode current_mode = hw->current_mode;
+ enum hns3_fc_mode requested_fc_mode = hw->requested_fc_mode;
uint8_t hw_pfc_map = hw->dcb_info.hw_pfc_map;
- int ret, status;
+ uint8_t pfc_en = hw->dcb_info.pfc_en;
+ int ret;
if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
pf->tx_sch_mode != HNS3_FLAG_VNET_BASE_SCH_MODE)
return ret;
}
- if (hw->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+ if (hw->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
if (dcb_rx_conf->nb_tcs == 0)
hw->dcb_info.pfc_en = 1; /* tc0 only */
hw->dcb_info.hw_pfc_map =
hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en);
- ret = hns3_buffer_alloc(hw);
- if (ret)
- return ret;
-
hw->current_fc_status = HNS3_FC_STATUS_PFC;
- hw->current_mode = HNS3_FC_FULL;
- ret = hns3_dcb_pause_setup_hw(hw);
- if (ret) {
- hns3_err(hw, "setup pfc failed! ret = %d", ret);
- goto pfc_setup_fail;
- }
+ hw->requested_fc_mode = HNS3_FC_FULL;
} else {
- /*
- * Although dcb_capability_en is lack of ETH_DCB_PFC_SUPPORT
- * flag, the DCB information is configured, such as tc numbers.
- * Therefore, refreshing the allocation of packet buffer is
- * necessary.
- */
- ret = hns3_buffer_alloc(hw);
- if (ret)
- return ret;
+ hw->current_fc_status = HNS3_FC_STATUS_NONE;
+ hw->requested_fc_mode = HNS3_FC_NONE;
+ hw->dcb_info.pfc_en = 0;
+ hw->dcb_info.hw_pfc_map = 0;
+ }
+
+ ret = hns3_buffer_alloc(hw);
+ if (ret)
+ goto cfg_fail;
+
+ ret = hns3_dcb_pause_setup_hw(hw);
+ if (ret) {
+ hns3_err(hw, "setup pfc failed! ret = %d", ret);
+ goto cfg_fail;
}
return 0;
-pfc_setup_fail:
- hw->current_mode = current_mode;
+cfg_fail:
+ hw->requested_fc_mode = requested_fc_mode;
hw->current_fc_status = fc_status;
+ hw->dcb_info.pfc_en = pfc_en;
hw->dcb_info.hw_pfc_map = hw_pfc_map;
- status = hns3_buffer_alloc(hw);
- if (status)
- hns3_err(hw, "recover packet buffer fail! status = %d", status);
return ret;
}
hns3_dcb_configure(struct hns3_adapter *hns)
{
struct hns3_hw *hw = &hns->hw;
- bool map_changed = false;
- uint8_t num_tc = 0;
+ uint8_t num_tc;
int ret;
- hns3_dcb_cfg_validate(hns, &num_tc, &map_changed);
- if (map_changed ||
- __atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
- ret = hns3_dcb_info_update(hns, num_tc);
- if (ret) {
- hns3_err(hw, "dcb info update failed: %d", ret);
- return ret;
- }
+ num_tc = hns3_dcb_parse_num_tc(hns);
+ ret = hns3_dcb_info_update(hns, num_tc);
+ if (ret) {
+ hns3_err(hw, "dcb info update failed: %d", ret);
+ return ret;
+ }
- ret = hns3_dcb_hw_configure(hns);
- if (ret) {
- hns3_err(hw, "dcb sw configure failed: %d", ret);
- return ret;
- }
+ ret = hns3_dcb_hw_configure(hns);
+ if (ret) {
+ hns3_err(hw, "dcb sw configure failed: %d", ret);
+ return ret;
}
return 0;
* will be changed.
*/
if (hw->adapter_state == HNS3_NIC_UNINITIALIZED) {
- hw->requested_mode = HNS3_FC_NONE;
- hw->current_mode = hw->requested_mode;
+ hw->requested_fc_mode = HNS3_FC_NONE;
pf->pause_time = HNS3_DEFAULT_PAUSE_TRANS_TIME;
hw->current_fc_status = HNS3_FC_STATUS_NONE;
return 0;
}
-static int
+int
hns3_update_queue_map_configure(struct hns3_adapter *hns)
{
struct hns3_hw *hw = &hns->hw;
+ enum rte_eth_rx_mq_mode mq_mode = hw->data->dev_conf.rxmode.mq_mode;
uint16_t nb_rx_q = hw->data->nb_rx_queues;
uint16_t nb_tx_q = hw->data->nb_tx_queues;
int ret;
+ if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
+ return 0;
+
ret = hns3_dcb_update_tc_queue_mapping(hw, nb_rx_q, nb_tx_q);
if (ret) {
hns3_err(hw, "failed to update tc queue mapping, ret = %d.",
return ret;
}
-int
-hns3_dcb_cfg_update(struct hns3_adapter *hns)
+/*
+ * Translate the generic ethdev flow-control mode into the driver's
+ * hns3_fc_mode and store it in hw->requested_fc_mode. Any value outside the
+ * four known modes is mapped to HNS3_FC_NONE with a warning, so the stored
+ * mode is always valid. Replaces the open-coded hns3_dcb_cfg_update();
+ * the DCB-vs-queue-map dispatch now lives in the callers (see the
+ * RTE_ETH_MQ_RX_DCB_FLAG check added to hns3_update_queue_map_configure()).
+ */
+static void
+hns3_get_fc_mode(struct hns3_hw *hw, enum rte_eth_fc_mode mode)
{
-	struct hns3_hw *hw = &hns->hw;
-	enum rte_eth_rx_mq_mode mq_mode = hw->data->dev_conf.rxmode.mq_mode;
-	int ret;
-
-	if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) {
-		ret = hns3_dcb_configure(hns);
-		if (ret)
-			hns3_err(hw, "Failed to config dcb: %d", ret);
-	} else {
-		/*
-		 * Update queue map without PFC configuration,
-		 * due to queues reconfigured by user.
-		 */
-		ret = hns3_update_queue_map_configure(hns);
-		if (ret)
-			hns3_err(hw,
-				 "Failed to update queue mapping configure: %d",
-				 ret);
+	switch (mode) {
+	case RTE_ETH_FC_NONE:
+		hw->requested_fc_mode = HNS3_FC_NONE;
+		break;
+	case RTE_ETH_FC_RX_PAUSE:
+		hw->requested_fc_mode = HNS3_FC_RX_PAUSE;
+		break;
+	case RTE_ETH_FC_TX_PAUSE:
+		hw->requested_fc_mode = HNS3_FC_TX_PAUSE;
+		break;
+	case RTE_ETH_FC_FULL:
+		hw->requested_fc_mode = HNS3_FC_FULL;
+		break;
+	default:
+		/* Defensive fallback; rte_eth_fc_mode has no other members
+		 * today, but guard against corrupted input.
+		 */
+		hw->requested_fc_mode = HNS3_FC_NONE;
+		hns3_warn(hw, "fc_mode(%u) exceeds member scope and is "
+			  "configured to RTE_ETH_FC_NONE", mode);
+		break;
	}
-
-	return ret;
}
/*
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
enum hns3_fc_status fc_status = hw->current_fc_status;
- enum hns3_fc_mode current_mode = hw->current_mode;
+ enum hns3_fc_mode old_fc_mode = hw->requested_fc_mode;
uint8_t hw_pfc_map = hw->dcb_info.hw_pfc_map;
uint8_t pfc_en = hw->dcb_info.pfc_en;
uint8_t priority = pfc_conf->priority;
uint16_t pause_time = pf->pause_time;
- int ret, status;
+ int ret;
- pf->pause_time = pfc_conf->fc.pause_time;
- hw->current_mode = hw->requested_mode;
- hw->current_fc_status = HNS3_FC_STATUS_PFC;
hw->dcb_info.pfc_en |= BIT(priority);
hw->dcb_info.hw_pfc_map =
hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en);
ret = hns3_buffer_alloc(hw);
- if (ret)
- goto pfc_setup_fail;
+ if (ret) {
+ hns3_err(hw, "update packet buffer failed, ret = %d", ret);
+ goto buffer_alloc_fail;
+ }
+
+ pf->pause_time = pfc_conf->fc.pause_time;
+ hns3_get_fc_mode(hw, pfc_conf->fc.mode);
+ if (hw->requested_fc_mode == HNS3_FC_NONE)
+ hw->current_fc_status = HNS3_FC_STATUS_NONE;
+ else
+ hw->current_fc_status = HNS3_FC_STATUS_PFC;
/*
* The flow control mode of all UPs will be changed based on
- * current_mode coming from user.
+ * requested_fc_mode coming from user.
*/
ret = hns3_dcb_pause_setup_hw(hw);
if (ret) {
return 0;
pfc_setup_fail:
- hw->current_mode = current_mode;
+ hw->requested_fc_mode = old_fc_mode;
hw->current_fc_status = fc_status;
pf->pause_time = pause_time;
+buffer_alloc_fail:
hw->dcb_info.pfc_en = pfc_en;
hw->dcb_info.hw_pfc_map = hw_pfc_map;
- status = hns3_buffer_alloc(hw);
- if (status)
- hns3_err(hw, "recover packet buffer fail: %d", status);
return ret;
}
{
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ enum hns3_fc_mode old_fc_mode = hw->requested_fc_mode;
enum hns3_fc_status fc_status = hw->current_fc_status;
- enum hns3_fc_mode current_mode = hw->current_mode;
uint16_t pause_time = pf->pause_time;
int ret;
pf->pause_time = fc_conf->pause_time;
- hw->current_mode = hw->requested_mode;
+ hns3_get_fc_mode(hw, fc_conf->mode);
/*
* In fact, current_fc_status is HNS3_FC_STATUS_NONE when mode
* of flow control is configured to be HNS3_FC_NONE.
*/
- if (hw->current_mode == HNS3_FC_NONE)
+ if (hw->requested_fc_mode == HNS3_FC_NONE)
hw->current_fc_status = HNS3_FC_STATUS_NONE;
else
hw->current_fc_status = HNS3_FC_STATUS_MAC_PAUSE;
return 0;
setup_fc_fail:
- hw->current_mode = current_mode;
+ hw->requested_fc_mode = old_fc_mode;
hw->current_fc_status = fc_status;
pf->pause_time = pause_time;