static int hns3_query_dev_fec_info(struct hns3_hw *hw);
static int hns3_do_stop(struct hns3_adapter *hns);
static int hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds);
+static int hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable);
void hns3_ether_format_addr(char *buf, uint16_t size,
const struct rte_ether_addr *ether_addr)
hns3_clear_all_event_cause(struct hns3_hw *hw)
{
uint32_t vector0_int_stats;
- vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
+ vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats)
hns3_warn(hw, "Probe during IMP reset interrupt");
vector0_int = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
ras_int = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);
cmdq_int = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
+ hns3_clear_event_cause(hw, event_cause, clearval);
/* vector 0 interrupt is shared with reset and mailbox source events. */
if (event_cause == HNS3_VECTOR0_EVENT_ERR) {
hns3_warn(hw, "received interrupt: vector0_int_stat:0x%x "
vector0_int, ras_int, cmdq_int);
}
- hns3_clear_event_cause(hw, event_cause, clearval);
/* Enable interrupt if it is not caused by reset */
hns3_pf_enable_irq0(hw);
}
ret = hns3_set_vlan_rx_offload_cfg(hns, &rxvlan_cfg);
if (ret) {
- hns3_err(hw, "enable strip rx vtag failed, ret =%d", ret);
+ hns3_err(hw, "%s strip rx vtag failed, ret = %d.",
+ enable ? "enable" : "disable", ret);
return ret;
}
static int
hns3_lookup_mac_vlan_tbl(struct hns3_hw *hw,
struct hns3_mac_vlan_tbl_entry_cmd *req,
- struct hns3_cmd_desc *desc, bool is_mc)
+ struct hns3_cmd_desc *desc, uint8_t desc_num)
{
uint8_t resp_code;
uint16_t retval;
int ret;
+ int i;
- hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_MAC_VLAN_ADD, true);
- if (is_mc) {
- desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
- memcpy(desc[0].data, req,
- sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
- hns3_cmd_setup_basic_desc(&desc[1], HNS3_OPC_MAC_VLAN_ADD,
- true);
- desc[1].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
- hns3_cmd_setup_basic_desc(&desc[2], HNS3_OPC_MAC_VLAN_ADD,
+ if (desc_num == HNS3_MC_MAC_VLAN_OPS_DESC_NUM) {
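+ /*
+ * A multicast entry spans several descriptors: chain all but the last
+ * with the NEXT flag and carry the lookup request in the first one.
+ */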
+ for (i = 0; i < desc_num - 1; i++) {
+ hns3_cmd_setup_basic_desc(&desc[i],
+ HNS3_OPC_MAC_VLAN_ADD, true);
+ desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
+ if (i == 0)
+ memcpy(desc[i].data, req,
+ sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
+ }
+ hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_MAC_VLAN_ADD,
true);
- ret = hns3_cmd_send(hw, desc, HNS3_MC_MAC_VLAN_ADD_DESC_NUM);
} else {
+ hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_MAC_VLAN_ADD,
+ true);
memcpy(desc[0].data, req,
sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
- ret = hns3_cmd_send(hw, desc, 1);
}
+ ret = hns3_cmd_send(hw, desc, desc_num);
if (ret) {
hns3_err(hw, "lookup mac addr failed for cmd_send, ret =%d.",
ret);
static int
hns3_add_mac_vlan_tbl(struct hns3_hw *hw,
struct hns3_mac_vlan_tbl_entry_cmd *req,
- struct hns3_cmd_desc *mc_desc)
+ struct hns3_cmd_desc *desc, uint8_t desc_num)
{
uint8_t resp_code;
uint16_t retval;
int cfg_status;
int ret;
+ int i;
- if (mc_desc == NULL) {
- struct hns3_cmd_desc desc;
-
- hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ADD, false);
- memcpy(desc.data, req,
+ if (desc_num == HNS3_UC_MAC_VLAN_OPS_DESC_NUM) {
+ hns3_cmd_setup_basic_desc(desc, HNS3_OPC_MAC_VLAN_ADD, false);
+ memcpy(desc->data, req,
sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
- ret = hns3_cmd_send(hw, &desc, 1);
- resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
- retval = rte_le_to_cpu_16(desc.retval);
+ ret = hns3_cmd_send(hw, desc, desc_num);
+ resp_code = (rte_le_to_cpu_32(desc->data[0]) >> 8) & 0xff;
+ retval = rte_le_to_cpu_16(desc->retval);
cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
HNS3_MAC_VLAN_ADD);
} else {
- hns3_cmd_reuse_desc(&mc_desc[0], false);
- mc_desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
- hns3_cmd_reuse_desc(&mc_desc[1], false);
- mc_desc[1].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
- hns3_cmd_reuse_desc(&mc_desc[2], false);
- mc_desc[2].flag &= rte_cpu_to_le_16(~HNS3_CMD_FLAG_NEXT);
- memcpy(mc_desc[0].data, req,
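+ /*
+ * Multi-descriptor case: reuse the caller-provided descriptors and
+ * re-chain them, setting the NEXT flag on all but the last one.
+ */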
+ for (i = 0; i < desc_num; i++) {
+ hns3_cmd_reuse_desc(&desc[i], false);
+ if (i == desc_num - 1)
+ desc[i].flag &=
+ rte_cpu_to_le_16(~HNS3_CMD_FLAG_NEXT);
+ else
+ desc[i].flag |=
+ rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
+ }
+ memcpy(desc[0].data, req,
sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
- mc_desc[0].retval = 0;
- ret = hns3_cmd_send(hw, mc_desc, HNS3_MC_MAC_VLAN_ADD_DESC_NUM);
- resp_code = (rte_le_to_cpu_32(mc_desc[0].data[0]) >> 8) & 0xff;
- retval = rte_le_to_cpu_16(mc_desc[0].retval);
+ desc[0].retval = 0;
+ ret = hns3_cmd_send(hw, desc, desc_num);
+ resp_code = (rte_le_to_cpu_32(desc[0].data[0]) >> 8) & 0xff;
+ retval = rte_le_to_cpu_16(desc[0].retval);
cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
HNS3_MAC_VLAN_ADD);
struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
struct hns3_mac_vlan_tbl_entry_cmd req;
struct hns3_pf *pf = &hns->pf;
- struct hns3_cmd_desc desc[3];
+ struct hns3_cmd_desc desc;
char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
uint16_t egress_port = 0;
uint8_t vf_id;
 * it if the entry is nonexistent. Repeated unicast entry
* is not allowed in the mac vlan table.
*/
- ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, false);
+ ret = hns3_lookup_mac_vlan_tbl(hw, &req, &desc,
+ HNS3_UC_MAC_VLAN_OPS_DESC_NUM);
if (ret == -ENOENT) {
if (!hns3_is_umv_space_full(hw)) {
- ret = hns3_add_mac_vlan_tbl(hw, &req, NULL);
+ ret = hns3_add_mac_vlan_tbl(hw, &req, &desc,
+ HNS3_UC_MAC_VLAN_OPS_DESC_NUM);
if (!ret)
hns3_update_umv_space(hw, false);
return ret;
static int
hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
- uint32_t idx, __rte_unused uint32_t pool)
+ __rte_unused uint32_t idx, __rte_unused uint32_t pool)
{
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
return ret;
}
- if (idx == 0)
- hw->mac.default_addr_setted = true;
rte_spinlock_unlock(&hw->lock);
return ret;
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_ether_addr *oaddr;
char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
- bool default_addr_setted;
- bool rm_succes = false;
int ret, ret_val;
- /*
- * It has been guaranteed that input parameter named mac_addr is valid
- * address in the rte layer of DPDK framework.
- */
+ rte_spinlock_lock(&hw->lock);
oaddr = (struct rte_ether_addr *)hw->mac.mac_addr;
- default_addr_setted = hw->mac.default_addr_setted;
- if (default_addr_setted && !!rte_is_same_ether_addr(mac_addr, oaddr))
- return 0;
+ ret = hns3_remove_uc_addr_common(hw, oaddr);
+ if (ret) {
+ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ oaddr);
+ hns3_warn(hw, "Remove old uc mac address(%s) fail: %d",
+ mac_str, ret);
- rte_spinlock_lock(&hw->lock);
- if (default_addr_setted) {
- ret = hns3_remove_uc_addr_common(hw, oaddr);
- if (ret) {
- hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
- oaddr);
- hns3_warn(hw, "Remove old uc mac address(%s) fail: %d",
- mac_str, ret);
- rm_succes = false;
- } else
- rm_succes = true;
+ rte_spinlock_unlock(&hw->lock);
+ return ret;
}
ret = hns3_add_uc_addr_common(hw, mac_addr);
rte_ether_addr_copy(mac_addr,
(struct rte_ether_addr *)hw->mac.mac_addr);
- hw->mac.default_addr_setted = true;
rte_spinlock_unlock(&hw->lock);
return 0;
}
err_add_uc_addr:
- if (rm_succes) {
- ret_val = hns3_add_uc_addr_common(hw, oaddr);
- if (ret_val) {
- hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
- oaddr);
- hns3_warn(hw,
- "Failed to restore old uc mac addr(%s): %d",
+ ret_val = hns3_add_uc_addr_common(hw, oaddr);
+ if (ret_val) {
+ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, oaddr);
+ hns3_warn(hw, "Failed to restore old uc mac addr(%s): %d",
mac_str, ret_val);
- hw->mac.default_addr_setted = false;
- }
}
rte_spinlock_unlock(&hw->lock);
static int
hns3_add_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
+ struct hns3_cmd_desc desc[HNS3_MC_MAC_VLAN_OPS_DESC_NUM];
struct hns3_mac_vlan_tbl_entry_cmd req;
- struct hns3_cmd_desc desc[3];
char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
uint8_t vf_id;
int ret;
memset(&req, 0, sizeof(req));
hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true);
- ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, true);
+ ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc,
+ HNS3_MC_MAC_VLAN_OPS_DESC_NUM);
if (ret) {
/* This mac addr does not exist, add a new entry for it */
memset(desc[0].data, 0, sizeof(desc[0].data));
*/
vf_id = HNS3_PF_FUNC_ID;
hns3_update_desc_vfid(desc, vf_id, false);
- ret = hns3_add_mac_vlan_tbl(hw, &req, desc);
+ ret = hns3_add_mac_vlan_tbl(hw, &req, desc,
+ HNS3_MC_MAC_VLAN_OPS_DESC_NUM);
if (ret) {
if (ret == -ENOSPC)
hns3_err(hw, "mc mac vlan table is full");
memset(&req, 0, sizeof(req));
hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true);
- ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, true);
+ ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc,
+ HNS3_MC_MAC_VLAN_OPS_DESC_NUM);
if (ret == 0) {
/*
 * This mac addr exists, remove this handle's VFID for it.
return 0;
}
-static int
-hns3_check_dcb_cfg(struct rte_eth_dev *dev)
-{
- struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- if (!hns3_dev_dcb_supported(hw)) {
- hns3_err(hw, "this port does not support dcb configurations.");
- return -EOPNOTSUPP;
- }
-
- if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE) {
- hns3_err(hw, "MAC pause enabled, cannot config dcb info.");
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
static int
hns3_bind_ring_with_vector(struct hns3_hw *hw, uint16_t vector_id, bool en,
enum hns3_ring_type queue_type, uint16_t queue_id)
}
static int
-hns3_refresh_mtu(struct rte_eth_dev *dev, struct rte_eth_conf *conf)
+hns3_setup_dcb(struct rte_eth_dev *dev)
{
struct hns3_adapter *hns = dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
- uint32_t max_rx_pkt_len;
- uint16_t mtu;
int ret;
- if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME))
- return 0;
+ if (!hns3_dev_get_support(hw, DCB)) {
+ hns3_err(hw, "this port does not support dcb configurations.");
+ return -EOPNOTSUPP;
+ }
- /*
- * If jumbo frames are enabled, MTU needs to be refreshed
- * according to the maximum RX packet length.
- */
- max_rx_pkt_len = conf->rxmode.max_rx_pkt_len;
- if (max_rx_pkt_len > HNS3_MAX_FRAME_LEN ||
- max_rx_pkt_len <= HNS3_DEFAULT_FRAME_LEN) {
- hns3_err(hw, "maximum Rx packet length must be greater than %u "
- "and no more than %u when jumbo frame enabled.",
- (uint16_t)HNS3_DEFAULT_FRAME_LEN,
- (uint16_t)HNS3_MAX_FRAME_LEN);
- return -EINVAL;
+ if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE) {
+ hns3_err(hw, "MAC pause enabled, cannot config dcb info.");
+ return -EOPNOTSUPP;
}
- mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(max_rx_pkt_len);
- ret = hns3_dev_mtu_set(dev, mtu);
+ ret = hns3_dcb_configure(hns);
if (ret)
- return ret;
- dev->data->mtu = mtu;
+ hns3_err(hw, "failed to config dcb: %d", ret);
- return 0;
+ return ret;
}
static int
/*
* Some hardware doesn't support auto-negotiation, but users may not
* configure link_speeds (default 0), which means auto-negotiation.
- * In this case, a warning message need to be printed, instead of
- * an error.
+ * In this case, it should return success.
*/
if (link_speeds == ETH_LINK_SPEED_AUTONEG &&
- hw->mac.support_autoneg == 0) {
- hns3_warn(hw, "auto-negotiation is not supported, use default fixed speed!");
+ hw->mac.support_autoneg == 0)
return 0;
- }
if (link_speeds != ETH_LINK_SPEED_AUTONEG) {
ret = hns3_check_port_speed(hw, link_speeds);
 * work as usual. But these fake queues are imperceptible, and cannot
* be used by upper applications.
*/
- if (!hns3_dev_indep_txrx_supported(hw)) {
- ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
- if (ret) {
- hns3_err(hw, "fail to set Rx/Tx fake queues, ret = %d.",
- ret);
- return ret;
- }
+ ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
+ if (ret) {
+ hns3_err(hw, "fail to set Rx/Tx fake queues, ret = %d.", ret);
+ hw->cfg_max_queues = 0;
+ return ret;
}
hw->adapter_state = HNS3_NIC_CONFIGURING;
goto cfg_err;
if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) {
- ret = hns3_check_dcb_cfg(dev);
+ ret = hns3_setup_dcb(dev);
if (ret)
goto cfg_err;
}
goto cfg_err;
}
- ret = hns3_refresh_mtu(dev, conf);
- if (ret)
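+ /* Apply the MTU configured via rxmode directly to the hardware. */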
+ ret = hns3_dev_mtu_set(dev, conf->rxmode.mtu);
+ if (ret != 0)
goto cfg_err;
ret = hns3_mbuf_dyn_rx_timestamp_register(dev, conf);
return 0;
cfg_err:
+ hw->cfg_max_queues = 0;
(void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0);
hw->adapter_state = HNS3_NIC_INITIALIZED;
struct hns3_adapter *hns = dev->data->dev_private;
uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD;
struct hns3_hw *hw = &hns->hw;
- bool is_jumbo_frame;
int ret;
if (dev->data->dev_started) {
}
rte_spinlock_lock(&hw->lock);
- is_jumbo_frame = frame_size > HNS3_DEFAULT_FRAME_LEN ? true : false;
frame_size = RTE_MAX(frame_size, HNS3_DEFAULT_FRAME_LEN);
/*
return ret;
}
- if (is_jumbo_frame)
- dev->data->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
- else
- dev->data->dev_conf.rxmode.offloads &=
- ~DEV_RX_OFFLOAD_JUMBO_FRAME;
- dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
rte_spinlock_unlock(&hw->lock);
return 0;
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_RSS_HASH |
DEV_RX_OFFLOAD_TCP_LRO);
info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_TX_OFFLOAD_MBUF_FAST_FREE |
hns3_txvlan_cap_get(hw));
- if (hns3_dev_outer_udp_cksum_supported(hw))
+ if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM))
info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
- if (hns3_dev_indep_txrx_supported(hw))
+ if (hns3_dev_get_support(hw, INDEP_TXRX))
info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
- if (hns3_dev_ptp_supported(hw))
+ if (hns3_dev_get_support(hw, PTP))
info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
info->rx_desc_lim = (struct rte_eth_desc_lim) {
return rte_eth_linkstatus_set(eth_dev, &new_link);
}
+static int
+hns3_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ /*
+ * The "tx_pkt_burst" will be restored. But the secondary process does
+ * not support the mechanism for notifying the primary process.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ hns3_err(hw, "secondary process does not support to set link up.");
+ return -ENOTSUP;
+ }
+
+ /*
+ * If the device isn't started, the Rx/Tx functions are still disabled
+ * and setting the link up is not allowed. But it is probably better to
+ * return success to reduce the impact on the upper layer.
+ */
+ if (hw->adapter_state != HNS3_NIC_STARTED) {
+ hns3_info(hw, "device isn't started, can't set link up.");
+ return 0;
+ }
+
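+ /* The link was not set down via dev_set_link_down(), nothing to do. */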
+ if (!hw->set_link_down)
+ return 0;
+
+ rte_spinlock_lock(&hw->lock);
+ ret = hns3_cfg_mac_mode(hw, true);
+ if (ret) {
+ rte_spinlock_unlock(&hw->lock);
+ hns3_err(hw, "failed to set link up, ret = %d", ret);
+ return ret;
+ }
+
+ hw->set_link_down = false;
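+ /* Restore the Tx burst function replaced by dev_set_link_down(). */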
+ hns3_start_tx_datapath(dev);
+ rte_spinlock_unlock(&hw->lock);
+
+ return 0;
+}
+
+static int
+hns3_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ /*
+ * The "tx_pkt_burst" will be set to dummy function. But the secondary
+ * process does not support the mechanism for notifying the primary
+ * process.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ hns3_err(hw, "secondary process does not support to set link down.");
+ return -ENOTSUP;
+ }
+
+ /*
+ * If the device isn't started or this API has already been called, the
+ * link is already down; return success.
+ */
+ if (hw->adapter_state != HNS3_NIC_STARTED || hw->set_link_down)
+ return 0;
+
+ rte_spinlock_lock(&hw->lock);
+ hns3_stop_tx_datapath(dev);
+ ret = hns3_cfg_mac_mode(hw, false);
+ if (ret) {
+ hns3_start_tx_datapath(dev);
+ rte_spinlock_unlock(&hw->lock);
+ hns3_err(hw, "failed to set link down, ret = %d", ret);
+ return ret;
+ }
+
+ hw->set_link_down = true;
+ rte_spinlock_unlock(&hw->lock);
+
+ return 0;
+}
+
static int
hns3_parse_func_status(struct hns3_hw *hw, struct hns3_func_status_cmd *status)
{
ext_rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[2]),
HNS3_CFG_EXT_RSS_SIZE_M,
HNS3_CFG_EXT_RSS_SIZE_S);
-
/*
* Field ext_rss_size_max obtained from firmware will be more flexible
* for future changes and expansions, which is an exponent of 2, instead
pf->tqp_config_mode = HNS3_FIXED_MAX_TQP_NUM_MODE;
hw->rss_info.ipv6_sctp_offload_supported = false;
hw->udp_cksum_mode = HNS3_SPECIAL_PORT_SW_CKSUM_MODE;
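+ /* MAC pause is not supported in multi-TC scenarios on this hardware. */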
+ pf->support_multi_tc_pause = false;
return 0;
}
pf->tqp_config_mode = HNS3_FLEX_MAX_TQP_NUM_MODE;
hw->rss_info.ipv6_sctp_offload_supported = true;
hw->udp_cksum_mode = HNS3_SPECIAL_PORT_HW_CKSUM_MODE;
+ pf->support_multi_tc_pause = true;
return 0;
}
switch (media_type) {
case HNS3_MEDIA_TYPE_COPPER:
- if (!hns3_dev_copper_supported(hw)) {
+ if (!hns3_dev_get_support(hw, COPPER)) {
PMD_INIT_LOG(ERR,
"Media type is copper, not supported.");
ret = -EOPNOTSUPP;
hw->rss_dis_flag = false;
memcpy(hw->mac.mac_addr, cfg.mac_addr, RTE_ETHER_ADDR_LEN);
hw->mac.phy_addr = cfg.phy_addr;
- hw->mac.default_addr_setted = false;
hw->num_tx_desc = cfg.tqp_desc_num;
hw->num_rx_desc = cfg.tqp_desc_num;
hw->dcb_info.num_pg = 1;
}
/* Dev does not support DCB */
- if (!hns3_dev_dcb_supported(hw)) {
+ if (!hns3_dev_get_support(hw, DCB)) {
pf->tc_max = 1;
pf->pfc_max = 0;
} else
tc_num = hns3_get_tc_num(hw);
aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT);
- if (hns3_dev_dcb_supported(hw))
+ if (hns3_dev_get_support(hw, DCB))
shared_buf_min = HNS3_BUF_MUL_BY * aligned_mps +
pf->dv_buf_size;
else
shared_buf = rounddown(rx_all - rx_priv, HNS3_BUF_SIZE_UNIT);
buf_alloc->s_buf.buf_size = shared_buf;
- if (hns3_dev_dcb_supported(hw)) {
+ if (hns3_dev_get_support(hw, DCB)) {
buf_alloc->s_buf.self.high = shared_buf - pf->dv_buf_size;
buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
- roundup(aligned_mps / HNS3_BUF_DIV_BY,
buf_alloc->s_buf.self.low = aligned_mps;
}
- if (hns3_dev_dcb_supported(hw)) {
+ if (hns3_dev_get_support(hw, DCB)) {
hi_thrd = shared_buf - pf->dv_buf_size;
if (tc_num <= NEED_RESERVE_TC_NUM)
for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) {
priv = &buf_alloc->priv_buf[i];
mask = BIT((uint8_t)i);
-
if (hw->hw_tc_map & mask &&
!(hw->dcb_info.hw_pfc_map & mask)) {
/* Clear the no pfc TC private buffer */
COMPENSATE_HALF_MPS_NUM * half_mps;
min_rx_priv = roundup(min_rx_priv, HNS3_BUF_SIZE_UNIT);
rx_priv = rounddown(rx_priv, HNS3_BUF_SIZE_UNIT);
-
if (rx_priv < min_rx_priv)
return false;
hns3_rx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
{
/* When DCB is not supported, rx private buffer is not allocated. */
- if (!hns3_dev_dcb_supported(hw)) {
+ if (!hns3_dev_get_support(hw, DCB)) {
struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
struct hns3_pf *pf = &hns->pf;
uint32_t rx_all = pf->pkt_buf_size;
return ret;
}
- if (hns3_dev_dcb_supported(hw)) {
+ if (hns3_dev_get_support(hw, DCB)) {
ret = hns3_rx_priv_wl_config(hw, &pkt_buf);
if (ret) {
PMD_INIT_LOG(ERR,
goto err_cmd_init;
}
+ hns3_tx_push_init(eth_dev);
+
/*
* To ensure that the hardware environment is clean during
 * initialization, the driver actively clears the hardware environment
hns3_rss_uninit(hns);
(void)hns3_config_gro(hw, false);
hns3_promisc_uninit(hw);
+ hns3_flow_uninit(eth_dev);
hns3_fdir_filter_uninit(hns);
hns3_uninit_umv_space(hw);
hns3_tqp_stats_uninit(hw);
/*
* Some hardware doesn't support auto-negotiation, but users may not
* configure link_speeds (default 0), which means auto-negotiation.
- * In this case, it should return success.
+ * In this case, a warning message needs to be printed instead of
+ * an error.
*/
- if (cfg->autoneg)
+ if (cfg->autoneg) {
+ hns3_warn(hw, "auto-negotiation is not supported, use default fixed speed!");
return 0;
+ }
return hns3_cfg_mac_speed_dup(hw, cfg->speed, cfg->duplex);
}
hns3_do_start(struct hns3_adapter *hns, bool reset_queue)
{
struct hns3_hw *hw = &hns->hw;
+ bool link_en;
int ret;
- ret = hns3_dcb_cfg_update(hns);
- if (ret)
+ ret = hns3_update_queue_map_configure(hns);
+ if (ret) {
+ hns3_err(hw, "failed to update queue mapping configuration, ret = %d",
+ ret);
return ret;
+ }
- /*
- * The hns3_dcb_cfg_update may configure TM module, so
- * hns3_tm_conf_update must called later.
- */
+ /* Note: hns3_tm_conf_update must be called after configuring DCB. */
ret = hns3_tm_conf_update(hw);
if (ret) {
PMD_INIT_LOG(ERR, "failed to update tm conf, ret = %d.", ret);
return ret;
}
- ret = hns3_cfg_mac_mode(hw, true);
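+ /* Keep the MAC disabled if the link was administratively set down. */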
+ link_en = hw->set_link_down ? false : true;
+ ret = hns3_cfg_mac_mode(hw, link_en);
if (ret) {
PMD_INIT_LOG(ERR, "failed to enable MAC, ret = %d", ret);
goto err_config_mac_mode;
{
struct hns3_adapter *hns = dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
+ bool old_state = hw->set_link_down;
int ret;
PMD_INIT_FUNC_TRACE();
rte_spinlock_lock(&hw->lock);
hw->adapter_state = HNS3_NIC_STARTING;
+ /*
+ * If the dev_set_link_down() API has been called, the "set_link_down"
+ * flag can be cleared by the dev_start() API. In addition, the flag
+ * should also be cleared before calling hns3_do_start() so that the MAC
+ * can be enabled in the dev_start stage.
+ */
+ hw->set_link_down = false;
ret = hns3_do_start(hns, true);
- if (ret) {
- hw->adapter_state = HNS3_NIC_CONFIGURED;
- rte_spinlock_unlock(&hw->lock);
- return ret;
- }
+ if (ret)
+ goto do_start_fail;
+
ret = hns3_map_rx_interrupt(dev);
if (ret)
goto map_rx_inter_err;
hns3_stop_all_txqs(dev);
map_rx_inter_err:
(void)hns3_do_stop(hns);
+do_start_fail:
+ hw->set_link_down = old_state;
hw->adapter_state = HNS3_NIC_CONFIGURED;
rte_spinlock_unlock(&hw->lock);
return ret;
}
}
- hw->mac.default_addr_setted = false;
+
return 0;
}
/* Disable datapath on secondary process. */
hns3_mp_req_stop_rxtx(dev);
/* Prevent crashes when queues are still in use. */
- rte_delay_ms(hw->tqps_num);
+ rte_delay_ms(hw->cfg_max_queues);
rte_spinlock_lock(&hw->lock);
if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
struct hns3_hw *hw = &hns->hw;
int ret = 0;
- if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- rte_free(eth_dev->process_private);
- eth_dev->process_private = NULL;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- }
if (hw->adapter_state == HNS3_NIC_STARTED)
ret = hns3_dev_stop(eth_dev);
hns3_uninit_pf(eth_dev);
hns3_free_all_queues(eth_dev);
rte_free(hw->reset.wait_data);
- rte_free(eth_dev->process_private);
- eth_dev->process_private = NULL;
hns3_mp_uninit_primary();
hns3_warn(hw, "Close port %u finished", hw->data->port_id);
return 0;
}
-static void
-hns3_get_fc_mode(struct hns3_hw *hw, enum rte_eth_fc_mode mode)
-{
- switch (mode) {
- case RTE_FC_NONE:
- hw->requested_fc_mode = HNS3_FC_NONE;
- break;
- case RTE_FC_RX_PAUSE:
- hw->requested_fc_mode = HNS3_FC_RX_PAUSE;
- break;
- case RTE_FC_TX_PAUSE:
- hw->requested_fc_mode = HNS3_FC_TX_PAUSE;
- break;
- case RTE_FC_FULL:
- hw->requested_fc_mode = HNS3_FC_FULL;
- break;
- default:
- hw->requested_fc_mode = HNS3_FC_NONE;
- hns3_warn(hw, "fc_mode(%u) exceeds member scope and is "
- "configured to RTE_FC_NONE", mode);
- break;
- }
-}
-
static int
hns3_check_fc_autoneg_valid(struct hns3_hw *hw, uint8_t autoneg)
{
hns3_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
int ret;
if (fc_conf->high_water || fc_conf->low_water ||
return -EOPNOTSUPP;
}
- if (hw->num_tc > 1) {
+ if (hw->num_tc > 1 && !pf->support_multi_tc_pause) {
hns3_err(hw, "in multi-TC scenarios, MAC pause is not supported.");
return -EOPNOTSUPP;
}
- hns3_get_fc_mode(hw, fc_conf->mode);
-
rte_spinlock_lock(&hw->lock);
ret = hns3_fc_enable(dev, fc_conf);
rte_spinlock_unlock(&hw->lock);
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
int ret;
- if (!hns3_dev_dcb_supported(hw)) {
+ if (!hns3_dev_get_support(hw, DCB)) {
hns3_err(hw, "This port does not support dcb configurations.");
return -EOPNOTSUPP;
}
return -EOPNOTSUPP;
}
- hns3_get_fc_mode(hw, pfc_conf->fc.mode);
-
rte_spinlock_lock(&hw->lock);
ret = hns3_dcb_pfc_enable(dev, pfc_conf);
rte_spinlock_unlock(&hw->lock);
hns3_check_event_cause(hns, NULL);
reset = hns3_get_reset_level(hns, &hw->reset.pending);
-
if (reset != HNS3_NONE_RESET && hw->reset.level != HNS3_NONE_RESET &&
hw->reset.level < reset) {
hns3_warn(hw, "High level reset %d is pending", reset);
rte_wmb();
/* Disable datapath on secondary process. */
hns3_mp_req_stop_rxtx(eth_dev);
- rte_delay_ms(hw->tqps_num);
+ rte_delay_ms(hw->cfg_max_queues);
rte_spinlock_lock(&hw->lock);
if (hns->hw.adapter_state == HNS3_NIC_STARTED ||
.mac_addr_set = hns3_set_default_mac_addr,
.set_mc_addr_list = hns3_set_mc_mac_addr_list,
.link_update = hns3_dev_link_update,
+ .dev_set_link_up = hns3_dev_set_link_up,
+ .dev_set_link_down = hns3_dev_set_link_down,
.rss_hash_update = hns3_dev_rss_hash_update,
.rss_hash_conf_get = hns3_dev_rss_hash_conf_get,
.reta_update = hns3_dev_rss_reta_update,
PMD_INIT_FUNC_TRACE();
- eth_dev->process_private = (struct hns3_process_private *)
- rte_zmalloc_socket("hns3_filter_list",
- sizeof(struct hns3_process_private),
- RTE_CACHE_LINE_SIZE, eth_dev->device->numa_node);
- if (eth_dev->process_private == NULL) {
- PMD_INIT_LOG(ERR, "Failed to alloc memory for process private");
- return -ENOMEM;
- }
-
hns3_flow_init(eth_dev);
hns3_set_rxtx_function(eth_dev);
"process, ret = %d", ret);
goto err_mp_init_secondary;
}
-
hw->secondary_cnt++;
+ hns3_tx_push_init(eth_dev);
return 0;
}
eth_dev->tx_pkt_burst = NULL;
eth_dev->tx_pkt_prepare = NULL;
eth_dev->tx_descriptor_status = NULL;
- rte_free(eth_dev->process_private);
- eth_dev->process_private = NULL;
return ret;
}
PMD_INIT_FUNC_TRACE();
- if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- rte_free(eth_dev->process_private);
- eth_dev->process_private = NULL;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- }
if (hw->adapter_state < HNS3_NIC_CLOSING)
hns3_dev_close(eth_dev);