diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index bf633a3d71..9cbcc13de8 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -4,8 +4,7 @@

 #include <rte_alarm.h>
 #include <rte_bus_pci.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_pci.h>
 #include <rte_io.h>

 #include "hns3_ethdev.h"
@@ -20,6 +19,7 @@
 #define HNS3_DEFAULT_PORT_CONF_QUEUES_NUM 1

 #define HNS3_SERVICE_INTERVAL		1000000 /* us */
+#define HNS3_SERVICE_QUICK_INTERVAL	10
 #define HNS3_INVALID_PVID		0xFFFF

 #define HNS3_FILTER_TYPE_VF		0
@@ -93,7 +93,8 @@ static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns,
 static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
 static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
				    int on);
-static int hns3_update_speed_duplex(struct rte_eth_dev *eth_dev);
+static int hns3_update_link_info(struct rte_eth_dev *eth_dev);
+static bool hns3_update_link_status(struct hns3_hw *hw);

 static int hns3_add_mc_addr(struct hns3_hw *hw,
			    struct rte_ether_addr *mac_addr);
@@ -101,6 +102,7 @@ static int hns3_remove_mc_addr(struct hns3_hw *hw,
			       struct rte_ether_addr *mac_addr);
 static int hns3_restore_fec(struct hns3_hw *hw);
 static int hns3_query_dev_fec_info(struct hns3_hw *hw);
+static int hns3_do_stop(struct hns3_adapter *hns);

 void hns3_ether_format_addr(char *buf, uint16_t size,
			    const struct rte_ether_addr *ether_addr)
@@ -123,6 +125,47 @@ hns3_pf_enable_irq0(struct hns3_hw *hw)
	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
 }

+static enum hns3_evt_cause
+hns3_proc_imp_reset_event(struct hns3_adapter *hns, bool is_delay,
+			  uint32_t *vec_val)
+{
+	struct hns3_hw *hw = &hns->hw;
+
+	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+	hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
+	*vec_val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
+	if (!is_delay) {
+		hw->reset.stats.imp_cnt++;
+		hns3_warn(hw, "IMP reset detected, clear reset status");
+	} else {
+		hns3_schedule_delayed_reset(hns);
+		hns3_warn(hw, "IMP reset detected, don't clear reset status");
+	}
+
+	return HNS3_VECTOR0_EVENT_RST;
+}
+
+static enum hns3_evt_cause
+hns3_proc_global_reset_event(struct hns3_adapter *hns, bool is_delay,
+			     uint32_t *vec_val)
+{
+	struct hns3_hw *hw = &hns->hw;
+
+	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+	hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending);
+	*vec_val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
+	if (!is_delay) {
+		hw->reset.stats.global_cnt++;
+		hns3_warn(hw, "Global reset detected, clear reset status");
+	} else {
+		hns3_schedule_delayed_reset(hns);
+		hns3_warn(hw,
+			  "Global reset detected, don't clear reset status");
+	}
+
+	return HNS3_VECTOR0_EVENT_RST;
+}
+
 static enum hns3_evt_cause
 hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
 {
@@ -132,12 +175,14 @@ hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
	uint32_t hw_err_src_reg;
	uint32_t val;
	enum hns3_evt_cause ret;
+	bool is_delay;

	/* fetch the events from their corresponding regs */
	vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
	cmdq_src_val = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
	hw_err_src_reg = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);

+	is_delay = clearval == NULL ?
true : false; /* * Assumption: If by any chance reset and mailbox events are reported * together then we will only process reset event and defer the @@ -146,35 +191,13 @@ hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval) * from H/W just for the mailbox. */ if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats) { /* IMP */ - rte_atomic16_set(&hw->reset.disable_cmd, 1); - hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending); - val = BIT(HNS3_VECTOR0_IMPRESET_INT_B); - if (clearval) { - hw->reset.stats.imp_cnt++; - hns3_warn(hw, "IMP reset detected, clear reset status"); - } else { - hns3_schedule_delayed_reset(hns); - hns3_warn(hw, "IMP reset detected, don't clear reset status"); - } - - ret = HNS3_VECTOR0_EVENT_RST; + ret = hns3_proc_imp_reset_event(hns, is_delay, &val); goto out; } /* Global reset */ if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats) { - rte_atomic16_set(&hw->reset.disable_cmd, 1); - hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending); - val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B); - if (clearval) { - hw->reset.stats.global_cnt++; - hns3_warn(hw, "Global reset detected, clear reset status"); - } else { - hns3_schedule_delayed_reset(hns); - hns3_warn(hw, "Global reset detected, don't clear reset status"); - } - - ret = HNS3_VECTOR0_EVENT_RST; + ret = hns3_proc_global_reset_event(hns, is_delay, &val); goto out; } @@ -194,9 +217,6 @@ hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval) goto out; } - if (clearval && (vector0_int_stats || cmdq_src_val || hw_err_src_reg)) - hns3_warn(hw, "vector0_int_stats:0x%x cmdq_src_val:0x%x hw_err_src_reg:0x%x", - vector0_int_stats, cmdq_src_val, hw_err_src_reg); val = vector0_int_stats; ret = HNS3_VECTOR0_EVENT_OTHER; out: @@ -234,6 +254,34 @@ hns3_clear_all_event_cause(struct hns3_hw *hw) hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_MBX, 0); } +static void +hns3_handle_mac_tnl(struct hns3_hw *hw) +{ + struct hns3_cmd_desc desc; + uint32_t status; + int ret; + + /* query and clear mac tnl interruptions */ + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_MAC_TNL_INT, true); + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) { + hns3_err(hw, "failed to query mac tnl int, ret = %d.", ret); + return; + } + + status = rte_le_to_cpu_32(desc.data[0]); + if (status) { + hns3_warn(hw, "mac tnl int occurs, status = 0x%x.", status); + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_MAC_TNL_INT, + false); + desc.data[0] = rte_cpu_to_le_32(HNS3_MAC_TNL_INT_CLR); + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) + hns3_err(hw, "failed to clear mac tnl int, ret = %d.", + ret); + } +} + static void hns3_interrupt_handler(void *param) { @@ -242,24 +290,36 @@ hns3_interrupt_handler(void *param) struct hns3_hw *hw = &hns->hw; enum hns3_evt_cause event_cause; uint32_t clearval = 0; + uint32_t vector0_int; + uint32_t ras_int; + uint32_t cmdq_int; /* Disable interrupt */ hns3_pf_disable_irq0(hw); event_cause = hns3_check_event_cause(hns, &clearval); + vector0_int = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG); + ras_int = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG); + cmdq_int = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG); /* vector 0 interrupt is shared with reset and mailbox source events. 
*/ if (event_cause == HNS3_VECTOR0_EVENT_ERR) { - hns3_warn(hw, "Received err interrupt"); + hns3_warn(hw, "received interrupt: vector0_int_stat:0x%x " + "ras_int_stat:0x%x cmdq_int_stat:0x%x", + vector0_int, ras_int, cmdq_int); hns3_handle_msix_error(hns, &hw->reset.request); hns3_handle_ras_error(hns, &hw->reset.request); + hns3_handle_mac_tnl(hw); hns3_schedule_reset(hns); } else if (event_cause == HNS3_VECTOR0_EVENT_RST) { - hns3_warn(hw, "Received reset interrupt"); + hns3_warn(hw, "received reset interrupt"); hns3_schedule_reset(hns); - } else if (event_cause == HNS3_VECTOR0_EVENT_MBX) + } else if (event_cause == HNS3_VECTOR0_EVENT_MBX) { hns3_dev_handle_mbx_msg(hw); - else - hns3_err(hw, "Received unknown event"); + } else { + hns3_warn(hw, "received unknown event: vector0_int_stat:0x%x " + "ras_int_stat:0x%x cmdq_int_stat:0x%x", + vector0_int, ras_int, cmdq_int); + } hns3_clear_event_cause(hw, event_cause, clearval); /* Enable interrupt if it is not cause by reset */ @@ -1017,7 +1077,7 @@ hns3_init_vlan_config(struct hns3_adapter *hns) * ensure that the hardware configuration remains unchanged before and * after reset. */ - if (rte_atomic16_read(&hw->reset.resetting) == 0) { + if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) { hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE; hw->port_base_vlan_cfg.pvid = HNS3_INVALID_PVID; } @@ -1041,7 +1101,7 @@ hns3_init_vlan_config(struct hns3_adapter *hns) * we will restore configurations to hardware in hns3_restore_vlan_table * and hns3_restore_vlan_conf later. */ - if (rte_atomic16_read(&hw->reset.resetting) == 0) { + if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) { ret = hns3_vlan_pvid_configure(hns, HNS3_INVALID_PVID, 0); if (ret) { hns3_err(hw, "pvid set fail in pf, ret =%d", ret); @@ -2212,7 +2272,7 @@ hns3_check_dcb_cfg(struct rte_eth_dev *dev) } static int -hns3_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, bool mmap, +hns3_bind_ring_with_vector(struct hns3_hw *hw, uint16_t vector_id, bool en, enum hns3_ring_type queue_type, uint16_t queue_id) { struct hns3_cmd_desc desc; @@ -2221,13 +2281,15 @@ hns3_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, bool mmap, enum hns3_cmd_status status; enum hns3_opcode_type op; uint16_t tqp_type_and_id = 0; - const char *op_str; uint16_t type; uint16_t gl; - op = mmap ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR; + op = en ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR; hns3_cmd_setup_basic_desc(&desc, op, false); - req->int_vector_id = vector_id; + req->int_vector_id = hns3_get_field(vector_id, HNS3_TQP_INT_ID_L_M, + HNS3_TQP_INT_ID_L_S); + req->int_vector_id_h = hns3_get_field(vector_id, HNS3_TQP_INT_ID_H_M, + HNS3_TQP_INT_ID_H_S); if (queue_type == HNS3_RING_TYPE_RX) gl = HNS3_RING_GL_RX; @@ -2243,11 +2305,10 @@ hns3_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, bool mmap, gl); req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id); req->int_cause_num = 1; - op_str = mmap ? "Map" : "Unmap"; status = hns3_cmd_send(hw, &desc, 1); if (status) { hns3_err(hw, "%s TQP %u fail, vector_id is %u, status is %d.", - op_str, queue_id, req->int_vector_id, status); + en ? 
"Map" : "Unmap", queue_id, vector_id, status); return status; } @@ -2317,10 +2378,10 @@ hns3_dev_configure(struct rte_eth_dev *dev) struct rte_eth_conf *conf = &dev->data->dev_conf; enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode; struct hns3_hw *hw = &hns->hw; - struct hns3_rss_conf *rss_cfg = &hw->rss_info; uint16_t nb_rx_q = dev->data->nb_rx_queues; uint16_t nb_tx_q = dev->data->nb_tx_queues; struct rte_eth_rss_conf rss_conf; + uint32_t max_rx_pkt_len; uint16_t mtu; bool gro_en; int ret; @@ -2364,11 +2425,6 @@ hns3_dev_configure(struct rte_eth_dev *dev) conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; rss_conf = conf->rx_adv_conf.rss_conf; hw->rss_dis_flag = false; - if (rss_conf.rss_key == NULL) { - rss_conf.rss_key = rss_cfg->key; - rss_conf.rss_key_len = HNS3_RSS_KEY_SIZE; - } - ret = hns3_dev_rss_hash_update(dev, &rss_conf); if (ret) goto cfg_err; @@ -2379,12 +2435,18 @@ hns3_dev_configure(struct rte_eth_dev *dev) * according to the maximum RX packet length. */ if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { - /* - * Security of max_rx_pkt_len is guaranteed in dpdk frame. - * Maximum value of max_rx_pkt_len is HNS3_MAX_FRAME_LEN, so it - * can safely assign to "uint16_t" type variable. - */ - mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(conf->rxmode.max_rx_pkt_len); + max_rx_pkt_len = conf->rxmode.max_rx_pkt_len; + if (max_rx_pkt_len > HNS3_MAX_FRAME_LEN || + max_rx_pkt_len <= HNS3_DEFAULT_FRAME_LEN) { + hns3_err(hw, "maximum Rx packet length must be greater " + "than %u and less than %u when jumbo frame enabled.", + (uint16_t)HNS3_DEFAULT_FRAME_LEN, + (uint16_t)HNS3_MAX_FRAME_LEN); + ret = -EINVAL; + goto cfg_err; + } + + mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(max_rx_pkt_len); ret = hns3_dev_mtu_set(dev, mtu); if (ret) goto cfg_err; @@ -2436,17 +2498,33 @@ hns3_set_mac_mtu(struct hns3_hw *hw, uint16_t new_mps) static int hns3_config_mtu(struct hns3_hw *hw, uint16_t mps) { + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + uint16_t original_mps = hns->pf.mps; + int err; int ret; ret = hns3_set_mac_mtu(hw, mps); if (ret) { - hns3_err(hw, "Failed to set mtu, ret = %d", ret); + hns3_err(hw, "failed to set mtu, ret = %d", ret); return ret; } + hns->pf.mps = mps; ret = hns3_buffer_alloc(hw); - if (ret) - hns3_err(hw, "Failed to allocate buffer, ret = %d", ret); + if (ret) { + hns3_err(hw, "failed to allocate buffer, ret = %d", ret); + goto rollback; + } + + return 0; + +rollback: + err = hns3_set_mac_mtu(hw, original_mps); + if (err) { + hns3_err(hw, "fail to rollback MTU, err = %d", err); + return ret; + } + hns->pf.mps = original_mps; return ret; } @@ -2481,7 +2559,7 @@ hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) dev->data->port_id, mtu, ret); return ret; } - hns->pf.mps = (uint16_t)frame_size; + if (is_jumbo_frame) dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; @@ -2494,7 +2572,7 @@ hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) return 0; } -static int +int hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) { struct hns3_adapter *hns = eth_dev->data->dev_private; @@ -2576,7 +2654,7 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) info->vmdq_queue_num = 0; - info->reta_size = HNS3_RSS_IND_TBL_SIZE; + info->reta_size = hw->rss_ind_tbl_size; info->hash_key_size = HNS3_RSS_KEY_SIZE; info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT; @@ -2625,8 +2703,8 @@ hns3_dev_link_update(struct rte_eth_dev *eth_dev, struct rte_eth_link new_link; if (!hns3_is_reset_pending(hns)) { - 
hns3_update_speed_duplex(eth_dev); hns3_update_link_status(hw); + hns3_update_link_info(eth_dev); } memset(&new_link, 0, sizeof(new_link)); @@ -2966,6 +3044,20 @@ hns3_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc) hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max); } +static int +hns3_check_dev_specifications(struct hns3_hw *hw) +{ + if (hw->rss_ind_tbl_size == 0 || + hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) { + hns3_err(hw, "the size of hash lookup table configured (%u)" + " exceeds the maximum(%u)", hw->rss_ind_tbl_size, + HNS3_RSS_IND_TBL_SIZE_MAX); + return -EINVAL; + } + + return 0; +} + static int hns3_query_dev_specifications(struct hns3_hw *hw) { @@ -2986,7 +3078,7 @@ hns3_query_dev_specifications(struct hns3_hw *hw) hns3_parse_dev_specifications(hw, desc); - return 0; + return hns3_check_dev_specifications(hw); } static int @@ -3051,6 +3143,37 @@ hns3_get_capability(struct hns3_hw *hw) return 0; } +static int +hns3_check_media_type(struct hns3_hw *hw, uint8_t media_type) +{ + int ret; + + switch (media_type) { + case HNS3_MEDIA_TYPE_COPPER: + if (!hns3_dev_copper_supported(hw)) { + PMD_INIT_LOG(ERR, + "Media type is copper, not supported."); + ret = -EOPNOTSUPP; + } else { + ret = 0; + } + break; + case HNS3_MEDIA_TYPE_FIBER: + ret = 0; + break; + case HNS3_MEDIA_TYPE_BACKPLANE: + PMD_INIT_LOG(ERR, "Media type is Backplane, not supported."); + ret = -EOPNOTSUPP; + break; + default: + PMD_INIT_LOG(ERR, "Unknown media type = %u!", media_type); + ret = -EINVAL; + break; + } + + return ret; +} + static int hns3_get_board_configuration(struct hns3_hw *hw) { @@ -3065,11 +3188,9 @@ hns3_get_board_configuration(struct hns3_hw *hw) return ret; } - if (cfg.media_type == HNS3_MEDIA_TYPE_COPPER && - !hns3_dev_copper_supported(hw)) { - PMD_INIT_LOG(ERR, "media type is copper, not supported."); - return -EOPNOTSUPP; - } + ret = hns3_check_media_type(hw, cfg.media_type); + if (ret) + return ret; hw->mac.media_type = cfg.media_type; hw->rss_size_max = cfg.rss_size_max; @@ -3901,6 +4022,28 @@ hns3_buffer_alloc(struct hns3_hw *hw) return ret; } +static int +hns3_firmware_compat_config(struct hns3_hw *hw, bool is_init) +{ + struct hns3_firmware_compat_cmd *req; + struct hns3_cmd_desc desc; + uint32_t compat = 0; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_FIRMWARE_COMPAT_CFG, false); + req = (struct hns3_firmware_compat_cmd *)desc.data; + + if (is_init) { + hns3_set_bit(compat, HNS3_LINK_EVENT_REPORT_EN_B, 1); + hns3_set_bit(compat, HNS3_NCSI_ERROR_REPORT_EN_B, 0); + if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER) + hns3_set_bit(compat, HNS3_FIRMWARE_PHY_DRIVER_EN_B, 1); + } + + req->compat = rte_cpu_to_le_32(compat); + + return hns3_cmd_send(hw, &desc, 1); +} + static int hns3_mac_init(struct hns3_hw *hw) { @@ -4326,7 +4469,6 @@ static int hns3_cfg_mac_speed_dup(struct hns3_hw *hw, uint32_t speed, uint8_t duplex) { struct hns3_mac *mac = &hw->mac; - uint32_t cur_speed = mac->link_speed; int ret; duplex = hns3_check_speed_dup(duplex, speed); @@ -4337,25 +4479,20 @@ hns3_cfg_mac_speed_dup(struct hns3_hw *hw, uint32_t speed, uint8_t duplex) if (ret) return ret; - mac->link_speed = speed; - ret = hns3_dcb_port_shaper_cfg(hw); - if (ret) { - hns3_err(hw, "failed to configure port shaper, ret = %d.", ret); - mac->link_speed = cur_speed; + ret = hns3_port_shaper_update(hw, speed); + if (ret) return ret; - } + mac->link_speed = speed; mac->link_duplex = duplex; return 0; } static int -hns3_update_speed_duplex(struct rte_eth_dev *eth_dev) 
+hns3_update_fiber_link_info(struct hns3_hw *hw)
 {
-	struct hns3_adapter *hns = eth_dev->data->dev_private;
-	struct hns3_hw *hw = &hns->hw;
-	struct hns3_pf *pf = &hns->pf;
+	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
	uint32_t speed;
	int ret;

@@ -4377,6 +4514,93 @@ hns3_update_speed_duplex(struct rte_eth_dev *eth_dev)
	return hns3_cfg_mac_speed_dup(hw, speed, ETH_LINK_FULL_DUPLEX);
 }

+static void
+hns3_parse_phy_params(struct hns3_cmd_desc *desc, struct hns3_mac *mac)
+{
+	struct hns3_phy_params_bd0_cmd *req;
+
+	req = (struct hns3_phy_params_bd0_cmd *)desc[0].data;
+	mac->link_speed = rte_le_to_cpu_32(req->speed);
+	mac->link_duplex = hns3_get_bit(req->duplex,
+					HNS3_PHY_DUPLEX_CFG_B);
+	mac->link_autoneg = hns3_get_bit(req->autoneg,
+					 HNS3_PHY_AUTONEG_CFG_B);
+	mac->supported_capa = rte_le_to_cpu_32(req->supported);
+	mac->advertising = rte_le_to_cpu_32(req->advertising);
+	mac->lp_advertising = rte_le_to_cpu_32(req->lp_advertising);
+	mac->support_autoneg = !!(mac->supported_capa &
+				  HNS3_PHY_LINK_MODE_AUTONEG_BIT);
+}
+
+static int
+hns3_get_phy_params(struct hns3_hw *hw, struct hns3_mac *mac)
+{
+	struct hns3_cmd_desc desc[HNS3_PHY_PARAM_CFG_BD_NUM];
+	uint16_t i;
+	int ret;
+
+	for (i = 0; i < HNS3_PHY_PARAM_CFG_BD_NUM - 1; i++) {
+		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG,
+					  true);
+		desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
+	}
+	hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, true);
+
+	ret = hns3_cmd_send(hw, desc, HNS3_PHY_PARAM_CFG_BD_NUM);
+	if (ret) {
+		hns3_err(hw, "get phy parameters failed, ret = %d.", ret);
+		return ret;
+	}
+
+	hns3_parse_phy_params(desc, mac);
+
+	return 0;
+}
+
+static int
+hns3_update_phy_link_info(struct hns3_hw *hw)
+{
+	struct hns3_mac *mac = &hw->mac;
+	struct hns3_mac mac_info;
+	int ret;
+
+	memset(&mac_info, 0, sizeof(struct hns3_mac));
+	ret = hns3_get_phy_params(hw, &mac_info);
+	if (ret)
+		return ret;
+
+	if (mac_info.link_speed != mac->link_speed) {
+		ret = hns3_port_shaper_update(hw, mac_info.link_speed);
+		if (ret)
+			return ret;
+	}
+
+	mac->link_speed = mac_info.link_speed;
+	mac->link_duplex = mac_info.link_duplex;
+	mac->link_autoneg = mac_info.link_autoneg;
+	mac->supported_capa = mac_info.supported_capa;
+	mac->advertising = mac_info.advertising;
+	mac->lp_advertising = mac_info.lp_advertising;
+	mac->support_autoneg = mac_info.support_autoneg;
+
+	return 0;
+}
+
+static int
+hns3_update_link_info(struct rte_eth_dev *eth_dev)
+{
+	struct hns3_adapter *hns = eth_dev->data->dev_private;
+	struct hns3_hw *hw = &hns->hw;
+	int ret = 0;
+
+	if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER)
+		ret = hns3_update_phy_link_info(hw);
+	else if (hw->mac.media_type == HNS3_MEDIA_TYPE_FIBER)
+		ret = hns3_update_fiber_link_info(hw);
+
+	return ret;
+}
+
 static int
 hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable)
 {
@@ -4443,7 +4667,7 @@ hns3_get_mac_link_status(struct hns3_hw *hw)
	return !!link_status;
 }

-void
+static bool
 hns3_update_link_status(struct hns3_hw *hw)
 {
	int state;
@@ -4452,7 +4676,38 @@ hns3_update_link_status(struct hns3_hw *hw)
	if (state != hw->mac.link_status) {
		hw->mac.link_status = state;
		hns3_warn(hw, "Link status change to %s!", state ? "up" : "down");
+		hns3_config_mac_tnl_int(hw,
+					state == ETH_LINK_UP ? true : false);
+		return true;
	}
+
+	return false;
+}
+
+/*
+ * Currently, the PF driver gets the link status in two ways:
+ * 1) Periodic polling in the intr thread context: the driver calls
+ *    hns3_update_link_status to update the link status.
+ * 2) Firmware reports an async interrupt: the driver processes the event
+ *    in the intr thread context and calls hns3_update_link_status to
+ *    update the link status.
+ *
+ * If a link status change is detected, the driver needs to report an LSE
+ * (link status event). One option is to add the LSE reporting logic to
+ * hns3_update_link_status.
+ *
+ * However, the driver's link_update ops also calls hns3_update_link_status
+ * to update the link status. If we reported the LSE from
+ * hns3_update_link_status, it could lead to a deadlock in a bonding
+ * application.
+ *
+ * So add one new API which is used only in the intr thread context.
+ */
+void
+hns3_update_link_status_and_event(struct hns3_hw *hw)
+{
+	struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
+	bool changed = hns3_update_link_status(hw);
+	if (changed)
+		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 }

 static void
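As background for the new hns3_update_link_status_and_event() above: rte_eth_dev_callback_process() invokes the callbacks an application has registered for RTE_ETH_EVENT_INTR_LSC. A minimal sketch of the consuming side, not part of this patch; the callback name and registration site are illustrative:

#include <stdio.h>
#include <rte_common.h>
#include <rte_ethdev.h>

/* Hypothetical LSC callback; it runs in the interrupt thread. */
static int
lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
	     void *cb_arg, void *ret_param)
{
	struct rte_eth_link link;

	RTE_SET_USED(event);
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);

	/* Use the _nowait variant: blocking control-path calls from here
	 * can re-acquire driver locks, which is the deadlock the comment
	 * above describes.
	 */
	rte_eth_link_get_nowait(port_id, &link);
	printf("port %u link is %s\n", port_id,
	       link.link_status == ETH_LINK_UP ? "up" : "down");
	return 0;
}

/* Registered once, e.g. right after rte_eth_dev_configure():
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *				      lsc_event_cb, NULL);
 */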
@@ -4463,10 +4718,11 @@ hns3_service_handler(void *param)
	struct hns3_hw *hw = &hns->hw;

	if (!hns3_is_reset_pending(hns)) {
-		hns3_update_speed_duplex(eth_dev);
-		hns3_update_link_status(hw);
-	} else
+		hns3_update_link_status_and_event(hw);
+		hns3_update_link_info(eth_dev);
+	} else {
		hns3_warn(hw, "Cancel the query when reset is pending");
+	}

	rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev);
 }
@@ -4550,6 +4806,15 @@ hns3_init_hardware(struct hns3_adapter *hns)
		goto err_mac_init;
	}

+	/*
+	 * Request the firmware to enable some features. The driver can
+	 * still work without them.
+	 */
+	ret = hns3_firmware_compat_config(hw, true);
+	if (ret)
+		PMD_INIT_LOG(WARNING, "firmware compatible features not "
+			     "supported, ret = %d.", ret);
+
	return 0;

 err_mac_init:
@@ -4632,6 +4897,13 @@ hns3_init_pf(struct rte_eth_dev *eth_dev)
		goto err_cmd_init;
	}

+	/* Clear the hardware statistics of the imissed registers. */
+	ret = hns3_update_imissed_stats(hw, true);
+	if (ret) {
+		hns3_err(hw, "clear imissed stats failed, ret = %d", ret);
+		return ret;
+	}
+
	hns3_config_all_msix_error(hw, true);

	ret = rte_intr_callback_register(&pci_dev->intr_handle,
@@ -4670,7 +4942,7 @@ hns3_init_pf(struct rte_eth_dev *eth_dev)
		goto err_fdir;
	}

-	hns3_set_default_rss_args(hw);
+	hns3_rss_set_default_args(hw);

	ret = hns3_enable_hw_error_intr(hns, true);
	if (ret) {
@@ -4679,11 +4951,14 @@ hns3_init_pf(struct rte_eth_dev *eth_dev)
		goto err_enable_intr;
	}

+	hns3_tm_conf_init(eth_dev);
+
	return 0;

 err_enable_intr:
	hns3_fdir_filter_uninit(hns);
 err_fdir:
+	(void)hns3_firmware_compat_config(hw, false);
	hns3_uninit_umv_space(hw);
 err_init_hw:
	hns3_tqp_stats_uninit(hw);
@@ -4712,13 +4987,16 @@ hns3_uninit_pf(struct rte_eth_dev *eth_dev)

	PMD_INIT_FUNC_TRACE();

+	hns3_tm_conf_uninit(eth_dev);
	hns3_enable_hw_error_intr(hns, false);
	hns3_rss_uninit(hns);
	(void)hns3_config_gro(hw, false);
	hns3_promisc_uninit(hw);
	hns3_fdir_filter_uninit(hns);
+	(void)hns3_firmware_compat_config(hw, false);
	hns3_uninit_umv_space(hw);
	hns3_tqp_stats_uninit(hw);
+	hns3_config_mac_tnl_int(hw, false);
	hns3_pf_disable_irq0(hw);
	rte_intr_disable(&pci_dev->intr_handle);
	hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler,
@@ -4739,6 +5017,18 @@ hns3_do_start(struct hns3_adapter *hns, bool reset_queue)
	if (ret)
		return ret;

+	/*
+	 * hns3_dcb_cfg_update may configure the TM module, so
+	 * hns3_tm_conf_update must be called later.
+	 */
+	ret = hns3_tm_conf_update(hw);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "failed to update tm conf, ret = %d.", ret);
+		return ret;
+	}
+
+	hns3_enable_rxd_adv_layout(hw);
+
	ret = hns3_init_queues(hns, reset_queue);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to init queues, ret = %d.", ret);
@@ -4770,33 +5060,34 @@ hns3_map_rx_interrupt(struct rte_eth_dev *dev)
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint8_t base = RTE_INTR_VEC_ZERO_OFFSET;
-	uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET;
+	uint16_t base = RTE_INTR_VEC_ZERO_OFFSET;
+	uint16_t vec = RTE_INTR_VEC_ZERO_OFFSET;
	uint32_t intr_vector;
	uint16_t q_id;
	int ret;

-	if (dev->data->dev_conf.intr_conf.rxq == 0)
+	/*
+	 * hns3 needs a separate interrupt to be used as the event interrupt,
+	 * which cannot be shared with the task queue pairs, so the kernel
+	 * drivers need to support multiple interrupt vectors.
+	 */
+	if (dev->data->dev_conf.intr_conf.rxq == 0 ||
+	    !rte_intr_cap_multiple(intr_handle))
		return 0;

-	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);
+	intr_vector = hw->used_rx_queues;
+	/* creates event fd for each intr vector when MSIX is used */
+	if (rte_intr_efd_enable(intr_handle, intr_vector))
+		return -EINVAL;

-	/* check and configure queue intr-vector mapping */
-	if (rte_intr_cap_multiple(intr_handle) ||
-	    !RTE_ETH_DEV_SRIOV(dev).active) {
-		intr_vector = hw->used_rx_queues;
-		/* creates event fd for each intr vector when MSIX is used */
-		if (rte_intr_efd_enable(intr_handle, intr_vector))
-			return -EINVAL;
-	}
-	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+	if (intr_handle->intr_vec == NULL) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    hw->used_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
-			hns3_err(hw, "Failed to allocate %u rx_queues"
-				     " intr_vec", hw->used_rx_queues);
+			hns3_err(hw, "failed to allocate %u rx_queues intr_vec",
+				 hw->used_rx_queues);
			ret = -ENOMEM;
			goto alloc_intr_vec_error;
		}
@@ -4806,28 +5097,26 @@ hns3_map_rx_interrupt(struct rte_eth_dev *dev)
		vec = RTE_INTR_VEC_RXTX_OFFSET;
		base = RTE_INTR_VEC_RXTX_OFFSET;
	}
-	if (rte_intr_dp_is_en(intr_handle)) {
-		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
-			ret = hns3_bind_ring_with_vector(hw, vec, true,
-							 HNS3_RING_TYPE_RX,
-							 q_id);
-			if (ret)
-				goto bind_vector_error;
-			intr_handle->intr_vec[q_id] = vec;
-			if (vec < base + intr_handle->nb_efd - 1)
-				vec++;
-		}
+
+	for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
+		ret = hns3_bind_ring_with_vector(hw, vec, true,
+						 HNS3_RING_TYPE_RX, q_id);
+		if (ret)
+			goto bind_vector_error;
+		intr_handle->intr_vec[q_id] = vec;
+		/*
+		 * If there are not enough efds (e.g. not enough interrupts),
+		 * the remaining queues will be bound to the last interrupt.
+		 */
+		if (vec < base + intr_handle->nb_efd - 1)
+			vec++;
	}
	rte_intr_enable(intr_handle);
	return 0;

 bind_vector_error:
-	rte_intr_efd_disable(intr_handle);
-	if (intr_handle->intr_vec) {
-		free(intr_handle->intr_vec);
-		intr_handle->intr_vec = NULL;
-	}
-	return ret;
+	rte_free(intr_handle->intr_vec);
+	intr_handle->intr_vec = NULL;
 alloc_intr_vec_error:
	rte_intr_efd_disable(intr_handle);
	return ret;
 }
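For reference, the Rx interrupt vectors mapped above are what an application drives through the generic rte_eth_dev_rx_intr_* API, the same pattern used by examples such as l3fwd-power. A minimal sketch of the application side, not part of this patch; the port/queue ids and the wait loop are illustrative:

#include <rte_ethdev.h>
#include <rte_interrupts.h>

/* Sketch: sleep until the interrupt bound to one Rx queue fires. */
static void
rx_intr_wait(uint16_t port_id, uint16_t queue_id)
{
	struct rte_epoll_event event;

	/* Requires dev_conf.intr_conf.rxq = 1 at rte_eth_dev_configure()
	 * time, which is what hns3_map_rx_interrupt() checks above.
	 */
	if (rte_eth_dev_rx_intr_ctl_q(port_id, queue_id,
				      RTE_EPOLL_PER_THREAD,
				      RTE_INTR_EVENT_ADD, NULL) != 0)
		return;

	rte_eth_dev_rx_intr_enable(port_id, queue_id);
	/* Block until the NIC signals the vector bound to this queue. */
	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, -1);
	rte_eth_dev_rx_intr_disable(port_id, queue_id);
	/* ... then poll the queue with rte_eth_rx_burst() ... */
}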
@@ -4872,7 +5161,7 @@ hns3_dev_start(struct rte_eth_dev *dev)
	int ret;

	PMD_INIT_FUNC_TRACE();
-	if (rte_atomic16_read(&hw->reset.resetting))
+	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
		return -EBUSY;

	rte_spinlock_lock(&hw->lock);
@@ -4885,11 +5174,8 @@ hns3_dev_start(struct rte_eth_dev *dev)
		return ret;
	}
	ret = hns3_map_rx_interrupt(dev);
-	if (ret) {
-		hw->adapter_state = HNS3_NIC_CONFIGURED;
-		rte_spinlock_unlock(&hw->lock);
-		return ret;
-	}
+	if (ret)
+		goto map_rx_inter_err;

	/*
	 * There are three register used to control the status of a TQP
@@ -4903,19 +5189,12 @@ hns3_dev_start(struct rte_eth_dev *dev)
	 * status of queue in the dpdk framework.
	 */
	ret = hns3_start_all_txqs(dev);
-	if (ret) {
-		hw->adapter_state = HNS3_NIC_CONFIGURED;
-		rte_spinlock_unlock(&hw->lock);
-		return ret;
-	}
+	if (ret)
+		goto map_rx_inter_err;

	ret = hns3_start_all_rxqs(dev);
-	if (ret) {
-		hns3_stop_all_txqs(dev);
-		hw->adapter_state = HNS3_NIC_CONFIGURED;
-		rte_spinlock_unlock(&hw->lock);
-		return ret;
-	}
+	if (ret)
+		goto start_all_rxqs_fail;

	hw->adapter_state = HNS3_NIC_STARTED;
	rte_spinlock_unlock(&hw->lock);
@@ -4936,8 +5215,20 @@ hns3_dev_start(struct rte_eth_dev *dev)
	 */
	hns3_start_tqps(hw);

+	hns3_tm_dev_start_proc(hw);
+
	hns3_info(hw, "hns3 dev start successful!");
+
	return 0;
+
+start_all_rxqs_fail:
+	hns3_stop_all_txqs(dev);
+map_rx_inter_err:
+	(void)hns3_do_stop(hns);
+	hw->adapter_state = HNS3_NIC_CONFIGURED;
+	rte_spinlock_unlock(&hw->lock);
+
+	return ret;
 }

 static int
@@ -4946,12 +5237,23 @@ hns3_do_stop(struct hns3_adapter *hns)
	struct hns3_hw *hw = &hns->hw;
	int ret;

+	/*
+	 * The "hns3_do_stop" function will also be called by .stop_service to
+	 * prepare a reset. During a global or IMP reset, the command cannot
+	 * be sent to stop the Tx/Rx queues. The mbufs in the Tx/Rx queues may
+	 * be accessed during the reset process, so they cannot be released
+	 * during the reset and must instead be released after the reset has
+	 * completed.
+	 */
+	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0)
+		hns3_dev_release_mbufs(hns);
+
	ret = hns3_cfg_mac_mode(hw, false);
	if (ret)
		return ret;
	hw->mac.link_status = ETH_LINK_DOWN;

-	if (rte_atomic16_read(&hw->reset.disable_cmd) == 0) {
+	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
		hns3_configure_all_mac_addr(hns, true);
		ret = hns3_reset_all_tqps(hns);
		if (ret) {
@@ -5018,11 +5320,12 @@ hns3_dev_stop(struct rte_eth_dev *dev)
	rte_delay_ms(hw->tqps_num);

	rte_spinlock_lock(&hw->lock);
-	if (rte_atomic16_read(&hw->reset.resetting) == 0) {
+	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+		hns3_tm_dev_stop_proc(hw);
+		hns3_config_mac_tnl_int(hw, false);
		hns3_stop_tqps(hw);
		hns3_do_stop(hns);
		hns3_unmap_rx_interrupt(dev);
-		hns3_dev_release_mbufs(hns);
		hw->adapter_state = HNS3_NIC_CONFIGURED;
	}
	hns3_rx_scattered_reset(dev);
@@ -5493,7 +5796,7 @@ hns3_prepare_reset(struct hns3_adapter *hns)
	 * any mailbox handling or command to firmware is only valid
	 * after hns3_cmd_init is called.
	 */
-		rte_atomic16_set(&hw->reset.disable_cmd, 1);
+		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
		hw->reset.stats.request_cnt++;
		break;
	case HNS3_IMP_RESET:
@@ -5527,8 +5830,10 @@ hns3_stop_service(struct hns3_adapter *hns)
	struct rte_eth_dev *eth_dev;

	eth_dev = &rte_eth_devices[hw->data->port_id];
-	if (hw->adapter_state == HNS3_NIC_STARTED)
+	if (hw->adapter_state == HNS3_NIC_STARTED) {
		rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
+		hns3_update_link_status_and_event(hw);
+	}
	hw->mac.link_status = ETH_LINK_DOWN;

	hns3_set_rxtx_function(eth_dev);
@@ -5551,7 +5856,7 @@ hns3_stop_service(struct hns3_adapter *hns)
	 * from table space. Hence, for function reset software intervention is
	 * required to delete the entries
	 */
-	if (rte_atomic16_read(&hw->reset.disable_cmd) == 0)
+	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
		hns3_configure_all_mc_mac_addr(hns, true);
	rte_spinlock_unlock(&hw->lock);

@@ -5571,7 +5876,15 @@ hns3_start_service(struct hns3_adapter *hns)
	hns3_set_rxtx_function(eth_dev);
	hns3_mp_req_start_rxtx(eth_dev);
	if (hw->adapter_state == HNS3_NIC_STARTED) {
-		hns3_service_handler(eth_dev);
+		/*
+		 * The parent of this function already holds hns3_hw.lock, and
+		 * hns3_service_handler may report an LSE. In a bonding
+		 * application the LSC callback may call the driver's ops,
+		 * which acquire hns3_hw.lock again, leading to a deadlock.
+		 * Defer the call to hns3_service_handler to avoid this.
+		 */
+		rte_eal_alarm_set(HNS3_SERVICE_QUICK_INTERVAL,
				  hns3_service_handler, eth_dev);

		/* Enable interrupt of all rx queues before enabling queues */
		hns3_dev_all_rx_queue_intr_enable(hw, true);
@@ -5665,8 +5978,10 @@ hns3_reset_service(void *param)
	 * The interrupt may have been lost. It is necessary to handle
	 * the interrupt to recover from the error.
	 */
-	if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_DEFERRED) {
-		rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_REQUESTED);
+	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+			    SCHEDULE_DEFERRED) {
+		__atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
+				 __ATOMIC_RELAXED);
		hns3_err(hw, "Handling interrupts in delayed tasks");
		hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
		reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
@@ -5675,7 +5990,7 @@ hns3_reset_service(void *param)
			hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
		}
	}
-	rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_NONE);
+	__atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);

	/*
	 * Check if there is any ongoing reset in the hardware.
This status can @@ -6033,6 +6348,163 @@ hns3_query_dev_fec_info(struct hns3_hw *hw) return ret; } +static bool +hns3_optical_module_existed(struct hns3_hw *hw) +{ + struct hns3_cmd_desc desc; + bool existed; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_EXIST, true); + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) { + hns3_err(hw, + "fail to get optical module exist state, ret = %d.\n", + ret); + return false; + } + existed = !!desc.data[0]; + + return existed; +} + +static int +hns3_get_module_eeprom_data(struct hns3_hw *hw, uint32_t offset, + uint32_t len, uint8_t *data) +{ +#define HNS3_SFP_INFO_CMD_NUM 6 +#define HNS3_SFP_INFO_MAX_LEN \ + (HNS3_SFP_INFO_BD0_LEN + \ + (HNS3_SFP_INFO_CMD_NUM - 1) * HNS3_SFP_INFO_BDX_LEN) + struct hns3_cmd_desc desc[HNS3_SFP_INFO_CMD_NUM]; + struct hns3_sfp_info_bd0_cmd *sfp_info_bd0; + uint16_t read_len; + uint16_t copy_len; + int ret; + int i; + + for (i = 0; i < HNS3_SFP_INFO_CMD_NUM; i++) { + hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_SFP_EEPROM, + true); + if (i < HNS3_SFP_INFO_CMD_NUM - 1) + desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); + } + + sfp_info_bd0 = (struct hns3_sfp_info_bd0_cmd *)desc[0].data; + sfp_info_bd0->offset = rte_cpu_to_le_16((uint16_t)offset); + read_len = RTE_MIN(len, HNS3_SFP_INFO_MAX_LEN); + sfp_info_bd0->read_len = rte_cpu_to_le_16((uint16_t)read_len); + + ret = hns3_cmd_send(hw, desc, HNS3_SFP_INFO_CMD_NUM); + if (ret) { + hns3_err(hw, "fail to get module EEPROM info, ret = %d.\n", + ret); + return ret; + } + + /* The data format in BD0 is different with the others. */ + copy_len = RTE_MIN(len, HNS3_SFP_INFO_BD0_LEN); + memcpy(data, sfp_info_bd0->data, copy_len); + read_len = copy_len; + + for (i = 1; i < HNS3_SFP_INFO_CMD_NUM; i++) { + if (read_len >= len) + break; + + copy_len = RTE_MIN(len - read_len, HNS3_SFP_INFO_BDX_LEN); + memcpy(data + read_len, desc[i].data, copy_len); + read_len += copy_len; + } + + return (int)read_len; +} + +static int +hns3_get_module_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *info) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns); + uint32_t offset = info->offset; + uint32_t len = info->length; + uint8_t *data = info->data; + uint32_t read_len = 0; + + if (hw->mac.media_type != HNS3_MEDIA_TYPE_FIBER) + return -ENOTSUP; + + if (!hns3_optical_module_existed(hw)) { + hns3_err(hw, "fail to read module EEPROM: no module is connected.\n"); + return -EIO; + } + + while (read_len < len) { + int ret; + ret = hns3_get_module_eeprom_data(hw, offset + read_len, + len - read_len, + data + read_len); + if (ret < 0) + return -EIO; + read_len += ret; + } + + return 0; +} + +static int +hns3_get_module_info(struct rte_eth_dev *dev, + struct rte_eth_dev_module_info *modinfo) +{ +#define HNS3_SFF8024_ID_SFP 0x03 +#define HNS3_SFF8024_ID_QSFP_8438 0x0c +#define HNS3_SFF8024_ID_QSFP_8436_8636 0x0d +#define HNS3_SFF8024_ID_QSFP28_8636 0x11 +#define HNS3_SFF_8636_V1_3 0x03 + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns); + struct rte_dev_eeprom_info info; + struct hns3_sfp_type sfp_type; + int ret; + + memset(&sfp_type, 0, sizeof(sfp_type)); + memset(&info, 0, sizeof(info)); + info.data = (uint8_t *)&sfp_type; + info.length = sizeof(sfp_type); + ret = hns3_get_module_eeprom(dev, &info); + if (ret) + return ret; + + switch (sfp_type.type) { + case HNS3_SFF8024_ID_SFP: + modinfo->type = RTE_ETH_MODULE_SFF_8472; + modinfo->eeprom_len = 
RTE_ETH_MODULE_SFF_8472_LEN; + break; + case HNS3_SFF8024_ID_QSFP_8438: + modinfo->type = RTE_ETH_MODULE_SFF_8436; + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN; + break; + case HNS3_SFF8024_ID_QSFP_8436_8636: + if (sfp_type.ext_type < HNS3_SFF_8636_V1_3) { + modinfo->type = RTE_ETH_MODULE_SFF_8436; + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN; + } else { + modinfo->type = RTE_ETH_MODULE_SFF_8636; + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN; + } + break; + case HNS3_SFF8024_ID_QSFP28_8636: + modinfo->type = RTE_ETH_MODULE_SFF_8636; + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN; + break; + default: + hns3_err(hw, "unknown module, type = %u, extra_type = %u.\n", + sfp_type.type, sfp_type.ext_type); + return -EINVAL; + } + + return 0; +} + static const struct eth_dev_ops hns3_eth_dev_ops = { .dev_configure = hns3_dev_configure, .dev_start = hns3_dev_start, @@ -6084,11 +6556,15 @@ static const struct eth_dev_ops hns3_eth_dev_ops = { .vlan_offload_set = hns3_vlan_offload_set, .vlan_pvid_set = hns3_vlan_pvid_set, .get_reg = hns3_get_regs, + .get_module_info = hns3_get_module_info, + .get_module_eeprom = hns3_get_module_eeprom, .get_dcb_info = hns3_get_dcb_info, .dev_supported_ptypes_get = hns3_dev_supported_ptypes_get, .fec_get_capability = hns3_fec_get_capability, .fec_get = hns3_fec_get, .fec_set = hns3_fec_set, + .tm_ops_get = hns3_tm_ops_get, + .tx_done_cleanup = hns3_tx_done_cleanup, }; static const struct hns3_reset_ops hns3_reset_ops = { @@ -6138,8 +6614,6 @@ hns3_dev_init(struct rte_eth_dev *eth_dev) return 0; } - eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; - ret = hns3_mp_init_primary(); if (ret) { PMD_INIT_LOG(ERR, @@ -6196,7 +6670,8 @@ hns3_dev_init(struct rte_eth_dev *eth_dev) hw->adapter_state = HNS3_NIC_INITIALIZED; - if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_PENDING) { + if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) == + SCHEDULE_PENDING) { hns3_err(hw, "Reschedule reset service after dev_init"); hns3_schedule_reset(hns); } else { @@ -6235,8 +6710,11 @@ hns3_dev_uninit(struct rte_eth_dev *eth_dev) PMD_INIT_FUNC_TRACE(); - if (rte_eal_process_type() != RTE_PROC_PRIMARY) - return -EPERM; + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + rte_free(eth_dev->process_private); + eth_dev->process_private = NULL; + return 0; + } if (hw->adapter_state < HNS3_NIC_CLOSING) hns3_dev_close(eth_dev);
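The new .get_module_info / .get_module_eeprom ops added above are reached through the generic ethdev API. A minimal sketch of an application-side dump, not part of this patch; the function name and error handling are illustrative:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <rte_dev_info.h>
#include <rte_ethdev.h>

/* Read the whole SFP/QSFP module EEPROM of a port; on an hns3 PF port
 * these calls resolve to hns3_get_module_info()/hns3_get_module_eeprom().
 */
static int
dump_module_eeprom(uint16_t port_id)
{
	struct rte_eth_dev_module_info modinfo;
	struct rte_dev_eeprom_info info;
	uint8_t *data;
	int ret;

	ret = rte_eth_dev_get_module_info(port_id, &modinfo);
	if (ret != 0)
		return ret;

	data = calloc(1, modinfo.eeprom_len);
	if (data == NULL)
		return -ENOMEM;

	memset(&info, 0, sizeof(info));
	info.offset = 0;
	info.length = modinfo.eeprom_len;
	info.data = data;

	ret = rte_eth_dev_get_module_eeprom(port_id, &info);
	if (ret == 0)
		printf("port %u: module type %u, read %u bytes\n",
		       port_id, modinfo.type, info.length);

	free(data);
	return ret;
}

Note that hns3_get_module_eeprom() above returns -ENOTSUP for copper ports and -EIO when no module is plugged, so callers should expect these errors on such configurations.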