X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fhns3%2Fhns3_ethdev.c;h=73d504253d2b5a42a3631a5323f18077df83200d;hb=a3d4f4d291d79e2801397055067d903ef5e4d4aa;hp=3e3a8c5f4e6f4536ba066cf15270a9a0c93fd736;hpb=40b5448d045573d4d99c866665cd1a9350390f6c;p=dpdk.git diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c index 3e3a8c5f4e..73d504253d 100644 --- a/drivers/net/hns3/hns3_ethdev.c +++ b/drivers/net/hns3/hns3_ethdev.c @@ -35,8 +35,6 @@ #define HNS3_DEFAULT_PORT_CONF_QUEUES_NUM 1 #define HNS3_SERVICE_INTERVAL 1000000 /* us */ -#define HNS3_PORT_BASE_VLAN_DISABLE 0 -#define HNS3_PORT_BASE_VLAN_ENABLE 1 #define HNS3_INVLID_PVID 0xFFFF #define HNS3_FILTER_TYPE_VF 0 @@ -58,13 +56,13 @@ #define HNS3_FUN_RST_ING_B 0 #define HNS3_VECTOR0_IMP_RESET_INT_B 1 +#define HNS3_VECTOR0_IMP_CMDQ_ERR_B 4U +#define HNS3_VECTOR0_IMP_RD_POISON_B 5U +#define HNS3_VECTOR0_ALL_MSIX_ERR_B 6U #define HNS3_RESET_WAIT_MS 100 #define HNS3_RESET_WAIT_CNT 200 -int hns3_logtype_init; -int hns3_logtype_driver; - enum hns3_evt_cause { HNS3_VECTOR0_EVENT_RST, HNS3_VECTOR0_EVENT_MBX, @@ -102,12 +100,14 @@ hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval) struct hns3_hw *hw = &hns->hw; uint32_t vector0_int_stats; uint32_t cmdq_src_val; + uint32_t hw_err_src_reg; uint32_t val; enum hns3_evt_cause ret; /* fetch the events from their corresponding regs */ vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG); cmdq_src_val = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG); + hw_err_src_reg = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG); /* * Assumption: If by any chance reset and mailbox events are reported @@ -150,8 +150,9 @@ hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval) } /* check for vector0 msix event source */ - if (vector0_int_stats & HNS3_VECTOR0_REG_MSIX_MASK) { - val = vector0_int_stats; + if (vector0_int_stats & HNS3_VECTOR0_REG_MSIX_MASK || + hw_err_src_reg & HNS3_RAS_REG_NFE_MASK) { + val = vector0_int_stats | hw_err_src_reg; ret = HNS3_VECTOR0_EVENT_ERR; goto out; } @@ -164,9 +165,9 @@ hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval) goto out; } - if (clearval && (vector0_int_stats || cmdq_src_val)) - hns3_warn(hw, "surprise irq ector0_int_stats:0x%x cmdq_src_val:0x%x", - vector0_int_stats, cmdq_src_val); + if (clearval && (vector0_int_stats || cmdq_src_val || hw_err_src_reg)) + hns3_warn(hw, "vector0_int_stats:0x%x cmdq_src_val:0x%x hw_err_src_reg:0x%x", + vector0_int_stats, cmdq_src_val, hw_err_src_reg); val = vector0_int_stats; ret = HNS3_VECTOR0_EVENT_OTHER; out: @@ -220,11 +221,14 @@ hns3_interrupt_handler(void *param) /* vector 0 interrupt is shared with reset and mailbox source events. 
*/ if (event_cause == HNS3_VECTOR0_EVENT_ERR) { + hns3_warn(hw, "Received err interrupt"); hns3_handle_msix_error(hns, &hw->reset.request); + hns3_handle_ras_error(hns, &hw->reset.request); hns3_schedule_reset(hns); - } else if (event_cause == HNS3_VECTOR0_EVENT_RST) + } else if (event_cause == HNS3_VECTOR0_EVENT_RST) { + hns3_warn(hw, "Received reset interrupt"); hns3_schedule_reset(hns); - else if (event_cause == HNS3_VECTOR0_EVENT_MBX) + } else if (event_cause == HNS3_VECTOR0_EVENT_MBX) hns3_dev_handle_mbx_msg(hw); else hns3_err(hw, "Received unknown event"); @@ -313,13 +317,14 @@ static int hns3_restore_vlan_table(struct hns3_adapter *hns) { struct hns3_user_vlan_table *vlan_entry; + struct hns3_hw *hw = &hns->hw; struct hns3_pf *pf = &hns->pf; uint16_t vlan_id; int ret = 0; - if (pf->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE) + if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE) return hns3_vlan_pvid_configure(hns, - pf->port_base_vlan_cfg.pvid, 1); + hw->port_base_vlan_cfg.pvid, 1); LIST_FOREACH(vlan_entry, &pf->vlan_list, next) { if (vlan_entry->hd_tbl_status) { @@ -336,7 +341,7 @@ hns3_restore_vlan_table(struct hns3_adapter *hns) static int hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on) { - struct hns3_pf *pf = &hns->pf; + struct hns3_hw *hw = &hns->hw; bool writen_to_tbl = false; int ret = 0; @@ -354,7 +359,7 @@ hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on) * vlan list. The vlan id in vlan list will be writen in vlan filter * table until port base vlan disabled */ - if (pf->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) { + if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) { ret = hns3_set_port_vlan_filter(hns, vlan_id, on); writen_to_tbl = true; } @@ -474,10 +479,9 @@ hns3_set_vlan_rx_offload_cfg(struct hns3_adapter *hns, /* * In current version VF is not supported when PF is driven by DPDK - * driver, the PF-related vf_id is 0, just need to configure parameters - * for vport_id 0. + * driver, just need to configure parameters for PF vport. 
*/ - vport_id = 0; + vport_id = HNS3_PF_FUNC_ID; req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD; bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE); req->vf_bitmap[req->vf_offset] = bitmap; @@ -508,11 +512,10 @@ static int hns3_en_hw_strip_rxvtag(struct hns3_adapter *hns, bool enable) { struct hns3_rx_vtag_cfg rxvlan_cfg; - struct hns3_pf *pf = &hns->pf; struct hns3_hw *hw = &hns->hw; int ret; - if (pf->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) { + if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) { rxvlan_cfg.strip_tag1_en = false; rxvlan_cfg.strip_tag2_en = enable; } else { @@ -564,14 +567,16 @@ hns3_vlan_filter_init(struct hns3_adapter *hns) int ret; ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_VF, - HNS3_FILTER_FE_EGRESS, false, 0); + HNS3_FILTER_FE_EGRESS, false, + HNS3_PF_FUNC_ID); if (ret) { hns3_err(hw, "failed to init vf vlan filter, ret = %d", ret); return ret; } ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT, - HNS3_FILTER_FE_INGRESS, false, 0); + HNS3_FILTER_FE_INGRESS, false, + HNS3_PF_FUNC_ID); if (ret) hns3_err(hw, "failed to init port vlan filter, ret = %d", ret); @@ -585,7 +590,8 @@ hns3_enable_vlan_filter(struct hns3_adapter *hns, bool enable) int ret; ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT, - HNS3_FILTER_FE_INGRESS, enable, 0); + HNS3_FILTER_FE_INGRESS, enable, + HNS3_PF_FUNC_ID); if (ret) hns3_err(hw, "failed to %s port vlan filter, ret = %d", enable ? "enable" : "disable", ret); @@ -674,10 +680,9 @@ hns3_set_vlan_tx_offload_cfg(struct hns3_adapter *hns, /* * In current version VF is not supported when PF is driven by DPDK - * driver, the PF-related vf_id is 0, just need to configure parameters - * for vport_id 0. + * driver, just need to configure parameters for PF vport. */ - vport_id = 0; + vport_id = HNS3_PF_FUNC_ID; req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD; bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE); req->vf_bitmap[req->vf_offset] = bitmap; @@ -727,12 +732,12 @@ hns3_vlan_txvlan_cfg(struct hns3_adapter *hns, uint16_t port_base_vlan_state, static void hns3_store_port_base_vlan_info(struct hns3_adapter *hns, uint16_t pvid, int on) { - struct hns3_pf *pf = &hns->pf; + struct hns3_hw *hw = &hns->hw; - pf->port_base_vlan_cfg.state = on ? + hw->port_base_vlan_cfg.state = on ? 
HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE; - pf->port_base_vlan_cfg.pvid = pvid; + hw->port_base_vlan_cfg.pvid = pvid; } static void @@ -776,13 +781,12 @@ static void hns3_remove_all_vlan_table(struct hns3_adapter *hns) { struct hns3_hw *hw = &hns->hw; - struct hns3_pf *pf = &hns->pf; int ret; hns3_rm_all_vlan_table(hns, true); - if (pf->port_base_vlan_cfg.pvid != HNS3_INVLID_PVID) { + if (hw->port_base_vlan_cfg.pvid != HNS3_INVLID_PVID) { ret = hns3_set_port_vlan_filter(hns, - pf->port_base_vlan_cfg.pvid, 0); + hw->port_base_vlan_cfg.pvid, 0); if (ret) { hns3_err(hw, "Failed to remove all vlan table, ret =%d", ret); @@ -796,7 +800,6 @@ hns3_update_vlan_filter_entries(struct hns3_adapter *hns, uint16_t port_base_vlan_state, uint16_t new_pvid, uint16_t old_pvid) { - struct hns3_pf *pf = &hns->pf; struct hns3_hw *hw = &hns->hw; int ret = 0; @@ -824,32 +827,35 @@ hns3_update_vlan_filter_entries(struct hns3_adapter *hns, } } - if (new_pvid == pf->port_base_vlan_cfg.pvid) + if (new_pvid == hw->port_base_vlan_cfg.pvid) hns3_add_all_vlan_table(hns); return ret; } static int -hns3_en_rx_strip_all(struct hns3_adapter *hns, int on) +hns3_en_pvid_strip(struct hns3_adapter *hns, int on) { + struct hns3_rx_vtag_cfg *old_cfg = &hns->pf.vtag_config.rx_vcfg; struct hns3_rx_vtag_cfg rx_vlan_cfg; - struct hns3_hw *hw = &hns->hw; bool rx_strip_en; int ret; - rx_strip_en = on ? true : false; - rx_vlan_cfg.strip_tag1_en = rx_strip_en; - rx_vlan_cfg.strip_tag2_en = rx_strip_en; + rx_strip_en = old_cfg->rx_vlan_offload_en ? true : false; + if (on) { + rx_vlan_cfg.strip_tag1_en = rx_strip_en; + rx_vlan_cfg.strip_tag2_en = true; + } else { + rx_vlan_cfg.strip_tag1_en = false; + rx_vlan_cfg.strip_tag2_en = rx_strip_en; + } rx_vlan_cfg.vlan1_vlan_prionly = false; rx_vlan_cfg.vlan2_vlan_prionly = false; - rx_vlan_cfg.rx_vlan_offload_en = rx_strip_en; + rx_vlan_cfg.rx_vlan_offload_en = old_cfg->rx_vlan_offload_en; ret = hns3_set_vlan_rx_offload_cfg(hns, &rx_vlan_cfg); - if (ret) { - hns3_err(hw, "enable strip rx failed, ret =%d", ret); + if (ret) return ret; - } hns3_update_rx_offload_cfg(hns, &rx_vlan_cfg); return ret; @@ -858,17 +864,16 @@ hns3_en_rx_strip_all(struct hns3_adapter *hns, int on) static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, int on) { - struct hns3_pf *pf = &hns->pf; struct hns3_hw *hw = &hns->hw; uint16_t port_base_vlan_state; uint16_t old_pvid; int ret; - if (on == 0 && pvid != pf->port_base_vlan_cfg.pvid) { - if (pf->port_base_vlan_cfg.pvid != HNS3_INVLID_PVID) + if (on == 0 && pvid != hw->port_base_vlan_cfg.pvid) { + if (hw->port_base_vlan_cfg.pvid != HNS3_INVLID_PVID) hns3_warn(hw, "Invalid operation! 
As current pvid set "
			  "is %u, disable pvid %u is invalid",
-			  pf->port_base_vlan_cfg.pvid, pvid);
+			  hw->port_base_vlan_cfg.pvid, pvid);
 		return 0;
 	}
 
@@ -876,19 +881,21 @@ hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, int on)
 			       HNS3_PORT_BASE_VLAN_DISABLE;
 	ret = hns3_vlan_txvlan_cfg(hns, port_base_vlan_state, pvid);
 	if (ret) {
-		hns3_err(hw, "Failed to config tx vlan, ret =%d", ret);
+		hns3_err(hw, "failed to config tx vlan for pvid, ret = %d",
+			 ret);
 		return ret;
 	}
 
-	ret = hns3_en_rx_strip_all(hns, on);
+	ret = hns3_en_pvid_strip(hns, on);
 	if (ret) {
-		hns3_err(hw, "Failed to config rx vlan strip, ret =%d", ret);
+		hns3_err(hw, "failed to config rx vlan strip for pvid, "
+			 "ret = %d", ret);
 		return ret;
 	}
 
 	if (pvid == HNS3_INVLID_PVID)
 		goto out;
-	old_pvid = pf->port_base_vlan_cfg.pvid;
+	old_pvid = hw->port_base_vlan_cfg.pvid;
 	ret = hns3_update_vlan_filter_entries(hns, port_base_vlan_state, pvid,
 					      old_pvid);
 	if (ret) {
@@ -907,6 +914,8 @@ hns3_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
 {
 	struct hns3_adapter *hns = dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
+	bool pvid_en_state_change;
+	uint16_t pvid_state;
 	int ret;
 
 	if (pvid > RTE_ETHER_MAX_VLAN_ID) {
@@ -915,20 +924,34 @@ hns3_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
 		return -EINVAL;
 	}
 
+	/*
+	 * If the PVID enable state changes, the PVID state cached in
+	 * struct hns3_tx_queue/hns3_rx_queue must be refreshed as well.
+	 */
+	pvid_state = hw->port_base_vlan_cfg.state;
+	if ((on && pvid_state == HNS3_PORT_BASE_VLAN_ENABLE) ||
+	    (!on && pvid_state == HNS3_PORT_BASE_VLAN_DISABLE))
+		pvid_en_state_change = false;
+	else
+		pvid_en_state_change = true;
+
 	rte_spinlock_lock(&hw->lock);
 	ret = hns3_vlan_pvid_configure(hns, pvid, on);
 	rte_spinlock_unlock(&hw->lock);
-	return ret;
+	if (ret)
+		return ret;
+
+	if (pvid_en_state_change)
+		hns3_update_all_queues_pvid_state(hw);
+
+	return 0;
 }
 
 static void
 init_port_base_vlan_info(struct hns3_hw *hw)
 {
-	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
-	struct hns3_pf *pf = &hns->pf;
-
-	pf->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
-	pf->port_base_vlan_cfg.pvid = HNS3_INVLID_PVID;
+	hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
+	hw->port_base_vlan_cfg.pvid = HNS3_INVLID_PVID;
 }
 
 static int
@@ -1059,6 +1082,13 @@ hns3_dev_configure_vlan(struct rte_eth_dev *dev)
 		return ret;
 	}
 
+	/*
+	 * If pvid config is not set in rte_eth_conf, the driver does not
+	 * need to write any VLAN pvid related configuration to hardware.
+	 */
+	if (txmode->pvid == 0 && txmode->hw_vlan_insert_pvid == 0)
+		return 0;
+
 	/* Apply pvid setting */
 	ret = hns3_vlan_pvid_set(dev, txmode->pvid,
 				 txmode->hw_vlan_insert_pvid);
@@ -1094,25 +1124,6 @@ hns3_config_tso(struct hns3_hw *hw, unsigned int tso_mss_min,
 	return hns3_cmd_send(hw, &desc, 1);
 }
 
-int
-hns3_config_gro(struct hns3_hw *hw, bool en)
-{
-	struct hns3_cfg_gro_status_cmd *req;
-	struct hns3_cmd_desc desc;
-	int ret;
-
-	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GRO_GENERIC_CONFIG, false);
-	req = (struct hns3_cfg_gro_status_cmd *)desc.data;
-
-	req->gro_en = rte_cpu_to_le_16(en ? 
1 : 0); - - ret = hns3_cmd_send(hw, &desc, 1); - if (ret) - hns3_err(hw, "GRO hardware config cmd failed, ret = %d", ret); - - return ret; -} - static int hns3_set_umv_space(struct hns3_hw *hw, uint16_t space_size, uint16_t *allocated_size, bool is_alloc) @@ -1399,7 +1410,7 @@ hns3_add_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); struct hns3_mac_vlan_tbl_entry_cmd req; struct hns3_pf *pf = &hns->pf; - struct hns3_cmd_desc desc; + struct hns3_cmd_desc desc[3]; char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; uint16_t egress_port = 0; uint8_t vf_id; @@ -1418,10 +1429,9 @@ hns3_add_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) /* * In current version VF is not supported when PF is driven by DPDK - * driver, the PF-related vf_id is 0, just need to configure parameters - * for vf_id 0. + * driver, just need to configure parameters for PF vport. */ - vf_id = 0; + vf_id = HNS3_PF_FUNC_ID; hns3_set_field(egress_port, HNS3_MAC_EPORT_VFID_M, HNS3_MAC_EPORT_VFID_S, vf_id); @@ -1434,7 +1444,7 @@ hns3_add_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) * it if the entry is inexistent. Repeated unicast entry * is not allowed in the mac vlan table. */ - ret = hns3_lookup_mac_vlan_tbl(hw, &req, &desc, false); + ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, false); if (ret == -ENOENT) { if (!hns3_is_umv_space_full(hw)) { ret = hns3_add_mac_vlan_tbl(hw, &req, NULL); @@ -1774,10 +1784,9 @@ hns3_add_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) /* * In current version VF is not supported when PF is driven by DPDK - * driver, the PF-related vf_id is 0, just need to configure parameters - * for vf_id 0. + * driver, just need to configure parameters for PF vport. */ - vf_id = 0; + vf_id = HNS3_PF_FUNC_ID; hns3_update_desc_vfid(desc, vf_id, false); ret = hns3_add_mac_vlan_tbl(hw, &req, desc); if (ret) { @@ -1817,10 +1826,9 @@ hns3_remove_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) /* * This mac addr exist, remove this handle's VFID for it. * In current version VF is not supported when PF is driven by - * DPDK driver, the PF-related vf_id is 0, just need to - * configure parameters for vf_id 0. + * DPDK driver, just need to configure parameters for PF vport. */ - vf_id = 0; + vf_id = HNS3_PF_FUNC_ID; hns3_update_desc_vfid(desc, vf_id, true); /* All the vfid is zero, so need to delete this entry */ @@ -2064,8 +2072,8 @@ hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del) err = ret; rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, addr); - hns3_dbg(hw, "%s mc mac addr: %s failed", - del ? "Remove" : "Restore", mac_str); + hns3_dbg(hw, "%s mc mac addr: %s failed for pf: ret = %d", + del ? "Remove" : "Restore", mac_str, ret); } } return err; @@ -2209,7 +2217,7 @@ hns3_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, bool mmap, static int hns3_init_ring_with_vector(struct hns3_hw *hw) { - uint8_t vec; + uint16_t vec; int ret; int i; @@ -2220,26 +2228,23 @@ hns3_init_ring_with_vector(struct hns3_hw *hw) * vector. In the initialization clearing the all hardware mapping * relationship configurations between queues and interrupt vectors is * needed, so some error caused by the residual configurations, such as - * the unexpected Tx interrupt, can be avoid. 
Because of the hardware
-	 * constraints in hns3 hardware engine, we have to implement clearing
-	 * the mapping relationship configurations by binding all queues to the
-	 * last interrupt vector and reserving the last interrupt vector. This
-	 * method results in a decrease of the maximum queues when upper
-	 * applications call the rte_eth_dev_configure API function to enable
-	 * Rx interrupt.
+	 * the unexpected Tx interrupt, can be avoided.
 	 */
 	vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
-	hw->intr_tqps_num = vec - 1; /* the last interrupt is reserved */
+	if (hw->intr.mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE)
+		vec = vec - 1; /* the last interrupt is reserved */
+	hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num);
 	for (i = 0; i < hw->intr_tqps_num; i++) {
 		/*
-		 * Set gap limiter and rate limiter configuration of queue's
-		 * interrupt.
+		 * Set gap limiter/rate limiter/quantity limiter algorithm
+		 * configuration for the queue's interrupt coalescing.
 		 */
 		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
 				       HNS3_TQP_INTR_GL_DEFAULT);
 		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
 				       HNS3_TQP_INTR_GL_DEFAULT);
 		hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
+		hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT);
 
 		ret = hns3_bind_ring_with_vector(hw, vec, false,
 						 HNS3_RING_TYPE_TX, i);
@@ -2273,6 +2278,7 @@ hns3_dev_configure(struct rte_eth_dev *dev)
 	uint16_t nb_tx_q = dev->data->nb_tx_queues;
 	struct rte_eth_rss_conf rss_conf;
 	uint16_t mtu;
+	bool gro_en;
 	int ret;
 
 	/*
@@ -2306,6 +2312,7 @@ hns3_dev_configure(struct rte_eth_dev *dev)
 
 	/* When RSS is not configured, redirect the packet queue 0 */
 	if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
+		conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
 		rss_conf = conf->rx_adv_conf.rss_conf;
 		if (rss_conf.rss_key == NULL) {
 			rss_conf.rss_key = rss_cfg->key;
@@ -2338,6 +2345,18 @@ hns3_dev_configure(struct rte_eth_dev *dev)
 	if (ret)
 		goto cfg_err;
 
+	/* config hardware GRO */
+	gro_en = conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ? 
true : false; + ret = hns3_config_gro(hw, gro_en); + if (ret) + goto cfg_err; + + hns->rx_simple_allowed = true; + hns->rx_vec_allowed = true; + hns->tx_simple_allowed = true; + hns->tx_vec_allowed = true; + + hns3_init_rx_ptype_tble(dev); hw->adapter_state = HNS3_NIC_CONFIGURED; return 0; @@ -2442,9 +2461,10 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) info->max_rx_queues = queue_num; info->max_tx_queues = hw->tqps_num; info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */ - info->min_rx_bufsize = hw->rx_buf_len; + info->min_rx_bufsize = HNS3_MIN_BD_BUF_SIZE; info->max_mac_addrs = HNS3_UC_MACADDR_NUM; info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD; + info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE; info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM | @@ -2454,25 +2474,22 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) DEV_RX_OFFLOAD_KEEP_CRC | DEV_RX_OFFLOAD_SCATTER | DEV_RX_OFFLOAD_VLAN_STRIP | - DEV_RX_OFFLOAD_QINQ_STRIP | DEV_RX_OFFLOAD_VLAN_FILTER | - DEV_RX_OFFLOAD_VLAN_EXTEND | DEV_RX_OFFLOAD_JUMBO_FRAME | - DEV_RX_OFFLOAD_RSS_HASH); - info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE; + DEV_RX_OFFLOAD_RSS_HASH | + DEV_RX_OFFLOAD_TCP_LRO); info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_SCTP_CKSUM | - DEV_TX_OFFLOAD_VLAN_INSERT | - DEV_TX_OFFLOAD_QINQ_INSERT | DEV_TX_OFFLOAD_MULTI_SEGS | DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_GENEVE_TNL_TSO | - info->tx_queue_offload_capa); + DEV_TX_OFFLOAD_MBUF_FAST_FREE | + hns3_txvlan_cap_get(hw)); info->rx_desc_lim = (struct rte_eth_desc_lim) { .nb_max = HNS3_MAX_RING_DESC, @@ -2484,6 +2501,23 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) .nb_max = HNS3_MAX_RING_DESC, .nb_min = HNS3_MIN_RING_DESC, .nb_align = HNS3_ALIGN_RING_DESC, + .nb_seg_max = HNS3_MAX_TSO_BD_PER_PKT, + .nb_mtu_seg_max = HNS3_MAX_NON_TSO_BD_PER_PKT, + }; + + info->default_rxconf = (struct rte_eth_rxconf) { + .rx_free_thresh = HNS3_DEFAULT_RX_FREE_THRESH, + /* + * If there are no available Rx buffer descriptors, incoming + * packets are always dropped by hardware based on hns3 network + * engine. 
+ */ + .rx_drop_en = 1, + .offloads = 0, + }; + info->default_txconf = (struct rte_eth_txconf) { + .tx_rs_thresh = HNS3_DEFAULT_TX_RS_THRESH, + .offloads = 0, }; info->vmdq_queue_num = 0; @@ -2508,9 +2542,18 @@ hns3_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version, { struct hns3_adapter *hns = eth_dev->data->dev_private; struct hns3_hw *hw = &hns->hw; + uint32_t version = hw->fw_version; int ret; - ret = snprintf(fw_version, fw_size, "0x%08x", hw->fw_version); + ret = snprintf(fw_version, fw_size, "%lu.%lu.%lu.%lu", + hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M, + HNS3_FW_VERSION_BYTE3_S), + hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M, + HNS3_FW_VERSION_BYTE2_S), + hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M, + HNS3_FW_VERSION_BYTE1_S), + hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M, + HNS3_FW_VERSION_BYTE0_S)); ret += 1; /* add the size of '\0' */ if (fw_size < (uint32_t)ret) return ret; @@ -2542,6 +2585,7 @@ hns3_dev_link_update(struct rte_eth_dev *eth_dev, case ETH_SPEED_NUM_40G: case ETH_SPEED_NUM_50G: case ETH_SPEED_NUM_100G: + case ETH_SPEED_NUM_200G: new_link.link_speed = mac->link_speed; break; default: @@ -2609,7 +2653,6 @@ hns3_query_pf_resource(struct hns3_hw *hw) struct hns3_pf *pf = &hns->pf; struct hns3_pf_res_cmd *req; struct hns3_cmd_desc desc; - uint16_t num_msi; int ret; hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_PF_RSRC, true); @@ -2641,9 +2684,9 @@ hns3_query_pf_resource(struct hns3_hw *hw) pf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT); - num_msi = hns3_get_field(rte_le_to_cpu_16(req->pf_intr_vector_number), - HNS3_VEC_NUM_M, HNS3_VEC_NUM_S); - hw->num_msi = (num_msi > hw->tqps_num + 1) ? hw->tqps_num + 1 : num_msi; + hw->num_msi = + hns3_get_field(rte_le_to_cpu_16(req->nic_pf_intr_vector_number), + HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S); return 0; } @@ -2772,6 +2815,9 @@ hns3_parse_speed(int speed_cmd, uint32_t *speed) case HNS3_CFG_SPEED_100G: *speed = ETH_SPEED_NUM_100G; break; + case HNS3_CFG_SPEED_200G: + *speed = ETH_SPEED_NUM_200G; + break; default: return -EINVAL; } @@ -2779,6 +2825,105 @@ hns3_parse_speed(int speed_cmd, uint32_t *speed) return 0; } +static void +hns3_set_default_dev_specifications(struct hns3_hw *hw) +{ + hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT; + hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE; + hw->rss_key_size = HNS3_RSS_KEY_SIZE; + hw->max_tm_rate = HNS3_ETHER_MAX_RATE; +} + +static void +hns3_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc) +{ + struct hns3_dev_specs_0_cmd *req0; + + req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data; + + hw->max_non_tso_bd_num = req0->max_non_tso_bd_num; + hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size); + hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size); + hw->max_tm_rate = rte_le_to_cpu_32(req0->max_tm_rate); +} + +static int +hns3_query_dev_specifications(struct hns3_hw *hw) +{ + struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM]; + int ret; + int i; + + for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) { + hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, + true); + desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); + } + hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true); + + ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM); + if (ret) + return ret; + + hns3_parse_dev_specifications(hw, desc); + + return 0; +} + +static int +hns3_get_capability(struct hns3_hw *hw) +{ + struct rte_pci_device *pci_dev; + struct rte_eth_dev *eth_dev; + uint16_t 
device_id; + uint8_t revision; + int ret; + + eth_dev = &rte_eth_devices[hw->data->port_id]; + pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + device_id = pci_dev->id.device_id; + + if (device_id == HNS3_DEV_ID_25GE_RDMA || + device_id == HNS3_DEV_ID_50GE_RDMA || + device_id == HNS3_DEV_ID_100G_RDMA_MACSEC || + device_id == HNS3_DEV_ID_200G_RDMA) + hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1); + + /* Get PCI revision id */ + ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN, + HNS3_PCI_REVISION_ID); + if (ret != HNS3_PCI_REVISION_ID_LEN) { + PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d", + ret); + return -EIO; + } + hw->revision = revision; + + if (revision < PCI_REVISION_ID_HIP09_A) { + hns3_set_default_dev_specifications(hw); + hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE; + hw->intr.coalesce_mode = HNS3_INTR_COALESCE_NON_QL; + hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US; + hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN; + return 0; + } + + ret = hns3_query_dev_specifications(hw); + if (ret) { + PMD_INIT_LOG(ERR, + "failed to query dev specifications, ret = %d", + ret); + return ret; + } + + hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL; + hw->intr.coalesce_mode = HNS3_INTR_COALESCE_QL; + hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US; + hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN; + + return 0; +} + static int hns3_get_board_configuration(struct hns3_hw *hw) { @@ -2793,7 +2938,8 @@ hns3_get_board_configuration(struct hns3_hw *hw) return ret; } - if (cfg.media_type == HNS3_MEDIA_TYPE_COPPER) { + if (cfg.media_type == HNS3_MEDIA_TYPE_COPPER && + !hns3_dev_copper_supported(hw)) { PMD_INIT_LOG(ERR, "media type is copper, not supported."); return -EOPNOTSUPP; } @@ -2801,7 +2947,6 @@ hns3_get_board_configuration(struct hns3_hw *hw) hw->mac.media_type = cfg.media_type; hw->rss_size_max = cfg.rss_size_max; hw->rss_dis_flag = false; - hw->rx_buf_len = cfg.rx_buf_len; memcpy(hw->mac.mac_addr, cfg.mac_addr, RTE_ETHER_ADDR_LEN); hw->mac.phy_addr = cfg.phy_addr; hw->mac.default_addr_setted = false; @@ -2854,6 +2999,13 @@ hns3_get_configuration(struct hns3_hw *hw) return ret; } + /* Get device capability */ + ret = hns3_get_capability(hw); + if (ret) { + PMD_INIT_LOG(ERR, "failed to get device capability: %d.", ret); + return ret; + } + /* Get pf resource */ ret = hns3_query_pf_resource(hw); if (ret) { @@ -2910,8 +3062,8 @@ hns3_map_tqp(struct hns3_hw *hw) */ tqp_id = 0; num = DIV_ROUND_UP(hw->total_tqps_num, HNS3_MAX_TQP_NUM_PER_FUNC); - for (func_id = 0; func_id < num; func_id++) { - is_pf = func_id == 0 ? true : false; + for (func_id = HNS3_PF_FUNC_ID; func_id < num; func_id++) { + is_pf = func_id == HNS3_PF_FUNC_ID ? 
true : false; for (i = 0; i < HNS3_MAX_TQP_NUM_PER_FUNC && tqp_id < tqps_num; i++) { ret = hns3_map_tqps_to_func(hw, func_id, tqp_id++, i, @@ -2970,6 +3122,10 @@ hns3_cfg_mac_speed_dup_hw(struct hns3_hw *hw, uint32_t speed, uint8_t duplex) hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100G); break; + case ETH_SPEED_NUM_200G: + hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, + HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_200G); + break; default: PMD_INIT_LOG(ERR, "invalid speed (%u)", speed); return -EINVAL; @@ -3140,7 +3296,7 @@ hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc, + pf->dv_buf_size; shared_buf_tc = tc_num * aligned_mps + aligned_mps; - shared_std = roundup(max_t(uint32_t, shared_buf_min, shared_buf_tc), + shared_std = roundup(RTE_MAX(shared_buf_min, shared_buf_tc), HNS3_BUF_SIZE_UNIT); rx_priv = hns3_get_rx_priv_buff_alloced(buf_alloc); @@ -3170,8 +3326,7 @@ hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc, if (tc_num) hi_thrd = hi_thrd / tc_num; - hi_thrd = max_t(uint32_t, hi_thrd, - HNS3_BUF_MUL_BY * aligned_mps); + hi_thrd = RTE_MAX(hi_thrd, HNS3_BUF_MUL_BY * aligned_mps); hi_thrd = rounddown(hi_thrd, HNS3_BUF_SIZE_UNIT); lo_thrd = hi_thrd - aligned_mps / HNS3_BUF_DIV_BY; } else { @@ -3803,17 +3958,16 @@ hns3_set_promisc_mode(struct hns3_hw *hw, bool en_uc_pmc, bool en_mc_pmc) /* * In current version VF is not supported when PF is driven by DPDK - * driver, the PF-related vf_id is 0, just need to configure parameters - * for vf_id 0. + * driver, just need to configure parameters for PF vport. */ - vf_id = 0; + vf_id = HNS3_PF_FUNC_ID; hns3_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc, vf_id); return hns3_cmd_set_promisc_mode(hw, ¶m); } static int -hns3_clear_all_vfs_promisc_mode(struct hns3_hw *hw) +hns3_promisc_init(struct hns3_hw *hw) { struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); struct hns3_pf *pf = &hns->pf; @@ -3821,17 +3975,54 @@ hns3_clear_all_vfs_promisc_mode(struct hns3_hw *hw) uint16_t func_id; int ret; - /* func_id 0 is denoted PF, the VFs start from 1 */ - for (func_id = 1; func_id < pf->func_num; func_id++) { + ret = hns3_set_promisc_mode(hw, false, false); + if (ret) { + PMD_INIT_LOG(ERR, "failed to set promisc mode, ret = %d", ret); + return ret; + } + + /* + * In current version VFs are not supported when PF is driven by DPDK + * driver. After PF has been taken over by DPDK, the original VF will + * be invalid. So, there is a possibility of entry residues. It should + * clear VFs's promisc mode to avoid unnecessary bandwidth usage + * during init. + */ + for (func_id = HNS3_1ST_VF_FUNC_ID; func_id < pf->func_num; func_id++) { hns3_promisc_param_init(¶m, false, false, false, func_id); ret = hns3_cmd_set_promisc_mode(hw, ¶m); - if (ret) + if (ret) { + PMD_INIT_LOG(ERR, "failed to clear vf:%d promisc mode," + " ret = %d", func_id, ret); return ret; + } } return 0; } +static void +hns3_promisc_uninit(struct hns3_hw *hw) +{ + struct hns3_promisc_param param; + uint16_t func_id; + int ret; + + func_id = HNS3_PF_FUNC_ID; + + /* + * In current version VFs are not supported when PF is driven by + * DPDK driver, and VFs' promisc mode status has been cleared during + * init and their status will not change. So just clear PF's promisc + * mode status during uninit. 
+ */ + hns3_promisc_param_init(¶m, false, false, false, func_id); + ret = hns3_cmd_set_promisc_mode(hw, ¶m); + if (ret) + PMD_INIT_LOG(ERR, "failed to clear promisc status during" + " uninit, ret = %d", ret); +} + static int hns3_dev_promiscuous_enable(struct rte_eth_dev *dev) { @@ -4082,7 +4273,15 @@ hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable) hns3_set_bit(loop_en, HNS3_MAC_LINE_LP_B, 0); hns3_set_bit(loop_en, HNS3_MAC_FCS_TX_B, val); hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_B, val); - hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, val); + + /* + * If DEV_RX_OFFLOAD_KEEP_CRC offload is set, MAC will not strip CRC + * when receiving frames. Otherwise, CRC will be stripped. + */ + if (hw->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) + hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, 0); + else + hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, val); hns3_set_bit(loop_en, HNS3_MAC_TX_OVERSIZE_TRUNCATE_B, val); hns3_set_bit(loop_en, HNS3_MAC_RX_OVERSIZE_TRUNCATE_B, val); hns3_set_bit(loop_en, HNS3_MAC_TX_UNDER_MIN_ERR_B, val); @@ -4174,15 +4373,9 @@ hns3_init_hardware(struct hns3_adapter *hns) goto err_mac_init; } - ret = hns3_set_promisc_mode(hw, false, false); - if (ret) { - PMD_INIT_LOG(ERR, "Failed to set promisc mode: %d", ret); - goto err_mac_init; - } - - ret = hns3_clear_all_vfs_promisc_mode(hw); + ret = hns3_promisc_init(hw); if (ret) { - PMD_INIT_LOG(ERR, "Failed to clear all vfs promisc mode: %d", + PMD_INIT_LOG(ERR, "Failed to init promisc: %d", ret); goto err_mac_init; } @@ -4236,6 +4429,39 @@ err_mac_init: return ret; } +static int +hns3_clear_hw(struct hns3_hw *hw) +{ + struct hns3_cmd_desc desc; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_HW_STATE, false); + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret && ret != -EOPNOTSUPP) + return ret; + + return 0; +} + +static void +hns3_config_all_msix_error(struct hns3_hw *hw, bool enable) +{ + uint32_t val; + + /* + * The new firmware support report more hardware error types by + * msix mode. These errors are defined as RAS errors in hardware + * and belong to a different type from the MSI-x errors processed + * by the network driver. + * + * Network driver should open the new error report on initialition + */ + val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG); + hns3_set_bit(val, HNS3_VECTOR0_ALL_MSIX_ERR_B, enable ? 1 : 0); + hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, val); +} + static int hns3_init_pf(struct rte_eth_dev *eth_dev) { @@ -4266,6 +4492,20 @@ hns3_init_pf(struct rte_eth_dev *eth_dev) goto err_cmd_init; } + /* + * To ensure that the hardware environment is clean during + * initialization, the driver actively clear the hardware environment + * during initialization, including PF and corresponding VFs' vlan, mac, + * flow table configurations, etc. 
+ */ + ret = hns3_clear_hw(hw); + if (ret) { + PMD_INIT_LOG(ERR, "failed to clear hardware: %d", ret); + goto err_cmd_init; + } + + hns3_config_all_msix_error(hw, true); + ret = rte_intr_callback_register(&pci_dev->intr_handle, hns3_interrupt_handler, eth_dev); @@ -4341,12 +4581,15 @@ hns3_uninit_pf(struct rte_eth_dev *eth_dev) hns3_enable_hw_error_intr(hns, false); hns3_rss_uninit(hns); + (void)hns3_config_gro(hw, false); + hns3_promisc_uninit(hw); hns3_fdir_filter_uninit(hns); hns3_uninit_umv_space(hw); hns3_pf_disable_irq0(hw); rte_intr_disable(&pci_dev->intr_handle); hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler, eth_dev); + hns3_config_all_msix_error(hw, false); hns3_cmd_uninit(hw); hns3_cmd_destroy_queue(hw); hw->io_base = NULL; @@ -4512,6 +4755,7 @@ hns3_dev_start(struct rte_eth_dev *dev) hw->adapter_state = HNS3_NIC_STARTED; rte_spinlock_unlock(&hw->lock); + hns3_rx_scattered_calc(dev); hns3_set_rxtx_function(dev); hns3_mp_req_start_rxtx(dev); rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, dev); @@ -4610,6 +4854,7 @@ hns3_dev_stop(struct rte_eth_dev *dev) hns3_dev_release_mbufs(hns); hw->adapter_state = HNS3_NIC_CONFIGURED; } + hns3_rx_scattered_reset(dev); rte_eal_alarm_cancel(hns3_service_handler, dev); rte_spinlock_unlock(&hw->lock); } @@ -5031,6 +5276,28 @@ hns3_get_reset_level(struct hns3_adapter *hns, uint64_t *levels) return reset_level; } +static void +hns3_record_imp_error(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + uint32_t reg_val; + + reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG); + if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B)) { + hns3_warn(hw, "Detected IMP RD poison!"); + hns3_error_int_stats_add(hns, "IMP_RD_POISON_INT_STS"); + hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B, 0); + hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val); + } + + if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B)) { + hns3_warn(hw, "Detected IMP CMDQ error!"); + hns3_error_int_stats_add(hns, "CMDQ_MEM_ECC_INT_STS"); + hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B, 0); + hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val); + } +} + static int hns3_prepare_reset(struct hns3_adapter *hns) { @@ -5040,7 +5307,7 @@ hns3_prepare_reset(struct hns3_adapter *hns) switch (hw->reset.level) { case HNS3_FUNC_RESET: - ret = hns3_func_reset_cmd(hw, 0); + ret = hns3_func_reset_cmd(hw, HNS3_PF_FUNC_ID); if (ret) return ret; @@ -5054,6 +5321,7 @@ hns3_prepare_reset(struct hns3_adapter *hns) hw->reset.stats.request_cnt++; break; case HNS3_IMP_RESET: + hns3_record_imp_error(hns); reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG); hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val | BIT(HNS3_VECTOR0_IMP_RESET_INT_B)); @@ -5174,6 +5442,10 @@ hns3_restore_conf(struct hns3_adapter *hns) if (ret) goto err_promisc; + ret = hns3_restore_gro_conf(hw); + if (ret) + goto err_promisc; + if (hns->hw.adapter_state == HNS3_NIC_STARTED) { ret = hns3_do_start(hns, false); if (ret) @@ -5253,6 +5525,7 @@ hns3_reset_service(void *param) } static const struct eth_dev_ops hns3_eth_dev_ops = { + .dev_configure = hns3_dev_configure, .dev_start = hns3_dev_start, .dev_stop = hns3_dev_stop, .dev_close = hns3_dev_close, @@ -5276,7 +5549,10 @@ static const struct eth_dev_ops hns3_eth_dev_ops = { .tx_queue_release = hns3_dev_tx_queue_release, .rx_queue_intr_enable = hns3_dev_rx_queue_intr_enable, .rx_queue_intr_disable = hns3_dev_rx_queue_intr_disable, - .dev_configure = hns3_dev_configure, + .rxq_info_get = hns3_rxq_info_get, + 
.txq_info_get = hns3_txq_info_get, + .rx_burst_mode_get = hns3_rx_burst_mode_get, + .tx_burst_mode_get = hns3_tx_burst_mode_get, .flow_ctrl_get = hns3_flow_ctrl_get, .flow_ctrl_set = hns3_flow_ctrl_set, .priority_flow_ctrl_set = hns3_priority_flow_ctrl_set, @@ -5312,14 +5588,14 @@ static const struct hns3_reset_ops hns3_reset_ops = { static int hns3_dev_init(struct rte_eth_dev *eth_dev) { - struct rte_device *dev = eth_dev->device; - struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev); struct hns3_adapter *hns = eth_dev->data->dev_private; + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + struct rte_ether_addr *eth_addr; struct hns3_hw *hw = &hns->hw; - uint16_t device_id = pci_dev->id.device_id; int ret; PMD_INIT_FUNC_TRACE(); + eth_dev->process_private = (struct hns3_process_private *) rte_zmalloc_socket("hns3_filter_list", sizeof(struct hns3_process_private), @@ -5334,19 +5610,26 @@ hns3_dev_init(struct rte_eth_dev *eth_dev) hns3_set_rxtx_function(eth_dev); eth_dev->dev_ops = &hns3_eth_dev_ops; if (rte_eal_process_type() != RTE_PROC_PRIMARY) { - hns3_mp_init_secondary(); + ret = hns3_mp_init_secondary(); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to init for secondary " + "process, ret = %d", ret); + goto err_mp_init_secondary; + } + hw->secondary_cnt++; return 0; } - hns3_mp_init_primary(); - hw->adapter_state = HNS3_NIC_UNINITIALIZED; - - if (device_id == HNS3_DEV_ID_25GE_RDMA || - device_id == HNS3_DEV_ID_50GE_RDMA || - device_id == HNS3_DEV_ID_100G_RDMA_MACSEC) - hns3_set_bit(hw->flag, HNS3_DEV_SUPPORT_DCB_B, 1); + ret = hns3_mp_init_primary(); + if (ret) { + PMD_INIT_LOG(ERR, + "Failed to init for primary process, ret = %d", + ret); + goto err_mp_init_primary; + } + hw->adapter_state = HNS3_NIC_UNINITIALIZED; hns->is_vf = false; hw->data = eth_dev->data; @@ -5380,6 +5663,15 @@ hns3_dev_init(struct rte_eth_dev *eth_dev) goto err_rte_zmalloc; } + eth_addr = (struct rte_ether_addr *)hw->mac.mac_addr; + if (!rte_is_valid_assigned_ether_addr(eth_addr)) { + rte_eth_random_addr(hw->mac.mac_addr); + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + (struct rte_ether_addr *)hw->mac.mac_addr); + hns3_warn(hw, "default mac_addr from firmware is an invalid " + "unicast address, using random MAC address %s", + mac_str); + } rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr, ð_dev->data->mac_addrs[0]); @@ -5406,7 +5698,12 @@ err_rte_zmalloc: err_init_pf: rte_free(hw->reset.wait_data); + err_init_reset: + hns3_mp_uninit_primary(); + +err_mp_init_primary: +err_mp_init_secondary: eth_dev->dev_ops = NULL; eth_dev->rx_pkt_burst = NULL; eth_dev->tx_pkt_burst = NULL; @@ -5459,6 +5756,7 @@ static const struct rte_pci_id pci_id_hns3_map[] = { { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE_RDMA) }, { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_50GE_RDMA) }, { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_MACSEC) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_200G_RDMA) }, { .vendor_id = 0, /* sentinel */ }, }; @@ -5472,13 +5770,5 @@ static struct rte_pci_driver rte_hns3_pmd = { RTE_PMD_REGISTER_PCI(net_hns3, rte_hns3_pmd); RTE_PMD_REGISTER_PCI_TABLE(net_hns3, pci_id_hns3_map); RTE_PMD_REGISTER_KMOD_DEP(net_hns3, "* igb_uio | vfio-pci"); - -RTE_INIT(hns3_init_log) -{ - hns3_logtype_init = rte_log_register("pmd.net.hns3.init"); - if (hns3_logtype_init >= 0) - rte_log_set_level(hns3_logtype_init, RTE_LOG_NOTICE); - hns3_logtype_driver = rte_log_register("pmd.net.hns3.driver"); - if (hns3_logtype_driver >= 0) - rte_log_set_level(hns3_logtype_driver, 
RTE_LOG_NOTICE); -} +RTE_LOG_REGISTER(hns3_logtype_init, pmd.net.hns3.init, NOTICE); +RTE_LOG_REGISTER(hns3_logtype_driver, pmd.net.hns3.driver, NOTICE);
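
A note on the hns3_check_event_cause() rework near the top of this patch: the MSI-x error decision now also consults the RAS status register, and both values are OR-ed into the clear value. The following standalone sketch mirrors just that decision logic; the mask values and the stubbed register reads are invented for illustration (the real HNS3_VECTOR0_REG_MSIX_MASK and HNS3_RAS_REG_NFE_MASK live in the driver headers).

#include <stdint.h>
#include <stdio.h>

/* Illustrative masks only; the real values are in hns3_ethdev.h. */
#define MSIX_MASK    0x1FFF0000U
#define RAS_NFE_MASK 0x0000FFFFU

enum evt_cause { EVENT_ERR, EVENT_OTHER };

/* Stubbed register reads standing in for hns3_read_dev(). */
static uint32_t read_vector0_int_stats(void) { return 0x00010000U; }
static uint32_t read_ras_pf_other_int_sts(void) { return 0x0U; }

static enum evt_cause check_event_cause(uint32_t *clearval)
{
	uint32_t vector0_int_stats = read_vector0_int_stats();
	uint32_t hw_err_src_reg = read_ras_pf_other_int_sts();

	/*
	 * An error event is raised if either the vector0 MSI-x error bits
	 * or the RAS non-fatal-error bits are set; the two registers are
	 * reported together so the handler sees the full picture.
	 */
	if ((vector0_int_stats & MSIX_MASK) ||
	    (hw_err_src_reg & RAS_NFE_MASK)) {
		*clearval = vector0_int_stats | hw_err_src_reg;
		return EVENT_ERR;
	}

	*clearval = vector0_int_stats;
	return EVENT_OTHER;
}

int main(void)
{
	uint32_t clearval;
	enum evt_cause cause = check_event_cause(&clearval);

	printf("cause=%s clearval=0x%x\n",
	       cause == EVENT_ERR ? "ERR" : "OTHER", (unsigned)clearval);
	return 0;
}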
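
Several hunks replace the literal 0 with HNS3_PF_FUNC_ID when filling per-function VLAN/MAC command descriptors. The bitmap arithmetic they rely on (vf_offset selects a descriptor slot, the remainder selects a bit) can be checked in isolation. This sketch mirrors the arithmetic in those hunks and assumes the usual 64-functions-per-command, 8-bits-per-byte layout for HNS3_VF_NUM_PER_CMD and HNS3_VF_NUM_PER_BYTE.

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: 64 functions per command, 8 per bitmap byte. */
#define VF_NUM_PER_CMD  64
#define VF_NUM_PER_BYTE 8
#define PF_FUNC_ID      0

int main(void)
{
	uint16_t vport_id = PF_FUNC_ID;
	uint8_t vf_bitmap[VF_NUM_PER_CMD / VF_NUM_PER_BYTE] = {0};

	/* Which descriptor slot holds this function, and which bit. */
	uint8_t vf_offset = vport_id / VF_NUM_PER_CMD;
	uint8_t bitmap = 1 << (vport_id % VF_NUM_PER_BYTE);

	vf_bitmap[vf_offset] = bitmap;
	printf("vf_offset=%u bitmap=0x%02x\n",
	       (unsigned)vf_offset, (unsigned)bitmap);
	return 0;
}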
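
The new pvid_en_state_change test in hns3_vlan_pvid_set() reduces to a single comparison: the state changes exactly when the requested on/off differs from the cached enable state. Note the hunk calls hns3_update_all_queues_pvid_state() only after hw->lock is dropped. A minimal sketch of just the state-change test:

#include <stdbool.h>
#include <stdio.h>

#define PORT_BASE_VLAN_DISABLE 0
#define PORT_BASE_VLAN_ENABLE  1

/* Returns true when the PVID enable state would change. */
static bool pvid_state_changes(int on, int pvid_state)
{
	/*
	 * Equivalent to the two-branch test in the hunk: no change when
	 * enabling while already enabled, or disabling while disabled.
	 */
	return (!!on) != (pvid_state == PORT_BASE_VLAN_ENABLE);
}

int main(void)
{
	printf("%d\n", pvid_state_changes(1, PORT_BASE_VLAN_DISABLE)); /* 1 */
	printf("%d\n", pvid_state_changes(0, PORT_BASE_VLAN_DISABLE)); /* 0 */
	return 0;
}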
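
The hns3_init_ring_with_vector() hunk makes the reserved-vector behaviour conditional on the interrupt mapping mode and caps intr_tqps_num by the queue count. A standalone sketch of that arithmetic, with RTE_MIN redefined locally so the example compiles outside DPDK:

#include <stdint.h>
#include <stdio.h>

enum mapping_mode { INTR_MAPPING_VEC_RSV_ONE, INTR_MAPPING_VEC_ALL };

#define RTE_MIN(a, b) ((a) < (b) ? (a) : (b)) /* stand-in for rte_common.h */

/* How many vectors are usable for queue interrupts. */
static uint16_t intr_tqps_num(uint16_t num_msi, uint16_t tqps_num,
			      enum mapping_mode mode)
{
	uint16_t vec = num_msi - 1; /* vector 0 is for misc interrupt */

	if (mode == INTR_MAPPING_VEC_RSV_ONE)
		vec = vec - 1; /* last vector kept reserved, e.g. on HIP08 */

	return RTE_MIN(vec, tqps_num);
}

int main(void)
{
	printf("%u\n", (unsigned)intr_tqps_num(17, 16, INTR_MAPPING_VEC_RSV_ONE)); /* 15 */
	printf("%u\n", (unsigned)intr_tqps_num(17, 16, INTR_MAPPING_VEC_ALL));     /* 16 */
	return 0;
}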
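
hns3_dev_configure() now derives the hardware GRO switch from the application's DEV_RX_OFFLOAD_TCP_LRO request before calling hns3_config_gro(). The mapping itself is a one-liner; in this sketch the offload flag value is illustrative only (the real flag comes from rte_ethdev.h):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative value standing in for DEV_RX_OFFLOAD_TCP_LRO. */
#define RX_OFFLOAD_TCP_LRO (1ULL << 16)

int main(void)
{
	uint64_t rx_offloads = RX_OFFLOAD_TCP_LRO;

	/* Hardware GRO simply follows the application's LRO request. */
	bool gro_en = (rx_offloads & RX_OFFLOAD_TCP_LRO) != 0;

	printf("gro_en=%d\n", gro_en);
	return 0;
}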
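
hns3_fw_version_get() now unpacks the firmware version from one packed 32-bit word. Note the hunk formats the uint32_t values returned by hns3_get_field() with %lu, which is not portable; a sketch of the same unpacking with PRIu32, assuming one byte per field with byte 3 in the most significant position (the real HNS3_FW_VERSION_BYTE*_M/_S masks and shifts live in the driver headers):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed shifts: one 8-bit field per byte, byte3 most significant. */
#define FW_BYTE3_S 24
#define FW_BYTE2_S 16
#define FW_BYTE1_S 8
#define FW_BYTE0_S 0

static uint32_t fw_field(uint32_t v, unsigned int shift)
{
	return (v >> shift) & 0xFFU;
}

int main(void)
{
	uint32_t version = 0x01080203U; /* example packed value */
	char buf[32];

	snprintf(buf, sizeof(buf),
		 "%" PRIu32 ".%" PRIu32 ".%" PRIu32 ".%" PRIu32,
		 fw_field(version, FW_BYTE3_S), fw_field(version, FW_BYTE2_S),
		 fw_field(version, FW_BYTE1_S), fw_field(version, FW_BYTE0_S));
	printf("%s\n", buf); /* prints 1.8.2.3 */
	return 0;
}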
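
hns3_get_capability() keys the interrupt mapping and coalescing capabilities off the PCI revision id, falling back to built-in defaults for pre-HIP09 parts instead of issuing HNS3_OPC_QUERY_DEV_SPECS. A compact sketch of that selection; the revision constants here are placeholders, not necessarily the values from hns3_ethdev.h:

#include <stdint.h>
#include <stdio.h>

#define PCI_REVISION_ID_HIP08_B 0x21 /* placeholder values */
#define PCI_REVISION_ID_HIP09_A 0x30

struct intr_caps {
	const char *mapping_mode;
	const char *coalesce_mode;
};

/* Pick interrupt capabilities from the PCI revision id. */
static struct intr_caps get_intr_caps(uint8_t revision)
{
	if (revision < PCI_REVISION_ID_HIP09_A)
		return (struct intr_caps){"VEC_RSV_ONE", "NON_QL"};
	return (struct intr_caps){"VEC_ALL", "QL"};
}

int main(void)
{
	struct intr_caps caps = get_intr_caps(PCI_REVISION_ID_HIP08_B);

	printf("%s/%s\n", caps.mapping_mode, caps.coalesce_mode);
	return 0;
}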
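
The new hns3_record_imp_error() follows a read, test, log, clear-bit, write-back pattern on HNS3_VECTOR0_OTER_EN_REG before an IMP reset is requested. The same pattern with the MMIO access stubbed out for illustration:

#include <stdint.h>
#include <stdio.h>

#define IMP_CMDQ_ERR_B  4U
#define IMP_RD_POISON_B 5U

static uint32_t fake_reg = 1U << IMP_RD_POISON_B; /* stands in for MMIO */

static uint32_t read_reg(void) { return fake_reg; }
static void write_reg(uint32_t v) { fake_reg = v; }

int main(void)
{
	uint32_t val = read_reg();

	/* Check, log, clear the status bit, write it back. */
	if (val & (1U << IMP_RD_POISON_B)) {
		printf("Detected IMP RD poison!\n");
		val &= ~(1U << IMP_RD_POISON_B);
		write_reg(val);
	}
	if (val & (1U << IMP_CMDQ_ERR_B)) {
		printf("Detected IMP CMDQ error!\n");
		val &= ~(1U << IMP_CMDQ_ERR_B);
		write_reg(val);
	}
	return 0;
}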
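
Finally, the two RTE_LOG_REGISTER lines above replace the hand-written RTE_INIT constructor this patch deletes. For reference, the removed registration was roughly equivalent to the following (mirroring the deleted code for one log type; the macro's exact expansion in rte_log.h may differ slightly):

#include <rte_common.h>
#include <rte_log.h>

int hns3_logtype_driver;

/* What RTE_LOG_REGISTER(hns3_logtype_driver, pmd.net.hns3.driver, NOTICE)
 * boils down to: register the type at startup and pick a default level.
 */
RTE_INIT(hns3_driver_log_init)
{
	hns3_logtype_driver = rte_log_register("pmd.net.hns3.driver");
	if (hns3_logtype_driver >= 0)
		rte_log_set_level(hns3_logtype_driver, RTE_LOG_NOTICE);
}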