diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 66a1d00b7b..4afee5d475 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -9,6 +9,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -17,6 +18,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -24,15 +26,15 @@
 #include "hns3_ethdev.h"
 #include "hns3_logs.h"
 #include "hns3_rxtx.h"
+#include "hns3_intr.h"
 #include "hns3_regs.h"
 #include "hns3_dcb.h"
+#include "hns3_mp.h"

 #define HNS3_DEFAULT_PORT_CONF_BURST_SIZE	32
 #define HNS3_DEFAULT_PORT_CONF_QUEUES_NUM	1

 #define HNS3_SERVICE_INTERVAL		1000000 /* us */
-#define HNS3_PORT_BASE_VLAN_DISABLE	0
-#define HNS3_PORT_BASE_VLAN_ENABLE	1
 #define HNS3_INVLID_PVID		0xFFFF

 #define HNS3_FILTER_TYPE_VF		0
@@ -47,33 +49,208 @@
 #define HNS3_FILTER_FE_INGRESS (HNS3_FILTER_FE_NIC_INGRESS_B \
 				| HNS3_FILTER_FE_ROCE_INGRESS_B)

-int hns3_logtype_init;
-int hns3_logtype_driver;
+/* Reset related Registers */
+#define HNS3_GLOBAL_RESET_BIT		0
+#define HNS3_CORE_RESET_BIT		1
+#define HNS3_IMP_RESET_BIT		2
+#define HNS3_FUN_RST_ING_B		0
+#define HNS3_VECTOR0_IMP_RESET_INT_B	1
+
+#define HNS3_RESET_WAIT_MS	100
+#define HNS3_RESET_WAIT_CNT	200
+
+enum hns3_evt_cause {
+	HNS3_VECTOR0_EVENT_RST,
+	HNS3_VECTOR0_EVENT_MBX,
+	HNS3_VECTOR0_EVENT_ERR,
+	HNS3_VECTOR0_EVENT_OTHER,
+};
+
+static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns,
+						  uint64_t *levels);
 static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
 static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
 				    int on);
+static int hns3_update_speed_duplex(struct rte_eth_dev *eth_dev);
+
+static int hns3_add_mc_addr(struct hns3_hw *hw,
+			    struct rte_ether_addr *mac_addr);
+static int hns3_remove_mc_addr(struct hns3_hw *hw,
+			       struct rte_ether_addr *mac_addr);
+
+static void
+hns3_pf_disable_irq0(struct hns3_hw *hw)
+{
+	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0);
+}
+
+static void
+hns3_pf_enable_irq0(struct hns3_hw *hw)
+{
+	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
+}
+
+static enum hns3_evt_cause
+hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
+{
+	struct hns3_hw *hw = &hns->hw;
+	uint32_t vector0_int_stats;
+	uint32_t cmdq_src_val;
+	uint32_t val;
+	enum hns3_evt_cause ret;
+
+	/* fetch the events from their corresponding regs */
+	vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
+	cmdq_src_val = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
+
+	/*
+	 * Assumption: If by any chance reset and mailbox events are reported
+	 * together then we will only process reset event and defer the
+	 * processing of the mailbox events. Since, we would have not cleared
+	 * RX CMDQ event this time we would receive again another interrupt
+	 * from H/W just for the mailbox.
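+	 * For illustration (not from this patch): the deferral works because
+	 * the unacknowledged CMDQ RX source keeps vector 0 pending, so a
+	 * later pass falls through the reset checks and takes the mailbox
+	 * branch below:
+	 *     if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_val)
+	 *             ret = HNS3_VECTOR0_EVENT_MBX;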
+ */ + if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats) { /* IMP */ + rte_atomic16_set(&hw->reset.disable_cmd, 1); + hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending); + val = BIT(HNS3_VECTOR0_IMPRESET_INT_B); + if (clearval) { + hw->reset.stats.imp_cnt++; + hns3_warn(hw, "IMP reset detected, clear reset status"); + } else { + hns3_schedule_delayed_reset(hns); + hns3_warn(hw, "IMP reset detected, don't clear reset status"); + } + + ret = HNS3_VECTOR0_EVENT_RST; + goto out; + } + + /* Global reset */ + if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats) { + rte_atomic16_set(&hw->reset.disable_cmd, 1); + hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending); + val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B); + if (clearval) { + hw->reset.stats.global_cnt++; + hns3_warn(hw, "Global reset detected, clear reset status"); + } else { + hns3_schedule_delayed_reset(hns); + hns3_warn(hw, "Global reset detected, don't clear reset status"); + } + + ret = HNS3_VECTOR0_EVENT_RST; + goto out; + } + + /* check for vector0 msix event source */ + if (vector0_int_stats & HNS3_VECTOR0_REG_MSIX_MASK) { + val = vector0_int_stats; + ret = HNS3_VECTOR0_EVENT_ERR; + goto out; + } + + /* check for vector0 mailbox(=CMDQ RX) event source */ + if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_val) { + cmdq_src_val &= ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B); + val = cmdq_src_val; + ret = HNS3_VECTOR0_EVENT_MBX; + goto out; + } + + if (clearval && (vector0_int_stats || cmdq_src_val)) + hns3_warn(hw, "surprise irq ector0_int_stats:0x%x cmdq_src_val:0x%x", + vector0_int_stats, cmdq_src_val); + val = vector0_int_stats; + ret = HNS3_VECTOR0_EVENT_OTHER; +out: + + if (clearval) + *clearval = val; + return ret; +} + +static void +hns3_clear_event_cause(struct hns3_hw *hw, uint32_t event_type, uint32_t regclr) +{ + if (event_type == HNS3_VECTOR0_EVENT_RST) + hns3_write_dev(hw, HNS3_MISC_RESET_STS_REG, regclr); + else if (event_type == HNS3_VECTOR0_EVENT_MBX) + hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr); +} + +static void +hns3_clear_all_event_cause(struct hns3_hw *hw) +{ + uint32_t vector0_int_stats; + vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG); + + if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats) + hns3_warn(hw, "Probe during IMP reset interrupt"); + + if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats) + hns3_warn(hw, "Probe during Global reset interrupt"); + + hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_RST, + BIT(HNS3_VECTOR0_IMPRESET_INT_B) | + BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) | + BIT(HNS3_VECTOR0_CORERESET_INT_B)); + hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_MBX, 0); +} + +static void +hns3_interrupt_handler(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + enum hns3_evt_cause event_cause; + uint32_t clearval = 0; + + /* Disable interrupt */ + hns3_pf_disable_irq0(hw); + + event_cause = hns3_check_event_cause(hns, &clearval); + + /* vector 0 interrupt is shared with reset and mailbox source events. 
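+	 * The dispatch below is priority ordered, hardware error first, then
+	 * reset, then mailbox, matching the order used by
+	 * hns3_check_event_cause.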
*/ + if (event_cause == HNS3_VECTOR0_EVENT_ERR) { + hns3_handle_msix_error(hns, &hw->reset.request); + hns3_schedule_reset(hns); + } else if (event_cause == HNS3_VECTOR0_EVENT_RST) + hns3_schedule_reset(hns); + else if (event_cause == HNS3_VECTOR0_EVENT_MBX) + hns3_dev_handle_mbx_msg(hw); + else + hns3_err(hw, "Received unknown event"); + + hns3_clear_event_cause(hw, event_cause, clearval); + /* Enable interrupt if it is not cause by reset */ + hns3_pf_enable_irq0(hw); +} static int hns3_set_port_vlan_filter(struct hns3_adapter *hns, uint16_t vlan_id, int on) { -#define HNS3_VLAN_OFFSET_160 160 +#define HNS3_VLAN_ID_OFFSET_STEP 160 +#define HNS3_VLAN_BYTE_SIZE 8 struct hns3_vlan_filter_pf_cfg_cmd *req; struct hns3_hw *hw = &hns->hw; uint8_t vlan_offset_byte_val; struct hns3_cmd_desc desc; uint8_t vlan_offset_byte; - uint8_t vlan_offset_160; + uint8_t vlan_offset_base; int ret; hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_PF_CFG, false); - vlan_offset_160 = vlan_id / HNS3_VLAN_OFFSET_160; - vlan_offset_byte = (vlan_id % HNS3_VLAN_OFFSET_160) / 8; - vlan_offset_byte_val = 1 << (vlan_id % 8); + vlan_offset_base = vlan_id / HNS3_VLAN_ID_OFFSET_STEP; + vlan_offset_byte = (vlan_id % HNS3_VLAN_ID_OFFSET_STEP) / + HNS3_VLAN_BYTE_SIZE; + vlan_offset_byte_val = 1 << (vlan_id % HNS3_VLAN_BYTE_SIZE); req = (struct hns3_vlan_filter_pf_cfg_cmd *)desc.data; - req->vlan_offset = vlan_offset_160; + req->vlan_offset = vlan_offset_base; req->vlan_cfg = on ? 0 : 1; req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; @@ -110,6 +287,11 @@ hns3_add_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id, struct hns3_hw *hw = &hns->hw; struct hns3_pf *pf = &hns->pf; + LIST_FOREACH(vlan_entry, &pf->vlan_list, next) { + if (vlan_entry->vlan_id == vlan_id) + return; + } + vlan_entry = rte_zmalloc("hns3_vlan_tbl", sizeof(*vlan_entry), 0); if (vlan_entry == NULL) { hns3_err(hw, "Failed to malloc hns3 vlan table"); @@ -123,9 +305,34 @@ hns3_add_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id, } static int -hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on) +hns3_restore_vlan_table(struct hns3_adapter *hns) { + struct hns3_user_vlan_table *vlan_entry; + struct hns3_hw *hw = &hns->hw; struct hns3_pf *pf = &hns->pf; + uint16_t vlan_id; + int ret = 0; + + if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE) + return hns3_vlan_pvid_configure(hns, + hw->port_base_vlan_cfg.pvid, 1); + + LIST_FOREACH(vlan_entry, &pf->vlan_list, next) { + if (vlan_entry->hd_tbl_status) { + vlan_id = vlan_entry->vlan_id; + ret = hns3_set_port_vlan_filter(hns, vlan_id, 1); + if (ret) + break; + } + } + + return ret; +} + +static int +hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on) +{ + struct hns3_hw *hw = &hns->hw; bool writen_to_tbl = false; int ret = 0; @@ -143,7 +350,7 @@ hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on) * vlan list. The vlan id in vlan list will be writen in vlan filter * table until port base vlan disabled */ - if (pf->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) { + if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) { ret = hns3_set_port_vlan_filter(hns, vlan_id, on); writen_to_tbl = true; } @@ -263,10 +470,9 @@ hns3_set_vlan_rx_offload_cfg(struct hns3_adapter *hns, /* * In current version VF is not supported when PF is driven by DPDK - * driver, the PF-related vf_id is 0, just need to configure parameters - * for vport_id 0. 
+ * driver, just need to configure parameters for PF vport. */ - vport_id = 0; + vport_id = HNS3_PF_FUNC_ID; req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD; bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE); req->vf_bitmap[req->vf_offset] = bitmap; @@ -297,11 +503,10 @@ static int hns3_en_hw_strip_rxvtag(struct hns3_adapter *hns, bool enable) { struct hns3_rx_vtag_cfg rxvlan_cfg; - struct hns3_pf *pf = &hns->pf; struct hns3_hw *hw = &hns->hw; int ret; - if (pf->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) { + if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) { rxvlan_cfg.strip_tag1_en = false; rxvlan_cfg.strip_tag2_en = enable; } else { @@ -347,22 +552,40 @@ hns3_set_vlan_filter_ctrl(struct hns3_hw *hw, uint8_t vlan_type, } static int -hns3_enable_vlan_filter(struct hns3_adapter *hns, bool enable) +hns3_vlan_filter_init(struct hns3_adapter *hns) { struct hns3_hw *hw = &hns->hw; int ret; ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_VF, - HNS3_FILTER_FE_EGRESS, false, 0); + HNS3_FILTER_FE_EGRESS, false, + HNS3_PF_FUNC_ID); if (ret) { - hns3_err(hw, "hns3 enable filter fail, ret =%d", ret); + hns3_err(hw, "failed to init vf vlan filter, ret = %d", ret); return ret; } ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT, - HNS3_FILTER_FE_INGRESS, enable, 0); + HNS3_FILTER_FE_INGRESS, false, + HNS3_PF_FUNC_ID); + if (ret) + hns3_err(hw, "failed to init port vlan filter, ret = %d", ret); + + return ret; +} + +static int +hns3_enable_vlan_filter(struct hns3_adapter *hns, bool enable) +{ + struct hns3_hw *hw = &hns->hw; + int ret; + + ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT, + HNS3_FILTER_FE_INGRESS, enable, + HNS3_PF_FUNC_ID); if (ret) - hns3_err(hw, "hns3 enable filter fail, ret =%d", ret); + hns3_err(hw, "failed to %s port vlan filter, ret = %d", + enable ? "enable" : "disable", ret); return ret; } @@ -380,6 +603,23 @@ hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask) rte_spinlock_lock(&hw->lock); rxmode = &dev->data->dev_conf.rxmode; tmp_mask = (unsigned int)mask; + if (tmp_mask & ETH_VLAN_FILTER_MASK) { + /* ignore vlan filter configuration during promiscuous mode */ + if (!dev->data->promiscuous) { + /* Enable or disable VLAN filter */ + enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER ? + true : false; + + ret = hns3_enable_vlan_filter(hns, enable); + if (ret) { + rte_spinlock_unlock(&hw->lock); + hns3_err(hw, "failed to %s rx filter, ret = %d", + enable ? "enable" : "disable", ret); + return ret; + } + } + } + if (tmp_mask & ETH_VLAN_STRIP_MASK) { /* Enable or disable VLAN stripping */ enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP ? @@ -388,7 +628,8 @@ hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask) ret = hns3_en_hw_strip_rxvtag(hns, enable); if (ret) { rte_spinlock_unlock(&hw->lock); - hns3_err(hw, "failed to enable rx strip, ret =%d", ret); + hns3_err(hw, "failed to %s rx strip, ret = %d", + enable ? "enable" : "disable", ret); return ret; } } @@ -430,10 +671,9 @@ hns3_set_vlan_tx_offload_cfg(struct hns3_adapter *hns, /* * In current version VF is not supported when PF is driven by DPDK - * driver, the PF-related vf_id is 0, just need to configure parameters - * for vport_id 0. + * driver, just need to configure parameters for PF vport. 
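+	 * For illustration (not from this patch, and assuming HNS3_PF_FUNC_ID
+	 * is 0): the bitmap math below then reduces to
+	 *     req->vf_offset = 0 / HNS3_VF_NUM_PER_CMD;      i.e. 0
+	 *     bitmap = 1 << (0 % HNS3_VF_NUM_PER_BYTE);      i.e. 0x1
+	 * so the PF occupies bit 0 of vf_bitmap[0].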
*/ - vport_id = 0; + vport_id = HNS3_PF_FUNC_ID; req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD; bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE); req->vf_bitmap[req->vf_offset] = bitmap; @@ -483,12 +723,12 @@ hns3_vlan_txvlan_cfg(struct hns3_adapter *hns, uint16_t port_base_vlan_state, static void hns3_store_port_base_vlan_info(struct hns3_adapter *hns, uint16_t pvid, int on) { - struct hns3_pf *pf = &hns->pf; + struct hns3_hw *hw = &hns->hw; - pf->port_base_vlan_cfg.state = on ? + hw->port_base_vlan_cfg.state = on ? HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE; - pf->port_base_vlan_cfg.pvid = pvid; + hw->port_base_vlan_cfg.pvid = pvid; } static void @@ -532,13 +772,12 @@ static void hns3_remove_all_vlan_table(struct hns3_adapter *hns) { struct hns3_hw *hw = &hns->hw; - struct hns3_pf *pf = &hns->pf; int ret; hns3_rm_all_vlan_table(hns, true); - if (pf->port_base_vlan_cfg.pvid != HNS3_INVLID_PVID) { + if (hw->port_base_vlan_cfg.pvid != HNS3_INVLID_PVID) { ret = hns3_set_port_vlan_filter(hns, - pf->port_base_vlan_cfg.pvid, 0); + hw->port_base_vlan_cfg.pvid, 0); if (ret) { hns3_err(hw, "Failed to remove all vlan table, ret =%d", ret); @@ -552,7 +791,6 @@ hns3_update_vlan_filter_entries(struct hns3_adapter *hns, uint16_t port_base_vlan_state, uint16_t new_pvid, uint16_t old_pvid) { - struct hns3_pf *pf = &hns->pf; struct hns3_hw *hw = &hns->hw; int ret = 0; @@ -580,32 +818,35 @@ hns3_update_vlan_filter_entries(struct hns3_adapter *hns, } } - if (new_pvid == pf->port_base_vlan_cfg.pvid) + if (new_pvid == hw->port_base_vlan_cfg.pvid) hns3_add_all_vlan_table(hns); return ret; } static int -hns3_en_rx_strip_all(struct hns3_adapter *hns, int on) +hns3_en_pvid_strip(struct hns3_adapter *hns, int on) { + struct hns3_rx_vtag_cfg *old_cfg = &hns->pf.vtag_config.rx_vcfg; struct hns3_rx_vtag_cfg rx_vlan_cfg; - struct hns3_hw *hw = &hns->hw; bool rx_strip_en; int ret; - rx_strip_en = on ? true : false; - rx_vlan_cfg.strip_tag1_en = rx_strip_en; - rx_vlan_cfg.strip_tag2_en = rx_strip_en; + rx_strip_en = old_cfg->rx_vlan_offload_en ? true : false; + if (on) { + rx_vlan_cfg.strip_tag1_en = rx_strip_en; + rx_vlan_cfg.strip_tag2_en = true; + } else { + rx_vlan_cfg.strip_tag1_en = false; + rx_vlan_cfg.strip_tag2_en = rx_strip_en; + } rx_vlan_cfg.vlan1_vlan_prionly = false; rx_vlan_cfg.vlan2_vlan_prionly = false; - rx_vlan_cfg.rx_vlan_offload_en = rx_strip_en; + rx_vlan_cfg.rx_vlan_offload_en = old_cfg->rx_vlan_offload_en; ret = hns3_set_vlan_rx_offload_cfg(hns, &rx_vlan_cfg); - if (ret) { - hns3_err(hw, "enable strip rx failed, ret =%d", ret); + if (ret) return ret; - } hns3_update_rx_offload_cfg(hns, &rx_vlan_cfg); return ret; @@ -614,17 +855,16 @@ hns3_en_rx_strip_all(struct hns3_adapter *hns, int on) static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, int on) { - struct hns3_pf *pf = &hns->pf; struct hns3_hw *hw = &hns->hw; uint16_t port_base_vlan_state; uint16_t old_pvid; int ret; - if (on == 0 && pvid != pf->port_base_vlan_cfg.pvid) { - if (pf->port_base_vlan_cfg.pvid != HNS3_INVLID_PVID) + if (on == 0 && pvid != hw->port_base_vlan_cfg.pvid) { + if (hw->port_base_vlan_cfg.pvid != HNS3_INVLID_PVID) hns3_warn(hw, "Invalid operation! 
As current pvid set " "is %u, disable pvid %u is invalid", - pf->port_base_vlan_cfg.pvid, pvid); + hw->port_base_vlan_cfg.pvid, pvid); return 0; } @@ -632,19 +872,21 @@ hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, int on) HNS3_PORT_BASE_VLAN_DISABLE; ret = hns3_vlan_txvlan_cfg(hns, port_base_vlan_state, pvid); if (ret) { - hns3_err(hw, "Failed to config tx vlan, ret =%d", ret); + hns3_err(hw, "failed to config tx vlan for pvid, ret = %d", + ret); return ret; } - ret = hns3_en_rx_strip_all(hns, on); + ret = hns3_en_pvid_strip(hns, on); if (ret) { - hns3_err(hw, "Failed to config rx vlan strip, ret =%d", ret); + hns3_err(hw, "failed to config rx vlan strip for pvid, " + "ret = %d", ret); return ret; } if (pvid == HNS3_INVLID_PVID) goto out; - old_pvid = pf->port_base_vlan_cfg.pvid; + old_pvid = hw->port_base_vlan_cfg.pvid; ret = hns3_update_vlan_filter_entries(hns, port_base_vlan_state, pvid, old_pvid); if (ret) { @@ -663,22 +905,44 @@ hns3_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on) { struct hns3_adapter *hns = dev->data->dev_private; struct hns3_hw *hw = &hns->hw; + bool pvid_en_state_change; + uint16_t pvid_state; int ret; + if (pvid > RTE_ETHER_MAX_VLAN_ID) { + hns3_err(hw, "Invalid vlan_id = %u > %d", pvid, + RTE_ETHER_MAX_VLAN_ID); + return -EINVAL; + } + + /* + * If PVID configuration state change, should refresh the PVID + * configuration state in struct hns3_tx_queue/hns3_rx_queue. + */ + pvid_state = hw->port_base_vlan_cfg.state; + if ((on && pvid_state == HNS3_PORT_BASE_VLAN_ENABLE) || + (!on && pvid_state == HNS3_PORT_BASE_VLAN_DISABLE)) + pvid_en_state_change = false; + else + pvid_en_state_change = true; + rte_spinlock_lock(&hw->lock); ret = hns3_vlan_pvid_configure(hns, pvid, on); rte_spinlock_unlock(&hw->lock); - return ret; + if (ret) + return ret; + + if (pvid_en_state_change) + hns3_update_all_queues_pvid_state(hw); + + return 0; } static void init_port_base_vlan_info(struct hns3_hw *hw) { - struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); - struct hns3_pf *pf = &hns->pf; - - pf->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE; - pf->port_base_vlan_cfg.pvid = HNS3_INVLID_PVID; + hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE; + hw->port_base_vlan_cfg.pvid = HNS3_INVLID_PVID; } static int @@ -709,7 +973,7 @@ hns3_init_vlan_config(struct hns3_adapter *hns) if (rte_atomic16_read(&hw->reset.resetting) == 0) init_port_base_vlan_info(hw); - ret = hns3_enable_vlan_filter(hns, true); + ret = hns3_vlan_filter_init(hns); if (ret) { hns3_err(hw, "vlan init fail in pf, ret =%d", ret); return ret; @@ -746,6 +1010,40 @@ hns3_init_vlan_config(struct hns3_adapter *hns) return hns3_default_vlan_config(hns); } +static int +hns3_restore_vlan_conf(struct hns3_adapter *hns) +{ + struct hns3_pf *pf = &hns->pf; + struct hns3_hw *hw = &hns->hw; + uint64_t offloads; + bool enable; + int ret; + + if (!hw->data->promiscuous) { + /* restore vlan filter states */ + offloads = hw->data->dev_conf.rxmode.offloads; + enable = offloads & DEV_RX_OFFLOAD_VLAN_FILTER ? 
true : false; + ret = hns3_enable_vlan_filter(hns, enable); + if (ret) { + hns3_err(hw, "failed to restore vlan rx filter conf, " + "ret = %d", ret); + return ret; + } + } + + ret = hns3_set_vlan_rx_offload_cfg(hns, &pf->vtag_config.rx_vcfg); + if (ret) { + hns3_err(hw, "failed to restore vlan rx conf, ret = %d", ret); + return ret; + } + + ret = hns3_set_vlan_tx_offload_cfg(hns, &pf->vtag_config.tx_vcfg); + if (ret) + hns3_err(hw, "failed to restore vlan tx conf, ret = %d", ret); + + return ret; +} + static int hns3_dev_configure_vlan(struct rte_eth_dev *dev) { @@ -753,6 +1051,7 @@ hns3_dev_configure_vlan(struct rte_eth_dev *dev) struct rte_eth_dev_data *data = dev->data; struct rte_eth_txmode *txmode; struct hns3_hw *hw = &hns->hw; + int mask; int ret; txmode = &data->dev_conf.txmode; @@ -766,17 +1065,26 @@ hns3_dev_configure_vlan(struct rte_eth_dev *dev) txmode->hw_vlan_reject_untagged); /* Apply vlan offload setting */ - ret = hns3_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK); + mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK; + ret = hns3_vlan_offload_set(dev, mask); if (ret) { - hns3_err(hw, "dev config vlan Strip failed, ret =%d", ret); + hns3_err(hw, "dev config rx vlan offload failed, ret = %d", + ret); return ret; } + /* + * If pvid config is not set in rte_eth_conf, driver needn't to set + * VLAN pvid related configuration to hardware. + */ + if (txmode->pvid == 0 && txmode->hw_vlan_insert_pvid == 0) + return 0; + /* Apply pvid setting */ ret = hns3_vlan_pvid_set(dev, txmode->pvid, txmode->hw_vlan_insert_pvid); if (ret) - hns3_err(hw, "dev config vlan pvid(%d) failed, ret =%d", + hns3_err(hw, "dev config vlan pvid(%d) failed, ret = %d", txmode->pvid, ret); return ret; @@ -807,25 +1115,6 @@ hns3_config_tso(struct hns3_hw *hw, unsigned int tso_mss_min, return hns3_cmd_send(hw, &desc, 1); } -int -hns3_config_gro(struct hns3_hw *hw, bool en) -{ - struct hns3_cfg_gro_status_cmd *req; - struct hns3_cmd_desc desc; - int ret; - - hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GRO_GENERIC_CONFIG, false); - req = (struct hns3_cfg_gro_status_cmd *)desc.data; - - req->gro_en = rte_cpu_to_le_16(en ? 1 : 0); - - ret = hns3_cmd_send(hw, &desc, 1); - if (ret) - hns3_err(hw, "GRO hardware config cmd failed, ret = %d", ret); - - return ret; -} - static int hns3_set_umv_space(struct hns3_hw *hw, uint16_t space_size, uint16_t *allocated_size, bool is_alloc) @@ -1131,10 +1420,9 @@ hns3_add_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) /* * In current version VF is not supported when PF is driven by DPDK - * driver, the PF-related vf_id is 0, just need to configure parameters - * for vf_id 0. + * driver, just need to configure parameters for PF vport. 
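+	 * Unlike the VLAN-filter paths, which select the function through a
+	 * VF bitmap, here the function id is written into the egress port
+	 * field of the entry via hns3_set_field(egress_port,
+	 * HNS3_MAC_EPORT_VFID_M, HNS3_MAC_EPORT_VFID_S, vf_id) below.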
*/ - vf_id = 0; + vf_id = HNS3_PF_FUNC_ID; hns3_set_field(egress_port, HNS3_MAC_EPORT_VFID_M, HNS3_MAC_EPORT_VFID_S, vf_id); @@ -1175,21 +1463,83 @@ hns3_add_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) return ret; } +static int +hns3_add_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) +{ + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + struct rte_ether_addr *addr; + int ret; + int i; + + for (i = 0; i < hw->mc_addrs_num; i++) { + addr = &hw->mc_addrs[i]; + /* Check if there are duplicate addresses */ + if (rte_is_same_ether_addr(addr, mac_addr)) { + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + addr); + hns3_err(hw, "failed to add mc mac addr, same addrs" + "(%s) is added by the set_mc_mac_addr_list " + "API", mac_str); + return -EINVAL; + } + } + + ret = hns3_add_mc_addr(hw, mac_addr); + if (ret) { + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); + hns3_err(hw, "failed to add mc mac addr(%s), ret = %d", + mac_str, ret); + } + return ret; +} + +static int +hns3_remove_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) +{ + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + int ret; + + ret = hns3_remove_mc_addr(hw, mac_addr); + if (ret) { + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); + hns3_err(hw, "failed to remove mc mac addr(%s), ret = %d", + mac_str, ret); + } + return ret; +} + static int hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, - uint32_t idx, __attribute__ ((unused)) uint32_t pool) + uint32_t idx, __rte_unused uint32_t pool) { struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; int ret; rte_spinlock_lock(&hw->lock); - ret = hns3_add_uc_addr_common(hw, mac_addr); + + /* + * In hns3 network engine adding UC and MC mac address with different + * commands with firmware. We need to determine whether the input + * address is a UC or a MC address to call different commands. + * By the way, it is recommended calling the API function named + * rte_eth_dev_set_mc_addr_list to set the MC mac address, because + * using the rte_eth_dev_mac_addr_add API function to set MC mac address + * may affect the specifications of UC mac addresses. + */ + if (rte_is_multicast_ether_addr(mac_addr)) + ret = hns3_add_mc_addr_common(hw, mac_addr); + else + ret = hns3_add_uc_addr_common(hw, mac_addr); + if (ret) { rte_spinlock_unlock(&hw->lock); rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr); - hns3_err(hw, "Failed to add mac addr(%s): %d", mac_str, ret); + hns3_err(hw, "failed to add mac addr(%s), ret = %d", mac_str, + ret); return ret; } @@ -1211,7 +1561,7 @@ hns3_remove_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) if (!rte_is_valid_assigned_ether_addr(mac_addr)) { rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr); - hns3_err(hw, "Remove unicast mac addr err! addr(%s) invalid", + hns3_err(hw, "remove unicast mac addr err! 
addr(%s) invalid", mac_str); return -EINVAL; } @@ -1238,18 +1588,18 @@ hns3_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx) int ret; rte_spinlock_lock(&hw->lock); - ret = hns3_remove_uc_addr_common(hw, mac_addr); + + if (rte_is_multicast_ether_addr(mac_addr)) + ret = hns3_remove_mc_addr_common(hw, mac_addr); + else + ret = hns3_remove_uc_addr_common(hw, mac_addr); + rte_spinlock_unlock(&hw->lock); if (ret) { - rte_spinlock_unlock(&hw->lock); rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr); - hns3_err(hw, "Failed to remove mac addr(%s): %d", mac_str, ret); - return; + hns3_err(hw, "failed to remove mac addr(%s), ret = %d", mac_str, + ret); } - - if (idx == 0) - hw->mac.default_addr_setted = false; - rte_spinlock_unlock(&hw->lock); } static int @@ -1263,15 +1613,10 @@ hns3_set_default_mac_addr(struct rte_eth_dev *dev, bool rm_succes = false; int ret, ret_val; - /* check if mac addr is valid */ - if (!rte_is_valid_assigned_ether_addr(mac_addr)) { - rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, - mac_addr); - hns3_err(hw, "Failed to set mac addr, addr(%s) invalid", - mac_str); - return -EINVAL; - } - + /* + * It has been guaranteed that input parameter named mac_addr is valid + * address in the rte layer of DPDK framework. + */ oaddr = (struct rte_ether_addr *)hw->mac.mac_addr; default_addr_setted = hw->mac.default_addr_setted; if (default_addr_setted && !!rte_is_same_ether_addr(mac_addr, oaddr)) @@ -1350,19 +1695,22 @@ hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del) for (i = 0; i < HNS3_UC_MACADDR_NUM; i++) { addr = &hw->data->mac_addrs[i]; - if (!rte_is_valid_assigned_ether_addr(addr)) + if (rte_is_zero_ether_addr(addr)) continue; - if (del) - ret = hns3_remove_uc_addr_common(hw, addr); + if (rte_is_multicast_ether_addr(addr)) + ret = del ? hns3_remove_mc_addr(hw, addr) : + hns3_add_mc_addr(hw, addr); else - ret = hns3_add_uc_addr_common(hw, addr); + ret = del ? hns3_remove_uc_addr_common(hw, addr) : + hns3_add_uc_addr_common(hw, addr); + if (ret) { err = ret; rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, addr); - hns3_dbg(hw, - "Failed to %s mac addr(%s). ret:%d i:%d", - del ? "remove" : "restore", mac_str, ret, i); + hns3_err(hw, "failed to %s mac addr(%s) index:%d " + "ret = %d.", del ? "remove" : "restore", + mac_str, i, ret); } } return err; @@ -1409,7 +1757,7 @@ hns3_add_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) if (!rte_is_multicast_ether_addr(mac_addr)) { rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr); - hns3_err(hw, "Failed to add mc mac addr, addr(%s) invalid", + hns3_err(hw, "failed to add mc mac addr, addr(%s) invalid", mac_str); return -EINVAL; } @@ -1427,10 +1775,9 @@ hns3_add_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) /* * In current version VF is not supported when PF is driven by DPDK - * driver, the PF-related vf_id is 0, just need to configure parameters - * for vf_id 0. + * driver, just need to configure parameters for PF vport. 
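+	 * For a multicast entry the function id instead selects a bit in the
+	 * VFID bitmap of the MAC-VLAN table entry: hns3_update_desc_vfid(desc,
+	 * vf_id, false) below sets the bit, and hns3_remove_mc_addr clears it
+	 * by passing true.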
*/ - vf_id = 0; + vf_id = HNS3_PF_FUNC_ID; hns3_update_desc_vfid(desc, vf_id, false); ret = hns3_add_mac_vlan_tbl(hw, &req, desc); if (ret) { @@ -1438,7 +1785,7 @@ hns3_add_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) hns3_err(hw, "mc mac vlan table is full"); rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr); - hns3_err(hw, "Failed to add mc mac addr(%s): %d", mac_str, ret); + hns3_err(hw, "failed to add mc mac addr(%s): %d", mac_str, ret); } return ret; @@ -1470,10 +1817,9 @@ hns3_remove_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) /* * This mac addr exist, remove this handle's VFID for it. * In current version VF is not supported when PF is driven by - * DPDK driver, the PF-related vf_id is 0, just need to - * configure parameters for vf_id 0. + * DPDK driver, just need to configure parameters for PF vport. */ - vf_id = 0; + vf_id = HNS3_PF_FUNC_ID; hns3_update_desc_vfid(desc, vf_id, true); /* All the vfid is zero, so need to delete this entry */ @@ -1503,7 +1849,7 @@ hns3_set_mc_addr_chk_param(struct hns3_hw *hw, uint32_t j; if (nb_mc_addr > HNS3_MC_MACADDR_NUM) { - hns3_err(hw, "Failed to set mc mac addr, nb_mc_addr(%d) " + hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%d) " "invalid. valid range: 0~%d", nb_mc_addr, HNS3_MC_MACADDR_NUM); return -EINVAL; @@ -1516,7 +1862,7 @@ hns3_set_mc_addr_chk_param(struct hns3_hw *hw, rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, addr); hns3_err(hw, - "Failed to set mc mac addr, addr(%s) invalid.", + "failed to set mc mac addr, addr(%s) invalid.", mac_str); return -EINVAL; } @@ -1527,12 +1873,30 @@ hns3_set_mc_addr_chk_param(struct hns3_hw *hw, rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, addr); - hns3_err(hw, "Failed to set mc mac addr, " + hns3_err(hw, "failed to set mc mac addr, " "addrs invalid. two same addrs(%s).", mac_str); return -EINVAL; } } + + /* + * Check if there are duplicate addresses between mac_addrs + * and mc_addr_set + */ + for (j = 0; j < HNS3_UC_MACADDR_NUM; j++) { + if (rte_is_same_ether_addr(addr, + &hw->data->mac_addrs[j])) { + rte_ether_format_addr(mac_str, + RTE_ETHER_ADDR_FMT_SIZE, + addr); + hns3_err(hw, "failed to set mc mac addr, " + "addrs invalid. addrs(%s) has already " + "configured in mac_addr add API", + mac_str); + return -EINVAL; + } + } } return 0; @@ -1699,8 +2063,8 @@ hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del) err = ret; rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, addr); - hns3_dbg(hw, "%s mc mac addr: %s failed", - del ? "Remove" : "Restore", mac_str); + hns3_dbg(hw, "%s mc mac addr: %s failed for pf: ret = %d", + del ? 
"Remove" : "Restore", mac_str, ret); } } return err; @@ -1799,36 +2163,142 @@ hns3_check_dcb_cfg(struct rte_eth_dev *dev) } static int -hns3_dev_configure(struct rte_eth_dev *dev) +hns3_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, bool mmap, + enum hns3_ring_type queue_type, uint16_t queue_id) { - struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct hns3_rss_conf *rss_cfg = &hw->rss_info; - struct rte_eth_conf *conf = &dev->data->dev_conf; - enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode; - uint16_t nb_rx_q = dev->data->nb_rx_queues; - uint16_t nb_tx_q = dev->data->nb_tx_queues; - struct rte_eth_rss_conf rss_conf; - uint16_t mtu; - int ret; + struct hns3_cmd_desc desc; + struct hns3_ctrl_vector_chain_cmd *req = + (struct hns3_ctrl_vector_chain_cmd *)desc.data; + enum hns3_cmd_status status; + enum hns3_opcode_type op; + uint16_t tqp_type_and_id = 0; + const char *op_str; + uint16_t type; + uint16_t gl; + + op = mmap ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR; + hns3_cmd_setup_basic_desc(&desc, op, false); + req->int_vector_id = vector_id; + + if (queue_type == HNS3_RING_TYPE_RX) + gl = HNS3_RING_GL_RX; + else + gl = HNS3_RING_GL_TX; + + type = queue_type; + + hns3_set_field(tqp_type_and_id, HNS3_INT_TYPE_M, HNS3_INT_TYPE_S, + type); + hns3_set_field(tqp_type_and_id, HNS3_TQP_ID_M, HNS3_TQP_ID_S, queue_id); + hns3_set_field(tqp_type_and_id, HNS3_INT_GL_IDX_M, HNS3_INT_GL_IDX_S, + gl); + req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id); + req->int_cause_num = 1; + op_str = mmap ? "Map" : "Unmap"; + status = hns3_cmd_send(hw, &desc, 1); + if (status) { + hns3_err(hw, "%s TQP %d fail, vector_id is %d, status is %d.", + op_str, queue_id, req->int_vector_id, status); + return status; + } + + return 0; +} + +static int +hns3_init_ring_with_vector(struct hns3_hw *hw) +{ + uint8_t vec; + int ret; + int i; /* - * Hardware does not support where the number of rx and tx queues is - * not equal in hip08. + * In hns3 network engine, vector 0 is always the misc interrupt of this + * function, vector 1~N can be used respectively for the queues of the + * function. Tx and Rx queues with the same number share the interrupt + * vector. In the initialization clearing the all hardware mapping + * relationship configurations between queues and interrupt vectors is + * needed, so some error caused by the residual configurations, such as + * the unexpected Tx interrupt, can be avoid. Because of the hardware + * constraints in hns3 hardware engine, we have to implement clearing + * the mapping relationship configurations by binding all queues to the + * last interrupt vector and reserving the last interrupt vector. This + * method results in a decrease of the maximum queues when upper + * applications call the rte_eth_dev_configure API function to enable + * Rx interrupt. */ - if (nb_rx_q != nb_tx_q) { - hns3_err(hw, - "nb_rx_queues(%u) not equal with nb_tx_queues(%u)! " - "Hardware does not support this configuration!", - nb_rx_q, nb_tx_q); - return -EINVAL; + vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */ + /* vec - 1: the last interrupt is reserved */ + hw->intr_tqps_num = vec > hw->tqps_num ? hw->tqps_num : vec - 1; + for (i = 0; i < hw->intr_tqps_num; i++) { + /* + * Set gap limiter and rate limiter configuration of queue's + * interrupt. 
+ */ + hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX, + HNS3_TQP_INTR_GL_DEFAULT); + hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX, + HNS3_TQP_INTR_GL_DEFAULT); + hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT); + + ret = hns3_bind_ring_with_vector(hw, vec, false, + HNS3_RING_TYPE_TX, i); + if (ret) { + PMD_INIT_LOG(ERR, "PF fail to unbind TX ring(%d) with " + "vector: %d, ret=%d", i, vec, ret); + return ret; + } + + ret = hns3_bind_ring_with_vector(hw, vec, false, + HNS3_RING_TYPE_RX, i); + if (ret) { + PMD_INIT_LOG(ERR, "PF fail to unbind RX ring(%d) with " + "vector: %d, ret=%d", i, vec, ret); + return ret; + } + } + + return 0; +} + +static int +hns3_dev_configure(struct rte_eth_dev *dev) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct rte_eth_conf *conf = &dev->data->dev_conf; + enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode; + struct hns3_hw *hw = &hns->hw; + struct hns3_rss_conf *rss_cfg = &hw->rss_info; + uint16_t nb_rx_q = dev->data->nb_rx_queues; + uint16_t nb_tx_q = dev->data->nb_tx_queues; + struct rte_eth_rss_conf rss_conf; + uint16_t mtu; + bool gro_en; + int ret; + + /* + * Hardware does not support individually enable/disable/reset the Tx or + * Rx queue in hns3 network engine. Driver must enable/disable/reset Tx + * and Rx queues at the same time. When the numbers of Tx queues + * allocated by upper applications are not equal to the numbers of Rx + * queues, driver needs to setup fake Tx or Rx queues to adjust numbers + * of Tx/Rx queues. otherwise, network engine can not work as usual. But + * these fake queues are imperceptible, and can not be used by upper + * applications. + */ + ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q); + if (ret) { + hns3_err(hw, "Failed to set rx/tx fake queues: %d", ret); + return ret; } + hw->adapter_state = HNS3_NIC_CONFIGURING; if (conf->link_speeds & ETH_LINK_SPEED_FIXED) { hns3_err(hw, "setting link speed/duplex not supported"); - return -EINVAL; + ret = -EINVAL; + goto cfg_err; } - hw->adapter_state = HNS3_NIC_CONFIGURING; if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) { ret = hns3_check_dcb_cfg(dev); if (ret) @@ -1837,6 +2307,7 @@ hns3_dev_configure(struct rte_eth_dev *dev) /* When RSS is not configured, redirect the packet queue 0 */ if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) { + conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; rss_conf = conf->rx_adv_conf.rss_conf; if (rss_conf.rss_key == NULL) { rss_conf.rss_key = rss_cfg->key; @@ -1869,12 +2340,20 @@ hns3_dev_configure(struct rte_eth_dev *dev) if (ret) goto cfg_err; + /* config hardware GRO */ + gro_en = conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ? 
true : false; + ret = hns3_config_gro(hw, gro_en); + if (ret) + goto cfg_err; + hw->adapter_state = HNS3_NIC_CONFIGURED; return 0; cfg_err: + (void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0); hw->adapter_state = HNS3_NIC_INITIALIZED; + return ret; } @@ -1888,7 +2367,7 @@ hns3_set_mac_mtu(struct hns3_hw *hw, uint16_t new_mps) req = (struct hns3_config_max_frm_size_cmd *)desc.data; req->max_frm_size = rte_cpu_to_le_16(new_mps); - req->min_frm_size = HNS3_MIN_FRAME_LEN; + req->min_frm_size = RTE_ETHER_MIN_LEN; return hns3_cmd_send(hw, &desc, 1); } @@ -1905,12 +2384,10 @@ hns3_config_mtu(struct hns3_hw *hw, uint16_t mps) } ret = hns3_buffer_alloc(hw); - if (ret) { + if (ret) hns3_err(hw, "Failed to allocate buffer, ret = %d", ret); - return ret; - } - return 0; + return ret; } static int @@ -1961,13 +2438,22 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) { struct hns3_adapter *hns = eth_dev->data->dev_private; struct hns3_hw *hw = &hns->hw; + uint16_t queue_num = hw->tqps_num; + + /* + * In interrupt mode, 'max_rx_queues' is set based on the number of + * MSI-X interrupt resources of the hardware. + */ + if (hw->data->dev_conf.intr_conf.rxq == 1) + queue_num = hw->intr_tqps_num; - info->max_rx_queues = hw->tqps_num; + info->max_rx_queues = queue_num; info->max_tx_queues = hw->tqps_num; info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */ info->min_rx_bufsize = hw->rx_buf_len; info->max_mac_addrs = HNS3_UC_MACADDR_NUM; info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD; + info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE; info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM | @@ -1977,20 +2463,23 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) DEV_RX_OFFLOAD_KEEP_CRC | DEV_RX_OFFLOAD_SCATTER | DEV_RX_OFFLOAD_VLAN_STRIP | - DEV_RX_OFFLOAD_QINQ_STRIP | DEV_RX_OFFLOAD_VLAN_FILTER | - DEV_RX_OFFLOAD_VLAN_EXTEND | - DEV_RX_OFFLOAD_JUMBO_FRAME); + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_RSS_HASH | + DEV_RX_OFFLOAD_TCP_LRO); info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE; info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_SCTP_CKSUM | - DEV_TX_OFFLOAD_VLAN_INSERT | - DEV_TX_OFFLOAD_QINQ_INSERT | DEV_TX_OFFLOAD_MULTI_SEGS | - info->tx_queue_offload_capa); + DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GRE_TNL_TSO | + DEV_TX_OFFLOAD_GENEVE_TNL_TSO | + info->tx_queue_offload_capa | + hns3_txvlan_cap_get(hw)); info->rx_desc_lim = (struct rte_eth_desc_lim) { .nb_max = HNS3_MAX_RING_DESC, @@ -2026,9 +2515,18 @@ hns3_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version, { struct hns3_adapter *hns = eth_dev->data->dev_private; struct hns3_hw *hw = &hns->hw; + uint32_t version = hw->fw_version; int ret; - ret = snprintf(fw_version, fw_size, "0x%08x", hw->fw_version); + ret = snprintf(fw_version, fw_size, "%lu.%lu.%lu.%lu", + hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M, + HNS3_FW_VERSION_BYTE3_S), + hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M, + HNS3_FW_VERSION_BYTE2_S), + hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M, + HNS3_FW_VERSION_BYTE1_S), + hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M, + HNS3_FW_VERSION_BYTE0_S)); ret += 1; /* add the size of '\0' */ if (fw_size < (uint32_t)ret) return ret; @@ -2045,6 +2543,11 @@ hns3_dev_link_update(struct rte_eth_dev *eth_dev, struct hns3_mac *mac = &hw->mac; struct 
rte_eth_link new_link; + if (!hns3_is_reset_pending(hns)) { + hns3_update_speed_duplex(eth_dev); + hns3_update_link_status(hw); + } + memset(&new_link, 0, sizeof(new_link)); switch (mac->link_speed) { case ETH_SPEED_NUM_10M: @@ -2135,6 +2638,7 @@ hns3_query_pf_resource(struct hns3_hw *hw) hw->total_tqps_num = rte_le_to_cpu_16(req->tqp_num); pf->pkt_buf_size = rte_le_to_cpu_16(req->buf_size) << HNS3_BUF_UNIT_S; hw->tqps_num = RTE_MIN(hw->total_tqps_num, HNS3_MAX_TQP_NUM_PER_FUNC); + pf->func_num = rte_le_to_cpu_16(req->pf_own_fun_number); if (req->tx_buf_size) pf->tx_buf_size = @@ -2154,7 +2658,7 @@ hns3_query_pf_resource(struct hns3_hw *hw) hw->num_msi = hns3_get_field(rte_le_to_cpu_16(req->pf_intr_vector_number), - HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S); + HNS3_VEC_NUM_M, HNS3_VEC_NUM_S); return 0; } @@ -2290,6 +2794,36 @@ hns3_parse_speed(int speed_cmd, uint32_t *speed) return 0; } +static int +hns3_get_capability(struct hns3_hw *hw) +{ + struct rte_pci_device *pci_dev; + struct rte_eth_dev *eth_dev; + uint16_t device_id; + uint8_t revision; + int ret; + + eth_dev = &rte_eth_devices[hw->data->port_id]; + pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + device_id = pci_dev->id.device_id; + + if (device_id == HNS3_DEV_ID_25GE_RDMA || + device_id == HNS3_DEV_ID_50GE_RDMA || + device_id == HNS3_DEV_ID_100G_RDMA_MACSEC) + hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1); + + /* Get PCI revision id */ + ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN, + HNS3_PCI_REVISION_ID); + if (ret != HNS3_PCI_REVISION_ID_LEN) { + PMD_INIT_LOG(ERR, "failed to read pci revision id: %d", ret); + return -EIO; + } + hw->revision = revision; + + return 0; +} + static int hns3_get_board_configuration(struct hns3_hw *hw) { @@ -2311,6 +2845,7 @@ hns3_get_board_configuration(struct hns3_hw *hw) hw->mac.media_type = cfg.media_type; hw->rss_size_max = cfg.rss_size_max; + hw->rss_dis_flag = false; hw->rx_buf_len = cfg.rx_buf_len; memcpy(hw->mac.mac_addr, cfg.mac_addr, RTE_ETHER_ADDR_LEN); hw->mac.phy_addr = cfg.phy_addr; @@ -2364,6 +2899,13 @@ hns3_get_configuration(struct hns3_hw *hw) return ret; } + /* Get device capability */ + ret = hns3_get_capability(hw); + if (ret) { + PMD_INIT_LOG(ERR, "failed to get device capability: %d.", ret); + return ret; + } + /* Get pf resource */ ret = hns3_query_pf_resource(hw); if (ret) { @@ -2372,12 +2914,10 @@ hns3_get_configuration(struct hns3_hw *hw) } ret = hns3_get_board_configuration(hw); - if (ret) { + if (ret) PMD_INIT_LOG(ERR, "Failed to get board configuration: %d", ret); - return ret; - } - return 0; + return ret; } static int @@ -2411,6 +2951,7 @@ hns3_map_tqp(struct hns3_hw *hw) uint16_t tqps_num = hw->total_tqps_num; uint16_t func_id; uint16_t tqp_id; + bool is_pf; int num; int ret; int i; @@ -2421,11 +2962,12 @@ hns3_map_tqp(struct hns3_hw *hw) */ tqp_id = 0; num = DIV_ROUND_UP(hw->total_tqps_num, HNS3_MAX_TQP_NUM_PER_FUNC); - for (func_id = 0; func_id < num; func_id++) { + for (func_id = HNS3_PF_FUNC_ID; func_id < num; func_id++) { + is_pf = func_id == HNS3_PF_FUNC_ID ? 
true : false; for (i = 0; i < HNS3_MAX_TQP_NUM_PER_FUNC && tqp_id < tqps_num; i++) { ret = hns3_map_tqps_to_func(hw, func_id, tqp_id++, i, - true); + is_pf); if (ret) return ret; } @@ -3192,6 +3734,7 @@ hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t resp_code) "add mac ethertype failed for undefined, code=%d.", resp_code); return_status = -EIO; + break; } return return_status; @@ -3298,7 +3841,7 @@ hns3_cmd_set_promisc_mode(struct hns3_hw *hw, struct hns3_promisc_param *param) ret = hns3_cmd_send(hw, &desc, 1); if (ret) - PMD_INIT_LOG(ERR, "Set promisc mode fail, status is %d", ret); + PMD_INIT_LOG(ERR, "Set promisc mode fail, ret = %d", ret); return ret; } @@ -3309,36 +3852,114 @@ hns3_set_promisc_mode(struct hns3_hw *hw, bool en_uc_pmc, bool en_mc_pmc) struct hns3_promisc_param param; bool en_bc_pmc = true; uint8_t vf_id; - int ret; /* * In current version VF is not supported when PF is driven by DPDK - * driver, the PF-related vf_id is 0, just need to configure parameters - * for vf_id 0. + * driver, just need to configure parameters for PF vport. */ - vf_id = 0; + vf_id = HNS3_PF_FUNC_ID; hns3_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc, vf_id); - ret = hns3_cmd_set_promisc_mode(hw, ¶m); - if (ret) + return hns3_cmd_set_promisc_mode(hw, ¶m); +} + +static int +hns3_promisc_init(struct hns3_hw *hw) +{ + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_pf *pf = &hns->pf; + struct hns3_promisc_param param; + uint16_t func_id; + int ret; + + ret = hns3_set_promisc_mode(hw, false, false); + if (ret) { + PMD_INIT_LOG(ERR, "failed to set promisc mode, ret = %d", ret); return ret; + } + + /* + * In current version VFs are not supported when PF is driven by DPDK + * driver. After PF has been taken over by DPDK, the original VF will + * be invalid. So, there is a possibility of entry residues. It should + * clear VFs's promisc mode to avoid unnecessary bandwidth usage + * during init. + */ + for (func_id = HNS3_1ST_VF_FUNC_ID; func_id < pf->func_num; func_id++) { + hns3_promisc_param_init(¶m, false, false, false, func_id); + ret = hns3_cmd_set_promisc_mode(hw, ¶m); + if (ret) { + PMD_INIT_LOG(ERR, "failed to clear vf:%d promisc mode," + " ret = %d", func_id, ret); + return ret; + } + } return 0; } +static void +hns3_promisc_uninit(struct hns3_hw *hw) +{ + struct hns3_promisc_param param; + uint16_t func_id; + int ret; + + func_id = HNS3_PF_FUNC_ID; + + /* + * In current version VFs are not supported when PF is driven by + * DPDK driver, and VFs' promisc mode status has been cleared during + * init and their status will not change. So just clear PF's promisc + * mode status during uninit. + */ + hns3_promisc_param_init(¶m, false, false, false, func_id); + ret = hns3_cmd_set_promisc_mode(hw, ¶m); + if (ret) + PMD_INIT_LOG(ERR, "failed to clear promisc status during" + " uninit, ret = %d", ret); +} + static int hns3_dev_promiscuous_enable(struct rte_eth_dev *dev) { + bool allmulti = dev->data->all_multicast ? true : false; struct hns3_adapter *hns = dev->data->dev_private; struct hns3_hw *hw = &hns->hw; - bool en_mc_pmc = (dev->data->all_multicast == 1) ? 
true : false; - int ret = 0; + uint64_t offloads; + int err; + int ret; rte_spinlock_lock(&hw->lock); - ret = hns3_set_promisc_mode(hw, true, en_mc_pmc); + ret = hns3_set_promisc_mode(hw, true, true); + if (ret) { + rte_spinlock_unlock(&hw->lock); + hns3_err(hw, "failed to enable promiscuous mode, ret = %d", + ret); + return ret; + } + + /* + * When promiscuous mode was enabled, disable the vlan filter to let + * all packets coming in in the receiving direction. + */ + offloads = dev->data->dev_conf.rxmode.offloads; + if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) { + ret = hns3_enable_vlan_filter(hns, false); + if (ret) { + hns3_err(hw, "failed to enable promiscuous mode due to " + "failure to disable vlan filter, ret = %d", + ret); + err = hns3_set_promisc_mode(hw, false, allmulti); + if (err) + hns3_err(hw, "failed to restore promiscuous " + "status after disable vlan filter " + "failed during enabling promiscuous " + "mode, ret = %d", ret); + } + } + rte_spinlock_unlock(&hw->lock); - if (ret) - hns3_err(hw, "Failed to enable promiscuous mode: %d", ret); return ret; } @@ -3346,17 +3967,39 @@ hns3_dev_promiscuous_enable(struct rte_eth_dev *dev) static int hns3_dev_promiscuous_disable(struct rte_eth_dev *dev) { + bool allmulti = dev->data->all_multicast ? true : false; struct hns3_adapter *hns = dev->data->dev_private; struct hns3_hw *hw = &hns->hw; - bool en_mc_pmc = (dev->data->all_multicast == 1) ? true : false; - int ret = 0; + uint64_t offloads; + int err; + int ret; /* If now in all_multicast mode, must remain in all_multicast mode. */ rte_spinlock_lock(&hw->lock); - ret = hns3_set_promisc_mode(hw, false, en_mc_pmc); + ret = hns3_set_promisc_mode(hw, false, allmulti); + if (ret) { + rte_spinlock_unlock(&hw->lock); + hns3_err(hw, "failed to disable promiscuous mode, ret = %d", + ret); + return ret; + } + /* when promiscuous mode was disabled, restore the vlan filter status */ + offloads = dev->data->dev_conf.rxmode.offloads; + if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) { + ret = hns3_enable_vlan_filter(hns, true); + if (ret) { + hns3_err(hw, "failed to disable promiscuous mode due to" + " failure to restore vlan filter, ret = %d", + ret); + err = hns3_set_promisc_mode(hw, true, true); + if (err) + hns3_err(hw, "failed to restore promiscuous " + "status after enabling vlan filter " + "failed during disabling promiscuous " + "mode, ret = %d", ret); + } + } rte_spinlock_unlock(&hw->lock); - if (ret) - hns3_err(hw, "Failed to disable promiscuous mode: %d", ret); return ret; } @@ -3366,14 +4009,17 @@ hns3_dev_allmulticast_enable(struct rte_eth_dev *dev) { struct hns3_adapter *hns = dev->data->dev_private; struct hns3_hw *hw = &hns->hw; - bool en_uc_pmc = (dev->data->promiscuous == 1) ? true : false; - int ret = 0; + int ret; + + if (dev->data->promiscuous) + return 0; rte_spinlock_lock(&hw->lock); - ret = hns3_set_promisc_mode(hw, en_uc_pmc, true); + ret = hns3_set_promisc_mode(hw, false, true); rte_spinlock_unlock(&hw->lock); if (ret) - hns3_err(hw, "Failed to enable allmulticast mode: %d", ret); + hns3_err(hw, "failed to enable allmulticast mode, ret = %d", + ret); return ret; } @@ -3383,22 +4029,44 @@ hns3_dev_allmulticast_disable(struct rte_eth_dev *dev) { struct hns3_adapter *hns = dev->data->dev_private; struct hns3_hw *hw = &hns->hw; - bool en_uc_pmc = (dev->data->promiscuous == 1) ? true : false; - int ret = 0; + int ret; /* If now in promiscuous mode, must remain in all_multicast mode. 
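 	 * With promiscuous mode active the hardware receives all multicast
 	 * frames anyway, so this request returns early; the recorded
 	 * all_multicast state is re-applied by hns3_dev_promiscuous_disable
 	 * through hns3_set_promisc_mode(hw, false, allmulti).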
*/ - if (dev->data->promiscuous == 1) + if (dev->data->promiscuous) return 0; rte_spinlock_lock(&hw->lock); - ret = hns3_set_promisc_mode(hw, en_uc_pmc, false); + ret = hns3_set_promisc_mode(hw, false, false); rte_spinlock_unlock(&hw->lock); if (ret) - hns3_err(hw, "Failed to disable allmulticast mode: %d", ret); + hns3_err(hw, "failed to disable allmulticast mode, ret = %d", + ret); return ret; } +static int +hns3_dev_promisc_restore(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + bool allmulti = hw->data->all_multicast ? true : false; + int ret; + + if (hw->data->promiscuous) { + ret = hns3_set_promisc_mode(hw, true, true); + if (ret) + hns3_err(hw, "failed to restore promiscuous mode, " + "ret = %d", ret); + return ret; + } + + ret = hns3_set_promisc_mode(hw, false, allmulti); + if (ret) + hns3_err(hw, "failed to restore allmulticast mode, ret = %d", + ret); + return ret; +} + static int hns3_get_sfp_speed(struct hns3_hw *hw, uint32_t *speed) { @@ -3527,7 +4195,7 @@ hns3_get_mac_link_status(struct hns3_hw *hw) ret = hns3_cmd_send(hw, &desc, 1); if (ret) { hns3_err(hw, "get link status cmd failed %d", ret); - return ret; + return ETH_LINK_DOWN; } req = (struct hns3_link_status_cmd *)desc.data; @@ -3536,14 +4204,16 @@ hns3_get_mac_link_status(struct hns3_hw *hw) return !!link_status; } -static void +void hns3_update_link_status(struct hns3_hw *hw) { int state; state = hns3_get_mac_link_status(hw); - if (state != hw->mac.link_status) + if (state != hw->mac.link_status) { hw->mac.link_status = state; + hns3_warn(hw, "Link status change to %s!", state ? "up" : "down"); + } } static void @@ -3553,8 +4223,11 @@ hns3_service_handler(void *param) struct hns3_adapter *hns = eth_dev->data->dev_private; struct hns3_hw *hw = &hns->hw; - hns3_update_speed_duplex(eth_dev); - hns3_update_link_status(hw); + if (!hns3_is_reset_pending(hns)) { + hns3_update_speed_duplex(eth_dev); + hns3_update_link_status(hw); + } else + hns3_warn(hw, "Cancel the query when reset is pending"); rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev); } @@ -3589,9 +4262,10 @@ hns3_init_hardware(struct hns3_adapter *hns) goto err_mac_init; } - ret = hns3_set_promisc_mode(hw, false, false); + ret = hns3_promisc_init(hw); if (ret) { - PMD_INIT_LOG(ERR, "Failed to set promisc mode: %d", ret); + PMD_INIT_LOG(ERR, "Failed to init promisc: %d", + ret); goto err_mac_init; } @@ -3624,6 +4298,19 @@ hns3_init_hardware(struct hns3_adapter *hns) PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret); goto err_mac_init; } + + /* + * In the initialization clearing the all hardware mapping relationship + * configurations between queues and interrupt vectors is needed, so + * some error caused by the residual configurations, such as the + * unexpected interrupt, can be avoid. 
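+	 * See hns3_init_ring_with_vector above: it unbinds every queue by
+	 * mapping it to the reserved last MSI-X vector, which is also why
+	 * the maximum queue count drops by one when Rx interrupt mode is
+	 * enabled.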
+ */ + ret = hns3_init_ring_with_vector(hw); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret); + goto err_mac_init; + } + return 0; err_mac_init: @@ -3652,6 +4339,8 @@ hns3_init_pf(struct rte_eth_dev *eth_dev) goto err_cmd_init_queue; } + hns3_clear_all_event_cause(hw); + /* Firmware command initialize */ ret = hns3_cmd_init(hw); if (ret) { @@ -3659,6 +4348,18 @@ hns3_init_pf(struct rte_eth_dev *eth_dev) goto err_cmd_init; } + ret = rte_intr_callback_register(&pci_dev->intr_handle, + hns3_interrupt_handler, + eth_dev); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret); + goto err_intr_callback_register; + } + + /* Enable interrupt */ + rte_intr_enable(&pci_dev->intr_handle); + hns3_pf_enable_irq0(hw); + /* Get configuration */ ret = hns3_get_configuration(hw); if (ret) { @@ -3681,17 +4382,29 @@ hns3_init_pf(struct rte_eth_dev *eth_dev) hns3_set_default_rss_args(hw); + ret = hns3_enable_hw_error_intr(hns, true); + if (ret) { + PMD_INIT_LOG(ERR, "fail to enable hw error interrupts: %d", + ret); + goto err_fdir; + } + return 0; +err_fdir: + hns3_fdir_filter_uninit(hns); err_hw_init: hns3_uninit_umv_space(hw); err_get_config: - hns3_cmd_uninit(hw); - + hns3_pf_disable_irq0(hw); + rte_intr_disable(&pci_dev->intr_handle); + hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler, + eth_dev); +err_intr_callback_register: err_cmd_init: + hns3_cmd_uninit(hw); hns3_cmd_destroy_queue(hw); - err_cmd_init_queue: hw->io_base = NULL; @@ -3702,13 +4415,22 @@ static void hns3_uninit_pf(struct rte_eth_dev *eth_dev) { struct hns3_adapter *hns = eth_dev->data->dev_private; + struct rte_device *dev = eth_dev->device; + struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev); struct hns3_hw *hw = &hns->hw; PMD_INIT_FUNC_TRACE(); + hns3_enable_hw_error_intr(hns, false); hns3_rss_uninit(hns); + (void)hns3_config_gro(hw, false); + hns3_promisc_uninit(hw); hns3_fdir_filter_uninit(hns); hns3_uninit_umv_space(hw); + hns3_pf_disable_irq0(hw); + rte_intr_disable(&pci_dev->intr_handle); + hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler, + eth_dev); hns3_cmd_uninit(hw); hns3_cmd_destroy_queue(hw); hw->io_base = NULL; @@ -3745,13 +4467,115 @@ err_config_mac_mode: } static int -hns3_dev_start(struct rte_eth_dev *eth_dev) +hns3_map_rx_interrupt(struct rte_eth_dev *dev) { - struct hns3_adapter *hns = eth_dev->data->dev_private; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint8_t base = RTE_INTR_VEC_ZERO_OFFSET; + uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET; + uint32_t intr_vector; + uint16_t q_id; + int ret; + + if (dev->data->dev_conf.intr_conf.rxq == 0) + return 0; + + /* disable uio/vfio intr/eventfd mapping */ + rte_intr_disable(intr_handle); + + /* check and configure queue intr-vector mapping */ + if (rte_intr_cap_multiple(intr_handle) || + !RTE_ETH_DEV_SRIOV(dev).active) { + intr_vector = hw->used_rx_queues; + /* creates event fd for each intr vector when MSIX is used */ + if (rte_intr_efd_enable(intr_handle, intr_vector)) + return -EINVAL; + } + if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { + intr_handle->intr_vec = + rte_zmalloc("intr_vec", + hw->used_rx_queues * sizeof(int), 0); + if (intr_handle->intr_vec == NULL) { + hns3_err(hw, "Failed to allocate %d rx_queues" + " intr_vec", hw->used_rx_queues); + ret = -ENOMEM; + goto alloc_intr_vec_error; + } + } + + if 
(rte_intr_allow_others(intr_handle)) { + vec = RTE_INTR_VEC_RXTX_OFFSET; + base = RTE_INTR_VEC_RXTX_OFFSET; + } + if (rte_intr_dp_is_en(intr_handle)) { + for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { + ret = hns3_bind_ring_with_vector(hw, vec, true, + HNS3_RING_TYPE_RX, + q_id); + if (ret) + goto bind_vector_error; + intr_handle->intr_vec[q_id] = vec; + if (vec < base + intr_handle->nb_efd - 1) + vec++; + } + } + rte_intr_enable(intr_handle); + return 0; + +bind_vector_error: + rte_intr_efd_disable(intr_handle); + if (intr_handle->intr_vec) { + free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; + } + return ret; +alloc_intr_vec_error: + rte_intr_efd_disable(intr_handle); + return ret; +} + +static int +hns3_restore_rx_interrupt(struct hns3_hw *hw) +{ + struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id]; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + uint16_t q_id; + int ret; + + if (dev->data->dev_conf.intr_conf.rxq == 0) + return 0; + + if (rte_intr_dp_is_en(intr_handle)) { + for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { + ret = hns3_bind_ring_with_vector(hw, + intr_handle->intr_vec[q_id], true, + HNS3_RING_TYPE_RX, q_id); + if (ret) + return ret; + } + } + + return 0; +} + +static void +hns3_restore_filter(struct rte_eth_dev *dev) +{ + hns3_restore_rss_filter(dev); +} + +static int +hns3_dev_start(struct rte_eth_dev *dev) +{ + struct hns3_adapter *hns = dev->data->dev_private; struct hns3_hw *hw = &hns->hw; int ret; PMD_INIT_FUNC_TRACE(); + if (rte_atomic16_read(&hw->reset.resetting)) + return -EBUSY; rte_spinlock_lock(&hw->lock); hw->adapter_state = HNS3_NIC_STARTING; @@ -3762,10 +4586,29 @@ hns3_dev_start(struct rte_eth_dev *eth_dev) rte_spinlock_unlock(&hw->lock); return ret; } + ret = hns3_map_rx_interrupt(dev); + if (ret) { + hw->adapter_state = HNS3_NIC_CONFIGURED; + rte_spinlock_unlock(&hw->lock); + return ret; + } hw->adapter_state = HNS3_NIC_STARTED; rte_spinlock_unlock(&hw->lock); - hns3_set_rxtx_function(eth_dev); + + hns3_set_rxtx_function(dev); + hns3_mp_req_start_rxtx(dev); + rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, dev); + + hns3_restore_filter(dev); + + /* Enable interrupt of all rx queues before enabling queues */ + hns3_dev_all_rx_queue_intr_enable(hw, true); + /* + * When finished the initialization, enable queues to receive/transmit + * packets. 
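+	 * Note the ordering: hns3_dev_all_rx_queue_intr_enable(hw, true) has
+	 * already run, so the first packets arriving after
+	 * hns3_enable_all_queues(hw, true) can raise Rx interrupts at once.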
+
+static int
+hns3_dev_start(struct rte_eth_dev *dev)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
+	if (rte_atomic16_read(&hw->reset.resetting))
+		return -EBUSY;
 
 	rte_spinlock_lock(&hw->lock);
 	hw->adapter_state = HNS3_NIC_STARTING;
@@ -3762,10 +4586,29 @@ hns3_dev_start(struct rte_eth_dev *eth_dev)
 		rte_spinlock_unlock(&hw->lock);
 		return ret;
 	}
+	ret = hns3_map_rx_interrupt(dev);
+	if (ret) {
+		hw->adapter_state = HNS3_NIC_CONFIGURED;
+		rte_spinlock_unlock(&hw->lock);
+		return ret;
+	}
 
 	hw->adapter_state = HNS3_NIC_STARTED;
 	rte_spinlock_unlock(&hw->lock);
-	hns3_set_rxtx_function(eth_dev);
+
+	hns3_set_rxtx_function(dev);
+	hns3_mp_req_start_rxtx(dev);
+	rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, dev);
+
+	hns3_restore_filter(dev);
+
+	/* Enable interrupt of all rx queues before enabling queues */
+	hns3_dev_all_rx_queue_intr_enable(hw, true);
+	/*
+	 * After the initialization has finished, enable queues to receive
+	 * and transmit packets.
+	 */
+	hns3_enable_all_queues(hw, true);
 
 	hns3_info(hw, "hns3 dev start successful!");
 	return 0;
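hns3_dev_start() also arms hns3_service_handler() with rte_eal_alarm_set(). EAL alarms are one-shot, so a periodic service must re-arm itself from the callback; a generic sketch of that idiom follows (the callback name is illustrative, and the one-second period matches HNS3_SERVICE_INTERVAL only by way of example):

#include <rte_alarm.h>

#define SERVICE_PERIOD_US (1000 * 1000)	/* 1 s */

static void
periodic_service(void *arg)
{
	/* ... poll link status, refresh stats, etc. ... */

	/* alarms fire once, so the callback must re-arm itself */
	(void)rte_eal_alarm_set(SERVICE_PERIOD_US, periodic_service, arg);
}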
@@ -3783,28 +4626,75 @@ hns3_do_stop(struct hns3_adapter *hns)
 		return ret;
 	hw->mac.link_status = ETH_LINK_DOWN;
 
-	hns3_configure_all_mac_addr(hns, true);
-	reset_queue = true;
+	if (rte_atomic16_read(&hw->reset.disable_cmd) == 0) {
+		hns3_configure_all_mac_addr(hns, true);
+		reset_queue = true;
+	} else
+		reset_queue = false;
 	hw->mac.default_addr_setted = false;
 	return hns3_stop_queues(hns, reset_queue);
 }
 
 static void
-hns3_dev_stop(struct rte_eth_dev *eth_dev)
+hns3_unmap_rx_interrupt(struct rte_eth_dev *dev)
 {
-	struct hns3_adapter *hns = eth_dev->data->dev_private;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_hw *hw = &hns->hw;
+	uint8_t base = RTE_INTR_VEC_ZERO_OFFSET;
+	uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET;
+	uint16_t q_id;
+
+	if (dev->data->dev_conf.intr_conf.rxq == 0)
+		return;
+
+	/* unmap the ring with vector */
+	if (rte_intr_allow_others(intr_handle)) {
+		vec = RTE_INTR_VEC_RXTX_OFFSET;
+		base = RTE_INTR_VEC_RXTX_OFFSET;
+	}
+	if (rte_intr_dp_is_en(intr_handle)) {
+		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
+			(void)hns3_bind_ring_with_vector(hw, vec, false,
+							 HNS3_RING_TYPE_RX,
+							 q_id);
+			if (vec < base + intr_handle->nb_efd - 1)
+				vec++;
+		}
+	}
+	/* Clean datapath event and queue/vec mapping */
+	rte_intr_efd_disable(intr_handle);
+	if (intr_handle->intr_vec) {
+		rte_free(intr_handle->intr_vec);
+		intr_handle->intr_vec = NULL;
+	}
+}
+
+static void
+hns3_dev_stop(struct rte_eth_dev *dev)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
 
 	PMD_INIT_FUNC_TRACE();
 
 	hw->adapter_state = HNS3_NIC_STOPPING;
-	hns3_set_rxtx_function(eth_dev);
+	hns3_set_rxtx_function(dev);
+	rte_wmb();
+	/* Disable datapath on secondary process. */
+	hns3_mp_req_stop_rxtx(dev);
+	/* Prevent crashes when queues are still in use. */
+	rte_delay_ms(hw->tqps_num);
 
 	rte_spinlock_lock(&hw->lock);
-
-	hns3_do_stop(hns);
-	hns3_dev_release_mbufs(hns);
-	hw->adapter_state = HNS3_NIC_CONFIGURED;
+	if (rte_atomic16_read(&hw->reset.resetting) == 0) {
+		hns3_do_stop(hns);
+		hns3_unmap_rx_interrupt(dev);
+		hns3_dev_release_mbufs(hns);
+		hw->adapter_state = HNS3_NIC_CONFIGURED;
+	}
+	rte_eal_alarm_cancel(hns3_service_handler, dev);
 	rte_spinlock_unlock(&hw->lock);
 }
 
@@ -3814,20 +4704,28 @@ hns3_dev_close(struct rte_eth_dev *eth_dev)
 	struct hns3_adapter *hns = eth_dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		rte_free(eth_dev->process_private);
+		eth_dev->process_private = NULL;
+		return;
+	}
+
 	if (hw->adapter_state == HNS3_NIC_STARTED)
 		hns3_dev_stop(eth_dev);
 
 	hw->adapter_state = HNS3_NIC_CLOSING;
-	rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
+	hns3_reset_abort(hns);
+	hw->adapter_state = HNS3_NIC_CLOSED;
 
 	hns3_configure_all_mc_mac_addr(hns, true);
 	hns3_remove_all_vlan_table(hns);
 	hns3_vlan_txvlan_cfg(hns, HNS3_PORT_BASE_VLAN_DISABLE, 0);
 	hns3_uninit_pf(eth_dev);
 	hns3_free_all_queues(eth_dev);
+	rte_free(hw->reset.wait_data);
 	rte_free(eth_dev->process_private);
 	eth_dev->process_private = NULL;
-	hw->adapter_state = HNS3_NIC_CLOSED;
+	hns3_mp_uninit_primary();
 	hns3_warn(hw, "Close port %d finished", hw->data->port_id);
 }
 
@@ -4002,21 +4900,446 @@ hns3_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info)
 	for (i = 0; i < dcb_info->nb_tcs; i++)
 		dcb_info->tc_bws[i] = hw->dcb_info.pg_info[0].tc_dwrr[i];
 
-	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
-		dcb_info->tc_queue.tc_rxq[0][i].base =
-					hw->tc_queue[i].tqp_offset;
+	for (i = 0; i < hw->num_tc; i++) {
+		dcb_info->tc_queue.tc_rxq[0][i].base = hw->alloc_rss_size * i;
 		dcb_info->tc_queue.tc_txq[0][i].base =
-					hw->tc_queue[i].tqp_offset;
-		dcb_info->tc_queue.tc_rxq[0][i].nb_queue =
-					hw->tc_queue[i].tqp_count;
+						hw->tc_queue[i].tqp_offset;
+		dcb_info->tc_queue.tc_rxq[0][i].nb_queue = hw->alloc_rss_size;
 		dcb_info->tc_queue.tc_txq[0][i].nb_queue =
-					hw->tc_queue[i].tqp_count;
+						hw->tc_queue[i].tqp_count;
 	}
 	rte_spinlock_unlock(&hw->lock);
 
 	return 0;
 }
 
+static int
+hns3_reinit_dev(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	int ret;
+
+	ret = hns3_cmd_init(hw);
+	if (ret) {
+		hns3_err(hw, "Failed to init cmd: %d", ret);
+		return ret;
+	}
+
+	ret = hns3_reset_all_queues(hns);
+	if (ret) {
+		hns3_err(hw, "Failed to reset all queues: %d", ret);
+		return ret;
+	}
+
+	ret = hns3_init_hardware(hns);
+	if (ret) {
+		hns3_err(hw, "Failed to init hardware: %d", ret);
+		return ret;
+	}
+
+	ret = hns3_enable_hw_error_intr(hns, true);
+	if (ret) {
+		hns3_err(hw, "Failed to enable hw error interrupts: %d",
+			 ret);
+		return ret;
+	}
+	hns3_info(hw, "Reset done, driver initialization finished.");
+
+	return 0;
+}
+
+static bool
+is_pf_reset_done(struct hns3_hw *hw)
+{
+	uint32_t val, reg, reg_bit;
+
+	switch (hw->reset.level) {
+	case HNS3_IMP_RESET:
+		reg = HNS3_GLOBAL_RESET_REG;
+		reg_bit = HNS3_IMP_RESET_BIT;
+		break;
+	case HNS3_GLOBAL_RESET:
+		reg = HNS3_GLOBAL_RESET_REG;
+		reg_bit = HNS3_GLOBAL_RESET_BIT;
+		break;
+	case HNS3_FUNC_RESET:
+		reg = HNS3_FUN_RST_ING;
+		reg_bit = HNS3_FUN_RST_ING_B;
+		break;
+	case HNS3_FLR_RESET:
+	default:
+		hns3_err(hw, "Wait for unsupported reset level: %d",
+			 hw->reset.level);
+		return true;
+	}
+	val = hns3_read_dev(hw, reg);
+	if (hns3_get_bit(val, reg_bit))
+		return false;
+	else
+		return true;
+}
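is_pf_reset_done() answers only "has the reset bit cleared at this instant"; the surrounding reset machinery polls it on a 100 ms by 200 budget (HNS3_RESET_WAIT_MS and HNS3_RESET_WAIT_CNT) without ever blocking a thread. Purely for illustration, the same budget expressed as a plain blocking loop would look like the sketch below; read_reg() and fake_reg are stand-ins for hns3_read_dev() and the MMIO register, and the driver deliberately does not block like this:

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#define RESET_WAIT_MS  100	/* as HNS3_RESET_WAIT_MS */
#define RESET_WAIT_CNT 200	/* as HNS3_RESET_WAIT_CNT */

static volatile uint32_t fake_reg;	/* stand-in for the MMIO register */

static uint32_t
read_reg(uint32_t reg)
{
	(void)reg;
	return fake_reg;	/* in the driver: hns3_read_dev(hw, reg) */
}

static bool
wait_reset_done(uint32_t reg, uint32_t bit)
{
	int i;

	for (i = 0; i < RESET_WAIT_CNT; i++) {
		if (!(read_reg(reg) & (UINT32_C(1) << bit)))
			return true;	/* hardware cleared the bit */
		usleep(RESET_WAIT_MS * 1000);
	}
	return false;			/* timed out */
}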
+
+bool
+hns3_is_reset_pending(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	enum hns3_reset_level reset;
+
+	hns3_check_event_cause(hns, NULL);
+	reset = hns3_get_reset_level(hns, &hw->reset.pending);
+	if (hw->reset.level != HNS3_NONE_RESET && hw->reset.level < reset) {
+		hns3_warn(hw, "High level reset %d is pending", reset);
+		return true;
+	}
+	reset = hns3_get_reset_level(hns, &hw->reset.request);
+	if (hw->reset.level != HNS3_NONE_RESET && hw->reset.level < reset) {
+		hns3_warn(hw, "High level reset %d is requested", reset);
+		return true;
+	}
+	return false;
+}
+
+static int
+hns3_wait_hardware_ready(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	struct hns3_wait_data *wait_data = hw->reset.wait_data;
+	struct timeval tv;
+
+	if (wait_data->result == HNS3_WAIT_SUCCESS)
+		return 0;
+	else if (wait_data->result == HNS3_WAIT_TIMEOUT) {
+		gettimeofday(&tv, NULL);
+		hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld",
+			  tv.tv_sec, tv.tv_usec);
+		return -ETIME;
+	} else if (wait_data->result == HNS3_WAIT_REQUEST)
+		return -EAGAIN;
+
+	wait_data->hns = hns;
+	wait_data->check_completion = is_pf_reset_done;
+	wait_data->end_ms = (uint64_t)HNS3_RESET_WAIT_CNT *
+				      HNS3_RESET_WAIT_MS + get_timeofday_ms();
+	wait_data->interval = HNS3_RESET_WAIT_MS * USEC_PER_MSEC;
+	wait_data->count = HNS3_RESET_WAIT_CNT;
+	wait_data->result = HNS3_WAIT_REQUEST;
+	rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data);
+	return -EAGAIN;
+}
+
+static int
+hns3_func_reset_cmd(struct hns3_hw *hw, int func_id)
+{
+	struct hns3_cmd_desc desc;
+	struct hns3_reset_cmd *req = (struct hns3_reset_cmd *)desc.data;
+
+	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false);
+	hns3_set_bit(req->mac_func_reset, HNS3_CFG_RESET_FUNC_B, 1);
+	req->fun_reset_vfid = func_id;
+
+	return hns3_cmd_send(hw, &desc, 1);
+}
+
+static int
+hns3_imp_reset_cmd(struct hns3_hw *hw)
+{
+	struct hns3_cmd_desc desc;
+
+	hns3_cmd_setup_basic_desc(&desc, 0xFFFE, false);
+	desc.data[0] = 0xeedd;
+
+	return hns3_cmd_send(hw, &desc, 1);
+}
+
+static void
+hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level)
+{
+	struct hns3_hw *hw = &hns->hw;
+	struct timeval tv;
+	uint32_t val;
+
+	gettimeofday(&tv, NULL);
+	if (hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG) ||
+	    hns3_read_dev(hw, HNS3_FUN_RST_ING)) {
+		hns3_warn(hw, "Don't process msix during resetting time=%ld.%.6ld",
+			  tv.tv_sec, tv.tv_usec);
+		return;
+	}
+
+	switch (reset_level) {
+	case HNS3_IMP_RESET:
+		hns3_imp_reset_cmd(hw);
+		hns3_warn(hw, "IMP Reset requested time=%ld.%.6ld",
+			  tv.tv_sec, tv.tv_usec);
+		break;
+	case HNS3_GLOBAL_RESET:
+		val = hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG);
+		hns3_set_bit(val, HNS3_GLOBAL_RESET_BIT, 1);
+		hns3_write_dev(hw, HNS3_GLOBAL_RESET_REG, val);
+		hns3_warn(hw, "Global Reset requested time=%ld.%.6ld",
+			  tv.tv_sec, tv.tv_usec);
+		break;
+	case HNS3_FUNC_RESET:
+		hns3_warn(hw, "PF Reset requested time=%ld.%.6ld",
+			  tv.tv_sec, tv.tv_usec);
+		/* schedule again to check later */
+		hns3_atomic_set_bit(HNS3_FUNC_RESET, &hw->reset.pending);
+		hns3_schedule_reset(hns);
+		break;
+	default:
+		hns3_warn(hw, "Unsupported reset level: %d", reset_level);
+		return;
+	}
+	hns3_atomic_clear_bit(reset_level, &hw->reset.request);
+}
+
+static enum hns3_reset_level
+hns3_get_reset_level(struct hns3_adapter *hns, uint64_t *levels)
+{
+	struct hns3_hw *hw = &hns->hw;
+	enum hns3_reset_level reset_level = HNS3_NONE_RESET;
+
+	/* Return the highest priority reset level amongst all */
+	if (hns3_atomic_test_bit(HNS3_IMP_RESET, levels))
+		reset_level = HNS3_IMP_RESET;
+	else if (hns3_atomic_test_bit(HNS3_GLOBAL_RESET, levels))
+		reset_level = HNS3_GLOBAL_RESET;
+	else if (hns3_atomic_test_bit(HNS3_FUNC_RESET, levels))
+		reset_level = HNS3_FUNC_RESET;
+	else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels))
+		reset_level = HNS3_FLR_RESET;
+
+	if (hw->reset.level != HNS3_NONE_RESET &&
+	    reset_level < hw->reset.level)
+		return HNS3_NONE_RESET;
+
+	return reset_level;
+}
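hns3_wait_hardware_ready() never sleeps: it records the polling parameters in hw->reset.wait_data, arms an alarm, and returns -EAGAIN until the callback flips wait_data->result to success or timeout. hns3_wait_callback() itself lives in the reset code rather than in this patch, so the sketch below is a simplified reconstruction of the callback half of that state machine; the struct and field names follow the usage above but are assumptions:

#include <stdbool.h>
#include <stdint.h>
#include <rte_alarm.h>

enum wait_result { WAIT_REQUEST, WAIT_SUCCESS, WAIT_TIMEOUT };

struct wait_data {			/* simplified hns3_wait_data */
	bool (*check_completion)(void *ctx);
	void *ctx;
	uint64_t interval;		/* us between polls */
	int count;			/* polls remaining */
	enum wait_result result;
};

static void
wait_callback(void *param)
{
	struct wait_data *wd = param;

	if (wd->check_completion(wd->ctx)) {
		wd->result = WAIT_SUCCESS;	/* next query returns 0 */
		return;
	}
	if (--wd->count <= 0) {
		wd->result = WAIT_TIMEOUT;	/* next query returns -ETIME */
		return;
	}
	/* still waiting: poll again after the configured interval */
	(void)rte_eal_alarm_set(wd->interval, wait_callback, wd);
}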
+
+static int
+hns3_prepare_reset(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	uint32_t reg_val;
+	int ret;
+
+	switch (hw->reset.level) {
+	case HNS3_FUNC_RESET:
+		ret = hns3_func_reset_cmd(hw, HNS3_PF_FUNC_ID);
+		if (ret)
+			return ret;
+
+		/*
+		 * After performing pf reset, it is not necessary to do the
+		 * mailbox handling or send any command to firmware, because
+		 * any mailbox handling or command to firmware is only valid
+		 * after hns3_cmd_init is called.
+		 */
+		rte_atomic16_set(&hw->reset.disable_cmd, 1);
+		hw->reset.stats.request_cnt++;
+		break;
+	case HNS3_IMP_RESET:
+		reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
+		hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val |
+			       BIT(HNS3_VECTOR0_IMP_RESET_INT_B));
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int
+hns3_set_rst_done(struct hns3_hw *hw)
+{
+	struct hns3_pf_rst_done_cmd *req;
+	struct hns3_cmd_desc desc;
+
+	req = (struct hns3_pf_rst_done_cmd *)desc.data;
+	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PF_RST_DONE, false);
+	req->pf_rst_done |= HNS3_PF_RESET_DONE_BIT;
+	return hns3_cmd_send(hw, &desc, 1);
+}
+
+static int
+hns3_stop_service(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	struct rte_eth_dev *eth_dev;
+
+	eth_dev = &rte_eth_devices[hw->data->port_id];
+	if (hw->adapter_state == HNS3_NIC_STARTED)
+		rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
+	hw->mac.link_status = ETH_LINK_DOWN;
+
+	hns3_set_rxtx_function(eth_dev);
+	rte_wmb();
+	/* Disable datapath on secondary process. */
+	hns3_mp_req_stop_rxtx(eth_dev);
+	rte_delay_ms(hw->tqps_num);
+
+	rte_spinlock_lock(&hw->lock);
+	if (hns->hw.adapter_state == HNS3_NIC_STARTED ||
+	    hw->adapter_state == HNS3_NIC_STOPPING) {
+		hns3_do_stop(hns);
+		hw->reset.mbuf_deferred_free = true;
+	} else
+		hw->reset.mbuf_deferred_free = false;
+
+	/*
+	 * It is cumbersome for hardware to pick-and-choose entries for
+	 * deletion from table space. Hence, for function reset software
+	 * intervention is required to delete the entries.
+	 */
+	if (rte_atomic16_read(&hw->reset.disable_cmd) == 0)
+		hns3_configure_all_mc_mac_addr(hns, true);
+	rte_spinlock_unlock(&hw->lock);
+
+	return 0;
+}
+
+static int
+hns3_start_service(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	struct rte_eth_dev *eth_dev;
+
+	if (hw->reset.level == HNS3_IMP_RESET ||
+	    hw->reset.level == HNS3_GLOBAL_RESET)
+		hns3_set_rst_done(hw);
+	eth_dev = &rte_eth_devices[hw->data->port_id];
+	hns3_set_rxtx_function(eth_dev);
+	hns3_mp_req_start_rxtx(eth_dev);
+	if (hw->adapter_state == HNS3_NIC_STARTED) {
+		hns3_service_handler(eth_dev);
+
+		/* Enable interrupt of all rx queues before enabling queues */
+		hns3_dev_all_rx_queue_intr_enable(hw, true);
+		/*
+		 * After the initialization has finished, enable queues to
+		 * receive and transmit packets.
+		 */
+		hns3_enable_all_queues(hw, true);
+	}
+
+	return 0;
+}
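hns3_stop_service() quiesces the datapath the usual DPDK way: swap the burst function pointers, publish the swap with rte_wmb(), then wait long enough for any in-flight burst call to drain before touching queues (the rte_delay_ms(hw->tqps_num) heuristic above). A generic sketch of that pattern under stated assumptions; the dummy bursts and quiesce_datapath() are hypothetical helpers, not code from this patch:

#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_cycles.h>
#include <rte_atomic.h>

static uint16_t
dummy_rx_burst(void *rxq __rte_unused, struct rte_mbuf **pkts __rte_unused,
	       uint16_t nb_pkts __rte_unused)
{
	return 0;	/* receive nothing while the datapath is disabled */
}

static uint16_t
dummy_tx_burst(void *txq __rte_unused, struct rte_mbuf **pkts __rte_unused,
	       uint16_t nb_pkts __rte_unused)
{
	return 0;	/* accept nothing: callers back off */
}

static void
quiesce_datapath(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	dev->rx_pkt_burst = dummy_rx_burst;
	dev->tx_pkt_burst = dummy_tx_burst;
	rte_wmb();			/* publish the dummies to all lcores */
	rte_delay_ms(nb_queues);	/* ~1 ms per queue, as above */
}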
+
+static int
+hns3_restore_conf(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	int ret;
+
+	ret = hns3_configure_all_mac_addr(hns, false);
+	if (ret)
+		return ret;
+
+	ret = hns3_configure_all_mc_mac_addr(hns, false);
+	if (ret)
+		goto err_mc_mac;
+
+	ret = hns3_dev_promisc_restore(hns);
+	if (ret)
+		goto err_promisc;
+
+	ret = hns3_restore_vlan_table(hns);
+	if (ret)
+		goto err_promisc;
+
+	ret = hns3_restore_vlan_conf(hns);
+	if (ret)
+		goto err_promisc;
+
+	ret = hns3_restore_all_fdir_filter(hns);
+	if (ret)
+		goto err_promisc;
+
+	ret = hns3_restore_rx_interrupt(hw);
+	if (ret)
+		goto err_promisc;
+
+	ret = hns3_restore_gro_conf(hw);
+	if (ret)
+		goto err_promisc;
+
+	if (hns->hw.adapter_state == HNS3_NIC_STARTED) {
+		ret = hns3_do_start(hns, false);
+		if (ret)
+			goto err_promisc;
+		hns3_info(hw, "hns3 dev restart successful!");
+	} else if (hw->adapter_state == HNS3_NIC_STOPPING)
+		hw->adapter_state = HNS3_NIC_CONFIGURED;
+	return 0;
+
+err_promisc:
+	hns3_configure_all_mc_mac_addr(hns, true);
+err_mc_mac:
+	hns3_configure_all_mac_addr(hns, true);
+	return ret;
+}
+
+static void
+hns3_reset_service(void *param)
+{
+	struct hns3_adapter *hns = (struct hns3_adapter *)param;
+	struct hns3_hw *hw = &hns->hw;
+	enum hns3_reset_level reset_level;
+	struct timeval tv_delta;
+	struct timeval tv_start;
+	struct timeval tv;
+	uint64_t msec;
+	int ret;
+
+	/*
+	 * The interrupt was not triggered within the delay time and may have
+	 * been lost; handle it here to recover from the error.
+	 */
+	if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_DEFERRED) {
+		rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_REQUESTED);
+		hns3_err(hw, "Handling interrupts in delayed tasks");
+		hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
+		reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
+		if (reset_level == HNS3_NONE_RESET) {
+			hns3_err(hw, "No reset level is set, try IMP reset");
+			hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
+		}
+	}
+	rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_NONE);
+
+	/*
+	 * Check if there is any ongoing reset in the hardware. This status
+	 * can be checked from reset_pending. If there is, we need to wait
+	 * for the hardware to complete the reset:
+	 * a. If we are able to figure out in reasonable time that the
+	 *    hardware has fully reset, we can proceed with the driver and
+	 *    client reset.
+	 * b. Otherwise, we can come back later to check the status, so
+	 *    reschedule now.
+	 */
+	reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
+	if (reset_level != HNS3_NONE_RESET) {
+		gettimeofday(&tv_start, NULL);
+		ret = hns3_reset_process(hns, reset_level);
+		gettimeofday(&tv, NULL);
+		timersub(&tv, &tv_start, &tv_delta);
+		msec = tv_delta.tv_sec * MSEC_PER_SEC +
+		       tv_delta.tv_usec / USEC_PER_MSEC;
+		if (msec > HNS3_RESET_PROCESS_MS)
+			hns3_err(hw, "%d handle long time delta %" PRIu64
+				     " ms time=%ld.%.6ld",
+				 hw->reset.level, msec,
+				 tv.tv_sec, tv.tv_usec);
+		if (ret == -EAGAIN)
+			return;
+	}
+
+	/* Check if we got any *new* reset requests to be honored */
+	reset_level = hns3_get_reset_level(hns, &hw->reset.request);
+	if (reset_level != HNS3_NONE_RESET)
+		hns3_msix_process(hns, reset_level);
+}
+
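The long-reset check in hns3_reset_service() reduces to "timeval difference in milliseconds". Self-contained, with timersub() being the usual BSD/glibc macro and MSEC_PER_SEC/USEC_PER_MSEC the expected 1000:

#define _DEFAULT_SOURCE		/* for timersub() on glibc */
#include <sys/time.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MSEC_PER_SEC  1000ULL
#define USEC_PER_MSEC 1000ULL

static uint64_t
elapsed_ms(const struct timeval *start, const struct timeval *end)
{
	struct timeval delta;

	timersub(end, start, &delta);
	return (uint64_t)delta.tv_sec * MSEC_PER_SEC +
	       (uint64_t)delta.tv_usec / USEC_PER_MSEC;
}

int
main(void)
{
	struct timeval t0, t1;

	gettimeofday(&t0, NULL);
	/* ... the work being measured ... */
	gettimeofday(&t1, NULL);
	printf("took %" PRIu64 " ms\n", elapsed_ms(&t0, &t1));
	return 0;
}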
 static const struct eth_dev_ops hns3_eth_dev_ops = {
 	.dev_start          = hns3_dev_start,
 	.dev_stop           = hns3_dev_stop,
@@ -4026,12 +5349,21 @@ static const struct eth_dev_ops hns3_eth_dev_ops = {
 	.allmulticast_enable  = hns3_dev_allmulticast_enable,
 	.allmulticast_disable = hns3_dev_allmulticast_disable,
 	.mtu_set            = hns3_dev_mtu_set,
+	.stats_get          = hns3_stats_get,
+	.stats_reset        = hns3_stats_reset,
+	.xstats_get         = hns3_dev_xstats_get,
+	.xstats_get_names   = hns3_dev_xstats_get_names,
+	.xstats_reset       = hns3_dev_xstats_reset,
+	.xstats_get_by_id   = hns3_dev_xstats_get_by_id,
+	.xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id,
 	.dev_infos_get          = hns3_dev_infos_get,
 	.fw_version_get         = hns3_fw_version_get,
 	.rx_queue_setup         = hns3_rx_queue_setup,
 	.tx_queue_setup         = hns3_tx_queue_setup,
 	.rx_queue_release       = hns3_dev_rx_queue_release,
 	.tx_queue_release       = hns3_dev_tx_queue_release,
+	.rx_queue_intr_enable   = hns3_dev_rx_queue_intr_enable,
+	.rx_queue_intr_disable  = hns3_dev_rx_queue_intr_disable,
 	.dev_configure          = hns3_dev_configure,
 	.flow_ctrl_get          = hns3_flow_ctrl_get,
 	.flow_ctrl_set          = hns3_flow_ctrl_set,
@@ -4050,21 +5382,30 @@ static const struct eth_dev_ops hns3_eth_dev_ops = {
 	.vlan_tpid_set          = hns3_vlan_tpid_set,
 	.vlan_offload_set       = hns3_vlan_offload_set,
 	.vlan_pvid_set          = hns3_vlan_pvid_set,
+	.get_reg                = hns3_get_regs,
 	.get_dcb_info           = hns3_get_dcb_info,
 	.dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
 };
 
+static const struct hns3_reset_ops hns3_reset_ops = {
+	.reset_service       = hns3_reset_service,
+	.stop_service        = hns3_stop_service,
+	.prepare_reset       = hns3_prepare_reset,
+	.wait_hardware_ready = hns3_wait_hardware_ready,
+	.reinit_dev          = hns3_reinit_dev,
+	.restore_conf        = hns3_restore_conf,
+	.start_service       = hns3_start_service,
+};
+
 static int
 hns3_dev_init(struct rte_eth_dev *eth_dev)
 {
-	struct rte_device *dev = eth_dev->device;
-	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev);
 	struct hns3_adapter *hns = eth_dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
-	uint16_t device_id = pci_dev->id.device_id;
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
+
 	eth_dev->process_private = (struct hns3_process_private *)
 			rte_zmalloc_socket("hns3_filter_list",
 					   sizeof(struct hns3_process_private),
@@ -4078,16 +5419,14 @@ hns3_dev_init(struct rte_eth_dev *eth_dev)
 	hns3_set_rxtx_function(eth_dev);
 	eth_dev->dev_ops = &hns3_eth_dev_ops;
 
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		hns3_mp_init_secondary();
+		hw->secondary_cnt++;
 		return 0;
+	}
 
+	hns3_mp_init_primary();
 	hw->adapter_state = HNS3_NIC_UNINITIALIZED;
-
-	if (device_id == HNS3_DEV_ID_25GE_RDMA ||
-	    device_id == HNS3_DEV_ID_50GE_RDMA ||
-	    device_id == HNS3_DEV_ID_100G_RDMA_MACSEC)
-		hns3_set_bit(hw->flag, HNS3_DEV_SUPPORT_DCB_B, 1);
-
 	hns->is_vf = false;
 	hw->data = eth_dev->data;
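The process-type branch in hns3_dev_init() follows the standard DPDK multi-process split: every process wires up the burst functions and dev_ops, but only the primary touches hardware; secondaries merely attach to shared state. A skeleton of that pattern, with the extern helpers as hypothetical stand-ins for their hns3_* counterparts:

#include <rte_eal.h>
#include <rte_ethdev.h>

/* stand-ins for the hns3_* helpers used in hns3_dev_init() */
extern void set_burst_functions(struct rte_eth_dev *dev);
extern int init_mp_secondary(void);
extern void init_mp_primary(void);
extern int init_hardware(struct rte_eth_dev *dev);
extern const struct eth_dev_ops generic_dev_ops;

static int
generic_dev_init(struct rte_eth_dev *eth_dev)
{
	/* every process, primary or secondary, needs datapath entry points */
	set_burst_functions(eth_dev);
	eth_dev->dev_ops = &generic_dev_ops;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/* secondary: attach to shared state, never touch hardware */
		return init_mp_secondary();
	}

	/* primary: full hardware bring-up */
	init_mp_primary();
	return init_hardware(eth_dev);
}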
@@ -4097,6 +5436,11 @@ hns3_dev_init(struct rte_eth_dev *eth_dev)
 	 */
 	hns->pf.mps = hw->data->mtu + HNS3_ETH_OVERHEAD;
 
+	ret = hns3_reset_init(hw);
+	if (ret)
+		goto err_init_reset;
+	hw->reset.ops = &hns3_reset_ops;
+
 	ret = hns3_init_pf(eth_dev);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Failed to init pf: %d", ret);
@@ -4126,7 +5470,14 @@ hns3_dev_init(struct rte_eth_dev *eth_dev)
 	 */
 	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
 
-	rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev);
+	if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_PENDING) {
+		hns3_err(hw, "Reschedule reset service after dev_init");
+		hns3_schedule_reset(hns);
+	} else {
+		/* IMP will wait ready flag before reset */
+		hns3_notify_reset_ready(hw, false);
+	}
+
 	hns3_info(hw, "hns3 dev initialization successful!");
 	return 0;
 
@@ -4134,6 +5485,8 @@ err_rte_zmalloc:
 	hns3_uninit_pf(eth_dev);
 
 err_init_pf:
+	rte_free(hw->reset.wait_data);
+err_init_reset:
 	eth_dev->dev_ops = NULL;
 	eth_dev->rx_pkt_burst = NULL;
 	eth_dev->tx_pkt_burst = NULL;
@@ -4199,13 +5552,5 @@ static struct rte_pci_driver rte_hns3_pmd = {
 RTE_PMD_REGISTER_PCI(net_hns3, rte_hns3_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_hns3, pci_id_hns3_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_hns3, "* igb_uio | vfio-pci");
-
-RTE_INIT(hns3_init_log)
-{
-	hns3_logtype_init = rte_log_register("pmd.net.hns3.init");
-	if (hns3_logtype_init >= 0)
-		rte_log_set_level(hns3_logtype_init, RTE_LOG_NOTICE);
-	hns3_logtype_driver = rte_log_register("pmd.net.hns3.driver");
-	if (hns3_logtype_driver >= 0)
-		rte_log_set_level(hns3_logtype_driver, RTE_LOG_NOTICE);
-}
+RTE_LOG_REGISTER(hns3_logtype_init, pmd.net.hns3.init, NOTICE);
+RTE_LOG_REGISTER(hns3_logtype_driver, pmd.net.hns3.driver, NOTICE);
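The final hunk trades the hand-written RTE_INIT constructor for RTE_LOG_REGISTER(), which defines the logtype variable, registers its name, and sets the default level in one line. A minimal usage sketch; the DEMO_LOG() wrapper below is hypothetical, in the spirit of the convenience macros the driver keeps in hns3_logs.h:

#include <rte_log.h>

/* defines 'int demo_logtype' and registers it at constructor time */
RTE_LOG_REGISTER(demo_logtype, pmd.net.demo, NOTICE);

/* hypothetical convenience wrapper, mirroring typical PMD log macros */
#define DEMO_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, demo_logtype, "demo: " fmt "\n", ## args)

/* usage: DEMO_LOG(INFO, "port %u started", 0); */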