X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fhns3%2Fhns3_ethdev.c;h=73d504253d2b5a42a3631a5323f18077df83200d;hb=a3d4f4d291d79e2801397055067d903ef5e4d4aa;hp=244a3b8272f5b5484b67d86b753917f08e379acf;hpb=4c623ca40013c58d73aec67717583e3ee1cb45e3;p=dpdk.git diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c index 244a3b8272..73d504253d 100644 --- a/drivers/net/hns3/hns3_ethdev.c +++ b/drivers/net/hns3/hns3_ethdev.c @@ -35,8 +35,6 @@ #define HNS3_DEFAULT_PORT_CONF_QUEUES_NUM 1 #define HNS3_SERVICE_INTERVAL 1000000 /* us */ -#define HNS3_PORT_BASE_VLAN_DISABLE 0 -#define HNS3_PORT_BASE_VLAN_ENABLE 1 #define HNS3_INVLID_PVID 0xFFFF #define HNS3_FILTER_TYPE_VF 0 @@ -58,13 +56,13 @@ #define HNS3_FUN_RST_ING_B 0 #define HNS3_VECTOR0_IMP_RESET_INT_B 1 +#define HNS3_VECTOR0_IMP_CMDQ_ERR_B 4U +#define HNS3_VECTOR0_IMP_RD_POISON_B 5U +#define HNS3_VECTOR0_ALL_MSIX_ERR_B 6U #define HNS3_RESET_WAIT_MS 100 #define HNS3_RESET_WAIT_CNT 200 -int hns3_logtype_init; -int hns3_logtype_driver; - enum hns3_evt_cause { HNS3_VECTOR0_EVENT_RST, HNS3_VECTOR0_EVENT_MBX, @@ -102,12 +100,14 @@ hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval) struct hns3_hw *hw = &hns->hw; uint32_t vector0_int_stats; uint32_t cmdq_src_val; + uint32_t hw_err_src_reg; uint32_t val; enum hns3_evt_cause ret; /* fetch the events from their corresponding regs */ vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG); cmdq_src_val = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG); + hw_err_src_reg = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG); /* * Assumption: If by any chance reset and mailbox events are reported @@ -150,8 +150,9 @@ hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval) } /* check for vector0 msix event source */ - if (vector0_int_stats & HNS3_VECTOR0_REG_MSIX_MASK) { - val = vector0_int_stats; + if (vector0_int_stats & HNS3_VECTOR0_REG_MSIX_MASK || + hw_err_src_reg & HNS3_RAS_REG_NFE_MASK) { + val = vector0_int_stats | hw_err_src_reg; ret = HNS3_VECTOR0_EVENT_ERR; goto out; } @@ -164,9 +165,9 @@ hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval) goto out; } - if (clearval && (vector0_int_stats || cmdq_src_val)) - hns3_warn(hw, "surprise irq ector0_int_stats:0x%x cmdq_src_val:0x%x", - vector0_int_stats, cmdq_src_val); + if (clearval && (vector0_int_stats || cmdq_src_val || hw_err_src_reg)) + hns3_warn(hw, "vector0_int_stats:0x%x cmdq_src_val:0x%x hw_err_src_reg:0x%x", + vector0_int_stats, cmdq_src_val, hw_err_src_reg); val = vector0_int_stats; ret = HNS3_VECTOR0_EVENT_OTHER; out: @@ -220,11 +221,14 @@ hns3_interrupt_handler(void *param) /* vector 0 interrupt is shared with reset and mailbox source events. 
*/ if (event_cause == HNS3_VECTOR0_EVENT_ERR) { + hns3_warn(hw, "Received err interrupt"); hns3_handle_msix_error(hns, &hw->reset.request); + hns3_handle_ras_error(hns, &hw->reset.request); hns3_schedule_reset(hns); - } else if (event_cause == HNS3_VECTOR0_EVENT_RST) + } else if (event_cause == HNS3_VECTOR0_EVENT_RST) { + hns3_warn(hw, "Received reset interrupt"); hns3_schedule_reset(hns); - else if (event_cause == HNS3_VECTOR0_EVENT_MBX) + } else if (event_cause == HNS3_VECTOR0_EVENT_MBX) hns3_dev_handle_mbx_msg(hw); else hns3_err(hw, "Received unknown event"); @@ -313,13 +317,14 @@ static int hns3_restore_vlan_table(struct hns3_adapter *hns) { struct hns3_user_vlan_table *vlan_entry; + struct hns3_hw *hw = &hns->hw; struct hns3_pf *pf = &hns->pf; uint16_t vlan_id; int ret = 0; - if (pf->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE) + if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE) return hns3_vlan_pvid_configure(hns, - pf->port_base_vlan_cfg.pvid, 1); + hw->port_base_vlan_cfg.pvid, 1); LIST_FOREACH(vlan_entry, &pf->vlan_list, next) { if (vlan_entry->hd_tbl_status) { @@ -336,7 +341,7 @@ hns3_restore_vlan_table(struct hns3_adapter *hns) static int hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on) { - struct hns3_pf *pf = &hns->pf; + struct hns3_hw *hw = &hns->hw; bool writen_to_tbl = false; int ret = 0; @@ -354,7 +359,7 @@ hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on) * vlan list. The vlan id in vlan list will be writen in vlan filter * table until port base vlan disabled */ - if (pf->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) { + if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) { ret = hns3_set_port_vlan_filter(hns, vlan_id, on); writen_to_tbl = true; } @@ -507,11 +512,10 @@ static int hns3_en_hw_strip_rxvtag(struct hns3_adapter *hns, bool enable) { struct hns3_rx_vtag_cfg rxvlan_cfg; - struct hns3_pf *pf = &hns->pf; struct hns3_hw *hw = &hns->hw; int ret; - if (pf->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) { + if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) { rxvlan_cfg.strip_tag1_en = false; rxvlan_cfg.strip_tag2_en = enable; } else { @@ -728,12 +732,12 @@ hns3_vlan_txvlan_cfg(struct hns3_adapter *hns, uint16_t port_base_vlan_state, static void hns3_store_port_base_vlan_info(struct hns3_adapter *hns, uint16_t pvid, int on) { - struct hns3_pf *pf = &hns->pf; + struct hns3_hw *hw = &hns->hw; - pf->port_base_vlan_cfg.state = on ? + hw->port_base_vlan_cfg.state = on ? 
HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE; - pf->port_base_vlan_cfg.pvid = pvid; + hw->port_base_vlan_cfg.pvid = pvid; } static void @@ -777,13 +781,12 @@ static void hns3_remove_all_vlan_table(struct hns3_adapter *hns) { struct hns3_hw *hw = &hns->hw; - struct hns3_pf *pf = &hns->pf; int ret; hns3_rm_all_vlan_table(hns, true); - if (pf->port_base_vlan_cfg.pvid != HNS3_INVLID_PVID) { + if (hw->port_base_vlan_cfg.pvid != HNS3_INVLID_PVID) { ret = hns3_set_port_vlan_filter(hns, - pf->port_base_vlan_cfg.pvid, 0); + hw->port_base_vlan_cfg.pvid, 0); if (ret) { hns3_err(hw, "Failed to remove all vlan table, ret =%d", ret); @@ -797,7 +800,6 @@ hns3_update_vlan_filter_entries(struct hns3_adapter *hns, uint16_t port_base_vlan_state, uint16_t new_pvid, uint16_t old_pvid) { - struct hns3_pf *pf = &hns->pf; struct hns3_hw *hw = &hns->hw; int ret = 0; @@ -825,7 +827,7 @@ hns3_update_vlan_filter_entries(struct hns3_adapter *hns, } } - if (new_pvid == pf->port_base_vlan_cfg.pvid) + if (new_pvid == hw->port_base_vlan_cfg.pvid) hns3_add_all_vlan_table(hns); return ret; @@ -862,17 +864,16 @@ hns3_en_pvid_strip(struct hns3_adapter *hns, int on) static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, int on) { - struct hns3_pf *pf = &hns->pf; struct hns3_hw *hw = &hns->hw; uint16_t port_base_vlan_state; uint16_t old_pvid; int ret; - if (on == 0 && pvid != pf->port_base_vlan_cfg.pvid) { - if (pf->port_base_vlan_cfg.pvid != HNS3_INVLID_PVID) + if (on == 0 && pvid != hw->port_base_vlan_cfg.pvid) { + if (hw->port_base_vlan_cfg.pvid != HNS3_INVLID_PVID) hns3_warn(hw, "Invalid operation! As current pvid set " "is %u, disable pvid %u is invalid", - pf->port_base_vlan_cfg.pvid, pvid); + hw->port_base_vlan_cfg.pvid, pvid); return 0; } @@ -894,7 +895,7 @@ hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, int on) if (pvid == HNS3_INVLID_PVID) goto out; - old_pvid = pf->port_base_vlan_cfg.pvid; + old_pvid = hw->port_base_vlan_cfg.pvid; ret = hns3_update_vlan_filter_entries(hns, port_base_vlan_state, pvid, old_pvid); if (ret) { @@ -913,6 +914,8 @@ hns3_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on) { struct hns3_adapter *hns = dev->data->dev_private; struct hns3_hw *hw = &hns->hw; + bool pvid_en_state_change; + uint16_t pvid_state; int ret; if (pvid > RTE_ETHER_MAX_VLAN_ID) { @@ -921,20 +924,34 @@ hns3_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on) return -EINVAL; } + /* + * If PVID configuration state change, should refresh the PVID + * configuration state in struct hns3_tx_queue/hns3_rx_queue. 
+ */ + pvid_state = hw->port_base_vlan_cfg.state; + if ((on && pvid_state == HNS3_PORT_BASE_VLAN_ENABLE) || + (!on && pvid_state == HNS3_PORT_BASE_VLAN_DISABLE)) + pvid_en_state_change = false; + else + pvid_en_state_change = true; + rte_spinlock_lock(&hw->lock); ret = hns3_vlan_pvid_configure(hns, pvid, on); rte_spinlock_unlock(&hw->lock); - return ret; + if (ret) + return ret; + + if (pvid_en_state_change) + hns3_update_all_queues_pvid_state(hw); + + return 0; } static void init_port_base_vlan_info(struct hns3_hw *hw) { - struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); - struct hns3_pf *pf = &hns->pf; - - pf->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE; - pf->port_base_vlan_cfg.pvid = HNS3_INVLID_PVID; + hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE; + hw->port_base_vlan_cfg.pvid = HNS3_INVLID_PVID; } static int @@ -1107,25 +1124,6 @@ hns3_config_tso(struct hns3_hw *hw, unsigned int tso_mss_min, return hns3_cmd_send(hw, &desc, 1); } -int -hns3_config_gro(struct hns3_hw *hw, bool en) -{ - struct hns3_cfg_gro_status_cmd *req; - struct hns3_cmd_desc desc; - int ret; - - hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GRO_GENERIC_CONFIG, false); - req = (struct hns3_cfg_gro_status_cmd *)desc.data; - - req->gro_en = rte_cpu_to_le_16(en ? 1 : 0); - - ret = hns3_cmd_send(hw, &desc, 1); - if (ret) - hns3_err(hw, "GRO hardware config cmd failed, ret = %d", ret); - - return ret; -} - static int hns3_set_umv_space(struct hns3_hw *hw, uint16_t space_size, uint16_t *allocated_size, bool is_alloc) @@ -1412,7 +1410,7 @@ hns3_add_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); struct hns3_mac_vlan_tbl_entry_cmd req; struct hns3_pf *pf = &hns->pf; - struct hns3_cmd_desc desc; + struct hns3_cmd_desc desc[3]; char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; uint16_t egress_port = 0; uint8_t vf_id; @@ -1446,7 +1444,7 @@ hns3_add_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) * it if the entry is inexistent. Repeated unicast entry * is not allowed in the mac vlan table. */ - ret = hns3_lookup_mac_vlan_tbl(hw, &req, &desc, false); + ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, false); if (ret == -ENOENT) { if (!hns3_is_umv_space_full(hw)) { ret = hns3_add_mac_vlan_tbl(hw, &req, NULL); @@ -2219,7 +2217,7 @@ hns3_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, bool mmap, static int hns3_init_ring_with_vector(struct hns3_hw *hw) { - uint8_t vec; + uint16_t vec; int ret; int i; @@ -2230,27 +2228,23 @@ hns3_init_ring_with_vector(struct hns3_hw *hw) * vector. In the initialization clearing the all hardware mapping * relationship configurations between queues and interrupt vectors is * needed, so some error caused by the residual configurations, such as - * the unexpected Tx interrupt, can be avoid. Because of the hardware - * constraints in hns3 hardware engine, we have to implement clearing - * the mapping relationship configurations by binding all queues to the - * last interrupt vector and reserving the last interrupt vector. This - * method results in a decrease of the maximum queues when upper - * applications call the rte_eth_dev_configure API function to enable - * Rx interrupt. + * the unexpected Tx interrupt, can be avoid. */ vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */ - /* vec - 1: the last interrupt is reserved */ - hw->intr_tqps_num = vec > hw->tqps_num ? 
hw->tqps_num : vec - 1; + if (hw->intr.mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE) + vec = vec - 1; /* the last interrupt is reserved */ + hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num); for (i = 0; i < hw->intr_tqps_num; i++) { /* - * Set gap limiter and rate limiter configuration of queue's - * interrupt. + * Set gap limiter/rate limiter/quanity limiter algorithm + * configuration for interrupt coalesce of queue's interrupt. */ hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX, HNS3_TQP_INTR_GL_DEFAULT); hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX, HNS3_TQP_INTR_GL_DEFAULT); hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT); + hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT); ret = hns3_bind_ring_with_vector(hw, vec, false, HNS3_RING_TYPE_TX, i); @@ -2284,6 +2278,7 @@ hns3_dev_configure(struct rte_eth_dev *dev) uint16_t nb_tx_q = dev->data->nb_tx_queues; struct rte_eth_rss_conf rss_conf; uint16_t mtu; + bool gro_en; int ret; /* @@ -2350,6 +2345,18 @@ hns3_dev_configure(struct rte_eth_dev *dev) if (ret) goto cfg_err; + /* config hardware GRO */ + gro_en = conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false; + ret = hns3_config_gro(hw, gro_en); + if (ret) + goto cfg_err; + + hns->rx_simple_allowed = true; + hns->rx_vec_allowed = true; + hns->tx_simple_allowed = true; + hns->tx_vec_allowed = true; + + hns3_init_rx_ptype_tble(dev); hw->adapter_state = HNS3_NIC_CONFIGURED; return 0; @@ -2454,9 +2461,10 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) info->max_rx_queues = queue_num; info->max_tx_queues = hw->tqps_num; info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */ - info->min_rx_bufsize = hw->rx_buf_len; + info->min_rx_bufsize = HNS3_MIN_BD_BUF_SIZE; info->max_mac_addrs = HNS3_UC_MACADDR_NUM; info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD; + info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE; info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM | @@ -2468,21 +2476,20 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) DEV_RX_OFFLOAD_VLAN_STRIP | DEV_RX_OFFLOAD_VLAN_FILTER | DEV_RX_OFFLOAD_JUMBO_FRAME | - DEV_RX_OFFLOAD_RSS_HASH); - info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE; + DEV_RX_OFFLOAD_RSS_HASH | + DEV_RX_OFFLOAD_TCP_LRO); info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_SCTP_CKSUM | - DEV_TX_OFFLOAD_VLAN_INSERT | - DEV_TX_OFFLOAD_QINQ_INSERT | DEV_TX_OFFLOAD_MULTI_SEGS | DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_GENEVE_TNL_TSO | - info->tx_queue_offload_capa); + DEV_TX_OFFLOAD_MBUF_FAST_FREE | + hns3_txvlan_cap_get(hw)); info->rx_desc_lim = (struct rte_eth_desc_lim) { .nb_max = HNS3_MAX_RING_DESC, @@ -2494,6 +2501,23 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) .nb_max = HNS3_MAX_RING_DESC, .nb_min = HNS3_MIN_RING_DESC, .nb_align = HNS3_ALIGN_RING_DESC, + .nb_seg_max = HNS3_MAX_TSO_BD_PER_PKT, + .nb_mtu_seg_max = HNS3_MAX_NON_TSO_BD_PER_PKT, + }; + + info->default_rxconf = (struct rte_eth_rxconf) { + .rx_free_thresh = HNS3_DEFAULT_RX_FREE_THRESH, + /* + * If there are no available Rx buffer descriptors, incoming + * packets are always dropped by hardware based on hns3 network + * engine. 
+ */ + .rx_drop_en = 1, + .offloads = 0, + }; + info->default_txconf = (struct rte_eth_txconf) { + .tx_rs_thresh = HNS3_DEFAULT_TX_RS_THRESH, + .offloads = 0, }; info->vmdq_queue_num = 0; @@ -2561,6 +2585,7 @@ hns3_dev_link_update(struct rte_eth_dev *eth_dev, case ETH_SPEED_NUM_40G: case ETH_SPEED_NUM_50G: case ETH_SPEED_NUM_100G: + case ETH_SPEED_NUM_200G: new_link.link_speed = mac->link_speed; break; default: @@ -2660,8 +2685,8 @@ hns3_query_pf_resource(struct hns3_hw *hw) pf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT); hw->num_msi = - hns3_get_field(rte_le_to_cpu_16(req->pf_intr_vector_number), - HNS3_VEC_NUM_M, HNS3_VEC_NUM_S); + hns3_get_field(rte_le_to_cpu_16(req->nic_pf_intr_vector_number), + HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S); return 0; } @@ -2790,6 +2815,9 @@ hns3_parse_speed(int speed_cmd, uint32_t *speed) case HNS3_CFG_SPEED_100G: *speed = ETH_SPEED_NUM_100G; break; + case HNS3_CFG_SPEED_200G: + *speed = ETH_SPEED_NUM_200G; + break; default: return -EINVAL; } @@ -2797,6 +2825,105 @@ hns3_parse_speed(int speed_cmd, uint32_t *speed) return 0; } +static void +hns3_set_default_dev_specifications(struct hns3_hw *hw) +{ + hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT; + hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE; + hw->rss_key_size = HNS3_RSS_KEY_SIZE; + hw->max_tm_rate = HNS3_ETHER_MAX_RATE; +} + +static void +hns3_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc) +{ + struct hns3_dev_specs_0_cmd *req0; + + req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data; + + hw->max_non_tso_bd_num = req0->max_non_tso_bd_num; + hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size); + hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size); + hw->max_tm_rate = rte_le_to_cpu_32(req0->max_tm_rate); +} + +static int +hns3_query_dev_specifications(struct hns3_hw *hw) +{ + struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM]; + int ret; + int i; + + for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) { + hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, + true); + desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); + } + hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true); + + ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM); + if (ret) + return ret; + + hns3_parse_dev_specifications(hw, desc); + + return 0; +} + +static int +hns3_get_capability(struct hns3_hw *hw) +{ + struct rte_pci_device *pci_dev; + struct rte_eth_dev *eth_dev; + uint16_t device_id; + uint8_t revision; + int ret; + + eth_dev = &rte_eth_devices[hw->data->port_id]; + pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + device_id = pci_dev->id.device_id; + + if (device_id == HNS3_DEV_ID_25GE_RDMA || + device_id == HNS3_DEV_ID_50GE_RDMA || + device_id == HNS3_DEV_ID_100G_RDMA_MACSEC || + device_id == HNS3_DEV_ID_200G_RDMA) + hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1); + + /* Get PCI revision id */ + ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN, + HNS3_PCI_REVISION_ID); + if (ret != HNS3_PCI_REVISION_ID_LEN) { + PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d", + ret); + return -EIO; + } + hw->revision = revision; + + if (revision < PCI_REVISION_ID_HIP09_A) { + hns3_set_default_dev_specifications(hw); + hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE; + hw->intr.coalesce_mode = HNS3_INTR_COALESCE_NON_QL; + hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US; + hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN; + return 0; + } + + ret = hns3_query_dev_specifications(hw); + if (ret) { + 
PMD_INIT_LOG(ERR, + "failed to query dev specifications, ret = %d", + ret); + return ret; + } + + hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL; + hw->intr.coalesce_mode = HNS3_INTR_COALESCE_QL; + hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US; + hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN; + + return 0; +} + static int hns3_get_board_configuration(struct hns3_hw *hw) { @@ -2811,7 +2938,8 @@ hns3_get_board_configuration(struct hns3_hw *hw) return ret; } - if (cfg.media_type == HNS3_MEDIA_TYPE_COPPER) { + if (cfg.media_type == HNS3_MEDIA_TYPE_COPPER && + !hns3_dev_copper_supported(hw)) { PMD_INIT_LOG(ERR, "media type is copper, not supported."); return -EOPNOTSUPP; } @@ -2819,7 +2947,6 @@ hns3_get_board_configuration(struct hns3_hw *hw) hw->mac.media_type = cfg.media_type; hw->rss_size_max = cfg.rss_size_max; hw->rss_dis_flag = false; - hw->rx_buf_len = cfg.rx_buf_len; memcpy(hw->mac.mac_addr, cfg.mac_addr, RTE_ETHER_ADDR_LEN); hw->mac.phy_addr = cfg.phy_addr; hw->mac.default_addr_setted = false; @@ -2872,6 +2999,13 @@ hns3_get_configuration(struct hns3_hw *hw) return ret; } + /* Get device capability */ + ret = hns3_get_capability(hw); + if (ret) { + PMD_INIT_LOG(ERR, "failed to get device capability: %d.", ret); + return ret; + } + /* Get pf resource */ ret = hns3_query_pf_resource(hw); if (ret) { @@ -2988,6 +3122,10 @@ hns3_cfg_mac_speed_dup_hw(struct hns3_hw *hw, uint32_t speed, uint8_t duplex) hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100G); break; + case ETH_SPEED_NUM_200G: + hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, + HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_200G); + break; default: PMD_INIT_LOG(ERR, "invalid speed (%u)", speed); return -EINVAL; @@ -3158,7 +3296,7 @@ hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc, + pf->dv_buf_size; shared_buf_tc = tc_num * aligned_mps + aligned_mps; - shared_std = roundup(max_t(uint32_t, shared_buf_min, shared_buf_tc), + shared_std = roundup(RTE_MAX(shared_buf_min, shared_buf_tc), HNS3_BUF_SIZE_UNIT); rx_priv = hns3_get_rx_priv_buff_alloced(buf_alloc); @@ -3188,8 +3326,7 @@ hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc, if (tc_num) hi_thrd = hi_thrd / tc_num; - hi_thrd = max_t(uint32_t, hi_thrd, - HNS3_BUF_MUL_BY * aligned_mps); + hi_thrd = RTE_MAX(hi_thrd, HNS3_BUF_MUL_BY * aligned_mps); hi_thrd = rounddown(hi_thrd, HNS3_BUF_SIZE_UNIT); lo_thrd = hi_thrd - aligned_mps / HNS3_BUF_DIV_BY; } else { @@ -4136,7 +4273,15 @@ hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable) hns3_set_bit(loop_en, HNS3_MAC_LINE_LP_B, 0); hns3_set_bit(loop_en, HNS3_MAC_FCS_TX_B, val); hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_B, val); - hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, val); + + /* + * If DEV_RX_OFFLOAD_KEEP_CRC offload is set, MAC will not strip CRC + * when receiving frames. Otherwise, CRC will be stripped. 
+ */ + if (hw->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) + hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, 0); + else + hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, val); hns3_set_bit(loop_en, HNS3_MAC_TX_OVERSIZE_TRUNCATE_B, val); hns3_set_bit(loop_en, HNS3_MAC_RX_OVERSIZE_TRUNCATE_B, val); hns3_set_bit(loop_en, HNS3_MAC_TX_UNDER_MIN_ERR_B, val); @@ -4284,6 +4429,39 @@ err_mac_init: return ret; } +static int +hns3_clear_hw(struct hns3_hw *hw) +{ + struct hns3_cmd_desc desc; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_HW_STATE, false); + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret && ret != -EOPNOTSUPP) + return ret; + + return 0; +} + +static void +hns3_config_all_msix_error(struct hns3_hw *hw, bool enable) +{ + uint32_t val; + + /* + * The new firmware support report more hardware error types by + * msix mode. These errors are defined as RAS errors in hardware + * and belong to a different type from the MSI-x errors processed + * by the network driver. + * + * Network driver should open the new error report on initialition + */ + val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG); + hns3_set_bit(val, HNS3_VECTOR0_ALL_MSIX_ERR_B, enable ? 1 : 0); + hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, val); +} + static int hns3_init_pf(struct rte_eth_dev *eth_dev) { @@ -4314,6 +4492,20 @@ hns3_init_pf(struct rte_eth_dev *eth_dev) goto err_cmd_init; } + /* + * To ensure that the hardware environment is clean during + * initialization, the driver actively clear the hardware environment + * during initialization, including PF and corresponding VFs' vlan, mac, + * flow table configurations, etc. + */ + ret = hns3_clear_hw(hw); + if (ret) { + PMD_INIT_LOG(ERR, "failed to clear hardware: %d", ret); + goto err_cmd_init; + } + + hns3_config_all_msix_error(hw, true); + ret = rte_intr_callback_register(&pci_dev->intr_handle, hns3_interrupt_handler, eth_dev); @@ -4389,6 +4581,7 @@ hns3_uninit_pf(struct rte_eth_dev *eth_dev) hns3_enable_hw_error_intr(hns, false); hns3_rss_uninit(hns); + (void)hns3_config_gro(hw, false); hns3_promisc_uninit(hw); hns3_fdir_filter_uninit(hns); hns3_uninit_umv_space(hw); @@ -4396,6 +4589,7 @@ hns3_uninit_pf(struct rte_eth_dev *eth_dev) rte_intr_disable(&pci_dev->intr_handle); hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler, eth_dev); + hns3_config_all_msix_error(hw, false); hns3_cmd_uninit(hw); hns3_cmd_destroy_queue(hw); hw->io_base = NULL; @@ -4561,6 +4755,7 @@ hns3_dev_start(struct rte_eth_dev *dev) hw->adapter_state = HNS3_NIC_STARTED; rte_spinlock_unlock(&hw->lock); + hns3_rx_scattered_calc(dev); hns3_set_rxtx_function(dev); hns3_mp_req_start_rxtx(dev); rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, dev); @@ -4659,6 +4854,7 @@ hns3_dev_stop(struct rte_eth_dev *dev) hns3_dev_release_mbufs(hns); hw->adapter_state = HNS3_NIC_CONFIGURED; } + hns3_rx_scattered_reset(dev); rte_eal_alarm_cancel(hns3_service_handler, dev); rte_spinlock_unlock(&hw->lock); } @@ -5080,6 +5276,28 @@ hns3_get_reset_level(struct hns3_adapter *hns, uint64_t *levels) return reset_level; } +static void +hns3_record_imp_error(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + uint32_t reg_val; + + reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG); + if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B)) { + hns3_warn(hw, "Detected IMP RD poison!"); + hns3_error_int_stats_add(hns, "IMP_RD_POISON_INT_STS"); + hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B, 0); + hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val); 
+ } + + if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B)) { + hns3_warn(hw, "Detected IMP CMDQ error!"); + hns3_error_int_stats_add(hns, "CMDQ_MEM_ECC_INT_STS"); + hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B, 0); + hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val); + } +} + static int hns3_prepare_reset(struct hns3_adapter *hns) { @@ -5103,6 +5321,7 @@ hns3_prepare_reset(struct hns3_adapter *hns) hw->reset.stats.request_cnt++; break; case HNS3_IMP_RESET: + hns3_record_imp_error(hns); reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG); hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val | BIT(HNS3_VECTOR0_IMP_RESET_INT_B)); @@ -5223,6 +5442,10 @@ hns3_restore_conf(struct hns3_adapter *hns) if (ret) goto err_promisc; + ret = hns3_restore_gro_conf(hw); + if (ret) + goto err_promisc; + if (hns->hw.adapter_state == HNS3_NIC_STARTED) { ret = hns3_do_start(hns, false); if (ret) @@ -5302,6 +5525,7 @@ hns3_reset_service(void *param) } static const struct eth_dev_ops hns3_eth_dev_ops = { + .dev_configure = hns3_dev_configure, .dev_start = hns3_dev_start, .dev_stop = hns3_dev_stop, .dev_close = hns3_dev_close, @@ -5325,7 +5549,10 @@ static const struct eth_dev_ops hns3_eth_dev_ops = { .tx_queue_release = hns3_dev_tx_queue_release, .rx_queue_intr_enable = hns3_dev_rx_queue_intr_enable, .rx_queue_intr_disable = hns3_dev_rx_queue_intr_disable, - .dev_configure = hns3_dev_configure, + .rxq_info_get = hns3_rxq_info_get, + .txq_info_get = hns3_txq_info_get, + .rx_burst_mode_get = hns3_rx_burst_mode_get, + .tx_burst_mode_get = hns3_tx_burst_mode_get, .flow_ctrl_get = hns3_flow_ctrl_get, .flow_ctrl_set = hns3_flow_ctrl_set, .priority_flow_ctrl_set = hns3_priority_flow_ctrl_set, @@ -5361,26 +5588,14 @@ static const struct hns3_reset_ops hns3_reset_ops = { static int hns3_dev_init(struct rte_eth_dev *eth_dev) { - struct rte_device *dev = eth_dev->device; - struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev); struct hns3_adapter *hns = eth_dev->data->dev_private; + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + struct rte_ether_addr *eth_addr; struct hns3_hw *hw = &hns->hw; - uint16_t device_id = pci_dev->id.device_id; - uint8_t revision; int ret; PMD_INIT_FUNC_TRACE(); - /* Get PCI revision id */ - ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN, - HNS3_PCI_REVISION_ID); - if (ret != HNS3_PCI_REVISION_ID_LEN) { - PMD_INIT_LOG(ERR, "Failed to read pci revision id, ret = %d", - ret); - return -EIO; - } - hw->revision = revision; - eth_dev->process_private = (struct hns3_process_private *) rte_zmalloc_socket("hns3_filter_list", sizeof(struct hns3_process_private), @@ -5395,19 +5610,26 @@ hns3_dev_init(struct rte_eth_dev *eth_dev) hns3_set_rxtx_function(eth_dev); eth_dev->dev_ops = &hns3_eth_dev_ops; if (rte_eal_process_type() != RTE_PROC_PRIMARY) { - hns3_mp_init_secondary(); + ret = hns3_mp_init_secondary(); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to init for secondary " + "process, ret = %d", ret); + goto err_mp_init_secondary; + } + hw->secondary_cnt++; return 0; } - hns3_mp_init_primary(); - hw->adapter_state = HNS3_NIC_UNINITIALIZED; - - if (device_id == HNS3_DEV_ID_25GE_RDMA || - device_id == HNS3_DEV_ID_50GE_RDMA || - device_id == HNS3_DEV_ID_100G_RDMA_MACSEC) - hns3_set_bit(hw->flag, HNS3_DEV_SUPPORT_DCB_B, 1); + ret = hns3_mp_init_primary(); + if (ret) { + PMD_INIT_LOG(ERR, + "Failed to init for primary process, ret = %d", + ret); + goto err_mp_init_primary; + } + hw->adapter_state = HNS3_NIC_UNINITIALIZED; hns->is_vf = false; hw->data = eth_dev->data; @@ -5441,6 
+5663,15 @@ hns3_dev_init(struct rte_eth_dev *eth_dev) goto err_rte_zmalloc; } + eth_addr = (struct rte_ether_addr *)hw->mac.mac_addr; + if (!rte_is_valid_assigned_ether_addr(eth_addr)) { + rte_eth_random_addr(hw->mac.mac_addr); + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + (struct rte_ether_addr *)hw->mac.mac_addr); + hns3_warn(hw, "default mac_addr from firmware is an invalid " + "unicast address, using random MAC address %s", + mac_str); + } rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr, ð_dev->data->mac_addrs[0]); @@ -5467,7 +5698,12 @@ err_rte_zmalloc: err_init_pf: rte_free(hw->reset.wait_data); + err_init_reset: + hns3_mp_uninit_primary(); + +err_mp_init_primary: +err_mp_init_secondary: eth_dev->dev_ops = NULL; eth_dev->rx_pkt_burst = NULL; eth_dev->tx_pkt_burst = NULL; @@ -5520,6 +5756,7 @@ static const struct rte_pci_id pci_id_hns3_map[] = { { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE_RDMA) }, { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_50GE_RDMA) }, { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_MACSEC) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_200G_RDMA) }, { .vendor_id = 0, /* sentinel */ }, }; @@ -5533,13 +5770,5 @@ static struct rte_pci_driver rte_hns3_pmd = { RTE_PMD_REGISTER_PCI(net_hns3, rte_hns3_pmd); RTE_PMD_REGISTER_PCI_TABLE(net_hns3, pci_id_hns3_map); RTE_PMD_REGISTER_KMOD_DEP(net_hns3, "* igb_uio | vfio-pci"); - -RTE_INIT(hns3_init_log) -{ - hns3_logtype_init = rte_log_register("pmd.net.hns3.init"); - if (hns3_logtype_init >= 0) - rte_log_set_level(hns3_logtype_init, RTE_LOG_NOTICE); - hns3_logtype_driver = rte_log_register("pmd.net.hns3.driver"); - if (hns3_logtype_driver >= 0) - rte_log_set_level(hns3_logtype_driver, RTE_LOG_NOTICE); -} +RTE_LOG_REGISTER(hns3_logtype_init, pmd.net.hns3.init, NOTICE); +RTE_LOG_REGISTER(hns3_logtype_driver, pmd.net.hns3.driver, NOTICE);
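
For context only, and not part of the patch above: the hunks extend the hns3 PF driver's advertised Rx offloads with DEV_RX_OFFLOAD_TCP_LRO, add default Rx/Tx queue configuration, and make PVID changes refresh the per-queue PVID state (hns3_update_all_queues_pvid_state). A rough sketch of how a DPDK application of this era might exercise those paths follows; the function name, the single queue pair, the 1024 descriptors and PVID 100 are illustrative assumptions, not values taken from the patch.

/*
 * Illustrative sketch only -- not part of the patch above. Assumes the
 * hns3 PF device is already probed and that one Rx/Tx queue is enough.
 */
#include <rte_ethdev.h>

static int
configure_hns3_port(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf = { 0 };
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Request LRO only if the PMD advertises it (added by this diff). */
	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_TCP_LRO;

	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (ret != 0)
		return ret;

	ret = rte_eth_rx_queue_setup(port_id, 0, 1024,
				     rte_eth_dev_socket_id(port_id),
				     NULL, mb_pool);
	if (ret != 0)
		return ret;

	ret = rte_eth_tx_queue_setup(port_id, 0, 1024,
				     rte_eth_dev_socket_id(port_id), NULL);
	if (ret != 0)
		return ret;

	/*
	 * Enable a port-based VLAN (PVID 100 is an arbitrary example).
	 * With this diff the driver also propagates the PVID enable/disable
	 * state change to every Rx/Tx queue before packets are processed.
	 */
	ret = rte_eth_dev_set_vlan_pvid(port_id, 100, 1);
	if (ret != 0)
		return ret;

	return rte_eth_dev_start(port_id);
}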