diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 3e98df33c7..c850e5eb14 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -35,7 +35,7 @@
 #define HNS3_DEFAULT_PORT_CONF_QUEUES_NUM 1
 
 #define HNS3_SERVICE_INTERVAL		1000000 /* us */
-#define HNS3_INVLID_PVID		0xFFFF
+#define HNS3_INVALID_PVID		0xFFFF
 
 #define HNS3_FILTER_TYPE_VF		0
 #define HNS3_FILTER_TYPE_PORT		1
@@ -63,6 +63,11 @@
 #define HNS3_RESET_WAIT_MS	100
 #define HNS3_RESET_WAIT_CNT	200
 
+/* FEC mode order defined in HNS3 hardware */
+#define HNS3_HW_FEC_MODE_NOFEC	0
+#define HNS3_HW_FEC_MODE_BASER	1
+#define HNS3_HW_FEC_MODE_RS	2
+
 enum hns3_evt_cause {
 	HNS3_VECTOR0_EVENT_RST,
 	HNS3_VECTOR0_EVENT_MBX,
@@ -70,6 +75,34 @@ enum hns3_evt_cause {
 	HNS3_VECTOR0_EVENT_OTHER,
 };
 
+static const struct rte_eth_fec_capa speed_fec_capa_tbl[] = {
+	{ ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
+			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },
+
+	{ ETH_SPEED_NUM_25G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
+			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
+			     RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
+
+	{ ETH_SPEED_NUM_40G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
+			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },
+
+	{ ETH_SPEED_NUM_50G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
+			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
+			     RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
+
+	{ ETH_SPEED_NUM_100G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+			      RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
+			      RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
+
+	{ ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+			      RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
+			      RTE_ETH_FEC_MODE_CAPA_MASK(RS) }
+};
+
 static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns,
 						  uint64_t *levels);
 static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
@@ -81,6 +114,8 @@ static int hns3_add_mc_addr(struct hns3_hw *hw,
 			    struct rte_ether_addr *mac_addr);
 static int hns3_remove_mc_addr(struct hns3_hw *hw,
 			       struct rte_ether_addr *mac_addr);
+static int hns3_restore_fec(struct hns3_hw *hw);
+static int hns3_query_dev_fec_info(struct rte_eth_dev *dev);
 
 static void
 hns3_pf_disable_irq0(struct hns3_hw *hw)
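The speed_fec_capa_tbl added above keys FEC capability bitmasks by link speed: in the ethdev API, RTE_ETH_FEC_MODE_CAPA_MASK(X) expands to a one-bit mask derived from enum rte_eth_fec_mode, so each entry simply ORs together the modes that speed supports. A minimal sketch of how such a table can be searched (illustrative only, not part of the patch; fec_capa_lookup is a hypothetical helper):

	#include <stddef.h>
	#include <stdint.h>
	#include <rte_ethdev.h>

	/* Return the FEC capability mask for a link speed, or 0 when the
	 * speed has no entry, mirroring how the driver treats unknown speeds.
	 */
	static uint32_t
	fec_capa_lookup(const struct rte_eth_fec_capa *tbl, size_t n,
			uint32_t speed)
	{
		size_t i;

		for (i = 0; i < n; i++)
			if (tbl[i].speed == speed)
				return tbl[i].capa;
		return 0;
	}

For example, fec_capa_lookup(speed_fec_capa_tbl, RTE_DIM(speed_fec_capa_tbl), ETH_SPEED_NUM_100G) would yield NOFEC | AUTO | RS.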
@@ -346,8 +381,9 @@ hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
 	int ret = 0;
 
 	/*
-	 * When vlan filter is enabled, hardware regards vlan id 0 as the entry
-	 * for normal packet, deleting vlan id 0 is not allowed.
+	 * When vlan filter is enabled, hardware regards packets without vlan
+	 * as packets with vlan 0. So, to receive packets without vlan, vlan id
+	 * 0 is not allowed to be removed by rte_eth_dev_vlan_filter.
 	 */
 	if (on == 0 && vlan_id == 0)
 		return 0;
@@ -364,7 +400,7 @@ hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
 		writen_to_tbl = true;
 	}
 
-	if (ret == 0 && vlan_id) {
+	if (ret == 0) {
 		if (on)
 			hns3_add_dev_vlan_table(hns, vlan_id, writen_to_tbl);
 		else
@@ -743,16 +779,6 @@ hns3_vlan_txvlan_cfg(struct hns3_adapter *hns, uint16_t port_base_vlan_state,
 	return ret;
 }
 
-static void
-hns3_store_port_base_vlan_info(struct hns3_adapter *hns, uint16_t pvid, int on)
-{
-	struct hns3_hw *hw = &hns->hw;
-
-	hw->port_base_vlan_cfg.state = on ?
-		HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE;
-
-	hw->port_base_vlan_cfg.pvid = pvid;
-}
 
 static void
 hns3_rm_all_vlan_table(struct hns3_adapter *hns, bool is_del_list)
@@ -761,10 +787,10 @@ hns3_rm_all_vlan_table(struct hns3_adapter *hns, bool is_del_list)
 	struct hns3_pf *pf = &hns->pf;
 
 	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
-		if (vlan_entry->hd_tbl_status)
+		if (vlan_entry->hd_tbl_status) {
 			hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 0);
-
-		vlan_entry->hd_tbl_status = false;
+			vlan_entry->hd_tbl_status = false;
+		}
 	}
 
 	if (is_del_list) {
@@ -784,10 +810,10 @@ hns3_add_all_vlan_table(struct hns3_adapter *hns)
 	struct hns3_pf *pf = &hns->pf;
 
 	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
-		if (!vlan_entry->hd_tbl_status)
+		if (!vlan_entry->hd_tbl_status) {
 			hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 1);
-
-		vlan_entry->hd_tbl_status = true;
+			vlan_entry->hd_tbl_status = true;
+		}
 	}
 }
 
@@ -798,7 +824,7 @@ hns3_remove_all_vlan_table(struct hns3_adapter *hns)
 	int ret;
 
 	hns3_rm_all_vlan_table(hns, true);
-	if (hw->port_base_vlan_cfg.pvid != HNS3_INVLID_PVID) {
+	if (hw->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID) {
 		ret = hns3_set_port_vlan_filter(hns,
 						hw->port_base_vlan_cfg.pvid, 0);
 		if (ret) {
@@ -811,40 +837,41 @@ hns3_remove_all_vlan_table(struct hns3_adapter *hns)
 
 static int
 hns3_update_vlan_filter_entries(struct hns3_adapter *hns,
-				uint16_t port_base_vlan_state,
-				uint16_t new_pvid, uint16_t old_pvid)
+				uint16_t port_base_vlan_state, uint16_t new_pvid)
 {
 	struct hns3_hw *hw = &hns->hw;
-	int ret = 0;
+	uint16_t old_pvid;
+	int ret;
 
 	if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_ENABLE) {
-		if (old_pvid != HNS3_INVLID_PVID && old_pvid != 0) {
+		old_pvid = hw->port_base_vlan_cfg.pvid;
+		if (old_pvid != HNS3_INVALID_PVID) {
 			ret = hns3_set_port_vlan_filter(hns, old_pvid, 0);
 			if (ret) {
-				hns3_err(hw,
-					 "Failed to clear clear old pvid filter, ret =%d",
-					 ret);
+				hns3_err(hw, "failed to remove old pvid %u, "
+					 "ret = %d", old_pvid, ret);
 				return ret;
 			}
 		}
 
 		hns3_rm_all_vlan_table(hns, false);
-		return hns3_set_port_vlan_filter(hns, new_pvid, 1);
-	}
-
-	if (new_pvid != 0) {
+		ret = hns3_set_port_vlan_filter(hns, new_pvid, 1);
+		if (ret) {
+			hns3_err(hw, "failed to add new pvid %u, ret = %d",
+				 new_pvid, ret);
+			return ret;
+		}
+	} else {
 		ret = hns3_set_port_vlan_filter(hns, new_pvid, 0);
 		if (ret) {
-			hns3_err(hw, "Failed to set port vlan filter, ret =%d",
-				 ret);
+			hns3_err(hw, "failed to remove pvid %u, ret = %d",
+				 new_pvid, ret);
 			return ret;
 		}
-	}
 
-	if (new_pvid == hw->port_base_vlan_cfg.pvid)
 		hns3_add_all_vlan_table(hns);
-
-	return ret;
+	}
+	return 0;
 }
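hns3_update_vlan_filter_entries is reached through the PVID path of the public ethdev API; from an application the flow starts at rte_eth_dev_set_vlan_pvid(). A minimal usage sketch (illustrative, not part of the patch; port_id and the VLAN value 100 are assumptions):

	#include <rte_ethdev.h>

	/* Enable port-based VLAN 100 on a port, then disable it again.
	 * On hns3, enabling installs the new pvid in the VLAN filter and
	 * prunes the other entries; disabling restores the full table.
	 */
	static int
	configure_pvid(uint16_t port_id)
	{
		int ret;

		ret = rte_eth_dev_set_vlan_pvid(port_id, 100, 1);
		if (ret != 0)
			return ret;

		return rte_eth_dev_set_vlan_pvid(port_id, 100, 0);
	}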
@@ -883,11 +910,10 @@ hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, int on)
 {
 	struct hns3_hw *hw = &hns->hw;
 	uint16_t port_base_vlan_state;
-	uint16_t old_pvid;
 	int ret;
 
 	if (on == 0 && pvid != hw->port_base_vlan_cfg.pvid) {
-		if (hw->port_base_vlan_cfg.pvid != HNS3_INVLID_PVID)
+		if (hw->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID)
 			hns3_warn(hw, "Invalid operation! As current pvid set "
 				  "is %u, disable pvid %u is invalid",
 				  hw->port_base_vlan_cfg.pvid, pvid);
@@ -910,19 +936,18 @@ hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, int on)
 		return ret;
 	}
 
-	if (pvid == HNS3_INVLID_PVID)
+	if (pvid == HNS3_INVALID_PVID)
 		goto out;
 
-	old_pvid = hw->port_base_vlan_cfg.pvid;
-	ret = hns3_update_vlan_filter_entries(hns, port_base_vlan_state, pvid,
-					      old_pvid);
+	ret = hns3_update_vlan_filter_entries(hns, port_base_vlan_state, pvid);
 	if (ret) {
-		hns3_err(hw, "Failed to update vlan filter entries, ret =%d",
+		hns3_err(hw, "failed to update vlan filter entries, ret = %d",
 			 ret);
 		return ret;
 	}
 
 out:
-	hns3_store_port_base_vlan_info(hns, pvid, on);
+	hw->port_base_vlan_cfg.state = port_base_vlan_state;
+	hw->port_base_vlan_cfg.pvid = on ? pvid : HNS3_INVALID_PVID;
 	return ret;
 }
 
@@ -968,20 +993,19 @@ hns3_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
 	return 0;
 }
 
-static void
-init_port_base_vlan_info(struct hns3_hw *hw)
-{
-	hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
-	hw->port_base_vlan_cfg.pvid = HNS3_INVLID_PVID;
-}
-
 static int
 hns3_default_vlan_config(struct hns3_adapter *hns)
 {
 	struct hns3_hw *hw = &hns->hw;
 	int ret;
 
-	ret = hns3_set_port_vlan_filter(hns, 0, 1);
+	/*
+	 * When vlan filter is enabled, hardware regards packets without vlan
+	 * as packets with vlan 0. Therefore, if vlan 0 is not in the vlan
+	 * table, packets without vlan won't be received. So, add vlan 0 as
+	 * the default vlan.
+	 */
+	ret = hns3_vlan_filter_configure(hns, 0, 1);
 	if (ret)
 		hns3_err(hw, "default vlan 0 config failed, ret =%d", ret);
 	return ret;
@@ -1000,8 +1024,10 @@ hns3_init_vlan_config(struct hns3_adapter *hns)
 	 * ensure that the hardware configuration remains unchanged before and
 	 * after reset.
 	 */
-	if (rte_atomic16_read(&hw->reset.resetting) == 0)
-		init_port_base_vlan_info(hw);
+	if (rte_atomic16_read(&hw->reset.resetting) == 0) {
+		hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
+		hw->port_base_vlan_cfg.pvid = HNS3_INVALID_PVID;
+	}
 
 	ret = hns3_vlan_filter_init(hns);
 	if (ret) {
@@ -1023,7 +1049,7 @@ hns3_init_vlan_config(struct hns3_adapter *hns)
 	 * and hns3_restore_vlan_conf later.
 	 */
 	if (rte_atomic16_read(&hw->reset.resetting) == 0) {
-		ret = hns3_vlan_pvid_configure(hns, HNS3_INVLID_PVID, 0);
+		ret = hns3_vlan_pvid_configure(hns, HNS3_INVALID_PVID, 0);
 		if (ret) {
 			hns3_err(hw, "pvid set fail in pf, ret =%d", ret);
 			return ret;
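The comment added to hns3_default_vlan_config explains why VLAN 0 must stay in the filter table: with filtering on, untagged packets are treated as VLAN 0. At the API level this is also why removing VLAN 0 via rte_eth_dev_vlan_filter() is silently ignored (see hns3_vlan_filter_configure above). Illustrative usage, not part of the patch:

	#include <rte_ethdev.h>

	static int
	setup_vlan_filter(uint16_t port_id)
	{
		int ret;

		/* Accept tagged traffic on VLAN 10. */
		ret = rte_eth_dev_vlan_filter(port_id, 10, 1);
		if (ret != 0)
			return ret;

		/* Requesting removal of VLAN 0 returns success but is a
		 * no-op on hns3, so untagged packets keep flowing. */
		return rte_eth_dev_vlan_filter(port_id, 0, 0);
	}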
@@ -2302,20 +2328,25 @@ hns3_dev_configure(struct rte_eth_dev *dev)
 	bool gro_en;
 	int ret;
 
+	hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);
+
 	/*
-	 * Hardware does not support individually enable/disable/reset the Tx or
-	 * Rx queue in hns3 network engine. Driver must enable/disable/reset Tx
-	 * and Rx queues at the same time. When the numbers of Tx queues
-	 * allocated by upper applications are not equal to the numbers of Rx
-	 * queues, driver needs to setup fake Tx or Rx queues to adjust numbers
-	 * of Tx/Rx queues. otherwise, network engine can not work as usual. But
-	 * these fake queues are imperceptible, and can not be used by upper
-	 * applications.
+	 * Some versions of hardware network engine do not support
+	 * individually enable/disable/reset the Tx or Rx queue. These devices
+	 * must enable/disable/reset Tx and Rx queues at the same time. When the
+	 * numbers of Tx queues allocated by upper applications are not equal to
+	 * the numbers of Rx queues, driver needs to setup fake Tx or Rx queues
+	 * to adjust numbers of Tx/Rx queues. Otherwise, network engine can not
+	 * work as usual. But these fake queues are imperceptible, and can not
+	 * be used by upper applications.
 	 */
-	ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
-	if (ret) {
-		hns3_err(hw, "Failed to set rx/tx fake queues: %d", ret);
-		return ret;
+	if (!hns3_dev_indep_txrx_supported(hw)) {
+		ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
+		if (ret) {
+			hns3_err(hw, "fail to set Rx/Tx fake queues, ret = %d.",
+				 ret);
+			return ret;
+		}
 	}
 
 	hw->adapter_state = HNS3_NIC_CONFIGURING;
@@ -2335,6 +2366,7 @@ hns3_dev_configure(struct rte_eth_dev *dev)
 	if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
 		conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
 		rss_conf = conf->rx_adv_conf.rss_conf;
+		hw->rss_dis_flag = false;
 		if (rss_conf.rss_key == NULL) {
 			rss_conf.rss_key = rss_cfg->key;
 			rss_conf.rss_key_len = HNS3_RSS_KEY_SIZE;
@@ -2512,6 +2544,10 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 				 DEV_TX_OFFLOAD_MBUF_FAST_FREE |
 				 hns3_txvlan_cap_get(hw));
 
+	if (hns3_dev_indep_txrx_supported(hw))
+		info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
+				 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+
 	info->rx_desc_lim = (struct rte_eth_desc_lim) {
 		.nb_max = HNS3_MAX_RING_DESC,
 		.nb_min = HNS3_MIN_RING_DESC,
@@ -2523,7 +2559,7 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 		.nb_min = HNS3_MIN_RING_DESC,
 		.nb_align = HNS3_ALIGN_RING_DESC,
 		.nb_seg_max = HNS3_MAX_TSO_BD_PER_PKT,
-		.nb_mtu_seg_max = HNS3_MAX_NON_TSO_BD_PER_PKT,
+		.nb_mtu_seg_max = hw->max_non_tso_bd_num,
 	};
 
 	info->default_rxconf = (struct rte_eth_rxconf) {
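The new dev_capa bits advertise that queues can be set up after the port is started on engines with independent Tx/Rx queue control. An application should gate deferred queue setup on these flags; a sketch (illustrative only; port_id is an assumption):

	#include <rte_ethdev.h>

	/* Return 1 if Rx queues may be set up while the port is running,
	 * 0 if not, or a negative errno from the info query.
	 */
	static int
	can_setup_rxq_at_runtime(uint16_t port_id)
	{
		struct rte_eth_dev_info dev_info;
		int ret;

		ret = rte_eth_dev_info_get(port_id, &dev_info);
		if (ret != 0)
			return ret;

		return (dev_info.dev_capa &
			RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP) != 0;
	}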
@@ -2667,6 +2703,49 @@ hns3_query_function_status(struct hns3_hw *hw)
 	return hns3_parse_func_status(hw, req);
 }
 
+static int
+hns3_get_pf_max_tqp_num(struct hns3_hw *hw)
+{
+	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+	struct hns3_pf *pf = &hns->pf;
+
+	if (pf->tqp_config_mode == HNS3_FLEX_MAX_TQP_NUM_MODE) {
+		/*
+		 * The total_tqps_num obtained from firmware is maximum tqp
+		 * numbers of this port, which should be used for PF and VFs.
+		 * There is no need for pf to have so many tqp numbers in
+		 * most cases. RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF,
+		 * coming from config file, is assigned to maximum queue number
+		 * for the PF of this port by user. So users can modify the
+		 * maximum queue number of PF according to their own application
+		 * scenarios, which is more flexible to use. In addition, many
+		 * memories can be saved due to allocating queue statistics
+		 * room according to the actual number of queues required. The
+		 * maximum queue number of PF for network engine with
+		 * revision_id greater than 0x30 is assigned by config file.
+		 */
+		if (RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF <= 0) {
+			hns3_err(hw, "RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF(%d) "
+				 "must be greater than 0.",
+				 RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF);
+			return -EINVAL;
+		}
+
+		hw->tqps_num = RTE_MIN(RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF,
+				       hw->total_tqps_num);
+	} else {
+		/*
+		 * Due to the limitation on the number of PF interrupts
+		 * available, the maximum queue number assigned to PF on
+		 * the network engine with revision_id 0x21 is 64.
+		 */
+		hw->tqps_num = RTE_MIN(hw->total_tqps_num,
+				       HNS3_MAX_TQP_NUM_HIP08_PF);
+	}
+
+	return 0;
+}
+
 static int
 hns3_query_pf_resource(struct hns3_hw *hw)
 {
@@ -2684,9 +2763,13 @@ hns3_query_pf_resource(struct hns3_hw *hw)
 	}
 
 	req = (struct hns3_pf_res_cmd *)desc.data;
-	hw->total_tqps_num = rte_le_to_cpu_16(req->tqp_num);
+	hw->total_tqps_num = rte_le_to_cpu_16(req->tqp_num) +
+			     rte_le_to_cpu_16(req->ext_tqp_num);
+	ret = hns3_get_pf_max_tqp_num(hw);
+	if (ret)
+		return ret;
+
 	pf->pkt_buf_size = rte_le_to_cpu_16(req->buf_size) << HNS3_BUF_UNIT_S;
-	hw->tqps_num = RTE_MIN(hw->total_tqps_num, HNS3_MAX_TQP_NUM_PER_FUNC);
 	pf->func_num = rte_le_to_cpu_16(req->pf_own_fun_number);
 
 	if (req->tx_buf_size)
@@ -2717,6 +2800,7 @@ hns3_parse_cfg(struct hns3_cfg *cfg, struct hns3_cmd_desc *desc)
 {
 	struct hns3_cfg_param_cmd *req;
 	uint64_t mac_addr_tmp_high;
+	uint8_t ext_rss_size_max;
 	uint64_t mac_addr_tmp;
 	uint32_t i;
 
@@ -2769,6 +2853,21 @@ hns3_parse_cfg(struct hns3_cfg *cfg, struct hns3_cmd_desc *desc)
 					       HNS3_CFG_UMV_TBL_SPACE_S);
 		if (!cfg->umv_space)
 			cfg->umv_space = HNS3_DEFAULT_UMV_SPACE_PER_PF;
+
+		ext_rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[2]),
+						  HNS3_CFG_EXT_RSS_SIZE_M,
+						  HNS3_CFG_EXT_RSS_SIZE_S);
+
+		/*
+		 * Field ext_rss_size_max obtained from firmware will be more flexible
+		 * for future changes and expansions, which is an exponent of 2, instead
+		 * of reading out directly. If this field is not zero, hns3 PF PMD
+		 * driver uses it as rss_size_max under one TC. Device, whose revision
+		 * id is greater than or equal to PCI_REVISION_ID_HIP09_A, obtains the
+		 * maximum number of queues supported under a TC through this field.
+		 */
+		if (ext_rss_size_max)
+			cfg->rss_size_max = 1U << ext_rss_size_max;
 	}
 
 /* hns3_get_board_cfg: query the static parameter from NCL_config file in flash
@@ -2894,7 +2993,9 @@ hns3_query_dev_specifications(struct hns3_hw *hw)
 static int
 hns3_get_capability(struct hns3_hw *hw)
 {
+	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
 	struct rte_pci_device *pci_dev;
+	struct hns3_pf *pf = &hns->pf;
 	struct rte_eth_dev *eth_dev;
 	uint16_t device_id;
 	uint8_t revision;
@@ -2910,6 +3011,13 @@ hns3_get_capability(struct hns3_hw *hw)
 	    device_id == HNS3_DEV_ID_200G_RDMA)
 		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1);
 
+	ret = hns3_query_dev_fec_info(eth_dev);
+	if (ret) {
+		PMD_INIT_LOG(ERR,
+			     "failed to query FEC information, ret = %d", ret);
+		return ret;
+	}
+
 	/* Get PCI revision id */
 	ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN,
 				  HNS3_PCI_REVISION_ID);
@@ -2925,8 +3033,10 @@ hns3_get_capability(struct hns3_hw *hw)
 		hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE;
 		hw->intr.coalesce_mode = HNS3_INTR_COALESCE_NON_QL;
 		hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US;
+		hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM;
 		hw->vlan_mode = HNS3_SW_SHIFT_AND_DISCARD_MODE;
 		hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN;
+		pf->tqp_config_mode = HNS3_FIXED_MAX_TQP_NUM_MODE;
 		return 0;
 	}
 
@@ -2941,8 +3051,10 @@ hns3_get_capability(struct hns3_hw *hw)
 	hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL;
 	hw->intr.coalesce_mode = HNS3_INTR_COALESCE_QL;
 	hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US;
+	hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM;
 	hw->vlan_mode = HNS3_HW_SHIFT_AND_DISCARD_MODE;
 	hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN;
+	pf->tqp_config_mode = HNS3_FLEX_MAX_TQP_NUM_MODE;
 
 	return 0;
 }
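Because ext_rss_size_max is carried as an exponent of 2 rather than a raw count, a single byte from firmware can describe very large queue counts. A worked decode matching the `1U << ext_rss_size_max` line above (the value 7 is only an example, not a real firmware reading):

	uint8_t ext_rss_size_max = 7;                    /* as read from firmware */
	uint32_t rss_size_max = 1U << ext_rss_size_max;  /* 2^7 = 128 queues per TC */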
@@ -3038,7 +3150,7 @@ hns3_get_configuration(struct hns3_hw *hw)
 
 	ret = hns3_get_board_configuration(hw);
 	if (ret)
-		PMD_INIT_LOG(ERR, "Failed to get board configuration: %d", ret);
+		PMD_INIT_LOG(ERR, "failed to get board configuration: %d", ret);
 
 	return ret;
 }
@@ -3071,29 +3183,18 @@ hns3_map_tqps_to_func(struct hns3_hw *hw, uint16_t func_id, uint16_t tqp_pid,
 static int
 hns3_map_tqp(struct hns3_hw *hw)
 {
-	uint16_t tqps_num = hw->total_tqps_num;
-	uint16_t func_id;
-	uint16_t tqp_id;
-	bool is_pf;
-	int num;
 	int ret;
 	int i;
 
 	/*
-	 * In current version VF is not supported when PF is driven by DPDK
-	 * driver, so we allocate tqps to PF as much as possible.
+	 * In current version, VF is not supported when PF is driven by DPDK
+	 * driver, so we assign total tqps_num tqps allocated to this port
+	 * to PF.
 	 */
-	tqp_id = 0;
-	num = DIV_ROUND_UP(hw->total_tqps_num, HNS3_MAX_TQP_NUM_PER_FUNC);
-	for (func_id = HNS3_PF_FUNC_ID; func_id < num; func_id++) {
-		is_pf = func_id == HNS3_PF_FUNC_ID ? true : false;
-		for (i = 0;
-		     i < HNS3_MAX_TQP_NUM_PER_FUNC && tqp_id < tqps_num; i++) {
-			ret = hns3_map_tqps_to_func(hw, func_id, tqp_id++, i,
-						    is_pf);
-			if (ret)
-				return ret;
-		}
+	for (i = 0; i < hw->total_tqps_num; i++) {
+		ret = hns3_map_tqps_to_func(hw, HNS3_PF_FUNC_ID, i, i, true);
+		if (ret)
+			return ret;
 	}
 
 	return 0;
@@ -4548,17 +4649,21 @@ hns3_init_pf(struct rte_eth_dev *eth_dev)
 		goto err_get_config;
 	}
 
+	ret = hns3_tqp_stats_init(hw);
+	if (ret)
+		goto err_get_config;
+
 	ret = hns3_init_hardware(hns);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Failed to init hardware: %d", ret);
-		goto err_get_config;
+		goto err_init_hw;
 	}
 
 	/* Initialize flow director filter list & hash */
 	ret = hns3_fdir_filter_init(hns);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Failed to alloc hashmap for fdir: %d", ret);
-		goto err_hw_init;
+		goto err_fdir;
 	}
 
 	hns3_set_default_rss_args(hw);
@@ -4567,16 +4672,17 @@ hns3_init_pf(struct rte_eth_dev *eth_dev)
 	if (ret) {
 		PMD_INIT_LOG(ERR, "fail to enable hw error interrupts: %d",
 			     ret);
-		goto err_fdir;
+		goto err_enable_intr;
 	}
 
 	return 0;
 
-err_fdir:
+err_enable_intr:
 	hns3_fdir_filter_uninit(hns);
-err_hw_init:
+err_fdir:
 	hns3_uninit_umv_space(hw);
-
+err_init_hw:
+	hns3_tqp_stats_uninit(hw);
 err_get_config:
 	hns3_pf_disable_irq0(hw);
 	rte_intr_disable(&pci_dev->intr_handle);
@@ -4608,6 +4714,7 @@ hns3_uninit_pf(struct rte_eth_dev *eth_dev)
 	hns3_promisc_uninit(hw);
 	hns3_fdir_filter_uninit(hns);
 	hns3_uninit_umv_space(hw);
+	hns3_tqp_stats_uninit(hw);
 	hns3_pf_disable_irq0(hw);
 	rte_intr_disable(&pci_dev->intr_handle);
 	hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler,
@@ -4628,23 +4735,22 @@ hns3_do_start(struct hns3_adapter *hns, bool reset_queue)
 	if (ret)
 		return ret;
 
-	/* Enable queues */
-	ret = hns3_start_queues(hns, reset_queue);
+	ret = hns3_init_queues(hns, reset_queue);
 	if (ret) {
-		PMD_INIT_LOG(ERR, "Failed to start queues: %d", ret);
+		PMD_INIT_LOG(ERR, "failed to init queues, ret = %d.", ret);
 		return ret;
 	}
 
-	/* Enable MAC */
 	ret = hns3_cfg_mac_mode(hw, true);
 	if (ret) {
-		PMD_INIT_LOG(ERR, "Failed to enable MAC: %d", ret);
+		PMD_INIT_LOG(ERR, "failed to enable MAC, ret = %d", ret);
 		goto err_config_mac_mode;
 	}
 
 	return 0;
 
 err_config_mac_mode:
-	hns3_stop_queues(hns, true);
+	hns3_dev_release_mbufs(hns);
+	hns3_reset_all_tqps(hns);
 	return ret;
 }
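The relabelled error path in hns3_init_pf follows the usual C goto-unwind pattern: each label undoes exactly the steps that succeeded before the failure, in reverse order, and adding hns3_tqp_stats_init requires a matching err_init_hw rung. A generic sketch of the idiom (hypothetical init_a/init_b/init_c helpers, not driver code):

	/* Stubs standing in for real init/uninit steps. */
	static int init_a(void), init_b(void), init_c(void);
	static void uninit_a(void), uninit_b(void);

	static int
	init_all(void)
	{
		int ret;

		ret = init_a();
		if (ret)
			return ret;

		ret = init_b();
		if (ret)
			goto err_b;	/* undo only init_a */

		ret = init_c();
		if (ret)
			goto err_c;	/* undo init_b, then init_a */

		return 0;

	err_c:
		uninit_b();
	err_b:
		uninit_a();
		return ret;
	}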
@@ -4775,6 +4881,32 @@ hns3_dev_start(struct rte_eth_dev *dev)
 		return ret;
 	}
 
+	/*
+	 * There are three registers used to control the status of a TQP
+	 * (contains a pair of Tx queue and Rx queue) in the new version network
+	 * engine. One is used to control the enabling of Tx queue, the other is
+	 * used to control the enabling of Rx queue, and the last is the master
+	 * switch used to control the enabling of the tqp. The Tx register and
+	 * TQP register must be enabled at the same time to enable a Tx queue.
+	 * The same applies to the Rx queue. For the older network engine, this
+	 * function only refreshes the enabled flag, and it is used to update the
+	 * status of queue in the dpdk framework.
+	 */
+	ret = hns3_start_all_txqs(dev);
+	if (ret) {
+		hw->adapter_state = HNS3_NIC_CONFIGURED;
+		rte_spinlock_unlock(&hw->lock);
+		return ret;
+	}
+
+	ret = hns3_start_all_rxqs(dev);
+	if (ret) {
+		hns3_stop_all_txqs(dev);
+		hw->adapter_state = HNS3_NIC_CONFIGURED;
+		rte_spinlock_unlock(&hw->lock);
+		return ret;
+	}
+
 	hw->adapter_state = HNS3_NIC_STARTED;
 	rte_spinlock_unlock(&hw->lock);
 
@@ -4787,11 +4919,12 @@ hns3_dev_start(struct rte_eth_dev *dev)
 	/* Enable interrupt of all rx queues before enabling queues */
 	hns3_dev_all_rx_queue_intr_enable(hw, true);
+
 	/*
-	 * When finished the initialization, enable queues to receive/transmit
-	 * packets.
+	 * After finishing the initialization, enable tqps to receive/transmit
+	 * packets and refresh all queue status.
 	 */
-	hns3_enable_all_queues(hw, true);
+	hns3_start_tqps(hw);
 
 	hns3_info(hw, "hns3 dev start successful!");
 	return 0;
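With the per-queue control described in the comment above, applications can start and stop individual queues through the ethdev API once the port is running; on older engines the PMD only updates the software state. Illustrative usage, not part of the patch (assumes the port is started and queue 0 is configured):

	#include <rte_ethdev.h>

	static int
	restart_rx_queue0(uint16_t port_id)
	{
		int ret;

		ret = rte_eth_dev_rx_queue_stop(port_id, 0);
		if (ret != 0)
			return ret;

		return rte_eth_dev_rx_queue_start(port_id, 0);
	}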
@@ -4801,7 +4934,6 @@ static int
 hns3_do_stop(struct hns3_adapter *hns)
 {
 	struct hns3_hw *hw = &hns->hw;
-	bool reset_queue;
 	int ret;
 
 	ret = hns3_cfg_mac_mode(hw, false);
@@ -4811,11 +4943,15 @@ hns3_do_stop(struct hns3_adapter *hns)
 
 	if (rte_atomic16_read(&hw->reset.disable_cmd) == 0) {
 		hns3_configure_all_mac_addr(hns, true);
-		reset_queue = true;
-	} else
-		reset_queue = false;
+		ret = hns3_reset_all_tqps(hns);
+		if (ret) {
+			hns3_err(hw, "failed to reset all queues ret = %d.",
+				 ret);
+			return ret;
+		}
+	}
 	hw->mac.default_addr_setted = false;
-	return hns3_stop_queues(hns, reset_queue);
+	return 0;
 }
 
 static void
@@ -4861,6 +4997,7 @@ hns3_dev_stop(struct rte_eth_dev *dev)
 	struct hns3_hw *hw = &hns->hw;
 
 	PMD_INIT_FUNC_TRACE();
+	dev->data->dev_started = 0;
 
 	hw->adapter_state = HNS3_NIC_STOPPING;
 	hns3_set_rxtx_function(dev);
@@ -4872,6 +5009,7 @@ hns3_dev_stop(struct rte_eth_dev *dev)
 
 	rte_spinlock_lock(&hw->lock);
 	if (rte_atomic16_read(&hw->reset.resetting) == 0) {
+		hns3_stop_tqps(hw);
 		hns3_do_stop(hns);
 		hns3_unmap_rx_interrupt(dev);
 		hns3_dev_release_mbufs(hns);
@@ -4882,7 +5020,7 @@ hns3_dev_stop(struct rte_eth_dev *dev)
 	rte_spinlock_unlock(&hw->lock);
 }
 
-static void
+static int
 hns3_dev_close(struct rte_eth_dev *eth_dev)
 {
 	struct hns3_adapter *hns = eth_dev->data->dev_private;
@@ -4891,7 +5029,7 @@ hns3_dev_close(struct rte_eth_dev *eth_dev)
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
 		rte_free(eth_dev->process_private);
 		eth_dev->process_private = NULL;
-		return;
+		return 0;
 	}
 
 	if (hw->adapter_state == HNS3_NIC_STARTED)
@@ -4911,6 +5049,8 @@ hns3_dev_close(struct rte_eth_dev *eth_dev)
 	eth_dev->process_private = NULL;
 	hns3_mp_uninit_primary();
 	hns3_warn(hw, "Close port %d finished", hw->data->port_id);
+
+	return 0;
 }
 
 static int
@@ -5109,7 +5249,7 @@ hns3_reinit_dev(struct hns3_adapter *hns)
 		return ret;
 	}
 
-	ret = hns3_reset_all_queues(hns);
+	ret = hns3_reset_all_tqps(hns);
 	if (ret) {
 		hns3_err(hw, "Failed to reset all queues: %d", ret);
 		return ret;
@@ -5387,6 +5527,7 @@ hns3_stop_service(struct hns3_adapter *hns)
 	rte_spinlock_lock(&hw->lock);
 	if (hns->hw.adapter_state == HNS3_NIC_STARTED ||
 	    hw->adapter_state == HNS3_NIC_STOPPING) {
+		hns3_enable_all_queues(hw, false);
 		hns3_do_stop(hns);
 		hw->reset.mbuf_deferred_free = true;
 	} else
@@ -5469,6 +5610,10 @@ hns3_restore_conf(struct hns3_adapter *hns)
 	if (ret)
 		goto err_promisc;
 
+	ret = hns3_restore_fec(hw);
+	if (ret)
+		goto err_promisc;
+
 	if (hns->hw.adapter_state == HNS3_NIC_STARTED) {
 		ret = hns3_do_start(hns, false);
 		if (ret)
@@ -5547,6 +5692,313 @@ hns3_reset_service(void *param)
 	hns3_msix_process(hns, reset_level);
 }
 
+static unsigned int
+hns3_get_speed_capa_num(uint16_t device_id)
+{
+	unsigned int num;
+
+	switch (device_id) {
+	case HNS3_DEV_ID_25GE:
+	case HNS3_DEV_ID_25GE_RDMA:
+		num = 2;
+		break;
+	case HNS3_DEV_ID_100G_RDMA_MACSEC:
+	case HNS3_DEV_ID_200G_RDMA:
+		num = 1;
+		break;
+	default:
+		num = 0;
+		break;
+	}
+
+	return num;
+}
+
+static int
+hns3_get_speed_fec_capa(struct rte_eth_fec_capa *speed_fec_capa,
+			uint16_t device_id)
+{
+	switch (device_id) {
+	case HNS3_DEV_ID_25GE:
+	/* fallthrough */
+	case HNS3_DEV_ID_25GE_RDMA:
+		speed_fec_capa[0].speed = speed_fec_capa_tbl[1].speed;
+		speed_fec_capa[0].capa = speed_fec_capa_tbl[1].capa;
+
+		/* In HNS3 device, the 25G NIC is compatible with 10G rate */
+		speed_fec_capa[1].speed = speed_fec_capa_tbl[0].speed;
+		speed_fec_capa[1].capa = speed_fec_capa_tbl[0].capa;
+		break;
+	case HNS3_DEV_ID_100G_RDMA_MACSEC:
+		speed_fec_capa[0].speed = speed_fec_capa_tbl[4].speed;
+		speed_fec_capa[0].capa = speed_fec_capa_tbl[4].capa;
+		break;
+	case HNS3_DEV_ID_200G_RDMA:
+		speed_fec_capa[0].speed = speed_fec_capa_tbl[5].speed;
+		speed_fec_capa[0].capa = speed_fec_capa_tbl[5].capa;
+		break;
+	default:
+		return -ENOTSUP;
+	}
+
+	return 0;
+}
+
+static int
+hns3_fec_get_capability(struct rte_eth_dev *dev,
+			struct rte_eth_fec_capa *speed_fec_capa,
+			unsigned int num)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	uint16_t device_id = pci_dev->id.device_id;
+	unsigned int capa_num;
+	int ret;
+
+	capa_num = hns3_get_speed_capa_num(device_id);
+	if (capa_num == 0) {
+		hns3_err(hw, "device(0x%x) is not supported by hns3 PMD",
+			 device_id);
+		return -ENOTSUP;
+	}
+
+	if (speed_fec_capa == NULL || num < capa_num)
+		return capa_num;
+
+	ret = hns3_get_speed_fec_capa(speed_fec_capa, device_id);
+	if (ret)
+		return -ENOTSUP;
+
+	return capa_num;
+}
+
+static int
+get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state)
+{
+	struct hns3_config_fec_cmd *req;
+	struct hns3_cmd_desc desc;
+	int ret;
+
+	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, true);
+	req = (struct hns3_config_fec_cmd *)desc.data;
+	ret = hns3_cmd_send(hw, &desc, 1);
+	if (ret) {
+		hns3_err(hw, "get current fec auto state failed, ret = %d",
+			 ret);
+		return ret;
+	}
+
+	*state = req->fec_mode & (1U << HNS3_MAC_CFG_FEC_AUTO_EN_B);
+	return 0;
+}
+
+static int
+hns3_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa)
+{
+#define QUERY_ACTIVE_SPEED	1
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct hns3_sfp_speed_cmd *resp;
+	uint32_t tmp_fec_capa;
+	uint8_t auto_state;
+	struct hns3_cmd_desc desc;
+	int ret;
+
+	/*
+	 * If link is down and AUTO is enabled, AUTO is returned, otherwise,
+	 * configured FEC mode is returned.
+	 * If link is up, current FEC mode is returned.
+	 */
+	if (hw->mac.link_status == ETH_LINK_DOWN) {
+		ret = get_current_fec_auto_state(hw, &auto_state);
+		if (ret)
+			return ret;
+
+		if (auto_state == 0x1) {
+			*fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(AUTO);
+			return 0;
+		}
+	}
+
+	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_SFP_GET_SPEED, true);
+	resp = (struct hns3_sfp_speed_cmd *)desc.data;
+	resp->query_type = QUERY_ACTIVE_SPEED;
+
+	ret = hns3_cmd_send(hw, &desc, 1);
+	if (ret == -EOPNOTSUPP) {
+		hns3_err(hw, "IMP do not support get FEC, ret = %d", ret);
+		return ret;
+	} else if (ret) {
+		hns3_err(hw, "get FEC failed, ret = %d", ret);
+		return ret;
+	}
+
+	/*
+	 * FEC mode order defined in hns3 hardware is inconsistent with
+	 * that defined in the ethdev library. So the sequence needs
+	 * to be converted.
+	 */
+	switch (resp->active_fec) {
+	case HNS3_HW_FEC_MODE_NOFEC:
+		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
+		break;
+	case HNS3_HW_FEC_MODE_BASER:
+		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
+		break;
+	case HNS3_HW_FEC_MODE_RS:
+		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(RS);
+		break;
+	default:
+		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
+		break;
+	}
+
+	*fec_capa = tmp_fec_capa;
+	return 0;
+}
+
+static int
+hns3_set_fec_hw(struct hns3_hw *hw, uint32_t mode)
+{
+	struct hns3_config_fec_cmd *req;
+	struct hns3_cmd_desc desc;
+	int ret;
+
+	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, false);
+
+	req = (struct hns3_config_fec_cmd *)desc.data;
+	switch (mode) {
+	case RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC):
+		hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
+			       HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_OFF);
+		break;
+	case RTE_ETH_FEC_MODE_CAPA_MASK(BASER):
+		hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
+			       HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_BASER);
+		break;
+	case RTE_ETH_FEC_MODE_CAPA_MASK(RS):
+		hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
+			       HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_RS);
+		break;
+	case RTE_ETH_FEC_MODE_CAPA_MASK(AUTO):
+		hns3_set_bit(req->fec_mode, HNS3_MAC_CFG_FEC_AUTO_EN_B, 1);
+		break;
+	default:
+		return 0;
+	}
+	ret = hns3_cmd_send(hw, &desc, 1);
+	if (ret)
+		hns3_err(hw, "set fec mode failed, ret = %d", ret);
+
+	return ret;
+}
+
+static uint32_t
+get_current_speed_fec_cap(struct hns3_hw *hw, struct rte_eth_fec_capa *fec_capa)
+{
+	struct hns3_mac *mac = &hw->mac;
+	uint32_t cur_capa;
+
+	switch (mac->link_speed) {
+	case ETH_SPEED_NUM_10G:
+		cur_capa = fec_capa[1].capa;
+		break;
+	case ETH_SPEED_NUM_25G:
+	case ETH_SPEED_NUM_100G:
+	case ETH_SPEED_NUM_200G:
+		cur_capa = fec_capa[0].capa;
+		break;
+	default:
+		cur_capa = 0;
+		break;
+	}
+
+	return cur_capa;
+}
+
+static bool
+is_fec_mode_one_bit_set(uint32_t mode)
+{
+	int cnt = 0;
+	uint8_t i;
+
+	for (i = 0; i < sizeof(mode); i++)
+		if (mode >> i & 0x1)
+			cnt++;
+
+	return cnt == 1 ? true : false;
+}
+
+static int
+hns3_fec_set(struct rte_eth_dev *dev, uint32_t mode)
+{
+#define FEC_CAPA_NUM 2
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
+	struct hns3_pf *pf = &hns->pf;
+
+	struct rte_eth_fec_capa fec_capa[FEC_CAPA_NUM];
+	uint32_t cur_capa;
+	uint32_t num = FEC_CAPA_NUM;
+	int ret;
+
+	ret = hns3_fec_get_capability(dev, fec_capa, num);
+	if (ret < 0)
+		return ret;
+
+	/* HNS3 PMD driver only supports one bit set mode, e.g. 0x1, 0x4 */
+	if (!is_fec_mode_one_bit_set(mode))
+		hns3_err(hw, "FEC mode(0x%x) not supported in HNS3 PMD,"
+			 "FEC mode should be only one bit set", mode);
+
+	/*
+	 * Check whether the configured mode is within the FEC capability.
+	 * If not, the configured mode will not be supported.
+	 */
+	cur_capa = get_current_speed_fec_cap(hw, fec_capa);
+	if (!(cur_capa & mode)) {
+		hns3_err(hw, "unsupported FEC mode = 0x%x", mode);
+		return -EINVAL;
+	}
+
+	ret = hns3_set_fec_hw(hw, mode);
+	if (ret)
+		return ret;
+
+	pf->fec_mode = mode;
+	return 0;
+}
+
+static int
+hns3_restore_fec(struct hns3_hw *hw)
+{
+	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+	struct hns3_pf *pf = &hns->pf;
+	uint32_t mode = pf->fec_mode;
+	int ret;
+
+	ret = hns3_set_fec_hw(hw, mode);
+	if (ret)
+		hns3_err(hw, "restore fec mode(0x%x) failed, ret = %d",
+			 mode, ret);
+
+	return ret;
+}
+
+static int
+hns3_query_dev_fec_info(struct rte_eth_dev *dev)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
+	struct hns3_pf *pf = &hns->pf;
+	int ret;
+
+	ret = hns3_fec_get(dev, &pf->fec_mode);
+	if (ret)
+		hns3_err(hw, "query device FEC info failed, ret = %d", ret);
+
+	return ret;
+}
+
 static const struct eth_dev_ops hns3_eth_dev_ops = {
 	.dev_configure = hns3_dev_configure,
 	.dev_start = hns3_dev_start,
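hns3_fec_get_capability implements the ethdev two-call convention: invoked with a NULL buffer, or one that is too small, it returns the required number of entries so callers can size the array first. A sketch from the application side (illustrative only; print_fec_capa_count is a hypothetical helper):

	#include <errno.h>
	#include <stdlib.h>
	#include <rte_ethdev.h>

	static int
	query_fec_capa(uint16_t port_id)
	{
		struct rte_eth_fec_capa *capa;
		int n;

		/* First call: NULL buffer, returns the entry count needed. */
		n = rte_eth_fec_get_capability(port_id, NULL, 0);
		if (n <= 0)
			return n;

		capa = calloc((size_t)n, sizeof(*capa));
		if (capa == NULL)
			return -ENOMEM;

		/* Second call: fills n entries with speed/capability pairs. */
		n = rte_eth_fec_get_capability(port_id, capa, (unsigned int)n);
		free(capa);
		return n;
	}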
@@ -5570,6 +6022,10 @@ static const struct eth_dev_ops hns3_eth_dev_ops = {
 	.tx_queue_setup = hns3_tx_queue_setup,
 	.rx_queue_release = hns3_dev_rx_queue_release,
 	.tx_queue_release = hns3_dev_tx_queue_release,
+	.rx_queue_start = hns3_dev_rx_queue_start,
+	.rx_queue_stop = hns3_dev_rx_queue_stop,
+	.tx_queue_start = hns3_dev_tx_queue_start,
+	.tx_queue_stop = hns3_dev_tx_queue_stop,
 	.rx_queue_intr_enable = hns3_dev_rx_queue_intr_enable,
 	.rx_queue_intr_disable = hns3_dev_rx_queue_intr_disable,
 	.rxq_info_get = hns3_rxq_info_get,
@@ -5596,6 +6052,9 @@ static const struct eth_dev_ops hns3_eth_dev_ops = {
 	.get_reg = hns3_get_regs,
 	.get_dcb_info = hns3_get_dcb_info,
 	.dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
+	.fec_get_capability = hns3_fec_get_capability,
+	.fec_get = hns3_fec_get,
+	.fec_set = hns3_fec_set,
 };
 
 static const struct hns3_reset_ops hns3_reset_ops = {
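With the three fec_* ops registered above, the feature is reachable end to end through rte_eth_fec_get_capability(), rte_eth_fec_get() and rte_eth_fec_set(). Note that hns3_fec_set expects a single-bit mask; is_fec_mode_one_bit_set counts only sizeof(mode) = 4 bit positions, which happens to cover the four defined FEC capability bits. A sketch of the set/get round trip (experimental API in this DPDK release; port_id is an assumption):

	#include <rte_ethdev.h>

	static int
	force_rs_fec(uint16_t port_id)
	{
		uint32_t fec_capa;
		int ret;

		ret = rte_eth_fec_set(port_id, RTE_ETH_FEC_MODE_CAPA_MASK(RS));
		if (ret != 0)
			return ret;

		/* Read back the mode currently reported by the hardware. */
		return rte_eth_fec_get(port_id, &fec_capa);
	}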
@@ -5699,11 +6158,6 @@ hns3_dev_init(struct rte_eth_dev *eth_dev)
 			     &eth_dev->data->mac_addrs[0]);
 
 	hw->adapter_state = HNS3_NIC_INITIALIZED;
-	/*
-	 * Pass the information to the rte_eth_dev_close() that it should also
-	 * release the private port resources.
-	 */
-	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
 
 	if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_PENDING) {
 		hns3_err(hw, "Reschedule reset service after dev_init");
@@ -5747,10 +6201,6 @@ hns3_dev_uninit(struct rte_eth_dev *eth_dev)
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return -EPERM;
 
-	eth_dev->dev_ops = NULL;
-	eth_dev->rx_pkt_burst = NULL;
-	eth_dev->tx_pkt_burst = NULL;
-	eth_dev->tx_pkt_prepare = NULL;
 	if (hw->adapter_state < HNS3_NIC_CLOSING)
 		hns3_dev_close(eth_dev);
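The last two hunks track an ethdev behavior change: close now always releases the port's private resources, so the RTE_ETH_DEV_CLOSE_REMOVE flag is dropped and hns3_dev_close reports errors. From the application side the teardown reduces to a single call (a sketch assuming the contemporaneous ethdev API in which rte_eth_dev_close() returns int, as the dev_close op conversion above reflects):

	#include <rte_ethdev.h>

	static int
	shutdown_port(uint16_t port_id)
	{
		/* Close both stops the device if needed and frees the
		 * port's private resources in this DPDK release. */
		return rte_eth_dev_close(port_id);
	}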