diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c
index a604a5bb25..f74626be5e 100644
--- a/drivers/net/qede/qede_main.c
+++ b/drivers/net/qede/qede_main.c
@@ -12,8 +12,6 @@
 
 #include "qede_ethdev.h"
 
-static uint8_t npar_tx_switching = 1;
-
 /* Alarm timeout. */
 #define QEDE_ALARM_TIMEOUT_US 100000
 
@@ -21,7 +19,7 @@ static uint8_t npar_tx_switching = 1;
 char fw_file[PATH_MAX];
 
 const char *QEDE_DEFAULT_FIRMWARE =
-        "/lib/firmware/qed/qed_init_values-8.14.6.0.bin";
+        "/lib/firmware/qed/qed_init_values-8.20.0.0.bin";
 
 static void
 qed_update_pf_params(struct ecore_dev *edev, struct ecore_pf_params *params)
@@ -42,15 +40,15 @@ static void qed_init_pci(struct ecore_dev *edev, struct rte_pci_device *pci_dev)
 
 static int
 qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev,
-          enum qed_protocol protocol, uint32_t dp_module,
-          uint8_t dp_level, bool is_vf)
+          uint32_t dp_module, uint8_t dp_level, bool is_vf)
 {
         struct ecore_hw_prepare_params hw_prepare_params;
-        struct qede_dev *qdev = (struct qede_dev *)edev;
         int rc;
 
         ecore_init_struct(edev);
-        qdev->protocol = protocol;
+        edev->drv_type = DRV_ID_DRV_TYPE_LINUX;
+        /* Protocol type is always fixed to PROTOCOL_ETH */
+
         if (is_vf)
                 edev->b_is_vf = true;
 
@@ -74,7 +72,7 @@ qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev,
 
 static int qed_nic_setup(struct ecore_dev *edev)
 {
-        int rc, i;
+        int rc;
 
         rc = ecore_resc_alloc(edev);
         if (rc)
@@ -222,23 +220,33 @@ static void qed_stop_iov_task(struct ecore_dev *edev)
 static int qed_slowpath_start(struct ecore_dev *edev,
                               struct qed_slowpath_params *params)
 {
-        bool allow_npar_tx_switching;
         const uint8_t *data = NULL;
         struct ecore_hwfn *hwfn;
         struct ecore_mcp_drv_version drv_version;
         struct ecore_hw_init_params hw_init_params;
-        struct qede_dev *qdev = (struct qede_dev *)edev;
+        struct ecore_ptt *p_ptt;
         int rc;
 
-#ifdef CONFIG_ECORE_BINARY_FW
         if (IS_PF(edev)) {
+#ifdef CONFIG_ECORE_BINARY_FW
                 rc = qed_load_firmware_data(edev);
                 if (rc) {
                         DP_ERR(edev, "Failed to find fw file %s\n", fw_file);
                         goto err;
                 }
-        }
 #endif
+                hwfn = ECORE_LEADING_HWFN(edev);
+                if (edev->num_hwfns == 1) { /* skip aRFS for 100G device */
+                        p_ptt = ecore_ptt_acquire(hwfn);
+                        if (p_ptt) {
+                                ECORE_LEADING_HWFN(edev)->p_arfs_ptt = p_ptt;
+                        } else {
+                                DP_ERR(edev, "Failed to acquire PTT for flowdir\n");
+                                rc = -ENOMEM;
+                                goto err;
+                        }
+                }
+        }
 
         rc = qed_nic_setup(edev);
         if (rc)
@@ -254,26 +262,26 @@ static int qed_slowpath_start(struct ecore_dev *edev,
                 if (rc) {
                         DP_NOTICE(edev, true,
                                   "Failed to allocate stream memory\n");
-                        goto err2;
+                        goto err1;
                 }
         }
+#endif
 
         qed_start_iov_task(edev);
-#endif
 
 #ifdef CONFIG_ECORE_BINARY_FW
         if (IS_PF(edev))
                 data = (const uint8_t *)edev->firmware + sizeof(u32);
 #endif
 
-        allow_npar_tx_switching = npar_tx_switching ? true : false;
-
         /* Start the slowpath */
         memset(&hw_init_params, 0, sizeof(hw_init_params));
         hw_init_params.b_hw_start = true;
         hw_init_params.int_mode = ECORE_INT_MODE_MSIX;
-        hw_init_params.allow_npar_tx_switch = allow_npar_tx_switching;
+        hw_init_params.allow_npar_tx_switch = true;
         hw_init_params.bin_fw_data = data;
+        hw_init_params.mfw_timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT;
+        hw_init_params.avoid_eng_reset = false;
         rc = ecore_hw_init(edev, &hw_init_params);
         if (rc) {
                 DP_ERR(edev, "ecore_hw_init failed\n");
@@ -295,7 +303,7 @@ static int qed_slowpath_start(struct ecore_dev *edev,
                 if (rc) {
                         DP_NOTICE(edev, true,
                                   "Failed sending drv version command\n");
-                        return rc;
+                        goto err3;
                 }
         }
 
@@ -303,8 +311,14 @@ static int qed_slowpath_start(struct ecore_dev *edev,
 
         return 0;
 
+err3:
         ecore_hw_stop(edev);
 err2:
+        qed_stop_iov_task(edev);
+#ifdef CONFIG_ECORE_ZIPPED_FW
+        qed_free_stream_mem(edev);
+err1:
+#endif
         ecore_resc_free(edev);
 err:
 #ifdef CONFIG_ECORE_BINARY_FW
@@ -323,27 +337,40 @@ static int qed_fill_dev_info(struct ecore_dev *edev,
                   struct qed_dev_info *dev_info)
 {
         struct ecore_ptt *ptt = NULL;
+        struct ecore_tunnel_info *tun = &edev->tunnel;
 
         memset(dev_info, 0, sizeof(struct qed_dev_info));
+
+        if (tun->vxlan.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN &&
+            tun->vxlan.b_mode_enabled)
+                dev_info->vxlan_enable = true;
+
+        if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
+            tun->l2_gre.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN &&
+            tun->ip_gre.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN)
+                dev_info->gre_enable = true;
+
+        if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
+            tun->l2_geneve.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN &&
+            tun->ip_geneve.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN)
+                dev_info->geneve_enable = true;
+
         dev_info->num_hwfns = edev->num_hwfns;
         dev_info->is_mf_default = IS_MF_DEFAULT(&edev->hwfns[0]);
+        dev_info->mtu = ECORE_LEADING_HWFN(edev)->hw_info.mtu;
+
         rte_memcpy(&dev_info->hw_mac, &edev->hwfns[0].hw_info.hw_mac_addr,
                    ETHER_ADDR_LEN);
 
+        dev_info->fw_major = FW_MAJOR_VERSION;
+        dev_info->fw_minor = FW_MINOR_VERSION;
+        dev_info->fw_rev = FW_REVISION_VERSION;
+        dev_info->fw_eng = FW_ENGINEERING_VERSION;
+
         if (IS_PF(edev)) {
-                dev_info->fw_major = FW_MAJOR_VERSION;
-                dev_info->fw_minor = FW_MINOR_VERSION;
-                dev_info->fw_rev = FW_REVISION_VERSION;
-                dev_info->fw_eng = FW_ENGINEERING_VERSION;
                 dev_info->mf_mode = edev->mf_mode;
                 dev_info->tx_switching = false;
-        } else {
-                ecore_vf_get_fw_version(&edev->hwfns[0], &dev_info->fw_major,
-                                        &dev_info->fw_minor, &dev_info->fw_rev,
-                                        &dev_info->fw_eng);
-        }
-
-        if (IS_PF(edev)) {
+
                 ptt = ecore_ptt_acquire(ECORE_LEADING_HWFN(edev));
                 if (ptt) {
                         ecore_mcp_get_mfw_ver(ECORE_LEADING_HWFN(edev), ptt,
@@ -371,7 +398,6 @@ qed_fill_dev_info(struct ecore_dev *edev, struct qed_dev_info *dev_info)
 int
 qed_fill_eth_dev_info(struct ecore_dev *edev, struct qed_dev_eth_info *info)
 {
-        struct qede_dev *qdev = (struct qede_dev *)edev;
         uint8_t queues = 0;
         int i;
 
@@ -420,9 +446,7 @@ qed_fill_eth_dev_info(struct ecore_dev *edev, struct qed_dev_eth_info *info)
         return 0;
 }
 
-static void
-qed_set_id(struct ecore_dev *edev, char name[NAME_SIZE],
-           const char ver_str[NAME_SIZE])
+static void qed_set_name(struct ecore_dev *edev, char name[NAME_SIZE])
 {
         int i;
 
@@ -430,29 +454,18 @@ qed_set_id(struct ecore_dev *edev, char name[NAME_SIZE],
         for_each_hwfn(edev, i) {
                 snprintf(edev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
         }
-        memcpy(edev->ver_str, ver_str, NAME_SIZE);
-        edev->drv_type = DRV_ID_DRV_TYPE_LINUX;
 }
 
 static uint32_t
 qed_sb_init(struct ecore_dev *edev, struct ecore_sb_info *sb_info,
-            void *sb_virt_addr, dma_addr_t sb_phy_addr,
-            uint16_t sb_id, enum qed_sb_type type)
+            void *sb_virt_addr, dma_addr_t sb_phy_addr, uint16_t sb_id)
 {
         struct ecore_hwfn *p_hwfn;
         int hwfn_index;
         uint16_t rel_sb_id;
-        uint8_t n_hwfns;
+        uint8_t n_hwfns = edev->num_hwfns;
         uint32_t rc;
 
-        /* RoCE uses single engine and CMT uses two engines. When using both
-         * we force only a single engine. Storage uses only engine 0 too.
-         */
-        if (type == QED_SB_TYPE_L2_QUEUE)
-                n_hwfns = edev->num_hwfns;
-        else
-                n_hwfns = 1;
-
         hwfn_index = sb_id % n_hwfns;
         p_hwfn = &edev->hwfns[hwfn_index];
         rel_sb_id = sb_id / n_hwfns;
@@ -472,7 +485,6 @@ static void qed_fill_link(struct ecore_hwfn *hwfn,
         struct ecore_mcp_link_params params;
         struct ecore_mcp_link_state link;
         struct ecore_mcp_link_capabilities link_caps;
-        uint32_t media_type;
         uint8_t change = 0;
 
         memset(if_link, 0, sizeof(*if_link));
@@ -620,19 +632,6 @@ static int qed_nic_stop(struct ecore_dev *edev)
         return rc;
 }
 
-static int qed_nic_reset(struct ecore_dev *edev)
-{
-        int rc;
-
-        rc = ecore_hw_reset(edev);
-        if (rc)
-                return rc;
-
-        ecore_resc_free(edev);
-
-        return 0;
-}
-
 static int qed_slowpath_stop(struct ecore_dev *edev)
 {
 #ifdef CONFIG_QED_SRIOV
@@ -651,10 +650,11 @@ static int qed_slowpath_stop(struct ecore_dev *edev)
                 if (IS_QED_ETH_IF(edev))
                         qed_sriov_disable(edev, true);
 #endif
-                qed_nic_stop(edev);
         }
 
-        qed_nic_reset(edev);
+        qed_nic_stop(edev);
+
+        ecore_resc_free(edev);
         qed_stop_iov_task(edev);
 
         return 0;
@@ -714,10 +714,11 @@ const struct qed_common_ops qed_common_ops_pass = {
         INIT_STRUCT_FIELD(probe, &qed_probe),
         INIT_STRUCT_FIELD(update_pf_params, &qed_update_pf_params),
         INIT_STRUCT_FIELD(slowpath_start, &qed_slowpath_start),
-        INIT_STRUCT_FIELD(set_id, &qed_set_id),
+        INIT_STRUCT_FIELD(set_name, &qed_set_name),
         INIT_STRUCT_FIELD(chain_alloc, &ecore_chain_alloc),
         INIT_STRUCT_FIELD(chain_free, &ecore_chain_free),
         INIT_STRUCT_FIELD(sb_init, &qed_sb_init),
+        INIT_STRUCT_FIELD(get_sb_info, &qed_get_sb_info),
         INIT_STRUCT_FIELD(get_link, &qed_get_current_link),
         INIT_STRUCT_FIELD(set_link, &qed_set_link),
         INIT_STRUCT_FIELD(drain, &qed_drain),
@@ -725,3 +726,13 @@ const struct qed_common_ops qed_common_ops_pass = {
         INIT_STRUCT_FIELD(remove, &qed_remove),
         INIT_STRUCT_FIELD(send_drv_state, &qed_send_drv_state),
 };
+
+const struct qed_eth_ops qed_eth_ops_pass = {
+        INIT_STRUCT_FIELD(common, &qed_common_ops_pass),
+        INIT_STRUCT_FIELD(fill_dev_info, &qed_fill_eth_dev_info),
+};
+
+const struct qed_eth_ops *qed_get_eth_ops(void)
+{
+        return &qed_eth_ops_pass;
+}
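
For orientation, a minimal caller-side sketch (not part of the patch) of how the accessor exported in the final hunk can be consumed. It assumes the struct qed_eth_ops and struct qed_dev_eth_info declarations that the driver's headers provide, and the helper name below is hypothetical:

/* Hypothetical usage sketch -- illustrative only, not part of the patch. */
#include "qede_ethdev.h"

static int example_query_eth_info(struct ecore_dev *edev)
{
        const struct qed_eth_ops *ops = qed_get_eth_ops();
        struct qed_dev_eth_info eth_info;

        if (ops == NULL)
                return -EINVAL;

        /* fill_dev_info points at qed_fill_eth_dev_info() above and reports
         * the queue count and related L2 device information for the adapter.
         */
        return ops->fill_dev_info(edev, &eth_info);
}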