X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fqede%2Fqede_main.c;h=326e56f82ca631ad1efd727809a4f19b355e1764;hb=0863dbe396976530eb6e8849878a88c7e01774f3;hp=60655b7e1a06bdb3ec9d9db88373fd2c1660f74e;hpb=301ea2d7147c4c837cd3a88bd734b59c3764623c;p=dpdk.git

diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c
index 60655b7e1a..326e56f82c 100644
--- a/drivers/net/qede/qede_main.c
+++ b/drivers/net/qede/qede_main.c
@@ -21,7 +21,7 @@ static uint8_t npar_tx_switching = 1;
 char fw_file[PATH_MAX];
 
 const char *QEDE_DEFAULT_FIRMWARE =
-	"/lib/firmware/qed/qed_init_values_zipped-8.10.9.0.bin";
+	"/lib/firmware/qed/qed_init_values-8.18.9.0.bin";
 
 static void
 qed_update_pf_params(struct ecore_dev *edev, struct ecore_pf_params *params)
@@ -50,11 +50,12 @@ qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev,
 	int rc;
 
 	ecore_init_struct(edev);
+	edev->drv_type = DRV_ID_DRV_TYPE_LINUX;
 	qdev->protocol = protocol;
-	if (is_vf) {
+
+	if (is_vf)
 		edev->b_is_vf = true;
-		edev->b_hw_channel = true; /* @DPDK */
-	}
+
 	ecore_init_dp(edev, dp_module, dp_level, NULL);
 	qed_init_pci(edev, pci_dev);
 
@@ -62,6 +63,8 @@ qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev,
 	hw_prepare_params.personality = ECORE_PCI_ETH;
 	hw_prepare_params.drv_resc_alloc = false;
 	hw_prepare_params.chk_reg_fifo = false;
+	hw_prepare_params.initiate_pf_flr = true;
+	hw_prepare_params.epoch = (u32)time(NULL);
 	rc = ecore_hw_prepare(edev, &hw_prepare_params);
 	if (rc) {
 		DP_ERR(edev, "hw prepare failed\n");
@@ -137,6 +140,7 @@ static int qed_load_firmware_data(struct ecore_dev *edev)
 
 	if (fstat(fd, &st) < 0) {
 		DP_NOTICE(edev, false, "Can't stat firmware file\n");
+		close(fd);
 		return -1;
 	}
 
@@ -158,9 +162,11 @@ static int qed_load_firmware_data(struct ecore_dev *edev)
 	if (edev->fw_len < 104) {
 		DP_NOTICE(edev, false, "Invalid fw size: %" PRIu64 "\n",
 			  edev->fw_len);
+		close(fd);
 		return -EINVAL;
 	}
 
+	close(fd);
 	return 0;
 }
 #endif
@@ -225,16 +231,12 @@ static int qed_slowpath_start(struct ecore_dev *edev,
 	struct ecore_hw_init_params hw_init_params;
 	struct qede_dev *qdev = (struct qede_dev *)edev;
 	int rc;
-#ifdef QED_ENC_SUPPORTED
-	struct ecore_tunn_start_params tunn_info;
-#endif
 
 #ifdef CONFIG_ECORE_BINARY_FW
 	if (IS_PF(edev)) {
 		rc = qed_load_firmware_data(edev);
 		if (rc) {
-			DP_NOTICE(edev, true,
-				  "Failed to find fw file %s\n", fw_file);
+			DP_ERR(edev, "Failed to find fw file %s\n", fw_file);
 			goto err;
 		}
 	}
@@ -270,22 +272,12 @@ static int qed_slowpath_start(struct ecore_dev *edev,
 
 	/* Start the slowpath */
 	memset(&hw_init_params, 0, sizeof(hw_init_params));
-#ifdef QED_ENC_SUPPORTED
-	memset(&tunn_info, 0, sizeof(tunn_info));
-	tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN |
-	    1 << QED_MODE_L2GRE_TUNN |
-	    1 << QED_MODE_IPGRE_TUNN |
-	    1 << QED_MODE_L2GENEVE_TUNN | 1 << QED_MODE_IPGENEVE_TUNN;
-	tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;
-	tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN;
-	tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;
-	hw_init_params.p_tunn = &tunn_info;
-#endif
 	hw_init_params.b_hw_start = true;
 	hw_init_params.int_mode = ECORE_INT_MODE_MSIX;
 	hw_init_params.allow_npar_tx_switch = allow_npar_tx_switching;
 	hw_init_params.bin_fw_data = data;
-	hw_init_params.epoch = (u32)time(NULL);
+	hw_init_params.mfw_timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT;
+	hw_init_params.avoid_eng_reset = false;
 	rc = ecore_hw_init(edev, &hw_init_params);
 	if (rc) {
 		DP_ERR(edev, "ecore_hw_init failed\n");
@@ -335,10 +327,28 @@ static int
 qed_fill_dev_info(struct ecore_dev *edev, struct qed_dev_info *dev_info)
 {
 	struct ecore_ptt *ptt = NULL;
+	struct ecore_tunnel_info *tun = &edev->tunnel;
 
 	memset(dev_info, 0, sizeof(struct qed_dev_info));
+
+	if (tun->vxlan.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN &&
+	    tun->vxlan.b_mode_enabled)
+		dev_info->vxlan_enable = true;
+
+	if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
+	    tun->l2_gre.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN &&
+	    tun->ip_gre.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN)
+		dev_info->gre_enable = true;
+
+	if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
+	    tun->l2_geneve.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN &&
+	    tun->ip_geneve.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN)
+		dev_info->geneve_enable = true;
+
 	dev_info->num_hwfns = edev->num_hwfns;
 	dev_info->is_mf_default = IS_MF_DEFAULT(&edev->hwfns[0]);
+	dev_info->mtu = ECORE_LEADING_HWFN(edev)->hw_info.mtu;
+
 	rte_memcpy(&dev_info->hw_mac, &edev->hwfns[0].hw_info.hw_mac_addr,
 		   ETHER_ADDR_LEN);
 
@@ -349,13 +359,7 @@ qed_fill_dev_info(struct ecore_dev *edev, struct qed_dev_info *dev_info)
 		dev_info->fw_eng = FW_ENGINEERING_VERSION;
 		dev_info->mf_mode = edev->mf_mode;
 		dev_info->tx_switching = false;
-	} else {
-		ecore_vf_get_fw_version(&edev->hwfns[0], &dev_info->fw_major,
-					&dev_info->fw_minor, &dev_info->fw_rev,
-					&dev_info->fw_eng);
-	}
 
-	if (IS_PF(edev)) {
 		ptt = ecore_ptt_acquire(ECORE_LEADING_HWFN(edev));
 		if (ptt) {
 			ecore_mcp_get_mfw_ver(ECORE_LEADING_HWFN(edev), ptt,
@@ -373,6 +377,10 @@ qed_fill_dev_info(struct ecore_dev *edev, struct qed_dev_info *dev_info)
 			ecore_ptt_release(ECORE_LEADING_HWFN(edev), ptt);
 		}
 	} else {
+		ecore_vf_get_fw_version(&edev->hwfns[0], &dev_info->fw_major,
+					&dev_info->fw_minor, &dev_info->fw_rev,
+					&dev_info->fw_eng);
+
 		ecore_mcp_get_mfw_ver(ECORE_LEADING_HWFN(edev), ptt,
 				      &dev_info->mfw_rev, NULL);
 	}
@@ -384,6 +392,7 @@ int
 qed_fill_eth_dev_info(struct ecore_dev *edev, struct qed_dev_eth_info *info)
 {
 	struct qede_dev *qdev = (struct qede_dev *)edev;
+	uint8_t queues = 0;
 	int i;
 
 	memset(info, 0, sizeof(*info));
@@ -391,23 +400,36 @@ qed_fill_eth_dev_info(struct ecore_dev *edev, struct qed_dev_eth_info *info)
 	info->num_tc = 1 /* @@@TBD aelior MULTI_COS */;
 
 	if (IS_PF(edev)) {
+		int max_vf_vlan_filters = 0;
+
 		info->num_queues = 0;
 		for_each_hwfn(edev, i)
 			info->num_queues += FEAT_NUM(&edev->hwfns[i],
						     ECORE_PF_L2_QUE);
 
-		info->num_vlan_filters = RESC_NUM(&edev->hwfns[0], ECORE_VLAN);
+		if (edev->p_iov_info)
+			max_vf_vlan_filters = edev->p_iov_info->total_vfs *
					      ECORE_ETH_VF_NUM_VLAN_FILTERS;
+		info->num_vlan_filters = RESC_NUM(&edev->hwfns[0], ECORE_VLAN) -
					 max_vf_vlan_filters;
 
 		rte_memcpy(&info->port_mac, &edev->hwfns[0].hw_info.hw_mac_addr,
			   ETHER_ADDR_LEN);
 	} else {
-		ecore_vf_get_num_rxqs(&edev->hwfns[0], &info->num_queues);
+		ecore_vf_get_num_rxqs(ECORE_LEADING_HWFN(edev),
				      &info->num_queues);
+		if (edev->num_hwfns > 1) {
+			ecore_vf_get_num_rxqs(&edev->hwfns[1], &queues);
+			info->num_queues += queues;
+		}
 
 		ecore_vf_get_num_vlan_filters(&edev->hwfns[0],
-					      &info->num_vlan_filters);
+					      (u8 *)&info->num_vlan_filters);
 
 		ecore_vf_get_port_mac(&edev->hwfns[0],
				      (uint8_t *)&info->port_mac);
+
+		info->is_legacy = ecore_vf_get_pre_fp_hsi(&edev->hwfns[0]);
 	}
 
 	qed_fill_dev_info(edev, &info->common);
@@ -418,9 +440,7 @@ qed_fill_eth_dev_info(struct ecore_dev *edev, struct qed_dev_eth_info *info)
 	return 0;
 }
 
-static void
-qed_set_id(struct ecore_dev *edev, char name[NAME_SIZE],
-	   const char ver_str[VER_SIZE])
+static void qed_set_name(struct ecore_dev *edev, char name[NAME_SIZE])
 {
 	int i;
 
@@ -428,8 +448,6 @@ static void qed_set_id(struct ecore_dev *edev, char name[NAME_SIZE],
 	for_each_hwfn(edev, i) {
 		snprintf(edev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
 	}
-	rte_memcpy(edev->ver_str, ver_str, VER_SIZE);
-	edev->drv_type = DRV_ID_DRV_TYPE_LINUX;
 }
 
 static uint32_t
@@ -498,6 +516,9 @@ static void qed_fill_link(struct ecore_hwfn *hwfn,
 
 	if_link->duplex = QEDE_DUPLEX_FULL;
 
+	/* Fill up the native advertised speed cap mask */
+	if_link->adv_speed = params.speed.advertised_speeds;
+
 	if (params.speed.autoneg)
 		if_link->supported_caps |= QEDE_SUPPORTED_AUTONEG;
 
@@ -663,11 +684,53 @@ static void qed_remove(struct ecore_dev *edev)
 	ecore_hw_remove(edev);
 }
 
+static int qed_send_drv_state(struct ecore_dev *edev, bool active)
+{
+	struct ecore_hwfn *hwfn = ECORE_LEADING_HWFN(edev);
+	struct ecore_ptt *ptt;
+	int status = 0;
+
+	ptt = ecore_ptt_acquire(hwfn);
+	if (!ptt)
+		return -EAGAIN;
+
+	status = ecore_mcp_ov_update_driver_state(hwfn, ptt, active ?
						  ECORE_OV_DRIVER_STATE_ACTIVE :
						  ECORE_OV_DRIVER_STATE_DISABLED);
+
+	ecore_ptt_release(hwfn, ptt);
+
+	return status;
+}
+
+static int qed_get_sb_info(struct ecore_dev *edev, struct ecore_sb_info *sb,
			   u16 qid, struct ecore_sb_info_dbg *sb_dbg)
+{
+	struct ecore_hwfn *hwfn = &edev->hwfns[qid % edev->num_hwfns];
+	struct ecore_ptt *ptt;
+	int rc;
+
+	if (IS_VF(edev))
+		return -EINVAL;
+
+	ptt = ecore_ptt_acquire(hwfn);
+	if (!ptt) {
+		DP_NOTICE(hwfn, true, "Can't acquire PTT\n");
+		return -EAGAIN;
+	}
+
+	memset(sb_dbg, 0, sizeof(*sb_dbg));
+	rc = ecore_int_get_sb_dbg(hwfn, ptt, sb, sb_dbg);
+
+	ecore_ptt_release(hwfn, ptt);
+	return rc;
+}
+
 const struct qed_common_ops qed_common_ops_pass = {
 	INIT_STRUCT_FIELD(probe, &qed_probe),
 	INIT_STRUCT_FIELD(update_pf_params, &qed_update_pf_params),
 	INIT_STRUCT_FIELD(slowpath_start, &qed_slowpath_start),
-	INIT_STRUCT_FIELD(set_id, &qed_set_id),
+	INIT_STRUCT_FIELD(set_name, &qed_set_name),
 	INIT_STRUCT_FIELD(chain_alloc, &ecore_chain_alloc),
 	INIT_STRUCT_FIELD(chain_free, &ecore_chain_free),
 	INIT_STRUCT_FIELD(sb_init, &qed_sb_init),
@@ -676,4 +739,5 @@ const struct qed_common_ops qed_common_ops_pass = {
 	INIT_STRUCT_FIELD(drain, &qed_drain),
 	INIT_STRUCT_FIELD(slowpath_stop, &qed_slowpath_stop),
 	INIT_STRUCT_FIELD(remove, &qed_remove),
+	INIT_STRUCT_FIELD(send_drv_state, &qed_send_drv_state),
 };
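
The new send_drv_state op wired into qed_common_ops_pass above is intended to let the ethdev layer report driver state to the management FW. A minimal sketch of a caller, assuming the usual qede PMD plumbing where qdev->ops->common points at qed_common_ops_pass and struct qede_dev embeds the ecore_dev as qdev->edev; qede_notify_drv_state() is a hypothetical helper and is not part of this diff:

/* Hypothetical caller (not in this commit): report driver state through the
 * send_drv_state op added above. The qdev->ops->common and qdev->edev
 * plumbing is assumed from the rest of the qede PMD.
 */
static void qede_notify_drv_state(struct qede_dev *qdev, bool active)
{
	struct ecore_dev *edev = &qdev->edev;

	/* Resolves to qed_send_drv_state(): acquire a PTT on the leading hwfn
	 * and call ecore_mcp_ov_update_driver_state(ACTIVE or DISABLED).
	 */
	if (qdev->ops->common->send_drv_state(edev, active))
		DP_NOTICE(edev, false, "Failed to update driver state\n");
}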