diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c
index 1ec6451df5..0279709cd1 100644
--- a/drivers/net/qede/base/ecore_sriov.c
+++ b/drivers/net/qede/base/ecore_sriov.c
@@ -27,6 +27,12 @@
 #include "ecore_init_fw_funcs.h"
 #include "ecore_sp_commands.h"

+static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
+ u8 opcode,
+ __le16 echo,
+ union event_ring_data *data,
+ u8 fw_return_code);
+
 const char *ecore_channel_tlvs_string[] = {
 "CHANNEL_TLV_NONE", /* ends tlv sequence */
 "CHANNEL_TLV_ACQUIRE",
@@ -54,11 +60,11 @@ const char *ecore_channel_tlvs_string[] = {
 "CHANNEL_TLV_UPDATE_TUNN_PARAM",
 "CHANNEL_TLV_COALESCE_UPDATE",
 "CHANNEL_TLV_QID",
+ "CHANNEL_TLV_COALESCE_READ",
 "CHANNEL_TLV_MAX"
 };

-static u8 ecore_vf_calculate_legacy(struct ecore_hwfn *p_hwfn,
- struct ecore_vf_info *p_vf)
+static u8 ecore_vf_calculate_legacy(struct ecore_vf_info *p_vf)
 {
 u8 legacy = 0;

@@ -210,9 +216,7 @@ struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
 }

 static struct ecore_queue_cid *
-ecore_iov_get_vf_rx_queue_cid(struct ecore_hwfn *p_hwfn,
- struct ecore_vf_info *p_vf,
- struct ecore_vf_queue *p_queue)
+ecore_iov_get_vf_rx_queue_cid(struct ecore_vf_queue *p_queue)
 {
 int i;

@@ -231,8 +235,7 @@ enum ecore_iov_validate_q_mode {
 ECORE_IOV_VALIDATE_Q_DISABLE,
 };

-static bool ecore_iov_validate_queue_mode(struct ecore_hwfn *p_hwfn,
- struct ecore_vf_info *p_vf,
+static bool ecore_iov_validate_queue_mode(struct ecore_vf_info *p_vf,
 u16 qid,
 enum ecore_iov_validate_q_mode mode,
 bool b_is_tx)
@@ -274,8 +277,7 @@ static bool ecore_iov_validate_rxq(struct ecore_hwfn *p_hwfn,
 return false;
 }

- return ecore_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid,
- mode, false);
+ return ecore_iov_validate_queue_mode(p_vf, rx_qid, mode, false);
 }

 static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
@@ -291,8 +293,7 @@ static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
 return false;
 }

- return ecore_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid,
- mode, true);
+ return ecore_iov_validate_queue_mode(p_vf, tx_qid, mode, true);
 }

 static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
@@ -314,13 +315,12 @@ static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
 }

 /* Is there at least 1 queue open? 
*/ -static bool ecore_iov_validate_active_rxq(struct ecore_hwfn *p_hwfn, - struct ecore_vf_info *p_vf) +static bool ecore_iov_validate_active_rxq(struct ecore_vf_info *p_vf) { u8 i; for (i = 0; i < p_vf->num_rxqs; i++) - if (ecore_iov_validate_queue_mode(p_hwfn, p_vf, i, + if (ecore_iov_validate_queue_mode(p_vf, i, ECORE_IOV_VALIDATE_Q_ENABLE, false)) return true; @@ -328,13 +328,12 @@ static bool ecore_iov_validate_active_rxq(struct ecore_hwfn *p_hwfn, return false; } -static bool ecore_iov_validate_active_txq(struct ecore_hwfn *p_hwfn, - struct ecore_vf_info *p_vf) +static bool ecore_iov_validate_active_txq(struct ecore_vf_info *p_vf) { u8 i; for (i = 0; i < p_vf->num_txqs; i++) - if (ecore_iov_validate_queue_mode(p_hwfn, p_vf, i, + if (ecore_iov_validate_queue_mode(p_vf, i, ECORE_IOV_VALIDATE_Q_ENABLE, true)) return true; @@ -591,17 +590,19 @@ enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn) p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov)); if (!p_sriov) { - DP_NOTICE(p_hwfn, true, - "Failed to allocate `struct ecore_sriov'\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sriov'\n"); return ECORE_NOMEM; } p_hwfn->pf_iov_info = p_sriov; + ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON, + ecore_sriov_eqe_event); + return ecore_iov_allocate_vfdb(p_hwfn); } -void ecore_iov_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) +void ecore_iov_setup(struct ecore_hwfn *p_hwfn) { if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn)) return; @@ -611,6 +612,8 @@ void ecore_iov_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) void ecore_iov_free(struct ecore_hwfn *p_hwfn) { + ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON); + if (IS_PF_SRIOV_ALLOC(p_hwfn)) { ecore_iov_free_vfdb(p_hwfn); OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info); @@ -644,7 +647,7 @@ enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn) p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL, sizeof(*p_dev->p_iov_info)); if (!p_dev->p_iov_info) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Can't support IOV due to lack of memory\n"); return ECORE_NOMEM; } @@ -818,12 +821,53 @@ static void ecore_iov_vf_igu_set_int(struct ecore_hwfn *p_hwfn, ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); } +static enum _ecore_status_t +ecore_iov_enable_vf_access_msix(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u8 abs_vf_id, + u8 num_sbs) +{ + u8 current_max = 0; + int i; + + /* If client overrides this, don't do anything */ + if (p_hwfn->p_dev->b_dont_override_vf_msix) + return ECORE_SUCCESS; + + /* For AH onward, configuration is per-PF. Find maximum of all + * the currently enabled child VFs, and set the number to be that. + */ + if (!ECORE_IS_BB(p_hwfn->p_dev)) { + ecore_for_each_vf(p_hwfn, i) { + struct ecore_vf_info *p_vf; + + p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)i, true); + if (!p_vf) + continue; + + current_max = OSAL_MAX_T(u8, current_max, + p_vf->num_sbs); + } + } + + if (num_sbs > current_max) + return ecore_mcp_config_vf_msix(p_hwfn, p_ptt, + abs_vf_id, num_sbs); + + return ECORE_SUCCESS; +} + static enum _ecore_status_t ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct ecore_vf_info *vf) { u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN; - enum _ecore_status_t rc; + enum _ecore_status_t rc = ECORE_SUCCESS; + + /* It's possible VF was previously considered malicious - + * clear the indication even if we're only going to disable VF. 
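+ * Do it before the to_disable early-return below, so even a VF that
+ * is only being disabled sheds the stale flag.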
+ */
+ vf->b_malicious = false;

 if (vf->to_disable)
 return ECORE_SUCCESS;
@@ -837,11 +881,8 @@ ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,

 ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

- /* It's possible VF was previously considered malicious */
- vf->b_malicious = false;
-
- rc = ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
- vf->abs_vf_id, vf->num_sbs);
+ rc = ecore_iov_enable_vf_access_msix(p_hwfn, p_ptt,
+ vf->abs_vf_id, vf->num_sbs);
 if (rc != ECORE_SUCCESS)
 return rc;
@@ -1267,8 +1308,7 @@ static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
 }

 /* place a given tlv on the tlv buffer, continuing current tlv list */
-void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,
- u8 **offset, u16 type, u16 length)
+void *ecore_add_tlv(u8 **offset, u16 type, u16 length)
 {
 struct channel_tlv *tl = (struct channel_tlv *)*offset;
@@ -1324,7 +1364,12 @@ void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list)
 static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
 struct ecore_ptt *p_ptt,
 struct ecore_vf_info *p_vf,
- u16 length, u8 status)
+#ifdef CONFIG_ECORE_SW_CHANNEL
+ u16 length,
+#else
+ u16 OSAL_UNUSED length,
+#endif
+ u8 status)
 {
 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
 struct ecore_dmae_params params;
@@ -1338,7 +1383,7 @@ static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
 mbx->sw_mbx.response_size =
 length + sizeof(struct channel_list_end_tlv);

- if (!p_hwfn->p_dev->b_hw_channel)
+ if (!p_vf->b_hw_channel)
 return;
 #endif
@@ -1354,17 +1399,22 @@ static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
 (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
 &params);

+ /* Once PF copies the rc to the VF, the latter can continue
+ * and send an additional message. So we have to make sure the
+ * channel would be re-set to ready prior to that. 
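+ * (Hence the channel-ready flag is raised below, before the final
+ * DMAE copy of the return code.)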
+ */
+ REG_WR(p_hwfn,
+ GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
+
 ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
 mbx->req_virt->first_tlv.reply_address,
 sizeof(u64) / 4, &params);

- REG_WR(p_hwfn,
- GTT_BAR0_MAP_REG_USDM_RAM +
- USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
+ OSAL_IOV_PF_RESP_TYPE(p_hwfn, p_vf->relative_vf_id, status);
 }

-static u16 ecore_iov_vport_to_tlv(struct ecore_hwfn *p_hwfn,
- enum ecore_iov_vport_update_flag flag)
+static u16 ecore_iov_vport_to_tlv(enum ecore_iov_vport_update_flag flag)
 {
 switch (flag) {
 case ECORE_IOV_VP_UPDATE_ACTIVATE:
@@ -1402,15 +1452,15 @@ static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,
 size = sizeof(struct pfvf_def_resp_tlv);
 total_len = size;

- ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
+ ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);

 /* Prepare response for all extended tlvs if they are found by PF */
 for (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) {
 if (!(tlvs_mask & (1 << i)))
 continue;

- resp = ecore_add_tlv(p_hwfn, &p_mbx->offset,
- ecore_iov_vport_to_tlv(p_hwfn, i), size);
+ resp = ecore_add_tlv(&p_mbx->offset, ecore_iov_vport_to_tlv(i),
+ size);

 if (tlvs_accepted & (1 << i))
 resp->hdr.status = status;
@@ -1420,12 +1470,13 @@
 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 "VF[%d] - vport_update resp: TLV %d, status %02x\n",
 p_vf->relative_vf_id,
- ecore_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);
+ ecore_iov_vport_to_tlv(i),
+ resp->hdr.status);

 total_len += size;
 }

- ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
+ ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_LIST_END,
 sizeof(struct channel_list_end_tlv));

 return total_len;
@@ -1440,13 +1491,11 @@ static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn,

 mbx->offset = (u8 *)mbx->reply_virt;

- ecore_add_tlv(p_hwfn, &mbx->offset, type, length);
- ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ ecore_add_tlv(&mbx->offset, type, length);
+ ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
 sizeof(struct channel_list_end_tlv));

 ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
-
- OSAL_IOV_PF_RESP_TYPE(p_hwfn, vf_info->relative_vf_id, status);
 }

 struct ecore_public_vf_info
@@ -1495,6 +1544,60 @@ static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
 OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id);
 }

+/* Returns either 0, or log(size) */
+static u32 ecore_iov_vf_db_bar_size(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 val = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);
+
+ if (val)
+ return val + 11;
+ return 0;
+}
+
+static void
+ecore_iov_vf_mbx_acquire_resc_cids(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf,
+ struct vf_pf_resc_request *p_req,
+ struct pf_vf_resc *p_resp)
+{
+ u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
+ u8 db_size = DB_ADDR_VF(1, DQ_DEMS_LEGACY) -
+ DB_ADDR_VF(0, DQ_DEMS_LEGACY);
+ u32 bar_size;
+
+ p_resp->num_cids = OSAL_MIN_T(u8, p_req->num_cids, num_vf_cons);
+
+ /* If VF didn't bother asking for QIDs then don't bother limiting
+ * number of CIDs. The VF doesn't care about the number, and this
+ * has the likely result of causing an additional acquisition. 
+ */ + if (!(p_vf->acquire.vfdev_info.capabilities & + VFPF_ACQUIRE_CAP_QUEUE_QIDS)) + return; + + /* If doorbell bar was mapped by VF, limit the VF CIDs to an amount + * that would make sure doorbells for all CIDs fall within the bar. + * If it doesn't, make sure regview window is sufficient. + */ + if (p_vf->acquire.vfdev_info.capabilities & + VFPF_ACQUIRE_CAP_PHYSICAL_BAR) { + bar_size = ecore_iov_vf_db_bar_size(p_hwfn, p_ptt); + if (bar_size) + bar_size = 1 << bar_size; + + if (ECORE_IS_CMT(p_hwfn->p_dev)) + bar_size /= 2; + } else { + bar_size = PXP_VF_BAR0_DQ_LENGTH; + } + + if (bar_size / db_size < 256) + p_resp->num_cids = OSAL_MIN_T(u8, p_resp->num_cids, + (u8)(bar_size / db_size)); +} + static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct ecore_vf_info *p_vf, @@ -1531,9 +1634,7 @@ static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn, p_resp->num_vlan_filters = OSAL_MIN_T(u8, p_vf->num_vlan_filters, p_req->num_vlan_filters); - p_resp->num_cids = - OSAL_MIN_T(u8, p_req->num_cids, - p_hwfn->pf_params.eth_pf_params.num_vf_cons); + ecore_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp); /* This isn't really needed/enforced, but some legacy VFs might depend * on the correct filling of this field. @@ -1574,8 +1675,7 @@ static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn, return PFVF_STATUS_SUCCESS; } -static void ecore_iov_vf_mbx_acquire_stats(struct ecore_hwfn *p_hwfn, - struct pfvf_stats_info *p_stats) +static void ecore_iov_vf_mbx_acquire_stats(struct pfvf_stats_info *p_stats) { p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B + OFFSETOF(struct mstorm_vf_zone, @@ -1657,7 +1757,7 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn, } /* On 100g PFs, prevent old VFs from loading */ - if ((p_hwfn->p_dev->num_hwfns > 1) && + if (ECORE_IS_CMT(p_hwfn->p_dev) && !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) { DP_INFO(p_hwfn, "VF[%d] is running an old driver that doesn't support" @@ -1685,11 +1785,11 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn, /* fill in pfdev info */ pfdev_info->chip_num = p_hwfn->p_dev->chip_num; pfdev_info->db_size = 0; /* @@@ TBD MichalK Vf Doorbells */ - pfdev_info->indices_per_sb = PIS_PER_SB; + pfdev_info->indices_per_sb = PIS_PER_SB_E4; pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED | PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE; - if (p_hwfn->p_dev->num_hwfns > 1) + if (ECORE_IS_CMT(p_hwfn->p_dev)) pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G; /* Share our ability to use multiple queue-ids only with VFs @@ -1698,7 +1798,11 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn, if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS) pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS; - ecore_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info); + /* Share the sizes of the bars with VF */ + resp->pfdev_info.bar_size = (u8)ecore_iov_vf_db_bar_size(p_hwfn, + p_ptt); + + ecore_iov_vf_mbx_acquire_stats(&pfdev_info->stats_info); OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN); @@ -1863,7 +1967,8 @@ ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn, if (!p_vf->vport_instance) return ECORE_INVAL; - if (events & (1 << MAC_ADDR_FORCED)) { + if ((events & (1 << MAC_ADDR_FORCED)) || + p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change) { /* Since there's no way [currently] of removing the MAC, * we can always assume this means we need to force it. 
 */
@@ -1884,7 +1989,11 @@ ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
 return rc;
 }

- p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
+ if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change)
+ p_vf->configured_features |=
+ 1 << VFPF_BULLETIN_MAC_ADDR;
+ else
+ p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
 }

 if (events & (1 << VLAN_ADDR_FORCED)) {
@@ -1939,8 +2048,7 @@ ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
 struct ecore_queue_cid *p_cid = OSAL_NULL;

 /* There can be at most 1 Rx queue on qzone. Find it */
- p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, p_vf,
- p_queue);
+ p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
 if (p_cid == OSAL_NULL)
 continue;
@@ -2076,16 +2184,19 @@ static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
 u8 status = PFVF_STATUS_SUCCESS;
 enum _ecore_status_t rc;

+ OSAL_IOV_VF_VPORT_STOP(p_hwfn, vf);
 vf->vport_instance--;
 vf->spoof_chk = false;

- if ((ecore_iov_validate_active_rxq(p_hwfn, vf)) ||
- (ecore_iov_validate_active_txq(p_hwfn, vf))) {
+ if ((ecore_iov_validate_active_rxq(vf)) ||
+ (ecore_iov_validate_active_txq(vf))) {
 vf->b_malicious = true;
 DP_NOTICE(p_hwfn, false,
 "VF [%02x] - considered malicious;"
 " Unable to stop RX/TX queuess\n",
 vf->abs_vf_id);
+ status = PFVF_STATUS_MALICIOUS;
+ goto out;
 }

 rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
@@ -2099,6 +2210,7 @@ static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
 vf->configured_features = 0;
 OSAL_MEMSET(&vf->shadow_config, 0, sizeof(vf->shadow_config));

+out:
 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
 sizeof(struct pfvf_def_resp_tlv), status);
 }
@@ -2124,9 +2236,8 @@ static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn,
 else
 length = sizeof(struct pfvf_def_resp_tlv);

- p_tlv = ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
- length);
+ p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_RXQ, length);
 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
 sizeof(struct channel_list_end_tlv));

 /* Update the TLV with the response */
@@ -2207,7 +2318,7 @@ static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
 if (p_queue->cids[qid_usage_idx].p_cid)
 goto out;

- vf_legacy = ecore_vf_calculate_legacy(p_hwfn, vf);
+ vf_legacy = ecore_vf_calculate_legacy(vf);

 /* Acquire a new queue-cid */
 OSAL_MEMSET(&params, 0, sizeof(params));
@@ -2228,7 +2339,7 @@ static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
 vf_params.qid_usage_idx = qid_usage_idx;

 p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
- &params, &vf_params);
+ &params, true, &vf_params);
 if (p_cid == OSAL_NULL)
 goto out;
@@ -2387,7 +2498,7 @@ static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn,
 if (b_update_required) {
 u16 geneve_port;

- rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
+ rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
 ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
 if (rc != ECORE_SUCCESS)
@@ -2402,11 +2513,11 @@ static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn,
 }

send_resp:
- p_resp = ecore_add_tlv(p_hwfn, &mbx->offset,
+ p_resp = ecore_add_tlv(&mbx->offset,
 CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));

 ecore_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
- ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
 sizeof(struct channel_list_end_tlv));

 ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp),
 status);
 }
@@ -2438,9 +2549,8 @@ static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
 else
 length = sizeof(struct pfvf_def_resp_tlv);

- p_tlv = ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
- length);
+ p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_TXQ, length);
 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
 sizeof(struct channel_list_end_tlv));

 /* Update the TLV with the response */
@@ -2483,7 +2593,7 @@ static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
 if (p_queue->cids[qid_usage_idx].p_cid)
 goto out;

- vf_legacy = ecore_vf_calculate_legacy(p_hwfn, vf);
+ vf_legacy = ecore_vf_calculate_legacy(vf);

 /* Acquire a new queue-cid */
 params.queue_id = p_queue->fw_tx_qid;
@@ -2503,7 +2613,7 @@ static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
 vf_params.qid_usage_idx = qid_usage_idx;

 p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
- &params, &vf_params);
+ &params, false, &vf_params);
 if (p_cid == OSAL_NULL)
 goto out;
@@ -2552,7 +2662,7 @@ static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
 p_queue->cids[qid_usage_idx].b_is_tx) {
 struct ecore_queue_cid *p_cid;

- p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf, p_queue);
+ p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
 vf->relative_vf_id, rxq_id, qid_usage_idx,
@@ -2974,8 +3084,7 @@ ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
 goto out;
 }

- p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
- &vf->vf_queues[q_idx]);
+ p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
 p_rss->rss_ind_table[i] = p_cid;
 }

@@ -2988,7 +3097,6 @@ out:

 static void
 ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn,
- struct ecore_vf_info *vf,
 struct ecore_sp_vport_update_params *p_data,
 struct ecore_sge_tpa_params *p_sge_tpa,
 struct ecore_iov_vf_mbx *p_mbx,
@@ -3078,7 +3186,7 @@ static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
 ecore_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
 ecore_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
 ecore_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
- ecore_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
+ ecore_iov_vp_update_sge_tpa_param(p_hwfn, &params,
 &sge_tpa_params, mbx, &tlvs_mask);

 tlvs_accepted = tlvs_mask;
@@ -3109,8 +3217,8 @@ static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
 "Upper-layer prevents said VF"
 " configuration\n");
 else
- DP_NOTICE(p_hwfn, true,
- "No feature tlvs found for vport update\n");
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "No feature tlvs found for vport update\n");
 status = PFVF_STATUS_NOT_SUPPORTED;
 goto out;
 }
@@ -3315,12 +3423,13 @@ static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
 goto out;
 }

- /* Update shadow copy of the VF configuration */
+ /* Update shadow copy of the VF configuration. In case the shadow
+ * indicates the action should be blocked, return success to the VF
+ * to imitate the firmware behaviour in such a case. 
+ */
 if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, &params) !=
- ECORE_SUCCESS) {
- status = PFVF_STATUS_FAILURE;
+ ECORE_SUCCESS)
 goto out;
- }

 /* Determine if the unicast filtering is acceptible by PF */
 if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
@@ -3425,6 +3534,76 @@ static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
 length, status);
 }

+static void ecore_iov_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf)
+{
+ struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct pfvf_read_coal_resp_tlv *p_resp;
+ struct vfpf_read_coal_req_tlv *req;
+ u8 status = PFVF_STATUS_FAILURE;
+ struct ecore_vf_queue *p_queue;
+ struct ecore_queue_cid *p_cid;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u16 coal = 0, qid, i;
+ bool b_is_rx;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+ req = &mbx->req_virt->read_coal_req;
+
+ qid = req->qid;
+ b_is_rx = req->is_rx ? true : false;
+
+ if (b_is_rx) {
+ if (!ecore_iov_validate_rxq(p_hwfn, p_vf, qid,
+ ECORE_IOV_VALIDATE_Q_ENABLE)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Invalid Rx queue_id = %d\n",
+ p_vf->abs_vf_id, qid);
+ goto send_resp;
+ }
+
+ p_cid = ecore_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]);
+ rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
+ if (rc != ECORE_SUCCESS)
+ goto send_resp;
+ } else {
+ if (!ecore_iov_validate_txq(p_hwfn, p_vf, qid,
+ ECORE_IOV_VALIDATE_Q_ENABLE)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Invalid Tx queue_id = %d\n",
+ p_vf->abs_vf_id, qid);
+ goto send_resp;
+ }
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ p_queue = &p_vf->vf_queues[qid];
+ if ((p_queue->cids[i].p_cid == OSAL_NULL) ||
+ (!p_queue->cids[i].b_is_tx))
+ continue;
+
+ p_cid = p_queue->cids[i].p_cid;
+
+ rc = ecore_get_txq_coalesce(p_hwfn, p_ptt,
+ p_cid, &coal);
+ if (rc != ECORE_SUCCESS)
+ goto send_resp;
+ break;
+ }
+ }
+
+ status = PFVF_STATUS_SUCCESS;
+
+send_resp:
+ p_resp = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_COALESCE_READ,
+ sizeof(*p_resp));
+ p_resp->coal = coal;
+
+ ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
+}
+
 static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
 struct ecore_ptt *p_ptt,
 struct ecore_vf_info *vf)
@@ -3465,8 +3644,7 @@ static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
 vf->abs_vf_id, rx_coal, tx_coal, qid);

 if (rx_coal) {
- p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
- &vf->vf_queues[qid]);
+ p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);

 rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
 if (rc != ECORE_SUCCESS) {
@@ -3475,6 +3653,7 @@ static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
 vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
 goto out;
 }
+ vf->rx_coal = rx_coal;
 }

 /* TODO - in future, it might be possible to pass this in a per-cid
@@ -3499,6 +3678,7 @@ static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
 goto out;
 }
 }
+ vf->tx_coal = tx_coal;
 }

 status = PFVF_STATUS_SUCCESS;
@@ -3507,6 +3687,92 @@ out:
 sizeof(struct pfvf_def_resp_tlv), status);
 }

+enum _ecore_status_t
+ecore_iov_pf_configure_vf_queue_coalesce(struct ecore_hwfn *p_hwfn,
+ u16 rx_coal, u16 tx_coal,
+ u16 vf_id, u16 qid)
+{
+ struct ecore_queue_cid *p_cid;
+ struct ecore_vf_info *vf;
+ struct ecore_ptt *p_ptt;
+ int i, rc = 0;
+
+ if (!ecore_iov_is_valid_vfid(p_hwfn, vf_id, true, true)) {
+ DP_NOTICE(p_hwfn, true,
+ "VF[%d] - Can not 
set coalescing: VF is not active\n", + vf_id); + return ECORE_INVAL; + } + + vf = &p_hwfn->pf_iov_info->vfs_array[vf_id]; + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) + return ECORE_AGAIN; + + if (!ecore_iov_validate_rxq(p_hwfn, vf, qid, + ECORE_IOV_VALIDATE_Q_ENABLE) && + rx_coal) { + DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n", + vf->abs_vf_id, qid); + goto out; + } + + if (!ecore_iov_validate_txq(p_hwfn, vf, qid, + ECORE_IOV_VALIDATE_Q_ENABLE) && + tx_coal) { + DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n", + vf->abs_vf_id, qid); + goto out; + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n", + vf->abs_vf_id, rx_coal, tx_coal, qid); + + if (rx_coal) { + p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]); + + rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid); + if (rc != ECORE_SUCCESS) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%d]: Unable to set rx queue = %d coalesce\n", + vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid); + goto out; + } + vf->rx_coal = rx_coal; + } + + /* TODO - in future, it might be possible to pass this in a per-cid + * granularity. For now, do this for all Tx queues. + */ + if (tx_coal) { + struct ecore_vf_queue *p_queue = &vf->vf_queues[qid]; + + for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { + if (p_queue->cids[i].p_cid == OSAL_NULL) + continue; + + if (!p_queue->cids[i].b_is_tx) + continue; + + rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, + p_queue->cids[i].p_cid); + if (rc != ECORE_SUCCESS) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%d]: Unable to set tx queue coalesce\n", + vf->abs_vf_id); + goto out; + } + } + vf->tx_coal = tx_coal; + } + +out: + ecore_ptt_release(p_hwfn, p_ptt); + + return rc; +} + static enum _ecore_status_t ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn, struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt) @@ -3538,11 +3804,11 @@ static enum _ecore_status_t ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn, struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt) { - u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS]; + u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4]; int i, cnt; /* Read initial consumers & producers */ - for (i = 0; i < MAX_NUM_VOQS; i++) { + for (i = 0; i < MAX_NUM_VOQS_E4; i++) { u32 prod; cons[i] = ecore_rd(p_hwfn, p_ptt, @@ -3557,7 +3823,7 @@ ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn, /* Wait for consumers to pass the producers */ i = 0; for (cnt = 0; cnt < 50; cnt++) { - for (; i < MAX_NUM_VOQS; i++) { + for (; i < MAX_NUM_VOQS_E4; i++) { u32 tmp; tmp = ecore_rd(p_hwfn, p_ptt, @@ -3567,7 +3833,7 @@ ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn, break; } - if (i == MAX_NUM_VOQS) + if (i == MAX_NUM_VOQS_E4) break; OSAL_MSLEEP(20); @@ -3666,8 +3932,7 @@ cleanup: ack_vfs[vfid / 32] |= (1 << (vfid % 32)); p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &= ~(1ULL << (rel_vf_id % 64)); - p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &= - ~(1ULL << (rel_vf_id % 64)); + p_vf->vf_mbx.b_pending_msg = false; } return rc; @@ -3777,11 +4042,11 @@ void ecore_iov_get_link(struct ecore_hwfn *p_hwfn, p_bulletin = p_vf->bulletin.p_virt; if (p_params) - __ecore_vf_get_link_params(p_hwfn, p_params, p_bulletin); + __ecore_vf_get_link_params(p_params, p_bulletin); if (p_link) - __ecore_vf_get_link_state(p_hwfn, p_link, p_bulletin); + __ecore_vf_get_link_state(p_link, p_bulletin); if (p_caps) - __ecore_vf_get_link_caps(p_hwfn, p_caps, p_bulletin); + __ecore_vf_get_link_caps(p_caps, p_bulletin); } void 
ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn, @@ -3797,12 +4062,22 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn, mbx = &p_vf->vf_mbx; /* ecore_iov_process_mbx_request */ - DP_VERBOSE(p_hwfn, - ECORE_MSG_IOV, - "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id); +#ifndef CONFIG_ECORE_SW_CHANNEL + if (!mbx->b_pending_msg) { + DP_NOTICE(p_hwfn, true, + "VF[%02x]: Trying to process mailbox message when none is pending\n", + p_vf->abs_vf_id); + return; + } + mbx->b_pending_msg = false; +#endif mbx->first_tlv = mbx->req_virt->first_tlv; + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%02x]: Processing mailbox message [type %04x]\n", + p_vf->abs_vf_id, mbx->first_tlv.tl.type); + OSAL_IOV_VF_MSG_TYPE(p_hwfn, p_vf->relative_vf_id, mbx->first_tlv.tl.type); @@ -3863,6 +4138,9 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn, case CHANNEL_TLV_COALESCE_UPDATE: ecore_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf); break; + case CHANNEL_TLV_COALESCE_READ: + ecore_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf); + break; } } else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) { /* If we've received a message from a VF we consider malicious @@ -3927,26 +4205,20 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn, #endif } -void ecore_iov_pf_add_pending_events(struct ecore_hwfn *p_hwfn, u8 vfid) +void ecore_iov_pf_get_pending_events(struct ecore_hwfn *p_hwfn, + u64 *events) { - u64 add_bit = 1ULL << (vfid % 64); + int i; - /* TODO - add locking mechanisms [no atomics in ecore, so we can't - * add the lock inside the ecore_pf_iov struct]. - */ - p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit; -} + OSAL_MEM_ZERO(events, sizeof(u64) * ECORE_VF_ARRAY_LENGTH); -void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn, - u64 *events) -{ - u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events; + ecore_for_each_vf(p_hwfn, i) { + struct ecore_vf_info *p_vf; - /* TODO - Take a lock */ - OSAL_MEMCPY(events, p_pending_events, - sizeof(u64) * ECORE_VF_ARRAY_LENGTH); - OSAL_MEMSET(p_pending_events, 0, - sizeof(u64) * ECORE_VF_ARRAY_LENGTH); + p_vf = &p_hwfn->pf_iov_info->vfs_array[i]; + if (p_vf->vf_mbx.b_pending_msg) + events[i / 64] |= 1ULL << (i % 64); + } } static struct ecore_vf_info * @@ -3980,6 +4252,8 @@ static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn, */ p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo; + p_vf->vf_mbx.b_pending_msg = true; + return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id); } @@ -3988,24 +4262,31 @@ static void ecore_sriov_vfpf_malicious(struct ecore_hwfn *p_hwfn, { struct ecore_vf_info *p_vf; - p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vfId); + p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id); if (!p_vf) return; - DP_INFO(p_hwfn, - "VF [%d] - Malicious behavior [%02x]\n", - p_vf->abs_vf_id, p_data->errId); + if (!p_vf->b_malicious) { + DP_NOTICE(p_hwfn, false, + "VF [%d] - Malicious behavior [%02x]\n", + p_vf->abs_vf_id, p_data->err_id); - p_vf->b_malicious = true; + p_vf->b_malicious = true; + } else { + DP_INFO(p_hwfn, + "VF [%d] - Malicious behavior [%02x]\n", + p_vf->abs_vf_id, p_data->err_id); + } OSAL_PF_VF_MALICIOUS(p_hwfn, p_vf->relative_vf_id); } -enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn, - u8 opcode, - __le16 echo, - union event_ring_data *data) +static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn, + u8 opcode, + __le16 echo, + union event_ring_data *data, + u8 OSAL_UNUSED fw_return_code) { 
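+	/* Async EQ events on PROTOCOLID_COMMON (registered via
+	 * ecore_spq_register_async_cb() in ecore_iov_alloc()) land here;
+	 * dispatch to the SRIOV handlers by opcode.
+	 */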
switch (opcode) { case COMMON_EVENT_VF_PF_CHANNEL: @@ -4044,7 +4325,7 @@ u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) return i; out: - return E4_MAX_NUM_VFS; + return MAX_NUM_VFS_E4; } enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn, @@ -4093,7 +4374,11 @@ void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn, return; } - feature = 1 << MAC_ADDR_FORCED; + if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change) + feature = 1 << VFPF_BULLETIN_MAC_ADDR; + else + feature = 1 << MAC_ADDR_FORCED; + OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN); vf_info->bulletin.p_virt->valid_bitmap |= feature; @@ -4134,9 +4419,13 @@ enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn, vf_info->bulletin.p_virt->valid_bitmap |= feature; + if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change) + ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature); + return ECORE_SUCCESS; } +#ifndef LINUX_REMOVE enum _ecore_status_t ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn, bool b_untagged_only, int vfid) @@ -4193,6 +4482,7 @@ void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid, *opaque_fid = vf_info->opaque_fid; } +#endif void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn, u16 pvid, int vfid) @@ -4380,6 +4670,22 @@ u32 ecore_iov_pfvf_msg_length(void) return sizeof(union pfvf_tlvs); } +u8 *ecore_iov_bulletin_get_mac(struct ecore_hwfn *p_hwfn, + u16 rel_vf_id) +{ + struct ecore_vf_info *p_vf; + + p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!p_vf || !p_vf->bulletin.p_virt) + return OSAL_NULL; + + if (!(p_vf->bulletin.p_virt->valid_bitmap & + (1 << VFPF_BULLETIN_MAC_ADDR))) + return OSAL_NULL; + + return p_vf->bulletin.p_virt->mac; +} + u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) { struct ecore_vf_info *p_vf; @@ -4413,6 +4719,7 @@ enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, int vfid, int val) { + struct ecore_mcp_link_state *p_link; struct ecore_vf_info *vf; u8 abs_vp_id = 0; enum _ecore_status_t rc; @@ -4426,7 +4733,10 @@ enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn, if (rc != ECORE_SUCCESS) return rc; - return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val); + p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output; + + return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val, + p_link->speed); } enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn, @@ -4556,3 +4866,17 @@ ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid) else return 0; } + +#ifdef CONFIG_ECORE_SW_CHANNEL +void ecore_iov_set_vf_hw_channel(struct ecore_hwfn *p_hwfn, int vfid, + bool b_is_hw) +{ + struct ecore_vf_info *vf_info; + + vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!vf_info) + return; + + vf_info->b_hw_channel = b_is_hw; +} +#endif
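
Note on the new CID-limiting logic in ecore_iov_vf_mbx_acquire_resc_cids() above: ecore_iov_vf_db_bar_size() returns "either 0, or log(size)", i.e. a non-zero PGLUE_B_REG_VF_BAR1_SIZE value v encodes a VF doorbell bar of 2^(v + 11) bytes, and the acquire path then trims num_cids so that every doorbell at the DQ_DEMS_LEGACY stride still falls inside that bar (halved on CMT devices, where two engines share it). The following standalone C sketch walks through that arithmetic; the stride and register values are hypothetical, picked only so the cap takes effect, and are not real hardware values.

#include <stdio.h>

/* Hypothetical stand-in for
 * DB_ADDR_VF(1, DQ_DEMS_LEGACY) - DB_ADDR_VF(0, DQ_DEMS_LEGACY).
 */
#define DB_STRIDE 128U

int main(void)
{
	unsigned int reg_val = 4; /* hypothetical PGLUE_B_REG_VF_BAR1_SIZE */
	unsigned int bar_log = reg_val ? reg_val + 11 : 0;
	unsigned int bar_size = bar_log ? 1U << bar_log : 0; /* 32 KB */
	unsigned int num_cids = 192; /* CIDs the VF requested */

	bar_size /= 2; /* CMT: two engines share the VF bar */

	/* Trim the CID count so all doorbells fall within the bar */
	if (bar_size / DB_STRIDE < 256 && num_cids > bar_size / DB_STRIDE)
		num_cids = bar_size / DB_STRIDE;

	printf("%u-byte bar fits %u doorbells -> VF is granted %u CIDs\n",
	       bar_size, bar_size / DB_STRIDE, num_cids);
	return 0;
}

This prints "16384-byte bar fits 128 doorbells -> VF is granted 128 CIDs": the 192 requested CIDs are trimmed to 128, matching the bar_size / db_size < 256 branch in the patch.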