mempool: introduce helpers for populate and required size
[dpdk.git] / drivers / net / qede / base / ecore_sriov.c
index 18458cf..deee04a 100644 (file)
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
  * All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
  */
 
 #include "bcm_osal.h"
@@ -33,7 +31,7 @@ static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
                                                  union event_ring_data *data,
                                                  u8 fw_return_code);
 
-const char *ecore_channel_tlvs_string[] = {
+const char *qede_ecore_channel_tlvs_string[] = {
        "CHANNEL_TLV_NONE",     /* ends tlv sequence */
        "CHANNEL_TLV_ACQUIRE",
        "CHANNEL_TLV_VPORT_START",
@@ -61,6 +59,8 @@ const char *ecore_channel_tlvs_string[] = {
        "CHANNEL_TLV_COALESCE_UPDATE",
        "CHANNEL_TLV_QID",
        "CHANNEL_TLV_COALESCE_READ",
+       "CHANNEL_TLV_BULLETIN_UPDATE_MAC",
+       "CHANNEL_TLV_UPDATE_MTU",
        "CHANNEL_TLV_MAX"
 };
 
@@ -218,7 +218,7 @@ struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
 static struct ecore_queue_cid *
 ecore_iov_get_vf_rx_queue_cid(struct ecore_vf_queue *p_queue)
 {
-       int i;
+       u32 i;
 
        for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
                if (p_queue->cids[i].p_cid &&
@@ -240,7 +240,7 @@ static bool ecore_iov_validate_queue_mode(struct ecore_vf_info *p_vf,
                                          enum ecore_iov_validate_q_mode mode,
                                          bool b_is_tx)
 {
-       int i;
+       u32 i;
 
        if (mode == ECORE_IOV_VALIDATE_Q_NA)
                return true;
@@ -347,7 +347,7 @@ enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
 {
        struct ecore_bulletin_content *p_bulletin;
        int crc_size = sizeof(p_bulletin->crc);
-       struct ecore_dmae_params params;
+       struct dmae_params params;
        struct ecore_vf_info *p_vf;
 
        p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
@@ -371,8 +371,8 @@ enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
 
        /* propagate bulletin board via dmae to vm memory */
        OSAL_MEMSET(&params, 0, sizeof(params));
-       params.flags = ECORE_DMAE_FLAG_VF_DST;
-       params.dst_vfid = p_vf->abs_vf_id;
+       SET_FIELD(params.flags, DMAE_PARAMS_DST_VF_VALID, 0x1);
+       params.dst_vf_id = p_vf->abs_vf_id;
        return ecore_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
                                    p_vf->vf_bulletin, p_vf->bulletin.size / 4,
                                    &params);
@@ -590,8 +590,7 @@ enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
 
        p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
        if (!p_sriov) {
-               DP_NOTICE(p_hwfn, true,
-                         "Failed to allocate `struct ecore_sriov'\n");
+               DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sriov'\n");
                return ECORE_NOMEM;
        }
 
@@ -648,7 +647,7 @@ enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
        p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL,
                                        sizeof(*p_dev->p_iov_info));
        if (!p_dev->p_iov_info) {
-               DP_NOTICE(p_hwfn, true,
+               DP_NOTICE(p_hwfn, false,
                          "Can't support IOV due to lack of memory\n");
                return ECORE_NOMEM;
        }
@@ -907,7 +906,7 @@ ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
  *
  * @brief ecore_iov_config_perm_table - configure the permission
  *      zone table.
- *      In E4, queue zone permission table size is 320x9. There
+ *      The queue zone permission table size is 320x9. There
  *      are 320 VF queues for single engine device (256 for dual
  *      engine device), and each entry has the following format:
  *      {Valid, VF[7:0]}
@@ -968,6 +967,9 @@ static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
 
        for (qid = 0; qid < num_rx_queues; qid++) {
                p_block = ecore_get_igu_free_sb(p_hwfn, false);
+               if (!p_block)
+                       continue;
+
                vf->igu_sbs[qid] = p_block->igu_sb_id;
                p_block->status &= ~ECORE_IGU_STATUS_FREE;
                SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
@@ -980,10 +982,12 @@ static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
                ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
                                        p_hwfn->rel_pf_id,
                                        vf->abs_vf_id, 1);
+
                ecore_dmae_host2grc(p_hwfn, p_ptt,
                                    (u64)(osal_uintptr_t)&sb_entry,
                                    CAU_REG_SB_VAR_MEMORY +
-                                   p_block->igu_sb_id * sizeof(u64), 2, 0);
+                                   p_block->igu_sb_id * sizeof(u64), 2,
+                                   OSAL_NULL /* default parameters */);
        }
 
        vf->num_sbs = (u8)num_rx_queues;
@@ -1063,6 +1067,15 @@ void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
        p_bulletin->capability_speed = p_caps->speed_capabilities;
 }
 
+#ifndef ASIC_ONLY
+static void ecore_emul_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_ptt *p_ptt)
+{
+       /* Increase the maximum number of DORQ FIFO entries used by child VFs */
+       ecore_wr(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT_LIM, 0x3ec);
+}
+#endif
+
 enum _ecore_status_t
 ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
                         struct ecore_ptt *p_ptt,
@@ -1187,18 +1200,39 @@ ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
                           &link_params, &link_state, &link_caps);
 
        rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);
+       if (rc != ECORE_SUCCESS)
+               return rc;
 
-       if (rc == ECORE_SUCCESS) {
-               vf->b_init = true;
-               p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] |=
+       vf->b_init = true;
+#ifndef REMOVE_DBG
+       p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] |=
                        (1ULL << (vf->relative_vf_id % 64));
+#endif
 
-               if (IS_LEAD_HWFN(p_hwfn))
-                       p_hwfn->p_dev->p_iov_info->num_vfs++;
+       if (IS_LEAD_HWFN(p_hwfn))
+               p_hwfn->p_dev->p_iov_info->num_vfs++;
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+               ecore_emul_iov_init_hw_for_vf(p_hwfn, p_ptt);
+#endif
+
+       return ECORE_SUCCESS;
        }
 
-       return rc;
+#ifndef ASIC_ONLY
+static void ecore_emul_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_ptt *p_ptt)
+{
+       if (!ecore_mcp_is_init(p_hwfn)) {
+               u32 sriov_dis = ecore_rd(p_hwfn, p_ptt,
+                                        PGLUE_B_REG_SR_IOV_DISABLED_REQUEST);
+
+               ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_SR_IOV_DISABLED_REQUEST_CLR,
+                        sriov_dis);
 }
+}
+#endif
 
 enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
                                                 struct ecore_ptt *p_ptt,
@@ -1256,6 +1290,11 @@ enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
                        p_hwfn->p_dev->p_iov_info->num_vfs--;
        }
 
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+               ecore_emul_iov_release_hw_for_vf(p_hwfn, p_ptt);
+#endif
+
        return ECORE_SUCCESS;
 }
 
@@ -1279,7 +1318,7 @@ static void ecore_iov_lock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
                           ECORE_MSG_IOV,
                           "VF[%d]: vf pf channel locked by %s\n",
                           vf->abs_vf_id,
-                          ecore_channel_tlvs_string[tlv]);
+                          qede_ecore_channel_tlvs_string[tlv]);
        else
                DP_VERBOSE(p_hwfn,
                           ECORE_MSG_IOV,
@@ -1297,7 +1336,7 @@ static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
                           ECORE_MSG_IOV,
                           "VF[%d]: vf pf channel unlocked by %s\n",
                           vf->abs_vf_id,
-                          ecore_channel_tlvs_string[expected_tlv]);
+                          qede_ecore_channel_tlvs_string[expected_tlv]);
        else
                DP_VERBOSE(p_hwfn,
                           ECORE_MSG_IOV,
@@ -1337,7 +1376,7 @@ void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list)
                if (ecore_iov_tlv_supported(tlv->type))
                        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                                   "TLV number %d: type %s, length %d\n",
-                                  i, ecore_channel_tlvs_string[tlv->type],
+                                  i, qede_ecore_channel_tlvs_string[tlv->type],
                                   tlv->length);
                else
                        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
@@ -1373,7 +1412,7 @@ static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
                                    u8 status)
 {
        struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
-       struct ecore_dmae_params params;
+       struct dmae_params params;
        u8 eng_vf_id;
 
        mbx->reply_virt->default_resp.hdr.status = status;
@@ -1390,9 +1429,9 @@ static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
 
        eng_vf_id = p_vf->abs_vf_id;
 
-       OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
-       params.flags = ECORE_DMAE_FLAG_VF_DST;
-       params.dst_vfid = eng_vf_id;
+       OSAL_MEMSET(&params, 0, sizeof(params));
+       SET_FIELD(params.flags, DMAE_PARAMS_DST_VF_VALID, 0x1);
+       params.dst_vf_id = eng_vf_id;
 
        ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
                             mbx->req_virt->first_tlv.reply_address +
@@ -1786,7 +1825,7 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn       *p_hwfn,
        /* fill in pfdev info */
        pfdev_info->chip_num = p_hwfn->p_dev->chip_num;
        pfdev_info->db_size = 0;        /* @@@ TBD MichalK Vf Doorbells */
-       pfdev_info->indices_per_sb = PIS_PER_SB_E4;
+       pfdev_info->indices_per_sb = PIS_PER_SB;
 
        pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
                                   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
@@ -1968,7 +2007,9 @@ ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
        if (!p_vf->vport_instance)
                return ECORE_INVAL;
 
-       if (events & (1 << MAC_ADDR_FORCED)) {
+       if ((events & (1 << MAC_ADDR_FORCED)) ||
+           p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
+           p_vf->p_vf_info.is_trusted_configured) {
                /* Since there's no way [currently] of removing the MAC,
                 * we can always assume this means we need to force it.
                 */
@@ -1989,7 +2030,12 @@ ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
                        return rc;
                }
 
-               p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
+               if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
+                   p_vf->p_vf_info.is_trusted_configured)
+                       p_vf->configured_features |=
+                               1 << VFPF_BULLETIN_MAC_ADDR;
+               else
+                       p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
        }
 
        if (events & (1 << VLAN_ADDR_FORCED)) {
@@ -2081,8 +2127,8 @@ static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
                                         struct ecore_ptt *p_ptt,
                                         struct ecore_vf_info *vf)
 {
-       struct ecore_sp_vport_start_params params = { 0 };
        struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+       struct ecore_sp_vport_start_params params;
        struct vfpf_vport_start_tlv *start;
        u8 status = PFVF_STATUS_SUCCESS;
        struct ecore_vf_info *vf_info;
@@ -2133,6 +2179,7 @@ static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
                *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
        }
 
+       OSAL_MEMSET(&params, 0, sizeof(struct ecore_sp_vport_start_params));
        params.tpa_mode = start->tpa_mode;
        params.remove_inner_vlan = start->inner_vlan_removal;
        params.tx_switching = true;
@@ -2152,7 +2199,9 @@ static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
        params.vport_id = vf->vport_id;
        params.max_buffers_per_cqe = start->max_buffers_per_cqe;
        params.mtu = vf->mtu;
-       params.check_mac = true;
+
+       /* Non trusted VFs should enable control frame filtering */
+       params.check_mac = !vf->p_vf_info.is_trusted_configured;
 
        rc = ecore_sp_eth_vport_start(p_hwfn, &params);
        if (rc != ECORE_SUCCESS) {
@@ -2236,10 +2285,14 @@ static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn,
        ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
                      sizeof(struct channel_list_end_tlv));
 
-       /* Update the TLV with the response */
+       /* Update the TLV with the response.
+        * The VF Rx producers are located in the vf zone.
+        */
        if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
                req = &mbx->req_virt->start_rxq;
-               p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
+
+               p_tlv->offset =
+                       PXP_VF_BAR0_START_MSDM_ZONE_B +
                                OFFSETOF(struct mstorm_vf_zone,
                                         non_trigger.eth_rx_queue_producers) +
                                sizeof(struct eth_rx_prod_data) * req->rx_qid;
@@ -2339,13 +2392,15 @@ static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
        if (p_cid == OSAL_NULL)
                goto out;
 
-       /* Legacy VFs have their Producers in a different location, which they
-        * calculate on their own and clean the producer prior to this.
+       /* The VF Rx producers are located in the vf zone.
+        * Legacy VFs have their producers in the queue zone, but they
+        * calculate the location by their own and clean them prior to this.
         */
        if (!(vf_legacy & ECORE_QCID_LEGACY_VF_RX_PROD))
                REG_WR(p_hwfn,
                       GTT_BAR0_MAP_REG_MSDM_RAM +
-                      MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
+                      MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id,
+                                                 req->rx_qid),
                       0);
 
        rc = ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
@@ -2854,6 +2909,45 @@ out:
                               length, status);
 }
 
+static enum _ecore_status_t
+ecore_iov_vf_pf_update_mtu(struct ecore_hwfn *p_hwfn,
+                                   struct ecore_ptt *p_ptt,
+                                   struct ecore_vf_info *p_vf)
+{
+       struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+       struct ecore_sp_vport_update_params params;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       struct vfpf_update_mtu_tlv *p_req;
+       u8 status = PFVF_STATUS_SUCCESS;
+
+       /* Validate the PF can send such a request */
+       if (!p_vf->vport_instance) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "No VPORT instance available for VF[%d], failing MTU update\n",
+                          p_vf->abs_vf_id);
+               status = PFVF_STATUS_FAILURE;
+               goto send_status;
+       }
+
+       p_req = &mbx->req_virt->update_mtu;
+
+       OSAL_MEMSET(&params, 0, sizeof(params));
+       params.opaque_fid =  p_vf->opaque_fid;
+       params.vport_id = p_vf->vport_id;
+       params.mtu = p_req->mtu;
+       rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
+                                  OSAL_NULL);
+
+       if (rc)
+               status = PFVF_STATUS_FAILURE;
+send_status:
+       ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
+                              CHANNEL_TLV_UPDATE_MTU,
+                              sizeof(struct pfvf_def_resp_tlv),
+                              status);
+       return rc;
+}
+
 void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
                                 void *p_tlvs_list, u16 req_type)
 {
@@ -2869,7 +2963,7 @@ void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
                if (p_tlv->type == req_type) {
                        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                                   "Extended tlv type %s, length %d found\n",
-                                  ecore_channel_tlvs_string[p_tlv->type],
+                                  qede_ecore_channel_tlvs_string[p_tlv->type],
                                   p_tlv->length);
                        return p_tlv;
                }
@@ -2975,8 +3069,7 @@ ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn,
 
        p_data->update_approx_mcast_flg = 1;
        OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins,
-                   sizeof(unsigned long) *
-                   ETH_MULTICAST_MAC_BINS_IN_REGS);
+                   sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
        *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST;
 }
 
@@ -3309,6 +3402,15 @@ ecore_iov_vf_update_mac_shadow(struct ecore_hwfn *p_hwfn,
        if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
                return ECORE_SUCCESS;
 
+       /* Since we don't have the implementation of the logic for removing
+        * a forced MAC and restoring shadow MAC, let's not worry about
+        * processing shadow copies of MAC as long as VF trust mode is ON,
+        * to keep things simple.
+        */
+       if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
+           p_vf->p_vf_info.is_trusted_configured)
+               return ECORE_SUCCESS;
+
        /* First remove entries and then add new ones */
        if (p_params->opcode == ECORE_FILTER_REMOVE) {
                for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
@@ -3419,12 +3521,13 @@ static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
                goto out;
        }
 
-       /* Update shadow copy of the VF configuration */
+       /* Update shadow copy of the VF configuration. In case shadow indicates
+        * the action should be blocked return success to VF to imitate the
+        * firmware behaviour in such case.
+        */
        if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, &params) !=
-           ECORE_SUCCESS) {
-               status = PFVF_STATUS_FAILURE;
+           ECORE_SUCCESS)
                goto out;
-       }
 
        /* Determine if the unicast filtering is acceptible by PF */
        if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
@@ -3610,7 +3713,7 @@ static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
        struct ecore_queue_cid *p_cid;
        u16 rx_coal, tx_coal;
        u16 qid;
-       int i;
+       u32 i;
 
        req = &mbx->req_virt->update_coalesce;
 
@@ -3690,7 +3793,8 @@ ecore_iov_pf_configure_vf_queue_coalesce(struct ecore_hwfn *p_hwfn,
        struct ecore_queue_cid *p_cid;
        struct ecore_vf_info *vf;
        struct ecore_ptt *p_ptt;
-       int i, rc = 0;
+       int rc = 0;
+       u32 i;
 
        if (!ecore_iov_is_valid_vfid(p_hwfn, vf_id, true, true)) {
                DP_NOTICE(p_hwfn, true,
@@ -3795,48 +3899,70 @@ ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,
        return ECORE_SUCCESS;
 }
 
+#define MAX_NUM_EXT_VOQS       (MAX_NUM_PORTS * NUM_OF_TCS)
+
 static enum _ecore_status_t
 ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
                          struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
 {
-       u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];
-       int i, cnt;
+       u32 prod, cons[MAX_NUM_EXT_VOQS], distance[MAX_NUM_EXT_VOQS], tmp;
+       u8 max_phys_tcs_per_port = p_hwfn->qm_info.max_phys_tcs_per_port;
+       u8 max_ports_per_engine = p_hwfn->p_dev->num_ports_in_engine;
+       u32 prod_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0;
+       u32 cons_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0;
+       u8 port_id, tc, tc_id = 0, voq = 0;
+       int cnt;
 
        /* Read initial consumers & producers */
-       for (i = 0; i < MAX_NUM_VOQS_E4; i++) {
-               u32 prod;
-
-               cons[i] = ecore_rd(p_hwfn, p_ptt,
-                                  PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
-                                  i * 0x40);
+       for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+               /* "max_phys_tcs_per_port" active TCs + 1 pure LB TC */
+               for (tc = 0; tc < max_phys_tcs_per_port + 1; tc++) {
+                       tc_id = (tc < max_phys_tcs_per_port) ?
+                               tc :
+                               PURE_LB_TC;
+                       voq = VOQ(port_id, tc_id, max_phys_tcs_per_port);
+                       cons[voq] = ecore_rd(p_hwfn, p_ptt,
+                                            cons_voq0_addr + voq * 0x40);
                prod = ecore_rd(p_hwfn, p_ptt,
-                               PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
-                               i * 0x40);
-               distance[i] = prod - cons[i];
+                                       prod_voq0_addr + voq * 0x40);
+                       distance[voq] = prod - cons[voq];
+               }
        }
 
        /* Wait for consumers to pass the producers */
-       i = 0;
+       port_id = 0;
+       tc = 0;
        for (cnt = 0; cnt < 50; cnt++) {
-               for (; i < MAX_NUM_VOQS_E4; i++) {
-                       u32 tmp;
-
+               for (; port_id < max_ports_per_engine; port_id++) {
+                       /* "max_phys_tcs_per_port" active TCs + 1 pure LB TC */
+                       for (; tc < max_phys_tcs_per_port + 1; tc++) {
+                               tc_id = (tc < max_phys_tcs_per_port) ?
+                                       tc :
+                                       PURE_LB_TC;
+                               voq = VOQ(port_id, tc_id,
+                                         max_phys_tcs_per_port);
                        tmp = ecore_rd(p_hwfn, p_ptt,
-                                      PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
-                                      i * 0x40);
-                       if (distance[i] > tmp - cons[i])
+                                              cons_voq0_addr + voq * 0x40);
+                       if (distance[voq] > tmp - cons[voq])
+                               break;
+               }
+
+                       if (tc == max_phys_tcs_per_port + 1)
+                               tc = 0;
+                       else
                                break;
                }
 
-               if (i == MAX_NUM_VOQS_E4)
+               if (port_id == max_ports_per_engine)
                        break;
 
                OSAL_MSLEEP(20);
        }
 
        if (cnt == 50) {
-               DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
-                      p_vf->abs_vf_id, i);
+               DP_ERR(p_hwfn,
+                      "VF[%d] - pbf polling failed on VOQ %d [port_id %d, tc_id %d]\n",
+                      p_vf->abs_vf_id, voq, port_id, tc_id);
                return ECORE_TIMEOUT;
        }
 
@@ -3936,11 +4062,11 @@ cleanup:
 enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
                                              struct ecore_ptt *p_ptt)
 {
-       u32 ack_vfs[VF_MAX_STATIC / 32];
+       u32 ack_vfs[EXT_VF_BITMAP_SIZE_IN_DWORDS];
        enum _ecore_status_t rc = ECORE_SUCCESS;
        u16 i;
 
-       OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
+       OSAL_MEM_ZERO(ack_vfs, EXT_VF_BITMAP_SIZE_IN_BYTES);
 
        /* Since BRB <-> PRS interface can't be tested as part of the flr
         * polling due to HW limitations, simply sleep a bit. And since
@@ -3959,10 +4085,10 @@ enum _ecore_status_t
 ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
                                struct ecore_ptt *p_ptt, u16 rel_vf_id)
 {
-       u32 ack_vfs[VF_MAX_STATIC / 32];
+       u32 ack_vfs[EXT_VF_BITMAP_SIZE_IN_DWORDS];
        enum _ecore_status_t rc = ECORE_SUCCESS;
 
-       OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
+       OSAL_MEM_ZERO(ack_vfs, EXT_VF_BITMAP_SIZE_IN_BYTES);
 
        /* Wait instead of polling the BRB <-> PRS interface */
        OSAL_MSLEEP(100);
@@ -3979,7 +4105,8 @@ bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
        u16 i;
 
        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n");
-       for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+
+       for (i = 0; i < VF_BITMAP_SIZE_IN_DWORDS; i++)
                DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                           "[%08x,...,%08x]: %08x\n",
                           i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
@@ -4136,6 +4263,9 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
                case CHANNEL_TLV_COALESCE_READ:
                        ecore_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
                        break;
+               case CHANNEL_TLV_UPDATE_MTU:
+                       ecore_iov_vf_pf_update_mtu(p_hwfn, p_ptt, p_vf);
+                       break;
                }
        } else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
                /* If we've received a message from a VF we consider malicious
@@ -4320,22 +4450,23 @@ u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
                        return i;
 
 out:
-       return MAX_NUM_VFS_E4;
+       return MAX_NUM_VFS_K2;
 }
 
 enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
                                           struct ecore_ptt *ptt, int vfid)
 {
-       struct ecore_dmae_params params;
+       struct dmae_params params;
        struct ecore_vf_info *vf_info;
 
        vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
        if (!vf_info)
                return ECORE_INVAL;
 
-       OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
-       params.flags = ECORE_DMAE_FLAG_VF_SRC | ECORE_DMAE_FLAG_COMPLETION_DST;
-       params.src_vfid = vf_info->abs_vf_id;
+       OSAL_MEMSET(&params, 0, sizeof(params));
+       SET_FIELD(params.flags, DMAE_PARAMS_SRC_VF_VALID, 0x1);
+       SET_FIELD(params.flags, DMAE_PARAMS_COMPLETION_DST, 0x1);
+       params.src_vf_id = vf_info->abs_vf_id;
 
        if (ecore_dmae_host2host(p_hwfn, ptt,
                                 vf_info->vf_mbx.pending_req,
@@ -4369,13 +4500,23 @@ void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
                return;
        }
 
-       feature = 1 << MAC_ADDR_FORCED;
-       OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
+       if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
+           vf_info->p_vf_info.is_trusted_configured) {
+               feature = 1 << VFPF_BULLETIN_MAC_ADDR;
+               /* Trust mode will disable Forced MAC */
+               vf_info->bulletin.p_virt->valid_bitmap &=
+                       ~(1 << MAC_ADDR_FORCED);
+       } else {
+               feature = 1 << MAC_ADDR_FORCED;
+               /* Forced MAC will disable MAC_ADDR */
+               vf_info->bulletin.p_virt->valid_bitmap &=
+                       ~(1 << VFPF_BULLETIN_MAC_ADDR);
+       }
+
+       OSAL_MEMCPY(vf_info->bulletin.p_virt->mac,
+                   mac, ETH_ALEN);
 
        vf_info->bulletin.p_virt->valid_bitmap |= feature;
-       /* Forced MAC will disable MAC_ADDR */
-       vf_info->bulletin.p_virt->valid_bitmap &=
-           ~(1 << VFPF_BULLETIN_MAC_ADDR);
 
        ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
 }
@@ -4410,9 +4551,14 @@ enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
 
        vf_info->bulletin.p_virt->valid_bitmap |= feature;
 
+       if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
+           vf_info->p_vf_info.is_trusted_configured)
+               ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+
        return ECORE_SUCCESS;
 }
 
+#ifndef LINUX_REMOVE
 enum _ecore_status_t
 ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
                                               bool b_untagged_only, int vfid)
@@ -4469,6 +4615,7 @@ void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
 
        *opaque_fid = vf_info->opaque_fid;
 }
+#endif
 
 void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
                                        u16 pvid, int vfid)
@@ -4656,6 +4803,22 @@ u32 ecore_iov_pfvf_msg_length(void)
        return sizeof(union pfvf_tlvs);
 }
 
+u8 *ecore_iov_bulletin_get_mac(struct ecore_hwfn *p_hwfn,
+                                     u16 rel_vf_id)
+{
+       struct ecore_vf_info *p_vf;
+
+       p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+       if (!p_vf || !p_vf->bulletin.p_virt)
+               return OSAL_NULL;
+
+       if (!(p_vf->bulletin.p_virt->valid_bitmap &
+               (1 << VFPF_BULLETIN_MAC_ADDR)))
+               return OSAL_NULL;
+
+       return p_vf->bulletin.p_virt->mac;
+}
+
 u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
 {
        struct ecore_vf_info *p_vf;
@@ -4689,9 +4852,9 @@ enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
                                                 struct ecore_ptt *p_ptt,
                                                 int vfid, int val)
 {
-       struct ecore_mcp_link_state *p_link;
        struct ecore_vf_info *vf;
        u8 abs_vp_id = 0;
+       u16 rl_id;
        enum _ecore_status_t rc;
 
        vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
@@ -4703,10 +4866,34 @@ enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
        if (rc != ECORE_SUCCESS)
                return rc;
 
-       p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
+       rl_id = abs_vp_id; /* The "rl_id" is set as the "vport_id" */
+       return ecore_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val);
+}
+
+enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
+                                                    int vfid, u32 rate)
+{
+       struct ecore_vf_info *vf;
+       int i;
+
+       for_each_hwfn(p_dev, i) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
 
-       return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val,
-                                  p_link->speed);
+               if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "SR-IOV sanity check failed, can't set min rate\n");
+                       return ECORE_INVAL;
+               }
+       }
+
+       vf = ecore_iov_get_vf_info(ECORE_LEADING_HWFN(p_dev), (u16)vfid, true);
+       if (!vf) {
+               DP_NOTICE(p_dev, true,
+                         "Getting vf info failed, can't set min rate\n");
+               return ECORE_INVAL;
+       }
+
+       return ecore_configure_vport_wfq(p_dev, vf->vport_id, rate);
 }
 
 enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
@@ -4819,7 +5006,7 @@ bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
        return (p_vf->state != VF_FREE && p_vf->state != VF_STOPPED);
 }
 
-enum _ecore_status_t
+int
 ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
 {
        struct ecore_wfq_data *vf_vp_wfq;