net/qede/base: add mailbox for resource allocation
diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c
index bdf9164..4ffa8d0 100644
@@ -51,6 +51,8 @@ const char *ecore_channel_tlvs_string[] = {
        "CHANNEL_TLV_VPORT_UPDATE_RSS",
        "CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN",
        "CHANNEL_TLV_VPORT_UPDATE_SGE_TPA",
+       "CHANNEL_TLV_UPDATE_TUNN_PARAM",
+       "CHANNEL_TLV_COALESCE_UPDATE",
        "CHANNEL_TLV_MAX"
 };
 
@@ -86,6 +88,7 @@ static enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
                p_ramrod->personality = PERSONALITY_ETH;
                break;
        case ECORE_PCI_ETH_ROCE:
+       case ECORE_PCI_ETH_IWARP:
                p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
                break;
        default:
@@ -231,6 +234,30 @@ static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
        return false;
 }
 
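+/* Return true if the VF still holds at least one Rx queue-cid,
+ * i.e., an Rx queue that was started and not yet stopped.
+ */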
+static bool ecore_iov_validate_active_rxq(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_vf_info *p_vf)
+{
+       u8 i;
+
+       for (i = 0; i < p_vf->num_rxqs; i++)
+               if (p_vf->vf_queues[i].p_rx_cid)
+                       return true;
+
+       return false;
+}
+
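+/* Same check for active Tx queue-cids */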
+static bool ecore_iov_validate_active_txq(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_vf_info *p_vf)
+{
+       u8 i;
+
+       for (i = 0; i < p_vf->num_txqs; i++)
+               if (p_vf->vf_queues[i].p_tx_cid)
+                       return true;
+
+       return false;
+}
+
 /* TODO - this is linux crc32; Need a way to ifdef it out for linux */
 u32 ecore_crc32(u32 crc, u8 *ptr, u32 length)
 {
@@ -554,7 +581,6 @@ void ecore_iov_free(struct ecore_hwfn *p_hwfn)
 void ecore_iov_free_hw_info(struct ecore_dev *p_dev)
 {
        OSAL_FREE(p_dev, p_dev->p_iov_info);
-       p_dev->p_iov_info = OSAL_NULL;
 }
 
 enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
@@ -597,18 +623,33 @@ enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
                DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                           "IOV capabilities, but no VFs are published\n");
                OSAL_FREE(p_dev, p_dev->p_iov_info);
-               p_dev->p_iov_info = OSAL_NULL;
                return ECORE_SUCCESS;
        }
 
-       /* Calculate the first VF index - this is a bit tricky; Basically,
-        * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
-        * after the first engine's VFs.
+       /* First VF index based on offset is tricky:
+        *  - If ARI is supported [likely], offset - (16 - pf_id) would
+        *    provide the number for eng0. 2nd engine VFs would begin
+        *    after the first engine's VFs.
+        *  - If !ARI, VFs would start on the next device.
+        *    so offset - (256 - pf_id) would provide the number.
+        * Utilize the fact that (256 - pf_id) is reached only by the
+        * latter to differentiate between the two.
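+        * E.g., for abs_pf_id 2 on engine 0: an ARI offset of 16 gives
+        * first_vf_in_pf = 16 + 2 - 16 = 2, while a !ARI offset of 254
+        * (>= 256 - 2) gives 254 + 2 - 256 = 0.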
         */
-       p_dev->p_iov_info->first_vf_in_pf = p_hwfn->p_dev->p_iov_info->offset +
-                                           p_hwfn->abs_pf_id - 16;
-       if (ECORE_PATH_ID(p_hwfn))
-               p_dev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
+
+       if (p_hwfn->p_dev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
+               u32 first = p_hwfn->p_dev->p_iov_info->offset +
+                           p_hwfn->abs_pf_id - 16;
+
+               p_dev->p_iov_info->first_vf_in_pf = first;
+
+               if (ECORE_PATH_ID(p_hwfn))
+                       p_dev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
+       } else {
+               u32 first = p_hwfn->p_dev->p_iov_info->offset +
+                           p_hwfn->abs_pf_id - 256;
+
+               p_dev->p_iov_info->first_vf_in_pf = first;
+       }
 
        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                   "First VF in hwfn 0x%08x\n",
@@ -617,8 +658,8 @@ enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
        return ECORE_SUCCESS;
 }
 
-bool _ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid,
-                               bool b_fail_malicious)
+static bool _ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid,
+                                      bool b_fail_malicious)
 {
        /* Check PF supports sriov */
        if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) ||
@@ -914,17 +955,59 @@ static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
        vf->num_sbs = 0;
 }
 
-enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
-                                             struct ecore_ptt *p_ptt,
-                                             u16 rel_vf_id, u16 num_rx_queues)
+void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
+                       u16 vfid,
+                       struct ecore_mcp_link_params *params,
+                       struct ecore_mcp_link_state *link,
+                       struct ecore_mcp_link_capabilities *p_caps)
+{
+       struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
+       struct ecore_bulletin_content *p_bulletin;
+
+       if (!p_vf)
+               return;
+
+       p_bulletin = p_vf->bulletin.p_virt;
+       p_bulletin->req_autoneg = params->speed.autoneg;
+       p_bulletin->req_adv_speed = params->speed.advertised_speeds;
+       p_bulletin->req_forced_speed = params->speed.forced_speed;
+       p_bulletin->req_autoneg_pause = params->pause.autoneg;
+       p_bulletin->req_forced_rx = params->pause.forced_rx;
+       p_bulletin->req_forced_tx = params->pause.forced_tx;
+       p_bulletin->req_loopback = params->loopback_mode;
+
+       p_bulletin->link_up = link->link_up;
+       p_bulletin->speed = link->speed;
+       p_bulletin->full_duplex = link->full_duplex;
+       p_bulletin->autoneg = link->an;
+       p_bulletin->autoneg_complete = link->an_complete;
+       p_bulletin->parallel_detection = link->parallel_detection;
+       p_bulletin->pfc_enabled = link->pfc_enabled;
+       p_bulletin->partner_adv_speed = link->partner_adv_speed;
+       p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
+       p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
+       p_bulletin->partner_adv_pause = link->partner_adv_pause;
+       p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
+
+       p_bulletin->capability_speed = p_caps->speed_capabilities;
+}
+
+enum _ecore_status_t
+ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
+                        struct ecore_ptt *p_ptt,
+                        struct ecore_iov_vf_init_params *p_params)
 {
+       struct ecore_mcp_link_capabilities link_caps;
+       struct ecore_mcp_link_params link_params;
+       struct ecore_mcp_link_state link_state;
        u8 num_of_vf_available_chains  = 0;
        struct ecore_vf_info *vf = OSAL_NULL;
+       u16 qid, num_irqs;
        enum _ecore_status_t rc = ECORE_SUCCESS;
        u32 cids;
        u8 i;
 
-       vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+       vf = ecore_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
        if (!vf) {
                DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n");
                return ECORE_UNKNOWN_ERROR;
@@ -932,22 +1015,52 @@ enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
 
        if (vf->b_init) {
                DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n",
-                         rel_vf_id);
+                         p_params->rel_vf_id);
                return ECORE_INVAL;
        }
 
+       /* Perform sanity checking on the requested queue_id */
+       for (i = 0; i < p_params->num_queues; i++) {
+               u16 min_vf_qzone = (u16)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE);
+               u16 max_vf_qzone = min_vf_qzone +
+                                  FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE) - 1;
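+               /* E.g., if FEAT_NUM(ECORE_PF_L2_QUE) were 16 and
+                * FEAT_NUM(ECORE_VF_L2_QUE) were 112, valid VF Rx
+                * qids would span 0x10..0x7f.
+                */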
+
+               qid = p_params->req_rx_queue[i];
+               if (qid < min_vf_qzone || qid > max_vf_qzone) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
+                                 qid, p_params->rel_vf_id,
+                                 min_vf_qzone, max_vf_qzone);
+                       return ECORE_INVAL;
+               }
+
+               qid = p_params->req_tx_queue[i];
+               if (qid > max_vf_qzone) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
+                                 qid, p_params->rel_vf_id, max_vf_qzone);
+                       return ECORE_INVAL;
+               }
+
+               /* If client *really* wants, Tx qid can be shared with PF */
+               if (qid < min_vf_qzone)
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
+                                  p_params->rel_vf_id, qid, i);
+       }
+
        /* Limit number of queues according to number of CIDs */
        ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                   "VF[%d] - requesting to initialize for 0x%04x queues"
                   " [0x%04x CIDs available]\n",
-                  vf->relative_vf_id, num_rx_queues, (u16)cids);
-       num_rx_queues = OSAL_MIN_T(u16, num_rx_queues, ((u16)cids));
+                  vf->relative_vf_id, p_params->num_queues, (u16)cids);
+       num_irqs = OSAL_MIN_T(u16, p_params->num_queues, ((u16)cids));
 
        num_of_vf_available_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn,
                                                               p_ptt,
                                                               vf,
-                                                              num_rx_queues);
+                                                              num_irqs);
        if (num_of_vf_available_chains == 0) {
                DP_ERR(p_hwfn, "no available igu sbs\n");
                return ECORE_NOMEM;
@@ -958,28 +1071,32 @@ enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
        vf->num_txqs = num_of_vf_available_chains;
 
        for (i = 0; i < vf->num_rxqs; i++) {
-               u16 queue_id = ecore_int_queue_id_from_sb_id(p_hwfn,
-                                                            vf->igu_sbs[i]);
+               struct ecore_vf_q_info *p_queue = &vf->vf_queues[i];
 
-               if (queue_id > RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
-                       DP_NOTICE(p_hwfn, true,
-                                 "VF[%d] will require utilizing of"
-                                 " out-of-bounds queues - %04x\n",
-                                 vf->relative_vf_id, queue_id);
-                       /* TODO - cleanup the already allocate SBs */
-                       return ECORE_INVAL;
-               }
+               p_queue->fw_rx_qid = p_params->req_rx_queue[i];
+               p_queue->fw_tx_qid = p_params->req_tx_queue[i];
 
                /* CIDs are per-VF, so no problem having them 0-based. */
-               vf->vf_queues[i].fw_rx_qid = queue_id;
-               vf->vf_queues[i].fw_tx_qid = queue_id;
-               vf->vf_queues[i].fw_cid = i;
+               p_queue->fw_cid = i;
 
                DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
-                          "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
-                          vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
+                          "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]  CID %04x\n",
+                          vf->relative_vf_id, i, vf->igu_sbs[i],
+                          p_queue->fw_rx_qid, p_queue->fw_tx_qid,
+                          p_queue->fw_cid);
        }
 
+       /* Update the link configuration in the bulletin */
+       OSAL_MEMCPY(&link_params, ecore_mcp_get_link_params(p_hwfn),
+                   sizeof(link_params));
+       OSAL_MEMCPY(&link_state, ecore_mcp_get_link_state(p_hwfn),
+                   sizeof(link_state));
+       OSAL_MEMCPY(&link_caps, ecore_mcp_get_link_capabilities(p_hwfn),
+                   sizeof(link_caps));
+       ecore_iov_set_link(p_hwfn, p_params->rel_vf_id,
+                          &link_params, &link_state, &link_caps);
+
        rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);
 
        if (rc == ECORE_SUCCESS) {
@@ -994,43 +1111,6 @@ enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
        return rc;
 }
 
-void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
-                       u16 vfid,
-                       struct ecore_mcp_link_params *params,
-                       struct ecore_mcp_link_state *link,
-                       struct ecore_mcp_link_capabilities *p_caps)
-{
-       struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
-       struct ecore_bulletin_content *p_bulletin;
-
-       if (!p_vf)
-               return;
-
-       p_bulletin = p_vf->bulletin.p_virt;
-       p_bulletin->req_autoneg = params->speed.autoneg;
-       p_bulletin->req_adv_speed = params->speed.advertised_speeds;
-       p_bulletin->req_forced_speed = params->speed.forced_speed;
-       p_bulletin->req_autoneg_pause = params->pause.autoneg;
-       p_bulletin->req_forced_rx = params->pause.forced_rx;
-       p_bulletin->req_forced_tx = params->pause.forced_tx;
-       p_bulletin->req_loopback = params->loopback_mode;
-
-       p_bulletin->link_up = link->link_up;
-       p_bulletin->speed = link->speed;
-       p_bulletin->full_duplex = link->full_duplex;
-       p_bulletin->autoneg = link->an;
-       p_bulletin->autoneg_complete = link->an_complete;
-       p_bulletin->parallel_detection = link->parallel_detection;
-       p_bulletin->pfc_enabled = link->pfc_enabled;
-       p_bulletin->partner_adv_speed = link->partner_adv_speed;
-       p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
-       p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
-       p_bulletin->partner_adv_pause = link->partner_adv_pause;
-       p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
-
-       p_bulletin->capability_speed = p_caps->speed_capabilities;
-}
-
 enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
                                                 struct ecore_ptt *p_ptt,
                                                 u16 rel_vf_id)
@@ -1350,8 +1430,21 @@ static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
 
        p_vf->num_active_rxqs = 0;
 
-       for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++)
-               p_vf->vf_queues[i].rxq_active = 0;
+       for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
+               struct ecore_vf_q_info *p_queue = &p_vf->vf_queues[i];
+
+               if (p_queue->p_rx_cid) {
+                       ecore_eth_queue_cid_release(p_hwfn,
+                                                   p_queue->p_rx_cid);
+                       p_queue->p_rx_cid = OSAL_NULL;
+               }
+
+               if (p_queue->p_tx_cid) {
+                       ecore_eth_queue_cid_release(p_hwfn,
+                                                   p_queue->p_tx_cid);
+                       p_queue->p_tx_cid = OSAL_NULL;
+               }
+       }
 
        OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
        OSAL_MEMSET(&p_vf->acquire, 0, sizeof(p_vf->acquire));
@@ -1675,11 +1768,9 @@ ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn,
                DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                           "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
                           filter.vlan, p_vf->relative_vf_id);
-               rc = ecore_sp_eth_filter_ucast(p_hwfn,
-                                              p_vf->opaque_fid,
-                                              &filter,
-                                              ECORE_SPQ_MODE_CB,
-                                                      OSAL_NULL);
+               rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+                                              &filter, ECORE_SPQ_MODE_CB,
+                                              OSAL_NULL);
                if (rc) {
                        DP_NOTICE(p_hwfn, true,
                                  "Failed to configure VLAN [%04x]"
@@ -1790,14 +1881,14 @@ ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
 
                /* Update all the Rx queues */
                for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
-                       u16 qid;
+                       struct ecore_queue_cid *p_cid;
 
-                       if (!p_vf->vf_queues[i].rxq_active)
+                       p_cid = p_vf->vf_queues[i].p_rx_cid;
+                       if (p_cid == OSAL_NULL)
                                continue;
 
-                       qid = p_vf->vf_queues[i].fw_rx_qid;
-
-                       rc = ecore_sp_eth_rx_queues_update(p_hwfn, qid,
+                       rc = ecore_sp_eth_rx_queues_update(p_hwfn,
+                                                          (void **)&p_cid,
                                                   1, 0, 1,
                                                   ECORE_SPQ_MODE_EBLOCK,
                                                   OSAL_NULL);
@@ -1805,7 +1896,7 @@ ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
                                DP_NOTICE(p_hwfn, true,
                                          "Failed to send Rx update"
                                          " fo queue[0x%04x]\n",
-                                         qid);
+                                         p_cid->rel.queue_id);
                                return rc;
                        }
                }
@@ -1849,6 +1940,8 @@ static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
        vf->state = VF_ENABLED;
        start = &mbx->req_virt->start_vport;
 
+       ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
+
        /* Initialize Status block in CAU */
        for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
                if (!start->sb_addr[sb_id]) {
@@ -1863,7 +1956,6 @@ static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
                                      vf->igu_sbs[sb_id],
                                      vf->abs_vf_id, 1);
        }
-       ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
 
        vf->mtu = start->mtu;
        vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
@@ -1930,6 +2022,15 @@ static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
        vf->vport_instance--;
        vf->spoof_chk = false;
 
+       if ((ecore_iov_validate_active_rxq(p_hwfn, vf)) ||
+           (ecore_iov_validate_active_txq(p_hwfn, vf))) {
+               vf->b_malicious = true;
+               DP_NOTICE(p_hwfn, false,
+                         "VF [%02x] - considered malicious;"
+                         " Unable to stop RX/TX queues\n",
+                         vf->abs_vf_id);
+       }
+
        rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
        if (rc != ECORE_SUCCESS) {
                DP_ERR(p_hwfn,
@@ -1987,9 +2088,10 @@ static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
                                       struct ecore_ptt *p_ptt,
                                       struct ecore_vf_info *vf)
 {
-       struct ecore_queue_start_common_params p_params;
+       struct ecore_queue_start_common_params params;
        struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
        u8 status = PFVF_STATUS_NO_RESOURCE;
+       struct ecore_vf_q_info *p_queue;
        struct vfpf_start_rxq_tlv *req;
        bool b_legacy_vf = false;
        enum _ecore_status_t rc;
@@ -2000,13 +2102,23 @@ static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
            !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
                goto out;
 
-       OSAL_MEMSET(&p_params, 0, sizeof(p_params));
-       p_params.queue_id = (u8)vf->vf_queues[req->rx_qid].fw_rx_qid;
-       p_params.vf_qid = req->rx_qid;
-       p_params.vport_id = vf->vport_id;
-       p_params.stats_id = vf->abs_vf_id + 0x10,
-       p_params.sb = req->hw_sb;
-       p_params.sb_idx = req->sb_index;
+       /* Acquire a new queue-cid */
+       p_queue = &vf->vf_queues[req->rx_qid];
+
+       OSAL_MEMSET(&params, 0, sizeof(params));
+       params.queue_id = (u8)p_queue->fw_rx_qid;
+       params.vport_id = vf->vport_id;
+       params.stats_id = vf->abs_vf_id + 0x10;
+       params.sb = req->hw_sb;
+       params.sb_idx = req->sb_index;
+
+       p_queue->p_rx_cid = _ecore_eth_queue_to_cid(p_hwfn,
+                                                   vf->opaque_fid,
+                                                   p_queue->fw_cid,
+                                                   (u8)req->rx_qid,
+                                                   &params);
+       if (p_queue->p_rx_cid == OSAL_NULL)
+               goto out;
 
        /* Legacy VFs have their Producers in a different location, which they
         * calculate on their own and clean the producer prior to this.
@@ -2019,27 +2131,167 @@ static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
                       GTT_BAR0_MAP_REG_MSDM_RAM +
                       MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
                       0);
+       p_queue->p_rx_cid->b_legacy_vf = b_legacy_vf;
 
-       rc = ecore_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
-                                          vf->vf_queues[req->rx_qid].fw_cid,
-                                          &p_params,
-                                          req->bd_max_bytes,
-                                          req->rxq_addr,
-                                          req->cqe_pbl_addr,
-                                          req->cqe_pbl_size,
-                                          b_legacy_vf);
 
-       if (rc) {
+       rc = ecore_eth_rxq_start_ramrod(p_hwfn,
+                                       p_queue->p_rx_cid,
+                                       req->bd_max_bytes,
+                                       req->rxq_addr,
+                                       req->cqe_pbl_addr,
+                                       req->cqe_pbl_size);
+       if (rc != ECORE_SUCCESS) {
                status = PFVF_STATUS_FAILURE;
+               ecore_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid);
+               p_queue->p_rx_cid = OSAL_NULL;
        } else {
                status = PFVF_STATUS_SUCCESS;
-               vf->vf_queues[req->rx_qid].rxq_active = true;
                vf->num_active_rxqs++;
        }
 
 out:
-       ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf,
-                                       status, b_legacy_vf);
+       ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
+                                       b_legacy_vf);
+}
+
+static void
+ecore_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
+                                struct ecore_tunnel_info *p_tun,
+                                u16 tunn_feature_mask)
+{
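+       /* Echo the PF's current tunnel configuration back to the VF */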
+       p_resp->tunn_feature_mask = tunn_feature_mask;
+       p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
+       p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
+       p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
+       p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
+       p_resp->ipgre_mode = p_tun->ip_gre.b_mode_enabled;
+       p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
+       p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
+       p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
+       p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
+       p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
+       p_resp->geneve_udp_port = p_tun->geneve_port.port;
+       p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
+}
+
+static void
+__ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
+                               struct ecore_tunn_update_type *p_tun,
+                               enum ecore_tunn_mode mask, u8 tun_cls)
+{
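+       /* A set bit in tun_mode_update_mask means the VF requested a
+        * change for that tunnel mode; the matching tunn_mode bit
+        * carries the requested enabled state.
+        */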
+       if (p_req->tun_mode_update_mask & (1 << mask)) {
+               p_tun->b_update_mode = true;
+
+               if (p_req->tunn_mode & (1 << mask))
+                       p_tun->b_mode_enabled = true;
+       }
+
+       p_tun->tun_cls = tun_cls;
+}
+
+static void
+ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
+                             struct ecore_tunn_update_type *p_tun,
+                             struct ecore_tunn_update_udp_port *p_port,
+                             enum ecore_tunn_mode mask,
+                             u8 tun_cls, u8 update_port, u16 port)
+{
+       if (update_port) {
+               p_port->b_update_port = true;
+               p_port->port = port;
+       }
+
+       __ecore_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
+}
+
+static bool
+ecore_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
+{
+       bool b_update_requested = false;
+
+       if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
+           p_req->update_geneve_port || p_req->update_vxlan_port)
+               b_update_requested = true;
+
+       return b_update_requested;
+}
+
+static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn,
+                                              struct ecore_ptt *p_ptt,
+                                              struct ecore_vf_info *p_vf)
+{
+       struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
+       struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+       struct pfvf_update_tunn_param_tlv *p_resp;
+       struct vfpf_update_tunn_param_tlv *p_req;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       u8 status = PFVF_STATUS_SUCCESS;
+       bool b_update_required = false;
+       struct ecore_tunnel_info tunn;
+       u16 tunn_feature_mask = 0;
+
+       mbx->offset = (u8 *)mbx->reply_virt;
+
+       OSAL_MEM_ZERO(&tunn, sizeof(tunn));
+       p_req = &mbx->req_virt->tunn_param_update;
+
+       if (!ecore_iov_pf_validate_tunn_param(p_req)) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "No tunnel update requested by VF\n");
+               status = PFVF_STATUS_FAILURE;
+               goto send_resp;
+       }
+
+       tunn.b_update_rx_cls = p_req->update_tun_cls;
+       tunn.b_update_tx_cls = p_req->update_tun_cls;
+
+       ecore_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
+                                     ECORE_MODE_VXLAN_TUNN, p_req->vxlan_clss,
+                                     p_req->update_vxlan_port,
+                                     p_req->vxlan_port);
+       ecore_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
+                                     ECORE_MODE_L2GENEVE_TUNN,
+                                     p_req->l2geneve_clss,
+                                     p_req->update_geneve_port,
+                                     p_req->geneve_port);
+       __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
+                                       ECORE_MODE_IPGENEVE_TUNN,
+                                       p_req->ipgeneve_clss);
+       __ecore_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
+                                       ECORE_MODE_L2GRE_TUNN,
+                                       p_req->l2gre_clss);
+       __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
+                                       ECORE_MODE_IPGRE_TUNN,
+                                       p_req->ipgre_clss);
+
+       /* Even if the PF modifies the VF's request, it should still
+        * return an error when the resulting configuration is partial
+        * or differs from the one requested.
+        */
+       rc = OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, &tunn_feature_mask,
+                                                &b_update_required, &tunn);
+
+       if (rc != ECORE_SUCCESS)
+               status = PFVF_STATUS_FAILURE;
+
+       /* Check whether the ECORE client is willing to update anything */
+       if (b_update_required) {
+               rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
+                                                ECORE_SPQ_MODE_EBLOCK,
+                                                OSAL_NULL);
+               if (rc != ECORE_SUCCESS)
+                       status = PFVF_STATUS_FAILURE;
+       }
+
+send_resp:
+       p_resp = ecore_add_tlv(p_hwfn, &mbx->offset,
+                              CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
+
+       ecore_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
+       ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+                     sizeof(struct channel_list_end_tlv));
+
+       ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
 }
 
 static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
@@ -2087,44 +2339,49 @@ static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
                                       struct ecore_ptt *p_ptt,
                                       struct ecore_vf_info *vf)
 {
-       struct ecore_queue_start_common_params p_params;
+       struct ecore_queue_start_common_params params;
        struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
        u8 status = PFVF_STATUS_NO_RESOURCE;
-       union ecore_qm_pq_params pq_params;
+       struct ecore_vf_q_info *p_queue;
        struct vfpf_start_txq_tlv *req;
        enum _ecore_status_t rc;
+       u16 pq;
 
-       /* Prepare the parameters which would choose the right PQ */
-       OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
-       pq_params.eth.is_vf = 1;
-       pq_params.eth.vf_id = vf->relative_vf_id;
-
-       OSAL_MEMSET(&p_params, 0, sizeof(p_params));
+       OSAL_MEMSET(&params, 0, sizeof(params));
        req = &mbx->req_virt->start_txq;
 
        if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid) ||
            !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
                goto out;
 
-       p_params.queue_id = (u8)vf->vf_queues[req->tx_qid].fw_tx_qid;
-       p_params.vport_id = vf->vport_id;
-       p_params.stats_id = vf->abs_vf_id + 0x10,
-       p_params.sb = req->hw_sb;
-       p_params.sb_idx = req->sb_index;
+       /* Acquire a new queue-cid */
+       p_queue = &vf->vf_queues[req->tx_qid];
 
-       rc = ecore_sp_eth_txq_start_ramrod(p_hwfn,
-                                          vf->opaque_fid,
-                                          vf->vf_queues[req->tx_qid].fw_cid,
-                                          &p_params,
-                                          req->pbl_addr,
-                                          req->pbl_size,
-                                          &pq_params);
+       params.queue_id = p_queue->fw_tx_qid;
+       params.vport_id = vf->vport_id;
+       params.stats_id = vf->abs_vf_id + 0x10;
+       params.sb = req->hw_sb;
+       params.sb_idx = req->sb_index;
+
+       p_queue->p_tx_cid = _ecore_eth_queue_to_cid(p_hwfn,
+                                                   vf->opaque_fid,
+                                                   p_queue->fw_cid,
+                                                   (u8)req->tx_qid,
+                                                   &params);
+       if (p_queue->p_tx_cid == OSAL_NULL)
+               goto out;
 
-       if (rc)
+       pq = ecore_get_cm_pq_idx_vf(p_hwfn,
+                                   vf->relative_vf_id);
+       rc = ecore_eth_txq_start_ramrod(p_hwfn, p_queue->p_tx_cid,
+                                       req->pbl_addr, req->pbl_size, pq);
+       if (rc != ECORE_SUCCESS) {
                status = PFVF_STATUS_FAILURE;
-       else {
+               ecore_eth_queue_cid_release(p_hwfn,
+                                           p_queue->p_tx_cid);
+               p_queue->p_tx_cid = OSAL_NULL;
+       } else {
                status = PFVF_STATUS_SUCCESS;
-               vf->vf_queues[req->tx_qid].txq_active = true;
        }
 
 out:
@@ -2137,6 +2394,7 @@ static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
                                                   u8 num_rxqs,
                                                   bool cqe_completion)
 {
+       struct ecore_vf_q_info *p_queue;
        enum _ecore_status_t rc = ECORE_SUCCESS;
        int qid;
 
@@ -2144,16 +2402,18 @@ static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
                return ECORE_INVAL;
 
        for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
-               if (vf->vf_queues[qid].rxq_active) {
-                       rc = ecore_sp_eth_rx_queue_stop(p_hwfn,
-                                                       vf->vf_queues[qid].
-                                                       fw_rx_qid, false,
-                                                       cqe_completion);
+               p_queue = &vf->vf_queues[qid];
 
-                       if (rc)
-                               return rc;
-               }
-               vf->vf_queues[qid].rxq_active = false;
+               if (!p_queue->p_rx_cid)
+                       continue;
+
+               rc = ecore_eth_rx_queue_stop(p_hwfn,
+                                            p_queue->p_rx_cid,
+                                            false, cqe_completion);
+               if (rc != ECORE_SUCCESS)
+                       return rc;
+
+               vf->vf_queues[qid].p_rx_cid = OSAL_NULL;
                vf->num_active_rxqs--;
        }
 
@@ -2165,21 +2425,23 @@ static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
                                                   u16 txq_id, u8 num_txqs)
 {
        enum _ecore_status_t rc = ECORE_SUCCESS;
+       struct ecore_vf_q_info *p_queue;
        int qid;
 
        if (txq_id + num_txqs > OSAL_ARRAY_SIZE(vf->vf_queues))
                return ECORE_INVAL;
 
        for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
-               if (vf->vf_queues[qid].txq_active) {
-                       rc = ecore_sp_eth_tx_queue_stop(p_hwfn,
-                                                       vf->vf_queues[qid].
-                                                       fw_tx_qid);
+               p_queue = &vf->vf_queues[qid];
+               if (!p_queue->p_tx_cid)
+                       continue;
 
-                       if (rc)
-                               return rc;
-               }
-               vf->vf_queues[qid].txq_active = false;
+               rc = ecore_eth_tx_queue_stop(p_hwfn,
+                                            p_queue->p_tx_cid);
+               if (rc != ECORE_SUCCESS)
+                       return rc;
+
+               p_queue->p_tx_cid = OSAL_NULL;
        }
        return rc;
 }
@@ -2235,10 +2497,11 @@ static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
                                         struct ecore_ptt *p_ptt,
                                         struct ecore_vf_info *vf)
 {
+       struct ecore_queue_cid *handlers[ECORE_MAX_VF_CHAINS_PER_PF];
        u16 length = sizeof(struct pfvf_def_resp_tlv);
        struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
        struct vfpf_update_rxq_tlv *req;
-       u8 status = PFVF_STATUS_SUCCESS;
+       u8 status = PFVF_STATUS_FAILURE;
        u8 complete_event_flg;
        u8 complete_cqe_flg;
        u16 qid;
@@ -2249,30 +2512,38 @@ static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
        complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
        complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
 
+       /* Validate inputs */
+       if (req->num_rxqs + req->rx_qid > ECORE_MAX_VF_CHAINS_PER_PF ||
+           !ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid)) {
+               DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
+                       vf->relative_vf_id, req->rx_qid, req->num_rxqs);
+               goto out;
+       }
+
        for (i = 0; i < req->num_rxqs; i++) {
                qid = req->rx_qid + i;
 
-               if (!vf->vf_queues[qid].rxq_active) {
-                       DP_NOTICE(p_hwfn, true,
-                                 "VF rx_qid = %d isn`t active!\n", qid);
-                       status = PFVF_STATUS_FAILURE;
-                       break;
+               if (!vf->vf_queues[qid].p_rx_cid) {
+                       DP_INFO(p_hwfn,
+                               "VF[%d] rx_qid = %d isn't active!\n",
+                               vf->relative_vf_id, qid);
+                       goto out;
                }
 
-               rc = ecore_sp_eth_rx_queues_update(p_hwfn,
-                                                  vf->vf_queues[qid].fw_rx_qid,
-                                                  1,
-                                                  complete_cqe_flg,
-                                                  complete_event_flg,
-                                                  ECORE_SPQ_MODE_EBLOCK,
-                                                  OSAL_NULL);
-
-               if (rc) {
-                       status = PFVF_STATUS_FAILURE;
-                       break;
-               }
+               handlers[i] = vf->vf_queues[qid].p_rx_cid;
        }
 
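+       /* Update all requested Rx queues in a single ramrod */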
+       rc = ecore_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
+                                          req->num_rxqs,
+                                          complete_cqe_flg,
+                                          complete_event_flg,
+                                          ECORE_SPQ_MODE_EBLOCK,
+                                          OSAL_NULL);
+       if (rc)
+               goto out;
+
+       status = PFVF_STATUS_SUCCESS;
+out:
        ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
                               length, status);
 }
@@ -2449,12 +2720,14 @@ ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
                              struct ecore_vf_info *vf,
                              struct ecore_sp_vport_update_params *p_data,
                              struct ecore_rss_params *p_rss,
-                             struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+                             struct ecore_iov_vf_mbx *p_mbx,
+                             u16 *tlvs_mask, u16 *tlvs_accepted)
 {
        struct vfpf_vport_update_rss_tlv *p_rss_tlv;
        u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
-       u16 i, q_idx, max_q_idx;
+       bool b_reject = false;
        u16 table_size;
+       u16 i, q_idx;
 
        p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
            ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
@@ -2482,36 +2755,38 @@ ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
        p_rss->rss_eng_id = vf->relative_vf_id + 1;
        p_rss->rss_caps = p_rss_tlv->rss_caps;
        p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
-       OSAL_MEMCPY(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
-                   sizeof(p_rss->rss_ind_table));
        OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key,
                    sizeof(p_rss->rss_key));
 
        table_size = OSAL_MIN_T(u16, OSAL_ARRAY_SIZE(p_rss->rss_ind_table),
                                (1 << p_rss_tlv->rss_table_size_log));
 
-       max_q_idx = OSAL_ARRAY_SIZE(vf->vf_queues);
-
        for (i = 0; i < table_size; i++) {
-               u16 index = vf->vf_queues[0].fw_rx_qid;
+               q_idx = p_rss_tlv->rss_ind_table[i];
+               if (!ecore_iov_validate_rxq(p_hwfn, vf, q_idx)) {
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "VF[%d]: Omitting RSS due to wrong queue %04x\n",
+                                  vf->relative_vf_id, q_idx);
+                       b_reject = true;
+                       goto out;
+               }
 
-               q_idx = p_rss->rss_ind_table[i];
-               if (q_idx >= max_q_idx)
-                       DP_NOTICE(p_hwfn, true,
-                                 "rss_ind_table[%d] = %d,"
-                                 " rxq is out of range\n",
-                                 i, q_idx);
-               else if (!vf->vf_queues[q_idx].rxq_active)
-                       DP_NOTICE(p_hwfn, true,
-                                 "rss_ind_table[%d] = %d, rxq is not active\n",
-                                 i, q_idx);
-               else
-                       index = vf->vf_queues[q_idx].fw_rx_qid;
-               p_rss->rss_ind_table[i] = index;
+               if (!vf->vf_queues[q_idx].p_rx_cid) {
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "VF[%d]: Omitting RSS due to inactive queue %08x\n",
+                                  vf->relative_vf_id, q_idx);
+                       b_reject = true;
+                       goto out;
+               }
+
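+               /* Store the queue-cid itself; the FW queue-id is
+                * derived from it when the ramrod is sent.
+                */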
+               p_rss->rss_ind_table[i] = vf->vf_queues[q_idx].p_rx_cid;
        }
 
        p_data->rss_params = p_rss;
+out:
        *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;
+       if (!b_reject)
+               *tlvs_accepted |= 1 << ECORE_IOV_VP_UPDATE_RSS;
 }
 
 static void
@@ -2567,11 +2842,11 @@ static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
                                          struct ecore_ptt *p_ptt,
                                          struct ecore_vf_info *vf)
 {
+       struct ecore_rss_params *p_rss_params = OSAL_NULL;
        struct ecore_sp_vport_update_params params;
        struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
        struct ecore_sge_tpa_params sge_tpa_params;
        u16 tlvs_mask = 0, tlvs_accepted = 0;
-       struct ecore_rss_params rss_params;
        u8 status = PFVF_STATUS_SUCCESS;
        u16 length;
        enum _ecore_status_t rc;
@@ -2586,6 +2861,12 @@ static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
                goto out;
        }
 
+       p_rss_params = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_rss_params));
+       if (p_rss_params == OSAL_NULL) {
+               status = PFVF_STATUS_FAILURE;
+               goto out;
+       }
+
        OSAL_MEMSET(&params, 0, sizeof(params));
        params.opaque_fid = vf->opaque_fid;
        params.vport_id = vf->vport_id;
@@ -2599,20 +2880,24 @@ static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
        ecore_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
        ecore_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
        ecore_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
-       ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
-                                     mbx, &tlvs_mask);
        ecore_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
        ecore_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
                                          &sge_tpa_params, mbx, &tlvs_mask);
 
+       tlvs_accepted = tlvs_mask;
+
+       /* Some of the extended TLVs need to be validated first; in that
+        * case, they can update the mask without updating the accepted
+        * mask [so the PF can tell the VF its request was rejected].
+        */
+       ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
+                                     mbx, &tlvs_mask, &tlvs_accepted);
+
        /* Just log a message if there isn't a single extended TLV in the
         * buffer. Once all vport-update ramrod features are requested by
         * the VF as extended TLVs, an error can be returned in the
         * response when no extended TLV is present in the buffer.
         */
-       tlvs_accepted = tlvs_mask;
-
-#ifndef LINUX_REMOVE
        if (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id,
                                     &params, &tlvs_accepted) !=
            ECORE_SUCCESS) {
@@ -2620,7 +2905,6 @@ static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
                status = PFVF_STATUS_NOT_SUPPORTED;
                goto out;
        }
-#endif
 
        if (!tlvs_accepted) {
                if (tlvs_mask)
@@ -2641,6 +2925,7 @@ static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
                status = PFVF_STATUS_FAILURE;
 
 out:
+       OSAL_VFREE(p_hwfn->p_dev, p_rss_params);
        length = ecore_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
                                                    tlvs_mask, tlvs_accepted);
        ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
@@ -2943,6 +3228,65 @@ static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
                               length, status);
 }
 
+static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
+                                        struct ecore_ptt *p_ptt,
+                                        struct ecore_vf_info *vf)
+{
+       struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       struct vfpf_update_coalesce *req;
+       u8 status = PFVF_STATUS_FAILURE;
+       struct ecore_queue_cid *p_cid;
+       u16 rx_coal, tx_coal;
+       u16 qid;
+
+       req = &mbx->req_virt->update_coalesce;
+
+       rx_coal = req->rx_coal;
+       tx_coal = req->tx_coal;
+       qid = req->qid;
+
+       if (!ecore_iov_validate_rxq(p_hwfn, vf, qid)) {
+               DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
+                      vf->abs_vf_id, qid);
+               goto out;
+       }
+
+       if (!ecore_iov_validate_txq(p_hwfn, vf, qid)) {
+               DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
+                      vf->abs_vf_id, qid);
+               goto out;
+       }
+
+       /* Read the queue-cid only after qid has been validated */
+       p_cid = vf->vf_queues[qid].p_rx_cid;
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                  "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
+                  vf->abs_vf_id, rx_coal, tx_coal, qid);
+       if (rx_coal) {
+               rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
+               if (rc != ECORE_SUCCESS) {
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "VF[%d]: Unable to set rx queue = %d coalesce\n",
+                                  vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
+                       goto out;
+               }
+       }
+       if (tx_coal) {
+               rc =  ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, p_cid);
+               if (rc != ECORE_SUCCESS) {
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "VF[%d]: Unable to set tx queue = %d coalesce\n",
+                                  vf->abs_vf_id, vf->vf_queues[qid].fw_tx_qid);
+                       goto out;
+               }
+       }
+
+       status = PFVF_STATUS_SUCCESS;
+out:
+       ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
+                              sizeof(struct pfvf_def_resp_tlv), status);
+}
+
 static enum _ecore_status_t
 ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,
                           struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
@@ -3076,6 +3420,13 @@ ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
                        return rc;
                }
 
+               /* Workaround to make VF-PF channel ready, as FW
+                * doesn't do that as a part of FLR.
+                */
+               REG_WR(p_hwfn,
+                      GTT_BAR0_MAP_REG_USDM_RAM +
+                      USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
+
                /* VF_STOPPED has to be set only after final cleanup
                 * but prior to re-enabling the VF.
                 */
@@ -3144,7 +3495,7 @@ ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
 
 bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
 {
-       bool found;
+       bool found = false;
        u16 i;
 
        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n");
@@ -3286,6 +3637,12 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
                case CHANNEL_TLV_RELEASE:
                        ecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
                        break;
+               case CHANNEL_TLV_UPDATE_TUNN_PARAM:
+                       ecore_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
+                       break;
+               case CHANNEL_TLV_COALESCE_UPDATE:
+                       ecore_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
+                       break;
                }
        } else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
                /* If we've received a message from a VF we consider malicious
@@ -3467,7 +3824,7 @@ u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
                        return i;
 
 out:
-       return MAX_NUM_VFS;
+       return E4_MAX_NUM_VFS;
 }
 
 enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
@@ -3963,6 +4320,18 @@ bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
        return (p_vf->state == VF_ENABLED);
 }
 
+bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
+                            u16 rel_vf_id)
+{
+       struct ecore_vf_info *p_vf;
+
+       p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+       if (!p_vf)
+               return false;
+
+       return (p_vf->state != VF_FREE && p_vf->state != VF_STOPPED);
+}
+
 enum _ecore_status_t
 ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
 {