net/qede/base: prevent stop vport assert by malicious VF
diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c
index 532c492..792cf75 100644
--- a/drivers/net/qede/base/ecore_sriov.c
+++ b/drivers/net/qede/base/ecore_sriov.c
@@ -53,9 +53,26 @@ const char *ecore_channel_tlvs_string[] = {
        "CHANNEL_TLV_VPORT_UPDATE_SGE_TPA",
        "CHANNEL_TLV_UPDATE_TUNN_PARAM",
        "CHANNEL_TLV_COALESCE_UPDATE",
+       "CHANNEL_TLV_QID",
        "CHANNEL_TLV_MAX"
 };
 
+static u8 ecore_vf_calculate_legacy(struct ecore_hwfn *p_hwfn,
+                                   struct ecore_vf_info *p_vf)
+{
+       u8 legacy = 0;
+
+       if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+           ETH_HSI_VER_NO_PKT_LEN_TUNN)
+               legacy |= ECORE_QCID_LEGACY_VF_RX_PROD;
+
+       if (!(p_vf->acquire.vfdev_info.capabilities &
+            VFPF_ACQUIRE_CAP_QUEUE_QIDS))
+               legacy |= ECORE_QCID_LEGACY_VF_CID;
+
+       return legacy;
+}
+
 /* IOV ramrods */
 static enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
                                              struct ecore_vf_info *p_vf)
@@ -192,28 +209,90 @@ struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
        return vf;
 }
 
+static struct ecore_queue_cid *
+ecore_iov_get_vf_rx_queue_cid(struct ecore_hwfn *p_hwfn,
+                             struct ecore_vf_info *p_vf,
+                             struct ecore_vf_queue *p_queue)
+{
+       int i;
+
+       for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+               if (p_queue->cids[i].p_cid &&
+                   !p_queue->cids[i].b_is_tx)
+                       return p_queue->cids[i].p_cid;
+       }
+
+       return OSAL_NULL;
+}
+
+enum ecore_iov_validate_q_mode {
+       ECORE_IOV_VALIDATE_Q_NA,
+       ECORE_IOV_VALIDATE_Q_ENABLE,
+       ECORE_IOV_VALIDATE_Q_DISABLE,
+};
+
+static bool ecore_iov_validate_queue_mode(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_vf_info *p_vf,
+                                         u16 qid,
+                                         enum ecore_iov_validate_q_mode mode,
+                                         bool b_is_tx)
+{
+       int i;
+
+       if (mode == ECORE_IOV_VALIDATE_Q_NA)
+               return true;
+
+       for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+               struct ecore_vf_queue_cid *p_qcid;
+
+               p_qcid = &p_vf->vf_queues[qid].cids[i];
+
+               if (p_qcid->p_cid == OSAL_NULL)
+                       continue;
+
+               if (p_qcid->b_is_tx != b_is_tx)
+                       continue;
+
+               /* Found. It's enabled. */
+               return (mode == ECORE_IOV_VALIDATE_Q_ENABLE);
+       }
+
+       /* In case we haven't found any valid cid, then it's disabled */
+       return (mode == ECORE_IOV_VALIDATE_Q_DISABLE);
+}
+
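Note: the cids[] array dereferenced here belongs to the reworked struct ecore_vf_queue in ecore_sriov.h, which this view does not show. Below is a hedged reconstruction of the layout this file relies on, limited to the members actually used in the patch (p_cid, b_is_tx, fw_rx_qid, fw_tx_qid); the MAX_QUEUES_PER_QZONE value is a placeholder.

    /* Sketch of the assumed layout, for orientation only. */
    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_QUEUES_PER_QZONE 4       /* illustrative value           */
    struct ecore_queue_cid;              /* opaque here                  */

    struct ecore_vf_queue_cid {
            bool b_is_tx;                    /* Tx vs. Rx cid in this slot */
            struct ecore_queue_cid *p_cid;   /* NULL when the slot is free */
    };

    struct ecore_vf_queue {
            uint16_t fw_rx_qid;              /* HW Rx queue id             */
            uint16_t fw_tx_qid;              /* HW Tx queue id             */
            struct ecore_vf_queue_cid cids[MAX_QUEUES_PER_QZONE];
    };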
 static bool ecore_iov_validate_rxq(struct ecore_hwfn *p_hwfn,
                                   struct ecore_vf_info *p_vf,
-                                  u16 rx_qid)
+                                  u16 rx_qid,
+                                  enum ecore_iov_validate_q_mode mode)
 {
-       if (rx_qid >= p_vf->num_rxqs)
+       if (rx_qid >= p_vf->num_rxqs) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                           "VF[0x%02x] - can't touch Rx queue[%04x];"
                           " Only 0x%04x are allocated\n",
                           p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
-       return rx_qid < p_vf->num_rxqs;
+               return false;
+       }
+
+       return ecore_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid,
+                                            mode, false);
 }
 
 static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
                                   struct ecore_vf_info *p_vf,
-                                  u16 tx_qid)
+                                  u16 tx_qid,
+                                  enum ecore_iov_validate_q_mode mode)
 {
-       if (tx_qid >= p_vf->num_txqs)
+       if (tx_qid >= p_vf->num_txqs) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                           "VF[0x%02x] - can't touch Tx queue[%04x];"
                           " Only 0x%04x are allocated\n",
                           p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
-       return tx_qid < p_vf->num_txqs;
+               return false;
+       }
+
+       return ecore_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid,
+                                            mode, true);
 }
 
 static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
@@ -234,13 +313,16 @@ static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
        return false;
 }
 
+/* Is there at least 1 queue open? */
 static bool ecore_iov_validate_active_rxq(struct ecore_hwfn *p_hwfn,
                                          struct ecore_vf_info *p_vf)
 {
        u8 i;
 
        for (i = 0; i < p_vf->num_rxqs; i++)
-               if (p_vf->vf_queues[i].p_rx_cid)
+               if (ecore_iov_validate_queue_mode(p_hwfn, p_vf, i,
+                                                 ECORE_IOV_VALIDATE_Q_ENABLE,
+                                                 false))
                        return true;
 
        return false;
@@ -251,26 +333,15 @@ static bool ecore_iov_validate_active_txq(struct ecore_hwfn *p_hwfn,
 {
        u8 i;
 
-       for (i = 0; i < p_vf->num_rxqs; i++)
-               if (p_vf->vf_queues[i].p_tx_cid)
+       for (i = 0; i < p_vf->num_txqs; i++)
+               if (ecore_iov_validate_queue_mode(p_hwfn, p_vf, i,
+                                                 ECORE_IOV_VALIDATE_Q_ENABLE,
+                                                 true))
                        return true;
 
        return false;
 }
 
-/* TODO - this is linux crc32; Need a way to ifdef it out for linux */
-u32 ecore_crc32(u32 crc, u8 *ptr, u32 length)
-{
-       int i;
-
-       while (length--) {
-               crc ^= *ptr++;
-               for (i = 0; i < 8; i++)
-                       crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
-       }
-       return crc;
-}
-
 enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
                                                int vfid,
                                                struct ecore_ptt *p_ptt)
@@ -292,8 +363,8 @@ enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
 
        /* Increment bulletin board version and compute crc */
        p_bulletin->version++;
-       p_bulletin->crc = ecore_crc32(0, (u8 *)p_bulletin + crc_size,
-                                     p_vf->bulletin.size - crc_size);
+       p_bulletin->crc = OSAL_CRC32(0, (u8 *)p_bulletin + crc_size,
+                                    p_vf->bulletin.size - crc_size);
 
        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
@@ -375,33 +446,6 @@ static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
        return ECORE_SUCCESS;
 }
 
-static void ecore_iov_clear_vf_igu_blocks(struct ecore_hwfn *p_hwfn,
-                                         struct ecore_ptt *p_ptt)
-{
-       struct ecore_igu_block *p_sb;
-       u16 sb_id;
-       u32 val;
-
-       if (!p_hwfn->hw_info.p_igu_info) {
-               DP_ERR(p_hwfn,
-                      "ecore_iov_clear_vf_igu_blocks IGU Info not inited\n");
-               return;
-       }
-
-       for (sb_id = 0;
-            sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); sb_id++) {
-               p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
-               if ((p_sb->status & ECORE_IGU_STATUS_FREE) &&
-                   !(p_sb->status & ECORE_IGU_STATUS_PF)) {
-                       val = ecore_rd(p_hwfn, p_ptt,
-                                      IGU_REG_MAPPING_MEMORY + sb_id * 4);
-                       SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
-                       ecore_wr(p_hwfn, p_ptt,
-                                IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
-               }
-       }
-}
-
 static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
 {
        struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
@@ -557,13 +601,12 @@ enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
        return ecore_iov_allocate_vfdb(p_hwfn);
 }
 
-void ecore_iov_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+void ecore_iov_setup(struct ecore_hwfn *p_hwfn)
 {
        if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
                return;
 
        ecore_iov_setup_vfdb(p_hwfn);
-       ecore_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
 }
 
 void ecore_iov_free(struct ecore_hwfn *p_hwfn)
@@ -775,12 +818,48 @@ static void ecore_iov_vf_igu_set_int(struct ecore_hwfn *p_hwfn,
        ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
 }
 
+static enum _ecore_status_t
+ecore_iov_enable_vf_access_msix(struct ecore_hwfn *p_hwfn,
+                               struct ecore_ptt *p_ptt,
+                               u8 abs_vf_id,
+                               u8 num_sbs)
+{
+       u8 current_max = 0;
+       int i;
+
+       /* If client overrides this, don't do anything */
+       if (p_hwfn->p_dev->b_dont_override_vf_msix)
+               return ECORE_SUCCESS;
+
+       /* For AH onward, the configuration is per-PF. Find the maximum
+        * across all currently enabled child VFs and use that value.
+        */
+       if (!ECORE_IS_BB(p_hwfn->p_dev)) {
+               ecore_for_each_vf(p_hwfn, i) {
+                       struct ecore_vf_info *p_vf;
+
+                       p_vf  = ecore_iov_get_vf_info(p_hwfn, (u16)i, true);
+                       if (!p_vf)
+                               continue;
+
+                       current_max = OSAL_MAX_T(u8, current_max,
+                                                p_vf->num_sbs);
+               }
+       }
+
+       if (num_sbs > current_max)
+               return ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
+                                               abs_vf_id, num_sbs);
+
+       return ECORE_SUCCESS;
+}
+
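To make the per-PF reasoning above concrete, here is a compact standalone restatement of the max-across-child-VFs reduction, with plain arrays and a printf stub standing in for the VF iterator and ecore_mcp_config_vf_msix().

    #include <stdio.h>

    /* Mock stand-in for ecore_mcp_config_vf_msix(); the real call goes
     * to the management FW.
     */
    static int config_vf_msix(unsigned char abs_vf_id, unsigned char num_sbs)
    {
            printf("request %u MSI-X vectors for VF %u\n", num_sbs, abs_vf_id);
            return 0;
    }

    static int enable_vf_access_msix(const unsigned char *vf_num_sbs,
                                     int num_vfs, unsigned char abs_vf_id,
                                     unsigned char num_sbs, int is_bb)
    {
            unsigned char current_max = 0;
            int i;

            if (!is_bb)                  /* AH onward: limit is per-PF */
                    for (i = 0; i < num_vfs; i++)
                            if (vf_num_sbs[i] > current_max)
                                    current_max = vf_num_sbs[i];

            if (num_sbs > current_max)
                    return config_vf_msix(abs_vf_id, num_sbs);
            return 0;
    }

    int main(void)
    {
            unsigned char sbs[] = { 4, 8, 2 }; /* SBs of enabled VFs */

            return enable_vf_access_msix(sbs, 3, 7, 6, /* is_bb */ 0);
    }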
 static enum _ecore_status_t
 ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
                           struct ecore_ptt *p_ptt, struct ecore_vf_info *vf)
 {
        u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
-       enum _ecore_status_t rc;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
 
        if (vf->to_disable)
                return ECORE_SUCCESS;
@@ -796,9 +875,8 @@ ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
 
        /* It's possible VF was previously considered malicious */
        vf->b_malicious = false;
-
-       rc = ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
-                                     vf->abs_vf_id, vf->num_sbs);
+       rc = ecore_iov_enable_vf_access_msix(p_hwfn, p_ptt,
+                                            vf->abs_vf_id, vf->num_sbs);
        if (rc != ECORE_SUCCESS)
                return rc;
 
@@ -867,46 +945,38 @@ static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
                                     struct ecore_vf_info *vf,
                                     u16 num_rx_queues)
 {
-       struct ecore_igu_block *igu_blocks;
-       int qid = 0, igu_id = 0;
+       struct ecore_igu_block *p_block;
+       struct cau_sb_entry sb_entry;
+       int qid = 0;
        u32 val = 0;
 
-       igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;
-
-       if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
-               num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
-
-       p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;
+       if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
+               num_rx_queues =
+               (u16)p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
+       p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;
 
        SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
        SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
        SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
 
-       while ((qid < num_rx_queues) &&
-              (igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev))) {
-               if (igu_blocks[igu_id].status & ECORE_IGU_STATUS_FREE) {
-                       struct cau_sb_entry sb_entry;
-
-                       vf->igu_sbs[qid] = (u16)igu_id;
-                       igu_blocks[igu_id].status &= ~ECORE_IGU_STATUS_FREE;
-
-                       SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
-
-                       ecore_wr(p_hwfn, p_ptt,
-                                IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
-                                val);
-
-                       /* Configure igu sb in CAU which were marked valid */
-                       ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
-                                               p_hwfn->rel_pf_id,
-                                               vf->abs_vf_id, 1);
-                       ecore_dmae_host2grc(p_hwfn, p_ptt,
-                                           (u64)(osal_uintptr_t)&sb_entry,
-                                           CAU_REG_SB_VAR_MEMORY +
-                                           igu_id * sizeof(u64), 2, 0);
-                       qid++;
-               }
-               igu_id++;
+       for (qid = 0; qid < num_rx_queues; qid++) {
+               p_block = ecore_get_igu_free_sb(p_hwfn, false);
+               vf->igu_sbs[qid] = p_block->igu_sb_id;
+               p_block->status &= ~ECORE_IGU_STATUS_FREE;
+               SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
+
+               ecore_wr(p_hwfn, p_ptt,
+                        IGU_REG_MAPPING_MEMORY +
+                        sizeof(u32) * p_block->igu_sb_id, val);
+
+               /* Configure igu sb in CAU which were marked valid */
+               ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
+                                       p_hwfn->rel_pf_id,
+                                       vf->abs_vf_id, 1);
+               ecore_dmae_host2grc(p_hwfn, p_ptt,
+                                   (u64)(osal_uintptr_t)&sb_entry,
+                                   CAU_REG_SB_VAR_MEMORY +
+                                   p_block->igu_sb_id * sizeof(u64), 2, 0);
        }
 
        vf->num_sbs = (u8)num_rx_queues;
@@ -942,10 +1012,8 @@ static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
                SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
                ecore_wr(p_hwfn, p_ptt, addr, val);
 
-               p_info->igu_map.igu_blocks[igu_id].status |=
-                   ECORE_IGU_STATUS_FREE;
-
-               p_hwfn->hw_info.p_igu_info->free_blks++;
+               p_info->entry[igu_id].status |= ECORE_IGU_STATUS_FREE;
+               p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
        }
 
        vf->num_sbs = 0;
@@ -1043,34 +1111,28 @@ ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
        vf->vport_id = p_params->vport_id;
        vf->rss_eng_id = p_params->rss_eng_id;
 
-       /* Perform sanity checking on the requested queue_id */
+       /* Since it's possible to relocate SBs, it's a bit difficult to check
+        * things here. Simply check whether the index falls in the range
+        * belonging to the PF.
+        */
        for (i = 0; i < p_params->num_queues; i++) {
-               u16 min_vf_qzone = (u16)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE);
-               u16 max_vf_qzone = min_vf_qzone +
-                                  FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE) - 1;
-
                qid = p_params->req_rx_queue[i];
-               if (qid < min_vf_qzone || qid > max_vf_qzone) {
+               if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
                        DP_NOTICE(p_hwfn, true,
-                                 "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
+                                 "Can't enable Rx qid [%04x] for VF[%d]: qids [0,...,0x%04x] available\n",
                                  qid, p_params->rel_vf_id,
-                                 min_vf_qzone, max_vf_qzone);
+                                 (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
                        return ECORE_INVAL;
                }
 
                qid = p_params->req_tx_queue[i];
-               if (qid > max_vf_qzone) {
+               if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
                        DP_NOTICE(p_hwfn, true,
-                                 "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
-                                 qid, p_params->rel_vf_id, max_vf_qzone);
+                                 "Can't enable Tx qid [%04x] for VF[%d]: qids [0,...,0x%04x] available\n",
+                                 qid, p_params->rel_vf_id,
+                                 (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
                        return ECORE_INVAL;
                }
-
-               /* If client *really* wants, Tx qid can be shared with PF */
-               if (qid < min_vf_qzone)
-                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
-                                  "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
-                                  p_params->rel_vf_id, qid, i);
        }
 
        /* Limit number of queues according to number of CIDs */
@@ -1095,19 +1157,15 @@ ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
        vf->num_txqs = num_of_vf_available_chains;
 
        for (i = 0; i < vf->num_rxqs; i++) {
-               struct ecore_vf_q_info *p_queue = &vf->vf_queues[i];
+               struct ecore_vf_queue *p_queue = &vf->vf_queues[i];
 
                p_queue->fw_rx_qid = p_params->req_rx_queue[i];
                p_queue->fw_tx_qid = p_params->req_tx_queue[i];
 
-               /* CIDs are per-VF, so no problem having them 0-based. */
-               p_queue->fw_cid = i;
-
                DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
-                          "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]  CID %04x\n",
+                          "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
                           vf->relative_vf_id, i, vf->igu_sbs[i],
-                          p_queue->fw_rx_qid, p_queue->fw_tx_qid,
-                          p_queue->fw_cid);
+                          p_queue->fw_rx_qid, p_queue->fw_tx_qid);
        }
 
        /* Update the link configuration in bulletin.
@@ -1443,7 +1501,7 @@ struct ecore_public_vf_info
 static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
                                 struct ecore_vf_info *p_vf)
 {
-       u32 i;
+       u32 i, j;
        p_vf->vf_bulletin = 0;
        p_vf->vport_instance = 0;
        p_vf->configured_features = 0;
@@ -1455,18 +1513,15 @@ static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
        p_vf->num_active_rxqs = 0;
 
        for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
-               struct ecore_vf_q_info *p_queue = &p_vf->vf_queues[i];
+               struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
 
-               if (p_queue->p_rx_cid) {
-                       ecore_eth_queue_cid_release(p_hwfn,
-                                                   p_queue->p_rx_cid);
-                       p_queue->p_rx_cid = OSAL_NULL;
-               }
+               for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
+                       if (!p_queue->cids[j].p_cid)
+                               continue;
 
-               if (p_queue->p_tx_cid) {
                        ecore_eth_queue_cid_release(p_hwfn,
-                                                   p_queue->p_tx_cid);
-                       p_queue->p_tx_cid = OSAL_NULL;
+                                                   p_queue->cids[j].p_cid);
+                       p_queue->cids[j].p_cid = OSAL_NULL;
                }
        }
 
@@ -1481,7 +1536,7 @@ static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
                                        struct vf_pf_resc_request *p_req,
                                        struct pf_vf_resc *p_resp)
 {
-       int i;
+       u8 i;
 
        /* Queue related information */
        p_resp->num_rxqs = p_vf->num_rxqs;
@@ -1502,7 +1557,7 @@ static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
        for (i = 0; i < p_resp->num_rxqs; i++) {
                ecore_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
                                  (u16 *)&p_resp->hw_qid[i]);
-               p_resp->cid[i] = p_vf->vf_queues[i].fw_cid;
+               p_resp->cid[i] = i;
        }
 
        /* Filter related information */
@@ -1511,6 +1566,10 @@ static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
        p_resp->num_vlan_filters = OSAL_MIN_T(u8, p_vf->num_vlan_filters,
                                              p_req->num_vlan_filters);
 
+       p_resp->num_cids =
+               OSAL_MIN_T(u8, p_req->num_cids,
+                          p_hwfn->pf_params.eth_pf_params.num_vf_cons);
+
        /* This isn't really needed/enforced, but some legacy VFs might depend
         * on the correct filling of this field.
         */
@@ -1522,18 +1581,18 @@ static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
            p_resp->num_sbs < p_req->num_sbs ||
            p_resp->num_mac_filters < p_req->num_mac_filters ||
            p_resp->num_vlan_filters < p_req->num_vlan_filters ||
-           p_resp->num_mc_filters < p_req->num_mc_filters) {
+           p_resp->num_mc_filters < p_req->num_mc_filters ||
+           p_resp->num_cids < p_req->num_cids) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
-                          "VF[%d] - Insufficient resources: rxq [%02x/%02x]"
-                          " txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x]"
-                          " vlan [%02x/%02x] mc [%02x/%02x]\n",
+                          "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
                           p_vf->abs_vf_id,
                           p_req->num_rxqs, p_resp->num_rxqs,
                           p_req->num_rxqs, p_resp->num_txqs,
                           p_req->num_sbs, p_resp->num_sbs,
                           p_req->num_mac_filters, p_resp->num_mac_filters,
                           p_req->num_vlan_filters, p_resp->num_vlan_filters,
-                          p_req->num_mc_filters, p_resp->num_mc_filters);
+                          p_req->num_mc_filters, p_resp->num_mc_filters,
+                          p_req->num_cids, p_resp->num_cids);
 
                /* Some legacy OSes are incapable of correctly handling this
                 * failure.
@@ -1668,6 +1727,12 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn       *p_hwfn,
        if (p_hwfn->p_dev->num_hwfns > 1)
                pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
 
+       /* Share our ability to use multiple queue-ids only with VFs
+        * that request it.
+        */
+       if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
+               pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;
+
        ecore_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
 
        OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,
@@ -1905,9 +1970,12 @@ ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
 
                /* Update all the Rx queues */
                for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
-                       struct ecore_queue_cid *p_cid;
+                       struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
+                       struct ecore_queue_cid *p_cid = OSAL_NULL;
 
-                       p_cid = p_vf->vf_queues[i].p_rx_cid;
+                       /* There can be at most one Rx queue per qzone. Find it */
+                       p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, p_vf,
+                                                             p_queue);
                        if (p_cid == OSAL_NULL)
                                continue;
 
@@ -2053,6 +2121,8 @@ static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
                          "VF [%02x] - considered malicious;"
                          " Unable to stop RX/TX queuess\n",
                          vf->abs_vf_id);
+               status = PFVF_STATUS_MALICIOUS;
+               goto out;
        }
 
        rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
@@ -2066,6 +2136,7 @@ static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
        vf->configured_features = 0;
        OSAL_MEMSET(&vf->shadow_config, 0, sizeof(vf->shadow_config));
 
+out:
        ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
                               sizeof(struct pfvf_def_resp_tlv), status);
 }
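Only the tail of the new malicious-VF guard is visible in this hunk; the triggering condition sits above the shown context. Judging from the commit subject and the ecore_iov_validate_active_rxq/txq helpers added earlier, the intended flow is presumably as sketched below - a condensed standalone paraphrase with mock types, not the literal patch.

    #include <stdio.h>
    #include <stdbool.h>

    enum pfvf_status { PFVF_STATUS_SUCCESS, PFVF_STATUS_FAILURE,
                       PFVF_STATUS_MALICIOUS };

    /* Mock predicates standing in for ecore_iov_validate_active_rxq/txq(). */
    static bool has_active_rxq(void) { return true; }
    static bool has_active_txq(void) { return false; }

    static enum pfvf_status mbx_stop_vport(bool *b_malicious)
    {
            enum pfvf_status status = PFVF_STATUS_SUCCESS;

            /* A well-behaved VF closes all queues before VPORT_TEARDOWN;
             * stopping the vport with queues still open would assert FW.
             */
            if (has_active_rxq() || has_active_txq()) {
                    *b_malicious = true;
                    status = PFVF_STATUS_MALICIOUS;
                    goto out;            /* skip the vport-stop ramrod */
            }

            /* ... ecore_sp_vport_stop() and feature cleanup go here ... */
    out:
            return status;               /* a response is sent either way */
    }

    int main(void)
    {
            bool malicious = false;

            printf("status %d malicious %d\n", mbx_stop_vport(&malicious),
                   malicious);
            return 0;
    }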
@@ -2108,74 +2179,125 @@ static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn,
        ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
 }
 
+static u8 ecore_iov_vf_mbx_qid(struct ecore_hwfn *p_hwfn,
+                              struct ecore_vf_info *p_vf, bool b_is_tx)
+{
+       struct ecore_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
+       struct vfpf_qid_tlv *p_qid_tlv;
+
+       /* Search for the qid TLV only if the VF indicated it will provide it */
+       if (!(p_vf->acquire.vfdev_info.capabilities &
+             VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
+               if (b_is_tx)
+                       return ECORE_IOV_LEGACY_QID_TX;
+               else
+                       return ECORE_IOV_LEGACY_QID_RX;
+       }
+
+       p_qid_tlv = (struct vfpf_qid_tlv *)
+                   ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
+                                              CHANNEL_TLV_QID);
+       if (p_qid_tlv == OSAL_NULL) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "VF[%2x]: Failed to provide qid\n",
+                          p_vf->relative_vf_id);
+
+               return ECORE_IOV_QID_INVALID;
+       }
+
+       if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "VF[%02x]: Provided qid out-of-bounds %02x\n",
+                          p_vf->relative_vf_id, p_qid_tlv->qid);
+               return ECORE_IOV_QID_INVALID;
+       }
+
+       return p_qid_tlv->qid;
+}
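For orientation, the TLV parsed above is small. Its real definition lives in ecore_vfpf_if.h; the sketch below mocks both the header and the payload (field names and the CHANNEL_TLV_QID value are assumptions) to show what a QIDS-capable VF would append to, say, a start-rxq request.

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    /* Mocked TLV header/payload; real definitions are in ecore_vfpf_if.h. */
    struct channel_tlv { uint16_t type; uint16_t length; };
    struct vfpf_qid_tlv { struct channel_tlv tl; uint8_t qid; uint8_t pad[3]; };

    #define CHANNEL_TLV_QID 0x1d   /* illustrative value, not the real enum */

    /* Append the qid TLV into a request buffer at 'offset'. */
    static size_t add_qid_tlv(uint8_t *buf, size_t offset, uint8_t qid_usage_idx)
    {
            struct vfpf_qid_tlv tlv;

            memset(&tlv, 0, sizeof(tlv));
            tlv.tl.type = CHANNEL_TLV_QID;
            tlv.tl.length = sizeof(tlv);
            tlv.qid = qid_usage_idx;      /* index inside the queue-zone */
            memcpy(buf + offset, &tlv, sizeof(tlv));
            return offset + sizeof(tlv);
    }

    int main(void)
    {
            uint8_t req[64] = { 0 };
            size_t end = add_qid_tlv(req, 0, 1);

            printf("request now %zu bytes\n", end);
            return 0;
    }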
+
 static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
                                       struct ecore_ptt *p_ptt,
                                       struct ecore_vf_info *vf)
 {
        struct ecore_queue_start_common_params params;
+       struct ecore_queue_cid_vf_params vf_params;
        struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
        u8 status = PFVF_STATUS_NO_RESOURCE;
-       struct ecore_vf_q_info *p_queue;
+       u8 qid_usage_idx, vf_legacy = 0;
+       struct ecore_vf_queue *p_queue;
        struct vfpf_start_rxq_tlv *req;
-       bool b_legacy_vf = false;
+       struct ecore_queue_cid *p_cid;
+       struct ecore_sb_info sb_dummy;
        enum _ecore_status_t rc;
 
        req = &mbx->req_virt->start_rxq;
 
-       if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid) ||
+       if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
+                                   ECORE_IOV_VALIDATE_Q_DISABLE) ||
            !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
                goto out;
 
-       /* Acquire a new queue-cid */
+       qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
+       if (qid_usage_idx == ECORE_IOV_QID_INVALID)
+               goto out;
+
        p_queue = &vf->vf_queues[req->rx_qid];
+       if (p_queue->cids[qid_usage_idx].p_cid)
+               goto out;
+
+       vf_legacy = ecore_vf_calculate_legacy(p_hwfn, vf);
 
+       /* Acquire a new queue-cid */
        OSAL_MEMSET(&params, 0, sizeof(params));
        params.queue_id = (u8)p_queue->fw_rx_qid;
        params.vport_id = vf->vport_id;
        params.stats_id = vf->abs_vf_id + 0x10;
-       params.sb = req->hw_sb;
+
+       /* Since IGU index is passed via sb_info, construct a dummy one */
+       OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
+       sb_dummy.igu_sb_id = req->hw_sb;
+       params.p_sb = &sb_dummy;
        params.sb_idx = req->sb_index;
 
-       p_queue->p_rx_cid = _ecore_eth_queue_to_cid(p_hwfn,
-                                                   vf->opaque_fid,
-                                                   p_queue->fw_cid,
-                                                   (u8)req->rx_qid,
-                                                   &params);
-       if (p_queue->p_rx_cid == OSAL_NULL)
+       OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
+       vf_params.vfid = vf->relative_vf_id;
+       vf_params.vf_qid = (u8)req->rx_qid;
+       vf_params.vf_legacy = vf_legacy;
+       vf_params.qid_usage_idx = qid_usage_idx;
+
+       p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
+                                      &params, &vf_params);
+       if (p_cid == OSAL_NULL)
                goto out;
 
        /* Legacy VFs have their Producers in a different location, which they
         * calculate on their own and clean the producer prior to this.
         */
-       if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
-           ETH_HSI_VER_NO_PKT_LEN_TUNN)
-               b_legacy_vf = true;
-       else
+       if (!(vf_legacy & ECORE_QCID_LEGACY_VF_RX_PROD))
                REG_WR(p_hwfn,
                       GTT_BAR0_MAP_REG_MSDM_RAM +
                       MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
                       0);
-       p_queue->p_rx_cid->b_legacy_vf = b_legacy_vf;
-
 
-       rc = ecore_eth_rxq_start_ramrod(p_hwfn,
-                                       p_queue->p_rx_cid,
+       rc = ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
                                        req->bd_max_bytes,
                                        req->rxq_addr,
                                        req->cqe_pbl_addr,
                                        req->cqe_pbl_size);
        if (rc != ECORE_SUCCESS) {
                status = PFVF_STATUS_FAILURE;
-               ecore_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid);
-               p_queue->p_rx_cid = OSAL_NULL;
+               ecore_eth_queue_cid_release(p_hwfn, p_cid);
        } else {
+               p_queue->cids[qid_usage_idx].p_cid = p_cid;
+               p_queue->cids[qid_usage_idx].b_is_tx = false;
                status = PFVF_STATUS_SUCCESS;
                vf->num_active_rxqs++;
        }
 
 out:
        ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
-                                       b_legacy_vf);
+                                       !!(vf_legacy &
+                                          ECORE_QCID_LEGACY_VF_RX_PROD));
 }
 
 static void
@@ -2303,7 +2425,7 @@ static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn,
        if (b_update_required) {
                u16 geneve_port;
 
-               rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
+               rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
                                                 ECORE_SPQ_MODE_EBLOCK,
                                                 OSAL_NULL);
                if (rc != ECORE_SUCCESS)
@@ -2331,6 +2453,7 @@ send_resp:
 static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
                                            struct ecore_ptt *p_ptt,
                                            struct ecore_vf_info *p_vf,
+                                           u32 cid,
                                            u8 status)
 {
        struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
@@ -2359,12 +2482,8 @@ static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
                      sizeof(struct channel_list_end_tlv));
 
        /* Update the TLV with the response */
-       if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
-               u16 qid = mbx->req_virt->start_txq.tx_qid;
-
-               p_tlv->offset = DB_ADDR_VF(p_vf->vf_queues[qid].fw_cid,
-                                          DQ_DEMS_LEGACY);
-       }
+       if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
+               p_tlv->offset = DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
 
        ecore_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
 }
@@ -2374,110 +2493,148 @@ static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
                                       struct ecore_vf_info *vf)
 {
        struct ecore_queue_start_common_params params;
+       struct ecore_queue_cid_vf_params vf_params;
        struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
        u8 status = PFVF_STATUS_NO_RESOURCE;
-       struct ecore_vf_q_info *p_queue;
+       struct ecore_vf_queue *p_queue;
        struct vfpf_start_txq_tlv *req;
+       struct ecore_queue_cid *p_cid;
+       struct ecore_sb_info sb_dummy;
+       u8 qid_usage_idx, vf_legacy;
+       u32 cid = 0;
        enum _ecore_status_t rc;
        u16 pq;
 
        OSAL_MEMSET(&params, 0, sizeof(params));
        req = &mbx->req_virt->start_txq;
 
-       if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid) ||
+       if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid,
+                                   ECORE_IOV_VALIDATE_Q_NA) ||
            !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
                goto out;
 
-       /* Acquire a new queue-cid */
+       qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
+       if (qid_usage_idx == ECORE_IOV_QID_INVALID)
+               goto out;
+
        p_queue = &vf->vf_queues[req->tx_qid];
+       if (p_queue->cids[qid_usage_idx].p_cid)
+               goto out;
+
+       vf_legacy = ecore_vf_calculate_legacy(p_hwfn, vf);
 
+       /* Acquire a new queue-cid */
        params.queue_id = p_queue->fw_tx_qid;
        params.vport_id = vf->vport_id;
        params.stats_id = vf->abs_vf_id + 0x10;
-       params.sb = req->hw_sb;
+
+       /* Since IGU index is passed via sb_info, construct a dummy one */
+       OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
+       sb_dummy.igu_sb_id = req->hw_sb;
+       params.p_sb = &sb_dummy;
        params.sb_idx = req->sb_index;
 
-       p_queue->p_tx_cid = _ecore_eth_queue_to_cid(p_hwfn,
-                                                   vf->opaque_fid,
-                                                   p_queue->fw_cid,
-                                                   (u8)req->tx_qid,
-                                                   &params);
-       if (p_queue->p_tx_cid == OSAL_NULL)
+       OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
+       vf_params.vfid = vf->relative_vf_id;
+       vf_params.vf_qid = (u8)req->tx_qid;
+       vf_params.vf_legacy = vf_legacy;
+       vf_params.qid_usage_idx = qid_usage_idx;
+
+       p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
+                                      &params, &vf_params);
+       if (p_cid == OSAL_NULL)
                goto out;
 
        pq = ecore_get_cm_pq_idx_vf(p_hwfn,
                                    vf->relative_vf_id);
-       rc = ecore_eth_txq_start_ramrod(p_hwfn, p_queue->p_tx_cid,
+       rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
                                        req->pbl_addr, req->pbl_size, pq);
        if (rc != ECORE_SUCCESS) {
                status = PFVF_STATUS_FAILURE;
-               ecore_eth_queue_cid_release(p_hwfn,
-                                           p_queue->p_tx_cid);
-               p_queue->p_tx_cid = OSAL_NULL;
+               ecore_eth_queue_cid_release(p_hwfn, p_cid);
        } else {
                status = PFVF_STATUS_SUCCESS;
+               p_queue->cids[qid_usage_idx].p_cid = p_cid;
+               p_queue->cids[qid_usage_idx].b_is_tx = true;
+               cid = p_cid->cid;
        }
 
 out:
-       ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status);
+       ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf,
+                                       cid, status);
 }
 
 static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
                                                   struct ecore_vf_info *vf,
                                                   u16 rxq_id,
-                                                  u8 num_rxqs,
+                                                  u8 qid_usage_idx,
                                                   bool cqe_completion)
 {
-       struct ecore_vf_q_info *p_queue;
+       struct ecore_vf_queue *p_queue;
        enum _ecore_status_t rc = ECORE_SUCCESS;
-       int qid;
 
-       if (rxq_id + num_rxqs > OSAL_ARRAY_SIZE(vf->vf_queues))
+       if (!ecore_iov_validate_rxq(p_hwfn, vf, rxq_id,
+                                   ECORE_IOV_VALIDATE_Q_NA)) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
+                          vf->relative_vf_id, rxq_id, qid_usage_idx);
                return ECORE_INVAL;
+       }
 
-       for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
-               p_queue = &vf->vf_queues[qid];
-
-               if (!p_queue->p_rx_cid)
-                       continue;
+       p_queue = &vf->vf_queues[rxq_id];
 
-               rc = ecore_eth_rx_queue_stop(p_hwfn,
-                                            p_queue->p_rx_cid,
-                                            false, cqe_completion);
-               if (rc != ECORE_SUCCESS)
-                       return rc;
+       /* We've validated the index and the existence of the active RXQ -
+        * now we need to make sure that it's using the correct qid.
+        */
+       if (!p_queue->cids[qid_usage_idx].p_cid ||
+           p_queue->cids[qid_usage_idx].b_is_tx) {
+               struct ecore_queue_cid *p_cid;
 
-               vf->vf_queues[qid].p_rx_cid = OSAL_NULL;
-               vf->num_active_rxqs--;
+               p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf, p_queue);
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
+                           vf->relative_vf_id, rxq_id, qid_usage_idx,
+                           rxq_id, p_cid->qid_usage_idx);
+               return ECORE_INVAL;
        }
 
-       return rc;
+       /* Now that we know we have a valid Rx-queue - close it */
+       rc = ecore_eth_rx_queue_stop(p_hwfn,
+                                    p_queue->cids[qid_usage_idx].p_cid,
+                                    false, cqe_completion);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
+       vf->num_active_rxqs--;
+
+       return ECORE_SUCCESS;
 }
 
 static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
                                                   struct ecore_vf_info *vf,
-                                                  u16 txq_id, u8 num_txqs)
+                                                  u16 txq_id,
+                                                  u8 qid_usage_idx)
 {
+       struct ecore_vf_queue *p_queue;
        enum _ecore_status_t rc = ECORE_SUCCESS;
-       struct ecore_vf_q_info *p_queue;
-       int qid;
 
-       if (txq_id + num_txqs > OSAL_ARRAY_SIZE(vf->vf_queues))
+       if (!ecore_iov_validate_txq(p_hwfn, vf, txq_id,
+                                   ECORE_IOV_VALIDATE_Q_NA))
                return ECORE_INVAL;
 
-       for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
-               p_queue = &vf->vf_queues[qid];
-               if (!p_queue->p_tx_cid)
-                       continue;
+       p_queue = &vf->vf_queues[txq_id];
+       if (!p_queue->cids[qid_usage_idx].p_cid ||
+           !p_queue->cids[qid_usage_idx].b_is_tx)
+               return ECORE_INVAL;
 
-               rc = ecore_eth_tx_queue_stop(p_hwfn,
-                                            p_queue->p_tx_cid);
-               if (rc != ECORE_SUCCESS)
-                       return rc;
+       rc = ecore_eth_tx_queue_stop(p_hwfn,
+                                    p_queue->cids[qid_usage_idx].p_cid);
+       if (rc != ECORE_SUCCESS)
+               return rc;
 
-               p_queue->p_tx_cid = OSAL_NULL;
-       }
-       return rc;
+       p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
+       return ECORE_SUCCESS;
 }
 
 static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,
@@ -2486,20 +2643,34 @@ static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,
 {
        u16 length = sizeof(struct pfvf_def_resp_tlv);
        struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
-       u8 status = PFVF_STATUS_SUCCESS;
+       u8 status = PFVF_STATUS_FAILURE;
        struct vfpf_stop_rxqs_tlv *req;
+       u8 qid_usage_idx;
        enum _ecore_status_t rc;
 
-       /* We give the option of starting from qid != 0, in this case we
-        * need to make sure that qid + num_qs doesn't exceed the actual
-        * amount of queues that exist.
+       /* Starting with CHANNEL_TLV_QID, it's assumed the 'num_rxqs'
+        * would be one. Since no older ecore passed multiple queues
+        * using this API, sanitize on the value.
         */
        req = &mbx->req_virt->stop_rxqs;
-       rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
-                                   req->num_rxqs, req->cqe_completion);
-       if (rc)
-               status = PFVF_STATUS_FAILURE;
+       if (req->num_rxqs != 1) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "Odd; VF[%d] tried stopping multiple Rx queues\n",
+                          vf->relative_vf_id);
+               status = PFVF_STATUS_NOT_SUPPORTED;
+               goto out;
+       }
 
+       /* Find which qid-index is associated with the queue */
+       qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
+       if (qid_usage_idx == ECORE_IOV_QID_INVALID)
+               goto out;
+
+       rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
+                                   qid_usage_idx, req->cqe_completion);
+       if (rc == ECORE_SUCCESS)
+               status = PFVF_STATUS_SUCCESS;
+out:
        ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
                               length, status);
 }
@@ -2510,19 +2681,35 @@ static void ecore_iov_vf_mbx_stop_txqs(struct ecore_hwfn *p_hwfn,
 {
        u16 length = sizeof(struct pfvf_def_resp_tlv);
        struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
-       u8 status = PFVF_STATUS_SUCCESS;
+       u8 status = PFVF_STATUS_FAILURE;
        struct vfpf_stop_txqs_tlv *req;
+       u8 qid_usage_idx;
        enum _ecore_status_t rc;
 
-       /* We give the option of starting from qid != 0, in this case we
-        * need to make sure that qid + num_qs doesn't exceed the actual
-        * amount of queues that exist.
+       /* Starting with CHANNEL_TLV_QID, it's assumed the 'num_txqs'
+        * would be one. Since no older ecore passed multiple queues
+        * using this API, sanitize on the value.
         */
        req = &mbx->req_virt->stop_txqs;
-       rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
-       if (rc)
-               status = PFVF_STATUS_FAILURE;
+       if (req->num_txqs != 1) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "Odd; VF[%d] tried stopping multiple Tx queues\n",
+                          vf->relative_vf_id);
+               status = PFVF_STATUS_NOT_SUPPORTED;
+               goto out;
+       }
 
+       /* Find which qid-index is associated with the queue */
+       qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
+       if (qid_usage_idx == ECORE_IOV_QID_INVALID)
+               goto out;
+
+       rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid,
+                                   qid_usage_idx);
+       if (rc == ECORE_SUCCESS)
+               status = PFVF_STATUS_SUCCESS;
+
+out:
        ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
                               length, status);
 }
@@ -2538,33 +2725,50 @@ static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
        u8 status = PFVF_STATUS_FAILURE;
        u8 complete_event_flg;
        u8 complete_cqe_flg;
-       u16 qid;
+       u8 qid_usage_idx;
        enum _ecore_status_t rc;
-       u8 i;
+       u16 i;
 
        req = &mbx->req_virt->update_rxq;
        complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
        complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
 
-       /* Validaute inputs */
-       if (req->num_rxqs + req->rx_qid > ECORE_MAX_VF_CHAINS_PER_PF ||
-           !ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid)) {
-               DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
-                       vf->relative_vf_id, req->rx_qid, req->num_rxqs);
+       qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
+       if (qid_usage_idx == ECORE_IOV_QID_INVALID)
                goto out;
-       }
 
-       for (i = 0; i < req->num_rxqs; i++) {
-               qid = req->rx_qid + i;
+       /* Starting with the addition of CHANNEL_TLV_QID, this API started
+        * expecting a single queue at a time. Validate this.
+        */
+       if ((vf->acquire.vfdev_info.capabilities &
+            VFPF_ACQUIRE_CAP_QUEUE_QIDS) &&
+            req->num_rxqs != 1) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "VF[%d] supports QIDs but sends multiple queues\n",
+                          vf->relative_vf_id);
+               goto out;
+       }
 
-               if (!vf->vf_queues[qid].p_rx_cid) {
-                       DP_INFO(p_hwfn,
-                               "VF[%d] rx_qid = %d isn`t active!\n",
-                               vf->relative_vf_id, qid);
+       /* Validate inputs - for the legacy case this is still true since
+        * qid_usage_idx for each Rx queue would be LEGACY_QID_RX.
+        */
+       for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
+               if (!ecore_iov_validate_rxq(p_hwfn, vf, i,
+                                           ECORE_IOV_VALIDATE_Q_NA) ||
+                   !vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
+                   vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
+                                  vf->relative_vf_id, req->rx_qid,
+                                  req->num_rxqs);
                        goto out;
                }
+       }
 
-               handlers[i] = vf->vf_queues[qid].p_rx_cid;
+       for (i = 0; i < req->num_rxqs; i++) {
+               u16 qid = req->rx_qid + i;
+
+               handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid;
        }
 
        rc = ecore_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
@@ -2573,7 +2777,7 @@ static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
                                           complete_event_flg,
                                           ECORE_SPQ_MODE_EBLOCK,
                                           OSAL_NULL);
-       if (rc)
+       if (rc != ECORE_SUCCESS)
                goto out;
 
        status = PFVF_STATUS_SUCCESS;
@@ -2796,8 +3000,11 @@ ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
                                (1 << p_rss_tlv->rss_table_size_log));
 
        for (i = 0; i < table_size; i++) {
+               struct ecore_queue_cid *p_cid;
+
                q_idx = p_rss_tlv->rss_ind_table[i];
-               if (!ecore_iov_validate_rxq(p_hwfn, vf, q_idx)) {
+               if (!ecore_iov_validate_rxq(p_hwfn, vf, q_idx,
+                                           ECORE_IOV_VALIDATE_Q_ENABLE)) {
                        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                                   "VF[%d]: Omitting RSS due to wrong queue %04x\n",
                                   vf->relative_vf_id, q_idx);
@@ -2805,15 +3012,9 @@ ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
                        goto out;
                }
 
-               if (!vf->vf_queues[q_idx].p_rx_cid) {
-                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
-                                  "VF[%d]: Omitting RSS due to inactive queue %08x\n",
-                                  vf->relative_vf_id, q_idx);
-                       b_reject = true;
-                       goto out;
-               }
-
-               p_rss->rss_ind_table[i] = vf->vf_queues[q_idx].p_rx_cid;
+               p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
+                                                     &vf->vf_queues[q_idx]);
+               p_rss->rss_ind_table[i] = p_cid;
        }
 
        p_data->rss_params = p_rss;
@@ -3272,22 +3473,26 @@ static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
        u8 status = PFVF_STATUS_FAILURE;
        struct ecore_queue_cid *p_cid;
        u16 rx_coal, tx_coal;
-       u16  qid;
+       u16 qid;
+       int i;
 
        req = &mbx->req_virt->update_coalesce;
 
        rx_coal = req->rx_coal;
        tx_coal = req->tx_coal;
        qid = req->qid;
-       p_cid = vf->vf_queues[qid].p_rx_cid;
 
-       if (!ecore_iov_validate_rxq(p_hwfn, vf, qid)) {
+       if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
+                                   ECORE_IOV_VALIDATE_Q_ENABLE) &&
+           rx_coal) {
                DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
                       vf->abs_vf_id, qid);
                goto out;
        }
 
-       if (!ecore_iov_validate_txq(p_hwfn, vf, qid)) {
+       if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
+                                   ECORE_IOV_VALIDATE_Q_ENABLE) &&
+           tx_coal) {
                DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
                       vf->abs_vf_id, qid);
                goto out;
@@ -3296,7 +3501,11 @@ static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                   "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
                   vf->abs_vf_id, rx_coal, tx_coal, qid);
+
        if (rx_coal) {
+               p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
+                                                     &vf->vf_queues[qid]);
+
                rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
                if (rc != ECORE_SUCCESS) {
                        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
@@ -3304,15 +3513,32 @@ static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
                                   vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
                        goto out;
                }
+               vf->rx_coal = rx_coal;
        }
+
+       /* TODO - in future, it might be possible to pass this in a per-cid
+        * granularity. For now, do this for all Tx queues.
+        */
        if (tx_coal) {
-               rc =  ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, p_cid);
-               if (rc != ECORE_SUCCESS) {
-                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
-                                  "VF[%d]: Unable to set tx queue = %d coalesce\n",
-                                  vf->abs_vf_id, vf->vf_queues[qid].fw_tx_qid);
-                       goto out;
+               struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
+
+               for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+                       if (p_queue->cids[i].p_cid == OSAL_NULL)
+                               continue;
+
+                       if (!p_queue->cids[i].b_is_tx)
+                               continue;
+
+                       rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
+                                                   p_queue->cids[i].p_cid);
+                       if (rc != ECORE_SUCCESS) {
+                               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                          "VF[%d]: Unable to set tx queue coalesce\n",
+                                          vf->abs_vf_id);
+                               goto out;
+                       }
                }
+               vf->tx_coal = tx_coal;
        }
 
        status = PFVF_STATUS_SUCCESS;
@@ -3321,6 +3547,93 @@ out:
                               sizeof(struct pfvf_def_resp_tlv), status);
 }
 
+enum _ecore_status_t
+ecore_iov_pf_configure_vf_queue_coalesce(struct ecore_hwfn *p_hwfn,
+                                        u16 rx_coal, u16 tx_coal,
+                                        u16 vf_id, u16 qid)
+{
+       struct ecore_queue_cid *p_cid;
+       struct ecore_vf_info *vf;
+       struct ecore_ptt *p_ptt;
+       int i, rc = 0;
+
+       if (!ecore_iov_is_valid_vfid(p_hwfn, vf_id, true, true)) {
+               DP_NOTICE(p_hwfn, true,
+                         "VF[%d] - Can not set coalescing: VF is not active\n",
+                         vf_id);
+               return ECORE_INVAL;
+       }
+
+       vf = &p_hwfn->pf_iov_info->vfs_array[vf_id];
+       p_ptt = ecore_ptt_acquire(p_hwfn);
+       if (!p_ptt)
+               return ECORE_AGAIN;
+
+       if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
+                                   ECORE_IOV_VALIDATE_Q_ENABLE) &&
+           rx_coal) {
+               DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
+                      vf->abs_vf_id, qid);
+               goto out;
+       }
+
+       if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
+                                   ECORE_IOV_VALIDATE_Q_ENABLE) &&
+           tx_coal) {
+               DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
+                      vf->abs_vf_id, qid);
+               goto out;
+       }
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                  "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
+                  vf->abs_vf_id, rx_coal, tx_coal, qid);
+
+       if (rx_coal) {
+               p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
+                                                     &vf->vf_queues[qid]);
+
+               rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
+               if (rc != ECORE_SUCCESS) {
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "VF[%d]: Unable to set rx queue = %d coalesce\n",
+                                  vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
+                       goto out;
+               }
+               vf->rx_coal = rx_coal;
+       }
+
+       /* TODO - in future, it might be possible to pass this in a per-cid
+        * granularity. For now, do this for all Tx queues.
+        */
+       if (tx_coal) {
+               struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
+
+               for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+                       if (p_queue->cids[i].p_cid == OSAL_NULL)
+                               continue;
+
+                       if (!p_queue->cids[i].b_is_tx)
+                               continue;
+
+                       rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
+                                                   p_queue->cids[i].p_cid);
+                       if (rc != ECORE_SUCCESS) {
+                               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                          "VF[%d]: Unable to set tx queue coalesce\n",
+                                          vf->abs_vf_id);
+                               goto out;
+                       }
+               }
+               vf->tx_coal = tx_coal;
+       }
+
+out:
+       ecore_ptt_release(p_hwfn, p_ptt);
+
+       return rc;
+}
+
 static enum _ecore_status_t
 ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,
                           struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
@@ -3480,8 +3793,7 @@ cleanup:
                ack_vfs[vfid / 32] |= (1 << (vfid % 32));
                p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
                    ~(1ULL << (rel_vf_id % 64));
-               p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
-                   ~(1ULL << (rel_vf_id % 64));
+               p_vf->vf_mbx.b_pending_msg = false;
        }
 
        return rc;
@@ -3611,12 +3923,22 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
        mbx = &p_vf->vf_mbx;
 
        /* ecore_iov_process_mbx_request */
-       DP_VERBOSE(p_hwfn,
-                  ECORE_MSG_IOV,
-                  "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id);
+#ifndef CONFIG_ECORE_SW_CHANNEL
+       if (!mbx->b_pending_msg) {
+               DP_NOTICE(p_hwfn, true,
+                         "VF[%02x]: Trying to process mailbox message when none is pending\n",
+                         p_vf->abs_vf_id);
+               return;
+       }
+       mbx->b_pending_msg = false;
+#endif
 
        mbx->first_tlv = mbx->req_virt->first_tlv;
 
+       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                  "VF[%02x]: Processing mailbox message [type %04x]\n",
+                  p_vf->abs_vf_id, mbx->first_tlv.tl.type);
+
        OSAL_IOV_VF_MSG_TYPE(p_hwfn,
                             p_vf->relative_vf_id,
                             mbx->first_tlv.tl.type);
@@ -3741,26 +4063,20 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
 #endif
 }
 
-void ecore_iov_pf_add_pending_events(struct ecore_hwfn *p_hwfn, u8 vfid)
+void ecore_iov_pf_get_pending_events(struct ecore_hwfn *p_hwfn,
+                                    u64 *events)
 {
-       u64 add_bit = 1ULL << (vfid % 64);
+       int i;
 
-       /* TODO - add locking mechanisms [no atomics in ecore, so we can't
-       * add the lock inside the ecore_pf_iov struct].
-       */
-       p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
-}
+       OSAL_MEM_ZERO(events, sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
 
-void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn,
-                                              u64 *events)
-{
-       u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
+       ecore_for_each_vf(p_hwfn, i) {
+               struct ecore_vf_info *p_vf;
 
-       /* TODO - Take a lock */
-       OSAL_MEMCPY(events, p_pending_events,
-                   sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
-       OSAL_MEMSET(p_pending_events, 0,
-                   sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
+               p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
+               if (p_vf->vf_mbx.b_pending_msg)
+                       events[i / 64] |= 1ULL << (i % 64);
+       }
 }
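The rework above derives the pending-VF bitmap on demand from the per-VF b_pending_msg flag instead of maintaining a separate, unlocked pending_events array. Below is a standalone sketch of the same u64-per-64-VFs packing and of how a caller could walk the result; the function and constant names here are illustrative.

    #include <stdint.h>
    #include <stdio.h>

    #define VF_ARRAY_LENGTH 3             /* enough for 192 VFs */

    static void get_pending_events(const int *b_pending_msg, int num_vfs,
                                   uint64_t events[VF_ARRAY_LENGTH])
    {
            int i;

            for (i = 0; i < VF_ARRAY_LENGTH; i++)
                    events[i] = 0;

            for (i = 0; i < num_vfs; i++)
                    if (b_pending_msg[i])
                            events[i / 64] |= 1ULL << (i % 64);
    }

    int main(void)
    {
            int pending[130] = { 0 };
            uint64_t events[VF_ARRAY_LENGTH];
            int i;

            pending[5] = 1;
            pending[129] = 1;
            get_pending_events(pending, 130, events);

            for (i = 0; i < 130; i++)     /* walk the bitmap like the PF */
                    if (events[i / 64] & (1ULL << (i % 64)))
                            printf("VF %d has a pending message\n", i);
            return 0;
    }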
 
 static struct ecore_vf_info *
@@ -3794,6 +4110,8 @@ static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
         */
        p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
 
+       p_vf->vf_mbx.b_pending_msg = true;
+
        return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id);
 }
 
@@ -4243,30 +4561,6 @@ enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
        return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
 }
 
-enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
-                                                    int vfid, u32 rate)
-{
-       struct ecore_vf_info *vf;
-       u8 vport_id;
-       int i;
-
-       for_each_hwfn(p_dev, i) {
-               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
-
-               if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
-                       DP_NOTICE(p_hwfn, true,
-                                 "SR-IOV sanity check failed,"
-                                 " can't set min rate\n");
-                       return ECORE_INVAL;
-               }
-       }
-
-       vf = ecore_iov_get_vf_info(ECORE_LEADING_HWFN(p_dev), (u16)vfid, true);
-       vport_id = vf->vport_id;
-
-       return ecore_configure_vport_wfq(p_dev, vport_id, rate);
-}
-
 enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
                                            struct ecore_ptt *p_ptt,
                                            int vfid,