net/enic: add single queue Tx and Rx to VF representor
author     Hyong Youb Kim <hyonkim@cisco.com>
           Wed, 9 Sep 2020 13:56:54 +0000 (06:56 -0700)
committer  Ferruh Yigit <ferruh.yigit@intel.com>
           Mon, 21 Sep 2020 16:05:38 +0000 (18:05 +0200)
A VF representor allocates queues from the PF's pool of queues and uses
them for its Tx and Rx. It supports 1 Tx queue and 1 Rx queue.

Implicit packet forwarding between the representor queues and the VF does
not yet exist. It will be enabled in subsequent commits using the flowman
API.

Signed-off-by: Hyong Youb Kim <hyonkim@cisco.com>
Reviewed-by: John Daley <johndale@cisco.com>
drivers/net/enic/enic.h
drivers/net/enic/enic_main.c
drivers/net/enic/enic_vf_representor.c

drivers/net/enic/enic.h
index 929ea90..d51781d 100644
@@ -214,6 +214,10 @@ struct enic {
        uint8_t switchdev_mode;
        uint16_t switch_domain_id;
        uint16_t max_vf_id;
+       /* Number of queues needed for VF representor paths */
+       uint32_t vf_required_wq;
+       uint32_t vf_required_cq;
+       uint32_t vf_required_rq;
        /*
         * Lock to serialize devcmds from PF, VF representors as they all share
         * the same PF devcmd instance in firmware.
@@ -232,6 +236,11 @@ struct enic_vf_representor {
        uint16_t vf_id;
        int allmulti;
        int promisc;
+       /* Representor path uses PF queues. These are reserved during init */
+       uint16_t pf_wq_idx;      /* WQ dedicated to VF rep */
+       uint16_t pf_wq_cq_idx;   /* CQ for WQ */
+       uint16_t pf_rq_sop_idx;  /* SOP RQ dedicated to VF rep */
+       uint16_t pf_rq_data_idx; /* Data RQ */
 };
 
 #define VF_ENIC_TO_VF_REP(vf_enic) \
@@ -293,6 +302,67 @@ static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
        return enic->rq_count + wq;
 }
 
+/*
+ * WQ, RQ, CQ allocation scheme. Firmware gives the driver an array of
+ * WQs, an array of RQs, and an array of CQs. For now, these are
+ * statically allocated between PF app send/receive queues and VF
+ * representor app send/receive queues. A VF representor supports only 1
+ * send and 1 receive queue. The number of PF app queues is not known
+ * until queue setup time.
+ *
+ * R = number of receive queues for PF app
+ * S = number of send queues for PF app
+ * V = number of VF representors
+ *
+ * wI = WQ for PF app send queue I
+ * rI = SOP RQ for PF app receive queue I
+ * dI = Data RQ for rI
+ * cwI = CQ for wI
+ * crI = CQ for rI
+ * vwI = WQ for VF representor send queue I
+ * vrI = SOP RQ for VF representor receive queue I
+ * vdI = Data RQ for vrI
+ * vcwI = CQ for vwI
+ * vcrI = CQ for vrI
+ *
+ * WQ array: | w0 |..| wS-1 |..| vwV-1 |..| vw0 |
+ *             ^         ^         ^         ^
+ *    index    0        S-1       W-V       W-1    W=len(WQ array)
+ *
+ * RQ array: | r0  |..| rR-1  |d0 |..|dR-1|  ..|vdV-1 |..| vd0 |vrV-1 |..|vr0 |
+ *             ^         ^     ^       ^         ^          ^     ^        ^
+ *    index    0        R-1    R      2R-1      X-2V    X-(V+1)  X-V      X-1
+ * X=len(RQ array)
+ *
+ * CQ array: | cr0 |..| crR-1 |cw0|..|cwS-1|..|vcwV-1|..| vcw0|vcrV-1|..|vcr0|..
+ *              ^         ^     ^       ^        ^         ^      ^        ^
+ *    index     0        R-1    R     R+S-1     X-2V    X-(V+1)  X-V      X-1
+ * X is not a typo. It really is len(RQ array) to accommodate enic_cq_rq() used
+ * throughout RX handlers. The current scheme requires
+ * len(CQ array) >= len(RQ array).
+ */
+
+static inline unsigned int vf_wq_cq_idx(struct enic_vf_representor *vf)
+{
+       /* rq is not a typo. index(vcwI) coincides with index(vdI) */
+       return vf->pf->conf_rq_count - (vf->pf->max_vf_id + vf->vf_id + 2);
+}
+
+static inline unsigned int vf_wq_idx(struct enic_vf_representor *vf)
+{
+       return vf->pf->conf_wq_count - vf->vf_id - 1;
+}
+
+static inline unsigned int vf_rq_sop_idx(struct enic_vf_representor *vf)
+{
+       return vf->pf->conf_rq_count - vf->vf_id - 1;
+}
+
+static inline unsigned int vf_rq_data_idx(struct enic_vf_representor *vf)
+{
+       return vf->pf->conf_rq_count - (vf->pf->max_vf_id + vf->vf_id + 2);
+}
+
 static inline struct enic *pmd_priv(struct rte_eth_dev *eth_dev)
 {
        return eth_dev->data->dev_private;
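
To make the reverse-indexed layout concrete, here is a worked example under
assumed resource counts. The counts and the example_vf_queue_layout() helper
are illustrative only, not part of this patch:

    #include <assert.h>

    /* Hedged sketch: recompute the reserved indices for assumed counts
     * conf_wq_count = 8, conf_rq_count = 16, max_vf_id = 1 (so V = 2),
     * mirroring vf_wq_idx(), vf_rq_sop_idx(), and vf_rq_data_idx() above.
     */
    static void example_vf_queue_layout(void)
    {
            unsigned int conf_wq = 8, conf_rq = 16, max_vf_id = 1;
            unsigned int vf_id;

            for (vf_id = 0; vf_id <= max_vf_id; vf_id++) {
                    unsigned int wq = conf_wq - vf_id - 1;
                    unsigned int rq_sop = conf_rq - vf_id - 1;
                    unsigned int rq_data = conf_rq - (max_vf_id + vf_id + 2);
                    unsigned int wq_cq = rq_data; /* index(vcwI) == index(vdI) */

                    /* VF 0: wq 7, rq_sop 15, rq_data 13, wq_cq 13
                     * VF 1: wq 6, rq_sop 14, rq_data 12, wq_cq 12
                     */
                    assert(vf_id != 0 || (wq == 7 && rq_sop == 15 &&
                                          rq_data == 13 && wq_cq == 13));
                    assert(vf_id != 1 || (wq == 6 && rq_sop == 14 &&
                                          rq_data == 12 && wq_cq == 12));
            }
    }

Note that the CQ of a representor's SOP RQ sits at the SOP RQ's own index;
the RTE_ASSERT in enic_vf_representor_init() below checks exactly that.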
@@ -397,6 +467,10 @@ void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *stats);
 int enic_vf_representor_init(struct rte_eth_dev *eth_dev, void *init_params);
 int enic_vf_representor_uninit(struct rte_eth_dev *ethdev);
 int enic_fm_allocate_switch_domain(struct enic *pf);
+int enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq);
+void enic_rxmbuf_queue_release(struct enic *enic, struct vnic_rq *rq);
+void enic_free_wq_buf(struct rte_mbuf **buf);
+void enic_free_rq_buf(struct rte_mbuf **mbuf);
 extern const struct rte_flow_ops enic_flow_ops;
 extern const struct rte_flow_ops enic_fm_flow_ops;
 
drivers/net/enic/enic_main.c
index 4de65f6..d0d4103 100644
@@ -50,7 +50,7 @@ static int is_eth_addr_valid(uint8_t *addr)
        return !is_mcast_addr(addr) && !is_zero_addr(addr);
 }
 
-static void
+void
 enic_rxmbuf_queue_release(__rte_unused struct enic *enic, struct vnic_rq *rq)
 {
        uint16_t i;
@@ -68,7 +68,7 @@ enic_rxmbuf_queue_release(__rte_unused struct enic *enic, struct vnic_rq *rq)
        }
 }
 
-static void enic_free_wq_buf(struct rte_mbuf **buf)
+void enic_free_wq_buf(struct rte_mbuf **buf)
 {
        struct rte_mbuf *mbuf = *buf;
 
@@ -191,8 +191,7 @@ int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
        return err;
 }
 
-static void
-enic_free_rq_buf(struct rte_mbuf **mbuf)
+void enic_free_rq_buf(struct rte_mbuf **mbuf)
 {
        if (*mbuf == NULL)
                return;
@@ -275,7 +274,7 @@ void enic_init_vnic_resources(struct enic *enic)
 }
 
 
-static int
+int
 enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
 {
        struct rte_mbuf *mb;
@@ -806,16 +805,36 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
        unsigned int socket_id, struct rte_mempool *mp,
        uint16_t nb_desc, uint16_t free_thresh)
 {
+       struct enic_vf_representor *vf;
        int rc;
-       uint16_t sop_queue_idx = enic_rte_rq_idx_to_sop_idx(queue_idx);
-       uint16_t data_queue_idx = enic_rte_rq_idx_to_data_idx(queue_idx, enic);
-       struct vnic_rq *rq_sop = &enic->rq[sop_queue_idx];
-       struct vnic_rq *rq_data = &enic->rq[data_queue_idx];
+       uint16_t sop_queue_idx;
+       uint16_t data_queue_idx;
+       uint16_t cq_idx;
+       struct vnic_rq *rq_sop;
+       struct vnic_rq *rq_data;
        unsigned int mbuf_size, mbufs_per_pkt;
        unsigned int nb_sop_desc, nb_data_desc;
        uint16_t min_sop, max_sop, min_data, max_data;
        uint32_t max_rx_pkt_len;
 
+       /*
+        * Representor uses a reserved PF queue. Translate representor
+        * queue number to PF queue number.
+        */
+       if (enic_is_vf_rep(enic)) {
+               RTE_ASSERT(queue_idx == 0);
+               vf = VF_ENIC_TO_VF_REP(enic);
+               sop_queue_idx = vf->pf_rq_sop_idx;
+               data_queue_idx = vf->pf_rq_data_idx;
+               enic = vf->pf;
+               queue_idx = sop_queue_idx;
+       } else {
+               sop_queue_idx = enic_rte_rq_idx_to_sop_idx(queue_idx);
+               data_queue_idx = enic_rte_rq_idx_to_data_idx(queue_idx, enic);
+       }
+       cq_idx = enic_cq_rq(enic, sop_queue_idx);
+       rq_sop = &enic->rq[sop_queue_idx];
+       rq_data = &enic->rq[data_queue_idx];
        rq_sop->is_sop = 1;
        rq_sop->data_queue_idx = data_queue_idx;
        rq_data->is_sop = 0;
@@ -935,7 +954,7 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
                }
                nb_data_desc = rq_data->ring.desc_count;
        }
-       rc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
+       rc = vnic_cq_alloc(enic->vdev, &enic->cq[cq_idx], cq_idx,
                           socket_id, nb_sop_desc + nb_data_desc,
                           sizeof(struct cq_enet_rq_desc));
        if (rc) {
@@ -979,7 +998,7 @@ err_free_sop_mbuf:
        rte_free(rq_sop->mbuf_ring);
 err_free_cq:
        /* cleanup on error */
-       vnic_cq_free(&enic->cq[queue_idx]);
+       vnic_cq_free(&enic->cq[cq_idx]);
 err_free_rq_data:
        if (rq_data->in_use)
                vnic_rq_free(rq_data);
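
The net effect of the new prologue in enic_alloc_rq() is a small index
translation before the original allocation logic runs. A hedged restatement
(the resolve_rq_indexes() helper is illustrative, not in the patch):

    /* Resolve SOP/data RQ indices the way the new enic_alloc_rq()
     * prologue does. For a representor, the indices are the PF queues
     * reserved at init time; in both cases the CQ then follows the SOP
     * index: cq_idx = enic_cq_rq(enic, *sop).
     */
    static void resolve_rq_indexes(struct enic *enic, uint16_t queue_idx,
                                   uint16_t *sop, uint16_t *data)
    {
            if (enic_is_vf_rep(enic)) {
                    struct enic_vf_representor *vf = VF_ENIC_TO_VF_REP(enic);

                    *sop = vf->pf_rq_sop_idx;   /* reserved PF SOP RQ */
                    *data = vf->pf_rq_data_idx; /* reserved PF data RQ */
            } else {
                    *sop = enic_rte_rq_idx_to_sop_idx(queue_idx);
                    *data = enic_rte_rq_idx_to_data_idx(queue_idx, enic);
            }
    }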
@@ -1007,12 +1026,27 @@ void enic_free_wq(void *txq)
 int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
        unsigned int socket_id, uint16_t nb_desc)
 {
+       struct enic_vf_representor *vf;
        int err;
-       struct vnic_wq *wq = &enic->wq[queue_idx];
-       unsigned int cq_index = enic_cq_wq(enic, queue_idx);
+       struct vnic_wq *wq;
+       unsigned int cq_index;
        char name[RTE_MEMZONE_NAMESIZE];
        static int instance;
 
+       /*
+        * Representor uses a reserved PF queue. Translate representor
+        * queue number to PF queue number.
+        */
+       if (enic_is_vf_rep(enic)) {
+               RTE_ASSERT(queue_idx == 0);
+               vf = VF_ENIC_TO_VF_REP(enic);
+               queue_idx = vf->pf_wq_idx;
+               cq_index = vf->pf_wq_cq_idx;
+               enic = vf->pf;
+       } else {
+               cq_index = enic_cq_wq(enic, queue_idx);
+       }
+       wq = &enic->wq[queue_idx];
        wq->socket_id = socket_id;
        /*
         * rte_eth_tx_queue_setup() checks min, max, and alignment. So just
@@ -1448,6 +1482,17 @@ int enic_set_vnic_res(struct enic *enic)
        if (eth_dev->data->dev_conf.intr_conf.rxq) {
                required_intr += eth_dev->data->nb_rx_queues;
        }
+       ENICPMD_LOG(DEBUG, "Required queues for PF: rq %u wq %u cq %u",
+                   required_rq, required_wq, required_cq);
+       if (enic->vf_required_rq) {
+               /* Queues needed for VF representors */
+               required_rq += enic->vf_required_rq;
+               required_wq += enic->vf_required_wq;
+               required_cq += enic->vf_required_cq;
+               ENICPMD_LOG(DEBUG, "Required queues for VF representors: rq %u wq %u cq %u",
+                           enic->vf_required_rq, enic->vf_required_wq,
+                           enic->vf_required_cq);
+       }
 
        if (enic->conf_rq_count < required_rq) {
                dev_err(dev, "Not enough Receive queues. Requested:%u which uses %d RQs on VIC, Configured:%u\n",
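
For sizing purposes, each representor reserves a fixed bundle on top of the
PF's own requirements, per the increments in enic_vf_representor_init()
below. A hedged back-of-the-envelope with an illustrative representor count:

    /* Illustrative arithmetic only: V = 3 representors reserve
     *   3 extra WQs (1 per representor),
     *   6 extra RQs (SOP + data per representor), and
     *   6 extra CQs (1 for the RQ pair + 1 for the WQ per representor).
     */
    unsigned int num_reps = 3;
    unsigned int extra_wq = num_reps;
    unsigned int extra_rq = 2 * num_reps;
    unsigned int extra_cq = 2 * num_reps;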
@@ -1493,7 +1538,7 @@ enic_reinit_rq(struct enic *enic, unsigned int rq_idx)
 
        sop_rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
        data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(rq_idx, enic)];
-       cq_idx = rq_idx;
+       cq_idx = enic_cq_rq(enic, rq_idx);
 
        vnic_cq_clean(&enic->cq[cq_idx]);
        vnic_cq_init(&enic->cq[cq_idx],
drivers/net/enic/enic_vf_representor.c
index bc2d886..cb41bb1 100644
 #include "vnic_wq.h"
 #include "vnic_rq.h"
 
-static uint16_t enic_vf_recv_pkts(void *rx_queue __rte_unused,
-                                 struct rte_mbuf **rx_pkts __rte_unused,
-                                 uint16_t nb_pkts __rte_unused)
+static uint16_t enic_vf_recv_pkts(void *rx_queue,
+                                 struct rte_mbuf **rx_pkts,
+                                 uint16_t nb_pkts)
 {
-       return 0;
+       return enic_recv_pkts(rx_queue, rx_pkts, nb_pkts);
 }
 
-static uint16_t enic_vf_xmit_pkts(void *tx_queue __rte_unused,
-                                 struct rte_mbuf **tx_pkts __rte_unused,
-                                 uint16_t nb_pkts __rte_unused)
+static uint16_t enic_vf_xmit_pkts(void *tx_queue,
+                                 struct rte_mbuf **tx_pkts,
+                                 uint16_t nb_pkts)
 {
-       return 0;
+       return enic_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
 }
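
Because queue setup (below) stores pointers to the PF's vnic_wq and vnic_rq
directly in the representor's queue arrays, these one-line wrappers are
enough for the standard burst API to drive the reserved PF rings. A hedged
usage sketch, where rep_port_id and the burst size are illustrative:

    struct rte_mbuf *pkts[32];
    uint16_t n;

    /* Packets arrive on the PF RQ reserved for this representor */
    n = rte_eth_rx_burst(rep_port_id, 0, pkts, 32); /* enic_vf_recv_pkts */
    /* Transmits go out the PF WQ reserved for this representor */
    n = rte_eth_tx_burst(rep_port_id, 0, pkts, n);  /* enic_vf_xmit_pkts */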
 
-static int enic_vf_dev_tx_queue_setup(struct rte_eth_dev *eth_dev __rte_unused,
-       uint16_t queue_idx __rte_unused,
-       uint16_t nb_desc __rte_unused,
-       unsigned int socket_id __rte_unused,
-       const struct rte_eth_txconf *tx_conf __rte_unused)
+static int enic_vf_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
+       uint16_t queue_idx,
+       uint16_t nb_desc,
+       unsigned int socket_id,
+       const struct rte_eth_txconf *tx_conf)
 {
+       struct enic_vf_representor *vf;
+       struct vnic_wq *wq;
+       struct enic *pf;
+       int err;
+
        ENICPMD_FUNC_TRACE();
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -E_RTE_SECONDARY;
+       /* Only one queue now */
+       if (queue_idx != 0)
+               return -EINVAL;
+       vf = eth_dev->data->dev_private;
+       pf = vf->pf;
+       wq = &pf->wq[vf->pf_wq_idx];
+       wq->offloads = tx_conf->offloads |
+               eth_dev->data->dev_conf.txmode.offloads;
+       eth_dev->data->tx_queues[0] = (void *)wq;
+       /* Pass vf not pf because of cq index calculation. See enic_alloc_wq */
+       err = enic_alloc_wq(&vf->enic, queue_idx, socket_id, nb_desc);
+       if (err) {
+               ENICPMD_LOG(ERR, "error in allocating wq\n");
+               return err;
+       }
        return 0;
 }
 
-static void enic_vf_dev_tx_queue_release(void *txq __rte_unused)
+static void enic_vf_dev_tx_queue_release(void *txq)
 {
        ENICPMD_FUNC_TRACE();
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;
+       enic_free_wq(txq);
 }
 
-static int enic_vf_dev_rx_queue_setup(struct rte_eth_dev *eth_dev __rte_unused,
-       uint16_t queue_idx __rte_unused,
-       uint16_t nb_desc __rte_unused,
-       unsigned int socket_id __rte_unused,
-       const struct rte_eth_rxconf *rx_conf __rte_unused,
-       struct rte_mempool *mp __rte_unused)
+static int enic_vf_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
+       uint16_t queue_idx,
+       uint16_t nb_desc,
+       unsigned int socket_id,
+       const struct rte_eth_rxconf *rx_conf,
+       struct rte_mempool *mp)
 {
+       struct enic_vf_representor *vf;
+       struct enic *pf;
+       int ret;
+
        ENICPMD_FUNC_TRACE();
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -E_RTE_SECONDARY;
+       /* Only 1 queue now */
+       if (queue_idx != 0)
+               return -EINVAL;
+       vf = eth_dev->data->dev_private;
+       pf = vf->pf;
+       eth_dev->data->rx_queues[queue_idx] =
+               (void *)&pf->rq[vf->pf_rq_sop_idx];
+       ret = enic_alloc_rq(&vf->enic, queue_idx, socket_id, mp, nb_desc,
+                           rx_conf->rx_free_thresh);
+       if (ret) {
+               ENICPMD_LOG(ERR, "error in allocating rq\n");
+               return ret;
+       }
        return 0;
 }
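
A hedged sketch of the application-side calls that land in the two setup
callbacks above (identifiers and descriptor counts illustrative; only queue
index 0 is accepted):

    /* One Rx and one Tx queue, both at index 0 */
    ret = rte_eth_rx_queue_setup(rep_port_id, 0, 64, rte_socket_id(),
                                 NULL, mb_pool);
    if (ret < 0)
            return ret;
    ret = rte_eth_tx_queue_setup(rep_port_id, 0, 64, rte_socket_id(),
                                 NULL);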
 
-static void enic_vf_dev_rx_queue_release(void *rxq __rte_unused)
+static void enic_vf_dev_rx_queue_release(void *rxq)
 {
        ENICPMD_FUNC_TRACE();
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;
+       enic_free_rq(rxq);
 }
 
 static int enic_vf_dev_configure(struct rte_eth_dev *eth_dev __rte_unused)
@@ -88,6 +127,9 @@ static int enic_vf_dev_configure(struct rte_eth_dev *eth_dev __rte_unused)
 static int enic_vf_dev_start(struct rte_eth_dev *eth_dev)
 {
        struct enic_vf_representor *vf;
+       struct vnic_rq *data_rq;
+       int index, cq_idx;
+       struct enic *pf;
        int ret;
 
        ENICPMD_FUNC_TRACE();
@@ -95,6 +137,7 @@ static int enic_vf_dev_start(struct rte_eth_dev *eth_dev)
                return -E_RTE_SECONDARY;
 
        vf = eth_dev->data->dev_private;
+       pf = vf->pf;
        /* Remove all packet filters so no ingress packets go to VF.
         * When PF enables switchdev, it will ensure packet filters
         * are removed.  So, this is not technically needed.
@@ -105,14 +148,90 @@ static int enic_vf_dev_start(struct rte_eth_dev *eth_dev)
                ENICPMD_LOG(ERR, "Cannot clear packet filters");
                return ret;
        }
+
+       /* Start WQ: see enic_init_vnic_resources */
+       index = vf->pf_wq_idx;
+       cq_idx = vf->pf_wq_cq_idx;
+       vnic_wq_init(&pf->wq[index], cq_idx, 1, 0);
+       vnic_cq_init(&pf->cq[cq_idx],
+                    0 /* flow_control_enable */,
+                    1 /* color_enable */,
+                    0 /* cq_head */,
+                    0 /* cq_tail */,
+                    1 /* cq_tail_color */,
+                    0 /* interrupt_enable */,
+                    0 /* cq_entry_enable */,
+                    1 /* cq_message_enable */,
+                    0 /* interrupt offset */,
+                    (uint64_t)pf->wq[index].cqmsg_rz->iova);
+       /* enic_start_wq */
+       vnic_wq_enable(&pf->wq[index]);
+       eth_dev->data->tx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED;
+
+       /* Start RQ: see enic_init_vnic_resources */
+       index = vf->pf_rq_sop_idx;
+       cq_idx = enic_cq_rq(vf->pf, index);
+       vnic_rq_init(&pf->rq[index], cq_idx, 1, 0);
+       data_rq = &pf->rq[vf->pf_rq_data_idx];
+       if (data_rq->in_use)
+               vnic_rq_init(data_rq, cq_idx, 1, 0);
+       vnic_cq_init(&pf->cq[cq_idx],
+                    0 /* flow_control_enable */,
+                    1 /* color_enable */,
+                    0 /* cq_head */,
+                    0 /* cq_tail */,
+                    1 /* cq_tail_color */,
+                    0 /* interrupt_enable */,
+                    1 /* cq_entry_enable */,
+                    0 /* cq_message_enable */,
+                    0 /* interrupt offset */,
+                    0 /* cq_message_addr */);
+       /* enic_enable */
+       ret = enic_alloc_rx_queue_mbufs(pf, &pf->rq[index]);
+       if (ret) {
+               ENICPMD_LOG(ERR, "Failed to alloc sop RX queue mbufs\n");
+               return ret;
+       }
+       ret = enic_alloc_rx_queue_mbufs(pf, data_rq);
+       if (ret) {
+               /* Release the allocated mbufs for the sop rq */
+               enic_rxmbuf_queue_release(pf, &pf->rq[index]);
+               ENICPMD_LOG(ERR, "Failed to alloc data RX queue mbufs\n");
+               return ret;
+       }
+       enic_start_rq(pf, vf->pf_rq_sop_idx);
+       eth_dev->data->tx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED;
+       eth_dev->data->rx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED;
        return 0;
 }
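
Note the asymmetric CQ programming above: the WQ's CQ is initialized in
message mode (cq_message_enable) while the RQ's CQ uses entry mode
(cq_entry_enable), mirroring what enic_init_vnic_resources() does for PF
queues. A hedged sketch of triggering this path, with rep_port_id
illustrative:

    /* rte_eth_dev_start() on the representor port lands in
     * enic_vf_dev_start() above, which borrows the PF's reserved
     * WQ/RQ/CQ and posts the initial Rx mbufs.
     */
    int ret = rte_eth_dev_start(rep_port_id);
    if (ret < 0)
            rte_exit(EXIT_FAILURE, "cannot start representor port %u\n",
                     rep_port_id);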
 
-static void enic_vf_dev_stop(struct rte_eth_dev *eth_dev __rte_unused)
+static void enic_vf_dev_stop(struct rte_eth_dev *eth_dev)
 {
+       struct enic_vf_representor *vf;
+       struct vnic_rq *rq;
+       struct enic *pf;
+
        ENICPMD_FUNC_TRACE();
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;
+       /* Undo dev_start. Disable/clean WQ */
+       vf = eth_dev->data->dev_private;
+       pf = vf->pf;
+       vnic_wq_disable(&pf->wq[vf->pf_wq_idx]);
+       vnic_wq_clean(&pf->wq[vf->pf_wq_idx], enic_free_wq_buf);
+       vnic_cq_clean(&pf->cq[vf->pf_wq_cq_idx]);
+       /* Disable/clean RQ */
+       rq = &pf->rq[vf->pf_rq_sop_idx];
+       vnic_rq_disable(rq);
+       vnic_rq_clean(rq, enic_free_rq_buf);
+       rq = &pf->rq[vf->pf_rq_data_idx];
+       if (rq->in_use) {
+               vnic_rq_disable(rq);
+               vnic_rq_clean(rq, enic_free_rq_buf);
+       }
+       vnic_cq_clean(&pf->cq[enic_cq_rq(vf->pf, vf->pf_rq_sop_idx)]);
+       eth_dev->data->tx_queue_state[0] = RTE_ETH_QUEUE_STATE_STOPPED;
+       eth_dev->data->rx_queue_state[0] = RTE_ETH_QUEUE_STATE_STOPPED;
 }
 
 /*
@@ -354,6 +473,31 @@ int enic_vf_representor_init(struct rte_eth_dev *eth_dev, void *init_params)
        vf->enic.switchdev_mode = pf->switchdev_mode;
        /* Only switchdev is supported now */
        RTE_ASSERT(vf->enic.switchdev_mode);
+       /* Allocate WQ, RQ, CQ for the representor */
+       vf->pf_wq_idx = vf_wq_idx(vf);
+       vf->pf_wq_cq_idx = vf_wq_cq_idx(vf);
+       vf->pf_rq_sop_idx = vf_rq_sop_idx(vf);
+       vf->pf_rq_data_idx = vf_rq_data_idx(vf);
+       /* Remove these assertions once queue allocation has an easy-to-use
+        * allocator API instead of index number calculations used throughout
+        * the driver.
+        */
+       RTE_ASSERT(enic_cq_rq(pf, vf->pf_rq_sop_idx) == vf->pf_rq_sop_idx);
+       RTE_ASSERT(enic_rte_rq_idx_to_sop_idx(vf->pf_rq_sop_idx) ==
+                  vf->pf_rq_sop_idx);
+       /* RX handlers use enic_cq_rq(sop) to get CQ, so do not save it */
+       pf->vf_required_wq++;
+       pf->vf_required_rq += 2; /* sop and data */
+       pf->vf_required_cq += 2; /* 1 for rq sop and 1 for wq */
+       ENICPMD_LOG(DEBUG, "vf_id %u wq %u rq_sop %u rq_data %u wq_cq %u rq_cq %u",
+               vf->vf_id, vf->pf_wq_idx, vf->pf_rq_sop_idx, vf->pf_rq_data_idx,
+               vf->pf_wq_cq_idx, enic_cq_rq(pf, vf->pf_rq_sop_idx));
+       if (enic_cq_rq(pf, vf->pf_rq_sop_idx) >= pf->conf_cq_count) {
+               ENICPMD_LOG(ERR, "Insufficient CQs. Please ensure number of CQs (%u)"
+                           " >= number of RQs (%u) in CIMC or UCSM",
+                           pf->conf_cq_count, pf->conf_rq_count);
+               return -EINVAL;
+       }
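
The check above encodes the scheme's one provisioning requirement,
len(CQ array) >= len(RQ array): the highest representor RQ CQ index equals
the highest RQ index. A hedged worked example with illustrative counts:

    /* With conf_rq_count = 16 and V = 2 representors, VF 0's SOP RQ is
     * index 15 and its RQ CQ is enic_cq_rq(pf, 15) == 15, so init
     * requires conf_cq_count >= 16, i.e. #CQs >= #RQs on the VIC.
     */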
 
        /* Check for non-existent VFs */
        pdev = RTE_ETH_DEV_TO_PCI(pf->rte_dev);