net/bnxt: use dedicated CPR for async events
author     Lance Richardson <lance.richardson@broadcom.com>
           Wed, 24 Jul 2019 16:49:32 +0000 (12:49 -0400)
committer  Ferruh Yigit <ferruh.yigit@intel.com>
           Thu, 25 Jul 2019 09:43:05 +0000 (11:43 +0200)
This commit enables the creation of a dedicated completion
ring for asynchronous event handling instead of handling these
events on a receive completion ring.

For the stingray platform and other platforms needing tighter
control of resource utilization, we retain the ability to
process async events on a receive completion ring.

For Thor-based adapters, we use a dedicated NQ (notification
queue) ring for async events (async events can't currently
be received on a completion ring due to a firmware limitation).

Rename "def_cp_ring" to "async_cp_ring" to better reflect its
purpose (async event notifications) and to avoid confusion with
VNIC default receive completion rings.

Allow rxq 0 to be stopped when not being used for async events.

Signed-off-by: Lance Richardson <lance.richardson@broadcom.com>
Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
drivers/net/bnxt/bnxt.h
drivers/net/bnxt/bnxt_ethdev.c
drivers/net/bnxt/bnxt_hwrm.c
drivers/net/bnxt/bnxt_hwrm.h
drivers/net/bnxt/bnxt_irq.c
drivers/net/bnxt/bnxt_ring.c
drivers/net/bnxt/bnxt_ring.h
drivers/net/bnxt/bnxt_rxq.c
drivers/net/bnxt/bnxt_rxr.c
drivers/net/bnxt/bnxt_rxtx_vec_sse.c

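As background (not part of the patch), the platform-dependent accounting described in the commit message comes down to the new BNXT_NUM_ASYNC_CPR() macro added in bnxt.h below. A minimal standalone sketch of that decision, with plain booleans standing in for the RTE_ARCH_ARM64 build flag and the BNXT_STINGRAY(bp) runtime check:

#include <stdbool.h>
#include <stdio.h>

/* Sketch only: the driver evaluates RTE_ARCH_ARM64 at compile time and
 * BNXT_STINGRAY(bp) from bp->flags; here both are plain parameters. */
static int num_async_cpr(bool arm64_build, bool is_stingray)
{
	/* Stingray keeps async events on RX completion ring 0 to save a ring. */
	if (arm64_build && is_stingray)
		return 0;
	/* Every other platform gets one dedicated async completion ring. */
	return 1;
}

int main(void)
{
	printf("stingray: %d async CPR(s)\n", num_async_cpr(true, true));
	printf("others  : %d async CPR(s)\n", num_async_cpr(false, false));
	return 0;
}
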
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 93194bb..0c9f994 100644
 #define BNXT_MAX_RX_RING_DESC  8192
 #define BNXT_DB_SIZE           0x80
 
+#ifdef RTE_ARCH_ARM64
+#define BNXT_NUM_ASYNC_CPR(bp) (BNXT_STINGRAY(bp) ? 0 : 1)
+#else
+#define BNXT_NUM_ASYNC_CPR(bp) 1
+#endif
+
 /* Chimp Communication Channel */
 #define GRCPF_REG_CHIMP_CHANNEL_OFFSET         0x0
 #define GRCPF_REG_CHIMP_COMM_TRIGGER           0x100
@@ -351,6 +357,7 @@ struct bnxt {
 #define BNXT_FLAG_TRUSTED_VF_EN        (1 << 11)
 #define BNXT_FLAG_DFLT_VNIC_SET        (1 << 12)
 #define BNXT_FLAG_THOR_CHIP    (1 << 13)
+#define BNXT_FLAG_STINGRAY     (1 << 14)
 #define BNXT_FLAG_EXT_STATS_SUPPORTED  (1 << 29)
 #define BNXT_FLAG_NEW_RM       (1 << 30)
 #define BNXT_FLAG_INIT_DONE    (1U << 31)
@@ -363,6 +370,7 @@ struct bnxt {
 #define BNXT_USE_KONG(bp)      ((bp)->flags & BNXT_FLAG_KONG_MB_EN)
 #define BNXT_VF_IS_TRUSTED(bp) ((bp)->flags & BNXT_FLAG_TRUSTED_VF_EN)
 #define BNXT_CHIP_THOR(bp)     ((bp)->flags & BNXT_FLAG_THOR_CHIP)
+#define BNXT_STINGRAY(bp)      ((bp)->flags & BNXT_FLAG_STINGRAY)
 #define BNXT_HAS_NQ(bp)                BNXT_CHIP_THOR(bp)
 #define BNXT_HAS_RING_GRPS(bp) (!BNXT_CHIP_THOR(bp))
 
@@ -387,7 +395,7 @@ struct bnxt {
        uint16_t                fw_tx_port_stats_ext_size;
 
        /* Default completion ring */
-       struct bnxt_cp_ring_info        *def_cp_ring;
+       struct bnxt_cp_ring_info        *async_cp_ring;
        uint32_t                max_ring_grps;
        struct bnxt_ring_grp_info       *grp_info;
 
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index ded9706..2a8b502 100644
@@ -200,12 +200,17 @@ static void bnxt_free_mem(struct bnxt *bp)
        bnxt_free_stats(bp);
        bnxt_free_tx_rings(bp);
        bnxt_free_rx_rings(bp);
+       bnxt_free_async_cp_ring(bp);
 }
 
 static int bnxt_alloc_mem(struct bnxt *bp)
 {
        int rc;
 
+       rc = bnxt_alloc_async_ring_struct(bp);
+       if (rc)
+               goto alloc_mem_err;
+
        rc = bnxt_alloc_vnic_mem(bp);
        if (rc)
                goto alloc_mem_err;
@@ -218,6 +223,10 @@ static int bnxt_alloc_mem(struct bnxt *bp)
        if (rc)
                goto alloc_mem_err;
 
+       rc = bnxt_alloc_async_cp_ring(bp);
+       if (rc)
+               goto alloc_mem_err;
+
        return 0;
 
 alloc_mem_err:
@@ -617,8 +626,8 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
        /* Inherit new configurations */
        if (eth_dev->data->nb_rx_queues > bp->max_rx_rings ||
            eth_dev->data->nb_tx_queues > bp->max_tx_rings ||
-           eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
-           bp->max_cp_rings ||
+           eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues
+               + BNXT_NUM_ASYNC_CPR(bp) > bp->max_cp_rings ||
            eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
            bp->max_stat_ctx)
                goto resource_error;
@@ -3802,6 +3811,12 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
            pci_dev->id.device_id == BROADCOM_DEV_ID_57500_VF2)
                bp->flags |= BNXT_FLAG_THOR_CHIP;
 
+       if (pci_dev->id.device_id == BROADCOM_DEV_ID_58802 ||
+           pci_dev->id.device_id == BROADCOM_DEV_ID_58804 ||
+           pci_dev->id.device_id == BROADCOM_DEV_ID_58808 ||
+           pci_dev->id.device_id == BROADCOM_DEV_ID_58802_VF)
+               bp->flags |= BNXT_FLAG_STINGRAY;
+
        rc = bnxt_init_board(eth_dev);
        if (rc) {
                PMD_DRV_LOG(ERR,
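
To make the reworked check in bnxt_dev_configure_op() concrete: it now budgets one extra completion ring whenever a dedicated async CPR is in use. A simplified sketch of that accounting (illustrative, not the driver's code):

#include <stdbool.h>

/* Simplified configure-time check: RX + TX completion rings plus the
 * (0 or 1) async completion ring must fit within the function's budget. */
static bool cp_rings_fit(unsigned int nb_rxq, unsigned int nb_txq,
			 unsigned int num_async_cpr, unsigned int max_cp_rings)
{
	return nb_rxq + nb_txq + num_async_cpr <= max_cp_rings;
}

The same +BNXT_NUM_ASYNC_CPR(bp) term is added to the completion-ring and stat-context counts requested from firmware in the bnxt_hwrm.c hunk that follows.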
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 045ce4a..6437747 100644
@@ -737,9 +737,12 @@ int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
        req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
        req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
                                            AGG_RING_MULTIPLIER);
-       req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
+       req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings +
+                                            bp->tx_nr_rings +
+                                            BNXT_NUM_ASYNC_CPR(bp));
        req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
-                                             bp->tx_nr_rings);
+                                             bp->tx_nr_rings +
+                                             BNXT_NUM_ASYNC_CPR(bp));
        req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
        if (bp->vf_resv_strategy ==
            HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
@@ -2073,7 +2076,7 @@ int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
        return rc;
 }
 
-static void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
 {
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
 
@@ -2083,9 +2086,10 @@ static void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
        memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
                                     sizeof(*cpr->cp_desc_ring));
        cpr->cp_raw_cons = 0;
+       cpr->valid = 0;
 }
 
-static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
 {
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
 
@@ -3212,7 +3216,7 @@ int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
        req.enables = rte_cpu_to_le_32(
                        HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
        req.async_event_cr = rte_cpu_to_le_16(
-                       bp->def_cp_ring->cp_ring_struct->fw_ring_id);
+                       bp->async_cp_ring->cp_ring_struct->fw_ring_id);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
        HWRM_CHECK_RESULT();
@@ -3232,7 +3236,7 @@ int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
        req.enables = rte_cpu_to_le_32(
                        HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
        req.async_event_cr = rte_cpu_to_le_16(
-                       bp->def_cp_ring->cp_ring_struct->fw_ring_id);
+                       bp->async_cp_ring->cp_ring_struct->fw_ring_id);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
        HWRM_CHECK_RESULT();
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index 37aaa1a..c882fc2 100644
@@ -119,6 +119,8 @@ int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp);
 int bnxt_free_all_hwrm_rings(struct bnxt *bp);
 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp);
 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp);
+void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr);
+void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr);
 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic);
 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic);
 void bnxt_free_all_hwrm_resources(struct bnxt *bp);
diff --git a/drivers/net/bnxt/bnxt_irq.c b/drivers/net/bnxt/bnxt_irq.c
index 9016871..a22700a 100644
@@ -22,7 +22,7 @@ static void bnxt_int_handler(void *param)
 {
        struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
        struct bnxt *bp = eth_dev->data->dev_private;
-       struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
+       struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;
        struct cmpl_base *cmp;
        uint32_t raw_cons;
        uint32_t cons;
@@ -43,10 +43,13 @@ static void bnxt_int_handler(void *param)
 
                bnxt_event_hwrm_resp_handler(bp, cmp);
                raw_cons = NEXT_RAW_CMP(raw_cons);
-       };
+       }
 
        cpr->cp_raw_cons = raw_cons;
-       B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
+       if (BNXT_HAS_NQ(bp))
+               bnxt_db_nq_arm(cpr);
+       else
+               B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
 }
 
 int bnxt_free_int(struct bnxt *bp)
@@ -92,19 +95,35 @@ int bnxt_free_int(struct bnxt *bp)
 
 void bnxt_disable_int(struct bnxt *bp)
 {
-       struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
+       struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;
+
+       if (BNXT_NUM_ASYNC_CPR(bp) == 0)
+               return;
+
+       if (!cpr || !cpr->cp_db.doorbell)
+               return;
 
        /* Only the default completion ring */
-       if (cpr != NULL && cpr->cp_db.doorbell != NULL)
+       if (BNXT_HAS_NQ(bp))
+               bnxt_db_nq(cpr);
+       else
                B_CP_DB_DISARM(cpr);
 }
 
 void bnxt_enable_int(struct bnxt *bp)
 {
-       struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
+       struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;
+
+       if (BNXT_NUM_ASYNC_CPR(bp) == 0)
+               return;
+
+       if (!cpr || !cpr->cp_db.doorbell)
+               return;
 
        /* Only the default completion ring */
-       if (cpr != NULL && cpr->cp_db.doorbell != NULL)
+       if (BNXT_HAS_NQ(bp))
+               bnxt_db_nq_arm(cpr);
+       else
                B_CP_DB_ARM(cpr);
 }
 
@@ -112,7 +131,7 @@ int bnxt_setup_int(struct bnxt *bp)
 {
        uint16_t total_vecs;
        const int len = sizeof(bp->irq_tbl[0].name);
-       int i, rc = 0;
+       int i;
 
        /* DPDK host only supports 1 MSI-X vector */
        total_vecs = 1;
@@ -126,14 +145,11 @@ int bnxt_setup_int(struct bnxt *bp)
                        bp->irq_tbl[i].handler = bnxt_int_handler;
                }
        } else {
-               rc = -ENOMEM;
-               goto setup_exit;
+               PMD_DRV_LOG(ERR, "bnxt_irq_tbl setup failed\n");
+               return -ENOMEM;
        }
-       return 0;
 
-setup_exit:
-       PMD_DRV_LOG(ERR, "bnxt_irq_tbl setup failed\n");
-       return rc;
+       return 0;
 }
 
 int bnxt_request_int(struct bnxt *bp)
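
For context, the async ring serviced by bnxt_int_handler() above is what ultimately delivers events such as link-status changes to applications. A generic usage sketch (standard ethdev callback API, not part of this patch) of consuming those events:

#include <stdio.h>
#include <rte_ethdev.h>

/* Generic DPDK usage: receive link-status-change notifications that the
 * PMD raises from its async event handling. */
static int
lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
	     void *cb_arg, void *ret_param)
{
	(void)cb_arg;
	(void)ret_param;
	if (event == RTE_ETH_EVENT_INTR_LSC)
		printf("port %u: link status changed\n", (unsigned int)port_id);
	return 0;
}

/* After port setup:
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *				      lsc_event_cb, NULL);
 */
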
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index a9952e0..be15b4b 100644
@@ -5,6 +5,7 @@
 
 #include <rte_bitmap.h>
 #include <rte_memzone.h>
+#include <rte_malloc.h>
 #include <unistd.h>
 
 #include "bnxt.h"
@@ -369,6 +370,7 @@ static int bnxt_alloc_cmpl_ring(struct bnxt *bp, int queue_index,
 {
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
        uint32_t nq_ring_id = HWRM_NA_SIGNATURE;
+       int cp_ring_index = queue_index + BNXT_NUM_ASYNC_CPR(bp);
        uint8_t ring_type;
        int rc = 0;
 
@@ -383,13 +385,13 @@ static int bnxt_alloc_cmpl_ring(struct bnxt *bp, int queue_index,
                }
        }
 
-       rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, queue_index,
+       rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, cp_ring_index,
                                  HWRM_NA_SIGNATURE, nq_ring_id);
        if (rc)
                return rc;
 
        cpr->cp_cons = 0;
-       bnxt_set_db(bp, &cpr->cp_db, ring_type, queue_index,
+       bnxt_set_db(bp, &cpr->cp_db, ring_type, cp_ring_index,
                    cp_ring->fw_ring_id);
        bnxt_db_cq(cpr);
 
@@ -400,6 +402,7 @@ static int bnxt_alloc_nq_ring(struct bnxt *bp, int queue_index,
                              struct bnxt_cp_ring_info *nqr)
 {
        struct bnxt_ring *nq_ring = nqr->cp_ring_struct;
+       int nq_ring_index = queue_index + BNXT_NUM_ASYNC_CPR(bp);
        uint8_t ring_type;
        int rc = 0;
 
@@ -408,12 +411,12 @@ static int bnxt_alloc_nq_ring(struct bnxt *bp, int queue_index,
 
        ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ;
 
-       rc = bnxt_hwrm_ring_alloc(bp, nq_ring, ring_type, queue_index,
+       rc = bnxt_hwrm_ring_alloc(bp, nq_ring, ring_type, nq_ring_index,
                                  HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE);
        if (rc)
                return rc;
 
-       bnxt_set_db(bp, &nqr->cp_db, ring_type, queue_index,
+       bnxt_set_db(bp, &nqr->cp_db, ring_type, nq_ring_index,
                    nq_ring->fw_ring_id);
        bnxt_db_nq(nqr);
 
@@ -490,14 +493,16 @@ int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
        struct bnxt_cp_ring_info *nqr = rxq->nq_ring;
        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
-       int rc = 0;
+       int rc;
 
        if (BNXT_HAS_NQ(bp)) {
-               if (bnxt_alloc_nq_ring(bp, queue_index, nqr))
+               rc = bnxt_alloc_nq_ring(bp, queue_index, nqr);
+               if (rc)
                        goto err_out;
        }
 
-       if (bnxt_alloc_cmpl_ring(bp, queue_index, cpr, nqr))
+       rc = bnxt_alloc_cmpl_ring(bp, queue_index, cpr, nqr);
+       if (rc)
                goto err_out;
 
        if (BNXT_HAS_RING_GRPS(bp)) {
@@ -505,22 +510,24 @@ int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)
                bp->grp_info[queue_index].cp_fw_ring_id = cp_ring->fw_ring_id;
        }
 
-       if (!queue_index) {
+       if (!BNXT_NUM_ASYNC_CPR(bp) && !queue_index) {
                /*
-                * In order to save completion resources, use the first
-                * completion ring from PF or VF as the default completion ring
-                * for async event and HWRM forward response handling.
+                * If a dedicated async event completion ring is not enabled,
+                * use the first completion ring from PF or VF as the default
+                * completion ring for async event handling.
                 */
-               bp->def_cp_ring = cpr;
+               bp->async_cp_ring = cpr;
                rc = bnxt_hwrm_set_async_event_cr(bp);
                if (rc)
                        goto err_out;
        }
 
-       if (bnxt_alloc_rx_ring(bp, queue_index))
+       rc = bnxt_alloc_rx_ring(bp, queue_index);
+       if (rc)
                goto err_out;
 
-       if (bnxt_alloc_rx_agg_ring(bp, queue_index))
+       rc = bnxt_alloc_rx_agg_ring(bp, queue_index);
+       if (rc)
                goto err_out;
 
        rxq->rx_buf_use_size = BNXT_MAX_MTU + RTE_ETHER_HDR_LEN +
@@ -539,12 +546,13 @@ int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)
                bnxt_db_write(&rxr->ag_db, rxr->ag_prod);
        }
        rxq->index = queue_index;
-       PMD_DRV_LOG(INFO,
-                   "queue %d, rx_deferred_start %d, state %d!\n",
-                   queue_index, rxq->rx_deferred_start,
-                   bp->eth_dev->data->rx_queue_state[queue_index]);
+
+       return 0;
 
 err_out:
+       PMD_DRV_LOG(ERR,
+                   "Failed to allocate receive queue %d, rc %d.\n",
+                   queue_index, rc);
        return rc;
 }
 
@@ -583,15 +591,13 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
                }
 
                bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);
-
-               if (!i) {
+               if (!BNXT_NUM_ASYNC_CPR(bp) && !i) {
                        /*
-                        * In order to save completion resource, use the first
-                        * completion ring from PF or VF as the default
-                        * completion ring for async event & HWRM
-                        * forward response handling.
+                        * If a dedicated async event completion ring is not
+                        * enabled, use the first completion ring as the default
+                        * completion ring for async event handling.
                         */
-                       bp->def_cp_ring = cpr;
+                       bp->async_cp_ring = cpr;
                        rc = bnxt_hwrm_set_async_event_cr(bp);
                        if (rc)
                                goto err_out;
@@ -652,3 +658,98 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
 err_out:
        return rc;
 }
+
+/* Allocate dedicated async completion ring. */
+int bnxt_alloc_async_cp_ring(struct bnxt *bp)
+{
+       struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;
+       struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
+       uint8_t ring_type;
+       int rc;
+
+       if (BNXT_NUM_ASYNC_CPR(bp) == 0)
+               return 0;
+
+       if (BNXT_HAS_NQ(bp))
+               ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ;
+       else
+               ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL;
+
+       rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, 0,
+                                 HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE);
+
+       if (rc)
+               return rc;
+
+       cpr->cp_cons = 0;
+       cpr->valid = 0;
+       bnxt_set_db(bp, &cpr->cp_db, ring_type, 0,
+                   cp_ring->fw_ring_id);
+
+       if (BNXT_HAS_NQ(bp))
+               bnxt_db_nq(cpr);
+       else
+               bnxt_db_cq(cpr);
+
+       return bnxt_hwrm_set_async_event_cr(bp);
+}
+
+/* Free dedicated async completion ring. */
+void bnxt_free_async_cp_ring(struct bnxt *bp)
+{
+       struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;
+
+       if (BNXT_NUM_ASYNC_CPR(bp) == 0 || cpr == NULL)
+               return;
+
+       if (BNXT_HAS_NQ(bp))
+               bnxt_free_nq_ring(bp, cpr);
+       else
+               bnxt_free_cp_ring(bp, cpr);
+
+       bnxt_free_ring(cpr->cp_ring_struct);
+       rte_free(cpr->cp_ring_struct);
+       cpr->cp_ring_struct = NULL;
+       rte_free(cpr);
+       bp->async_cp_ring = NULL;
+}
+
+int bnxt_alloc_async_ring_struct(struct bnxt *bp)
+{
+       struct bnxt_cp_ring_info *cpr = NULL;
+       struct bnxt_ring *ring = NULL;
+       unsigned int socket_id;
+
+       if (BNXT_NUM_ASYNC_CPR(bp) == 0)
+               return 0;
+
+       socket_id = rte_lcore_to_socket_id(rte_get_master_lcore());
+
+       cpr = rte_zmalloc_socket("cpr",
+                                sizeof(struct bnxt_cp_ring_info),
+                                RTE_CACHE_LINE_SIZE, socket_id);
+       if (cpr == NULL)
+               return -ENOMEM;
+
+       ring = rte_zmalloc_socket("bnxt_cp_ring_struct",
+                                 sizeof(struct bnxt_ring),
+                                 RTE_CACHE_LINE_SIZE, socket_id);
+       if (ring == NULL) {
+               rte_free(cpr);
+               return -ENOMEM;
+       }
+
+       ring->bd = (void *)cpr->cp_desc_ring;
+       ring->bd_dma = cpr->cp_desc_mapping;
+       ring->ring_size = rte_align32pow2(DEFAULT_CP_RING_SIZE);
+       ring->ring_mask = ring->ring_size - 1;
+       ring->vmem_size = 0;
+       ring->vmem = NULL;
+
+       bp->async_cp_ring = cpr;
+       cpr->cp_ring_struct = ring;
+
+       return bnxt_alloc_rings(bp, 0, NULL, NULL,
+                               bp->async_cp_ring, NULL,
+                               "def_cp");
+}
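
The queue_index + BNXT_NUM_ASYNC_CPR(bp) offsets introduced in bnxt_alloc_cmpl_ring() and bnxt_alloc_nq_ring() above keep logical ring index 0 free for the dedicated async ring. A trivial sketch of the resulting index layout (illustrative only):

/* Illustrative only: with one async CPR, logical index 0 is the async ring
 * and per-queue completion/notification rings start at index 1; with zero
 * async CPRs the per-queue rings keep their original indices. */
static int cp_logical_index(int queue_index, int num_async_cpr)
{
	return queue_index + num_async_cpr;
}
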
diff --git a/drivers/net/bnxt/bnxt_ring.h b/drivers/net/bnxt/bnxt_ring.h
index e5cef3a..04c7b04 100644
@@ -75,6 +75,9 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
                            const char *suffix);
 int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index);
 int bnxt_alloc_hwrm_rings(struct bnxt *bp);
+int bnxt_alloc_async_cp_ring(struct bnxt *bp);
+void bnxt_free_async_cp_ring(struct bnxt *bp);
+int bnxt_alloc_async_ring_struct(struct bnxt *bp);
 
 static inline void bnxt_db_write(struct bnxt_db_info *db, uint32_t idx)
 {
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index e0eb890..1d95f11 100644
@@ -411,10 +411,11 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
                return -EINVAL;
        }
 
-       dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
-
        bnxt_free_hwrm_rx_ring(bp, rx_queue_id);
-       bnxt_alloc_hwrm_rx_ring(bp, rx_queue_id);
+       rc = bnxt_alloc_hwrm_rx_ring(bp, rx_queue_id);
+       if (rc)
+               return rc;
+
        PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);
 
        if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
@@ -435,8 +436,16 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
                rc = bnxt_vnic_rss_configure(bp, vnic);
        }
 
-       if (rc == 0)
+       if (rc == 0) {
+               dev->data->rx_queue_state[rx_queue_id] =
+                               RTE_ETH_QUEUE_STATE_STARTED;
                rxq->rx_deferred_start = false;
+       }
+
+       PMD_DRV_LOG(INFO,
+                   "queue %d, rx_deferred_start %d, state %d!\n",
+                   rx_queue_id, rxq->rx_deferred_start,
+                   bp->eth_dev->data->rx_queue_state[rx_queue_id]);
 
        return rc;
 }
@@ -449,8 +458,11 @@ int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
        struct bnxt_rx_queue *rxq = NULL;
        int rc = 0;
 
-       /* Rx CQ 0 also works as Default CQ for async notifications */
-       if (!rx_queue_id) {
+       /* For the stingray platform and other platforms needing tighter
+        * control of resource utilization, Rx CQ 0 also works as
+        * Default CQ for async notifications
+        */
+       if (!BNXT_NUM_ASYNC_CPR(bp) && !rx_queue_id) {
                PMD_DRV_LOG(ERR, "Cannot stop Rx queue id %d\n", rx_queue_id);
                return -EINVAL;
        }
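
An application-side consequence of the bnxt_rx_queue_stop() change above (generic DPDK usage, not part of the patch): stopping RX queue 0 now succeeds when a dedicated async CPR exists, and still returns -EINVAL on platforms such as Stingray where queue 0's completion ring carries async events.

#include <rte_ethdev.h>

/* Generic usage sketch: attempt to stop RX queue 0 on a given port. */
static int stop_first_rx_queue(uint16_t port_id)
{
	return rte_eth_dev_rx_queue_stop(port_id, 0);
}
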
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 54a2cf5..185a0e3 100644
@@ -564,7 +564,7 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                                nb_rx_pkts++;
                        if (rc == -EBUSY)       /* partial completion */
                                break;
-               } else {
+               } else if (!BNXT_NUM_ASYNC_CPR(rxq->bp)) {
                        evt =
                        bnxt_event_hwrm_resp_handler(rxq->bp,
                                                     (struct cmpl_base *)rxcmp);
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_sse.c b/drivers/net/bnxt/bnxt_rxtx_vec_sse.c
index c358506..adc5020 100644
@@ -257,7 +257,7 @@ bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
                        mbuf->packet_type = bnxt_parse_pkt_type(rxcmp, rxcmp1);
 
                        rx_pkts[nb_rx_pkts++] = mbuf;
-               } else {
+               } else if (!BNXT_NUM_ASYNC_CPR(rxq->bp)) {
                        evt =
                        bnxt_event_hwrm_resp_handler(rxq->bp,
                                                     (struct cmpl_base *)rxcmp);
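
The two receive-path hunks above apply the same rule; a one-line sketch of it (illustrative only): non-RX completions seen on a receive completion ring are dispatched to the async/HWRM event handler only when the port has no dedicated async CPR.

#include <stdbool.h>

/* Illustrative only: whether the RX burst path should treat non-RX
 * completions as async/HWRM events (true only when no dedicated async
 * completion ring exists, e.g. on Stingray). */
static bool rx_cq_handles_async_events(int num_async_cpr)
{
	return num_async_cpr == 0;
}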