net/bnxt: create aggregation rings when needed
author Ajit Khaparde <ajit.khaparde@broadcom.com>
Fri, 14 May 2021 01:46:21 +0000 (18:46 -0700)
committer Ajit Khaparde <ajit.khaparde@broadcom.com>
Tue, 12 Oct 2021 20:35:13 +0000 (22:35 +0200)
Aggregation rings are needed when the PMD has to support jumbo frames or
LRO. Currently we create the aggregation rings whether or not jumbo frames
or LRO are enabled, which causes unnecessary mbuf allocation and requires
a larger mbuf pool that then goes unused.

This patch modifies the code to create aggregation rings only when
needed.

Signed-off-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Reviewed-by: Lance Richardson <lance.richardson@broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
drivers/net/bnxt/bnxt_hwrm.c
drivers/net/bnxt/bnxt_ring.c
drivers/net/bnxt/bnxt_rxq.c
drivers/net/bnxt/bnxt_rxq.h
drivers/net/bnxt/bnxt_rxr.c
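For context, the condition that makes aggregation rings necessary is driven
by the application's Rx configuration. An illustrative fragment (not part of
the patch; port id and queue counts are made up) showing a configuration
that leads the ethdev layer to set dev->data->scattered_rx, which the new
bnxt_need_agg_ring() helper below keys off:

#include <rte_ethdev.h>

/* Either of these Rx offloads results in scattered_rx being set, which
 * is exactly what bnxt_need_agg_ring() checks after this change. Call
 * after rte_eal_init(); error handling is omitted for brevity.
 */
static int configure_port_needing_agg_rings(uint16_t port_id)
{
        struct rte_eth_conf conf = { 0 };

        conf.rxmode.offloads = DEV_RX_OFFLOAD_TCP_LRO |
                               DEV_RX_OFFLOAD_SCATTER;

        /* 1 Rx queue, 1 Tx queue for the sake of the example. */
        return rte_eth_dev_configure(port_id, 1, 1, &conf);
}

With neither offload set and a max packet length that fits in a single mbuf,
scattered_rx stays clear and, after this patch, no aggregation ring
resources are allocated.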

diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 503add4..181e607 100644
@@ -2741,6 +2741,14 @@ void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
        if (BNXT_HAS_RING_GRPS(bp))
                bp->grp_info[queue_index].rx_fw_ring_id = INVALID_HW_RING_ID;
 
+       /* Check agg ring struct explicitly.
+        * bnxt_need_agg_ring() returns the current state of offload flags,
+        * but we may have to deal with agg ring struct before the offload
+        * flags are updated.
+        */
+       if (!bnxt_need_agg_ring(bp->eth_dev) || rxr->ag_ring_struct == NULL)
+               goto no_agg;
+
        ring = rxr->ag_ring_struct;
        bnxt_hwrm_ring_free(bp, ring,
                            BNXT_CHIP_P5(bp) ?
@@ -2750,6 +2758,7 @@ void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
        if (BNXT_HAS_RING_GRPS(bp))
                bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID;
 
+no_agg:
        bnxt_hwrm_stat_ctx_free(bp, cpr);
 
        bnxt_free_cp_ring(bp, cpr);
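The comment in the hunk above is worth restating: bnxt_need_agg_ring()
reflects the current offload flags, while the ring struct may predate a flag
change, so the free path guards on both. A minimal standalone sketch of that
pattern (every name here is a hypothetical stand-in, not a driver symbol):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct agg_ring { void *bd; };
struct rx_ring_info { struct agg_ring *ag_ring_struct; };

static bool need_agg_ring; /* plays the role of bnxt_need_agg_ring() */

static void free_rx_ring(struct rx_ring_info *rxr)
{
        /* Guard on the predicate AND the pointer: the predicate tracks
         * the current offload flags, but the ring struct may predate a
         * flag change, so neither check alone is sufficient.
         */
        if (!need_agg_ring || rxr->ag_ring_struct == NULL)
                goto no_agg;

        free(rxr->ag_ring_struct);
        rxr->ag_ring_struct = NULL;

no_agg:
        /* Teardown that must always run, aggregation ring or not. */
        printf("completion ring resources freed\n");
}

int main(void)
{
        struct rx_ring_info rxr = { .ag_ring_struct = NULL };

        need_agg_ring = true;
        free_rx_ring(&rxr); /* NULL pointer: agg free skipped safely */
        return 0;
}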
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index aaad08e..08cefa1 100644
@@ -104,13 +104,19 @@ int bnxt_alloc_rings(struct bnxt *bp, unsigned int socket_id, uint16_t qidx,
        struct bnxt_ring *cp_ring = cp_ring_info->cp_ring_struct;
        struct bnxt_rx_ring_info *rx_ring_info = rxq ? rxq->rx_ring : NULL;
        struct bnxt_tx_ring_info *tx_ring_info = txq ? txq->tx_ring : NULL;
-       struct bnxt_ring *tx_ring;
-       struct bnxt_ring *rx_ring;
-       struct rte_pci_device *pdev = bp->pdev;
        uint64_t rx_offloads = bp->eth_dev->data->dev_conf.rxmode.offloads;
+       int ag_ring_start, ag_bitmap_start, tpa_info_start;
+       int ag_vmem_start, cp_ring_start, nq_ring_start;
+       int total_alloc_len, rx_ring_start, rx_ring_len;
+       struct rte_pci_device *pdev = bp->pdev;
+       struct bnxt_ring *tx_ring, *rx_ring;
        const struct rte_memzone *mz = NULL;
        char mz_name[RTE_MEMZONE_NAMESIZE];
        rte_iova_t mz_phys_addr;
+       int ag_bitmap_len =  0;
+       int tpa_info_len = 0;
+       int ag_vmem_len = 0;
+       int ag_ring_len = 0;
 
        int stats_len = (tx_ring_info || rx_ring_info) ?
            RTE_CACHE_LINE_ROUNDUP(sizeof(struct hwrm_stat_ctx_query_output) -
@@ -138,14 +144,12 @@ int bnxt_alloc_rings(struct bnxt *bp, unsigned int socket_id, uint16_t qidx,
                RTE_CACHE_LINE_ROUNDUP(rx_ring_info->
                                                rx_ring_struct->vmem_size) : 0;
        rx_vmem_len = RTE_ALIGN(rx_vmem_len, 128);
-       int ag_vmem_start = 0;
-       int ag_vmem_len = 0;
-       int cp_ring_start =  0;
-       int nq_ring_start = 0;
 
        ag_vmem_start = rx_vmem_start + rx_vmem_len;
-       ag_vmem_len = rx_ring_info ? RTE_CACHE_LINE_ROUNDUP(
-                               rx_ring_info->ag_ring_struct->vmem_size) : 0;
+       if (bnxt_need_agg_ring(bp->eth_dev))
+               ag_vmem_len = rx_ring_info && rx_ring_info->ag_ring_struct ?
+                       RTE_CACHE_LINE_ROUNDUP(rx_ring_info->ag_ring_struct->vmem_size) : 0;
+
        cp_ring_start = ag_vmem_start + ag_vmem_len;
        cp_ring_start = RTE_ALIGN(cp_ring_start, 4096);
 
@@ -164,36 +168,36 @@ int bnxt_alloc_rings(struct bnxt *bp, unsigned int socket_id, uint16_t qidx,
                                   sizeof(struct tx_bd_long)) : 0;
        tx_ring_len = RTE_ALIGN(tx_ring_len, 4096);
 
-       int rx_ring_start = tx_ring_start + tx_ring_len;
+       rx_ring_start = tx_ring_start + tx_ring_len;
        rx_ring_start = RTE_ALIGN(rx_ring_start, 4096);
-       int rx_ring_len =  rx_ring_info ?
+       rx_ring_len =  rx_ring_info ?
                RTE_CACHE_LINE_ROUNDUP(rx_ring_info->rx_ring_struct->ring_size *
                sizeof(struct rx_prod_pkt_bd)) : 0;
        rx_ring_len = RTE_ALIGN(rx_ring_len, 4096);
 
-       int ag_ring_start = rx_ring_start + rx_ring_len;
+       ag_ring_start = rx_ring_start + rx_ring_len;
        ag_ring_start = RTE_ALIGN(ag_ring_start, 4096);
-       int ag_ring_len = rx_ring_len * AGG_RING_SIZE_FACTOR;
-       ag_ring_len = RTE_ALIGN(ag_ring_len, 4096);
 
-       int ag_bitmap_start = ag_ring_start + ag_ring_len;
-       int ag_bitmap_len =  rx_ring_info ?
+       if (bnxt_need_agg_ring(bp->eth_dev)) {
+               ag_ring_len = rx_ring_len * AGG_RING_SIZE_FACTOR;
+               ag_ring_len = RTE_ALIGN(ag_ring_len, 4096);
+
+               ag_bitmap_len =  rx_ring_info ?
                RTE_CACHE_LINE_ROUNDUP(rte_bitmap_get_memory_footprint(
                        rx_ring_info->rx_ring_struct->ring_size *
                        AGG_RING_SIZE_FACTOR)) : 0;
 
-       int tpa_info_start = ag_bitmap_start + ag_bitmap_len;
-       int tpa_info_len = 0;
-
-       if (rx_ring_info && (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
-               int tpa_max = BNXT_TPA_MAX_AGGS(bp);
+               if (rx_ring_info && (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+                       int tpa_max = BNXT_TPA_MAX_AGGS(bp);
 
-               tpa_info_len = tpa_max * sizeof(struct bnxt_tpa_info);
-               tpa_info_len = RTE_CACHE_LINE_ROUNDUP(tpa_info_len);
+                       tpa_info_len = tpa_max * sizeof(struct bnxt_tpa_info);
+                       tpa_info_len = RTE_CACHE_LINE_ROUNDUP(tpa_info_len);
+               }
        }
 
-       int total_alloc_len = tpa_info_start;
-       total_alloc_len += tpa_info_len;
+       ag_bitmap_start = ag_ring_start + ag_ring_len;
+       tpa_info_start = ag_bitmap_start + ag_bitmap_len;
+       total_alloc_len = tpa_info_start + tpa_info_len;
 
        snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
                 "bnxt_" PCI_PRI_FMT "-%04x_%s", pdev->addr.domain,
@@ -254,34 +258,36 @@ int bnxt_alloc_rings(struct bnxt *bp, unsigned int socket_id, uint16_t qidx,
                            (struct rte_mbuf **)rx_ring->vmem;
                }
 
-               rx_ring = rx_ring_info->ag_ring_struct;
-
-               rx_ring->bd = ((char *)mz->addr + ag_ring_start);
-               rx_ring_info->ag_desc_ring =
-                   (struct rx_prod_pkt_bd *)rx_ring->bd;
-               rx_ring->bd_dma = mz->iova + ag_ring_start;
-               rx_ring_info->ag_desc_mapping = rx_ring->bd_dma;
-               rx_ring->mem_zone = (const void *)mz;
-
-               if (!rx_ring->bd)
-                       return -ENOMEM;
-               if (rx_ring->vmem_size) {
-                       rx_ring->vmem =
-                           (void **)((char *)mz->addr + ag_vmem_start);
-                       rx_ring_info->ag_buf_ring =
-                           (struct rte_mbuf **)rx_ring->vmem;
+               if (bnxt_need_agg_ring(bp->eth_dev)) {
+                       rx_ring = rx_ring_info->ag_ring_struct;
+
+                       rx_ring->bd = ((char *)mz->addr + ag_ring_start);
+                       rx_ring_info->ag_desc_ring =
+                           (struct rx_prod_pkt_bd *)rx_ring->bd;
+                       rx_ring->bd_dma = mz->iova + ag_ring_start;
+                       rx_ring_info->ag_desc_mapping = rx_ring->bd_dma;
+                       rx_ring->mem_zone = (const void *)mz;
+
+                       if (!rx_ring->bd)
+                               return -ENOMEM;
+                       if (rx_ring->vmem_size) {
+                               rx_ring->vmem =
+                                   (void **)((char *)mz->addr + ag_vmem_start);
+                               rx_ring_info->ag_buf_ring =
+                                   (struct rte_mbuf **)rx_ring->vmem;
+                       }
+
+                       rx_ring_info->ag_bitmap =
+                           rte_bitmap_init(rx_ring_info->rx_ring_struct->ring_size *
+                                           AGG_RING_SIZE_FACTOR, (uint8_t *)mz->addr +
+                                           ag_bitmap_start, ag_bitmap_len);
+
+                       /* TPA info */
+                       if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
+                               rx_ring_info->tpa_info =
+                                       ((struct bnxt_tpa_info *)
+                                        ((char *)mz->addr + tpa_info_start));
                }
-
-               rx_ring_info->ag_bitmap =
-                   rte_bitmap_init(rx_ring_info->rx_ring_struct->ring_size *
-                                   AGG_RING_SIZE_FACTOR, (uint8_t *)mz->addr +
-                                   ag_bitmap_start, ag_bitmap_len);
-
-               /* TPA info */
-               if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
-                       rx_ring_info->tpa_info =
-                               ((struct bnxt_tpa_info *)((char *)mz->addr +
-                                                         tpa_info_start));
        }
 
        cp_ring->bd = ((char *)mz->addr + cp_ring_start);
@@ -550,6 +556,9 @@ static int bnxt_alloc_rx_agg_ring(struct bnxt *bp, int queue_index)
        uint8_t ring_type;
        int rc = 0;
 
+       if (!bnxt_need_agg_ring(bp->eth_dev))
+               return 0;
+
        ring->fw_rx_ring_id = rxr->rx_ring_struct->fw_ring_id;
 
        if (BNXT_CHIP_P5(bp)) {
@@ -590,7 +599,7 @@ int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)
         */
        cp_ring->ring_size = rxr->rx_ring_struct->ring_size * 2;
 
-       if (bp->eth_dev->data->scattered_rx)
+       if (bnxt_need_agg_ring(bp->eth_dev))
                cp_ring->ring_size *= AGG_RING_SIZE_FACTOR;
 
        cp_ring->ring_mask = cp_ring->ring_size - 1;
@@ -645,7 +654,8 @@ int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)
                        goto err_out;
                }
                bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
-               bnxt_db_write(&rxr->ag_db, rxr->ag_raw_prod);
+               if (bnxt_need_agg_ring(bp->eth_dev))
+                       bnxt_db_write(&rxr->ag_db, rxr->ag_raw_prod);
        }
        rxq->index = queue_index;
 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
@@ -683,8 +693,11 @@ static void bnxt_init_all_rings(struct bnxt *bp)
                ring = rxr->rx_ring_struct;
                ring->fw_ring_id = INVALID_HW_RING_ID;
                /* Rx-AGG */
-               ring = rxr->ag_ring_struct;
-               ring->fw_ring_id = INVALID_HW_RING_ID;
+               if (bnxt_need_agg_ring(bp->eth_dev)) {
+                       ring = rxr->ag_ring_struct;
+                       if (ring != NULL)
+                               ring->fw_ring_id = INVALID_HW_RING_ID;
+               }
        }
        for (i = 0; i < bp->tx_cp_nr_rings; i++) {
                txq = bp->tx_queues[i];
@@ -712,6 +725,29 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
        bnxt_init_all_rings(bp);
 
        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+               unsigned int soc_id = bp->eth_dev->device->numa_node;
+               struct bnxt_rx_queue *rxq  = bp->rx_queues[i];
+               struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+               struct bnxt_ring *ring;
+
+               if (bnxt_need_agg_ring(bp->eth_dev)) {
+                       ring = rxr->ag_ring_struct;
+                       if (ring == NULL) {
+                               bnxt_free_rxq_mem(rxq);
+
+                               rc = bnxt_init_rx_ring_struct(rxq, soc_id);
+                               if (rc)
+                                       goto err_out;
+
+                               rc = bnxt_alloc_rings(bp, soc_id,
+                                                     i, NULL, rxq,
+                                                     rxq->cp_ring, NULL,
+                                                     "rxr");
+                               if (rc)
+                                       goto err_out;
+                       }
+               }
+
                rc = bnxt_alloc_hwrm_rx_ring(bp, i);
                if (rc)
                        goto err_out;
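The new block in bnxt_alloc_hwrm_rings() above handles the case where
aggregation rings became necessary after queue setup (for example, LRO
enabled between configure and start): it frees the queue's memory and
rebuilds the ring structs so the memzone is re-carved with the aggregation
regions included. A condensed, hypothetical model of that flow:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct rx_queue {
        void *ag_ring_struct; /* NULL when provisioned without agg */
};

static bool need_agg_ring = true; /* e.g. LRO enabled after setup */

static int rebuild_queue(struct rx_queue *q)
{
        /* Mirrors bnxt_free_rxq_mem() + bnxt_init_rx_ring_struct() +
         * bnxt_alloc_rings(): drop the old memzone, re-derive the ring
         * sizes with the agg regions, carve a bigger memzone.
         */
        q->ag_ring_struct = malloc(64);
        return q->ag_ring_struct != NULL ? 0 : -1;
}

int main(void)
{
        struct rx_queue q = { .ag_ring_struct = NULL };

        if (need_agg_ring && q.ag_ring_struct == NULL)
                if (rebuild_queue(&q) != 0)
                        return 1;

        printf("agg ring %s\n", q.ag_ring_struct ? "present" : "absent");
        free(q.ag_ring_struct);
        return 0;
}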
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index 2eb7a3c..38ec4aa 100644
  * RX Queues
  */
 
+/* Determine whether the current configuration needs aggregation ring in HW. */
+int bnxt_need_agg_ring(struct rte_eth_dev *eth_dev)
+{
+       /* scattered_rx will be true if OFFLOAD_SCATTER is enabled,
+        * if LRO is enabled, or if the max packet len is greater than the
+        * mbuf data size. So AGG ring will be needed whenever scattered_rx
+        * is set.
+        */
+       return eth_dev->data->scattered_rx ? 1 : 0;
+}
+
 void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq)
 {
        if (rxq && rxq->cp_ring && rxq->cp_ring->hw_stats)
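Per the comment in bnxt_need_agg_ring() above, scattered_rx is already an OR
of the conditions that require an aggregation ring. Spelled out as a sketch
(the bit values and helper name are illustrative, not DPDK definitions):

#include <stdbool.h>
#include <stdint.h>

#define OFFLOAD_SCATTER (1u << 0)
#define OFFLOAD_TCP_LRO (1u << 1)

static bool would_set_scattered_rx(uint32_t rx_offloads,
                                   uint32_t max_pkt_len,
                                   uint32_t mbuf_data_size)
{
        return (rx_offloads & OFFLOAD_SCATTER) ||
               (rx_offloads & OFFLOAD_TCP_LRO) ||
               max_pkt_len > mbuf_data_size; /* jumbo frame case */
}

int main(void)
{
        /* 9000-byte frames vs. 2048-byte mbuf data: scattered Rx,
         * hence an aggregation ring, is needed.
         */
        return would_set_scattered_rx(0, 9000, 2048) ? 0 : 1;
}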
@@ -203,6 +214,9 @@ void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
                }
        }
        /* Free up mbufs in Agg ring */
+       if (!bnxt_need_agg_ring(rxq->bp->eth_dev))
+               return;
+
        sw_ring = rxq->rx_ring->ag_buf_ring;
        if (sw_ring) {
                for (i = 0;
@@ -240,40 +254,48 @@ void bnxt_free_rx_mbufs(struct bnxt *bp)
        }
 }
 
+void bnxt_free_rxq_mem(struct bnxt_rx_queue *rxq)
+{
+       bnxt_rx_queue_release_mbufs(rxq);
+
+       /* Free RX, AGG ring hardware descriptors */
+       if (rxq->rx_ring) {
+               bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
+               rte_free(rxq->rx_ring->rx_ring_struct);
+               rxq->rx_ring->rx_ring_struct = NULL;
+               /* Free RX Agg ring hardware descriptors */
+               bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
+               rte_free(rxq->rx_ring->ag_ring_struct);
+               rxq->rx_ring->ag_ring_struct = NULL;
+
+               rte_free(rxq->rx_ring);
+               rxq->rx_ring = NULL;
+       }
+       /* Free RX completion ring hardware descriptors */
+       if (rxq->cp_ring) {
+               bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
+               rte_free(rxq->cp_ring->cp_ring_struct);
+               rxq->cp_ring->cp_ring_struct = NULL;
+               rte_free(rxq->cp_ring);
+               rxq->cp_ring = NULL;
+       }
+
+       bnxt_free_rxq_stats(rxq);
+       rte_memzone_free(rxq->mz);
+       rxq->mz = NULL;
+}
+
 void bnxt_rx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
        struct bnxt_rx_queue *rxq = dev->data->rx_queues[queue_idx];
 
-       if (rxq) {
+       if (rxq != NULL) {
                if (is_bnxt_in_error(rxq->bp))
                        return;
 
                bnxt_free_hwrm_rx_ring(rxq->bp, rxq->queue_id);
-               bnxt_rx_queue_release_mbufs(rxq);
-
-               /* Free RX ring hardware descriptors */
-               if (rxq->rx_ring) {
-                       bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
-                       rte_free(rxq->rx_ring->rx_ring_struct);
-                       /* Free RX Agg ring hardware descriptors */
-                       bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
-                       rte_free(rxq->rx_ring->ag_ring_struct);
-
-                       rte_free(rxq->rx_ring);
-               }
-               /* Free RX completion ring hardware descriptors */
-               if (rxq->cp_ring) {
-                       bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
-                       rte_free(rxq->cp_ring->cp_ring_struct);
-                       rte_free(rxq->cp_ring);
-               }
-
-               bnxt_free_rxq_stats(rxq);
-               rte_memzone_free(rxq->mz);
-               rxq->mz = NULL;
-
+               bnxt_free_rxq_mem(rxq);
                rte_free(rxq);
-               dev->data->rx_queues[queue_idx] = NULL;
        }
 }
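Note that the refactored bnxt_free_rxq_mem() resets every freed pointer to
NULL (the open-coded version did not), which is what allows
bnxt_init_rx_ring_struct() to be safely re-run on the same queue and
re-allocate only what is missing. The idiom in isolation:

#include <stdlib.h>

struct ring { void *bd; };

/* Free-and-NULL idiom: resetting the pointer makes teardown
 * idempotent and lets a later allocate-if-missing pass do the
 * right thing.
 */
static void free_ring(struct ring **r)
{
        free(*r);
        *r = NULL; /* safe to free or re-allocate again */
}

int main(void)
{
        struct ring *rx = malloc(sizeof(*rx));

        free_ring(&rx);
        free_ring(&rx); /* second call is a harmless no-op */
        return 0;
}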
 
diff --git a/drivers/net/bnxt/bnxt_rxq.h b/drivers/net/bnxt/bnxt_rxq.h
index 9bb9352..0331c23 100644
@@ -63,4 +63,6 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev,
 int bnxt_rx_queue_stop(struct rte_eth_dev *dev,
                       uint16_t rx_queue_id);
 void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq);
+int bnxt_need_agg_ring(struct rte_eth_dev *eth_dev);
+void bnxt_free_rxq_mem(struct bnxt_rx_queue *rxq);
 #endif
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 4c1ee42..aeacc60 100644
@@ -1223,57 +1223,75 @@ int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
 
        rxq->rx_buf_size = BNXT_MAX_PKT_LEN + sizeof(struct rte_mbuf);
 
-       rxr = rte_zmalloc_socket("bnxt_rx_ring",
-                                sizeof(struct bnxt_rx_ring_info),
-                                RTE_CACHE_LINE_SIZE, socket_id);
-       if (rxr == NULL)
-               return -ENOMEM;
-       rxq->rx_ring = rxr;
-
-       ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
-                                  sizeof(struct bnxt_ring),
-                                  RTE_CACHE_LINE_SIZE, socket_id);
-       if (ring == NULL)
-               return -ENOMEM;
-       rxr->rx_ring_struct = ring;
-       ring->ring_size = rte_align32pow2(rxq->nb_rx_desc);
-       ring->ring_mask = ring->ring_size - 1;
-       ring->bd = (void *)rxr->rx_desc_ring;
-       ring->bd_dma = rxr->rx_desc_mapping;
-
-       /* Allocate extra rx ring entries for vector rx. */
-       ring->vmem_size = sizeof(struct rte_mbuf *) *
-                         (ring->ring_size + BNXT_RX_EXTRA_MBUF_ENTRIES);
+       if (rxq->rx_ring != NULL) {
+               rxr = rxq->rx_ring;
+       } else {
 
-       ring->vmem = (void **)&rxr->rx_buf_ring;
-       ring->fw_ring_id = INVALID_HW_RING_ID;
+               rxr = rte_zmalloc_socket("bnxt_rx_ring",
+                                        sizeof(struct bnxt_rx_ring_info),
+                                        RTE_CACHE_LINE_SIZE, socket_id);
+               if (rxr == NULL)
+                       return -ENOMEM;
+               rxq->rx_ring = rxr;
+       }
 
-       cpr = rte_zmalloc_socket("bnxt_rx_ring",
-                                sizeof(struct bnxt_cp_ring_info),
-                                RTE_CACHE_LINE_SIZE, socket_id);
-       if (cpr == NULL)
-               return -ENOMEM;
-       rxq->cp_ring = cpr;
+       if (rxr->rx_ring_struct == NULL) {
+               ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
+                                          sizeof(struct bnxt_ring),
+                                          RTE_CACHE_LINE_SIZE, socket_id);
+               if (ring == NULL)
+                       return -ENOMEM;
+               rxr->rx_ring_struct = ring;
+               ring->ring_size = rte_align32pow2(rxq->nb_rx_desc);
+               ring->ring_mask = ring->ring_size - 1;
+               ring->bd = (void *)rxr->rx_desc_ring;
+               ring->bd_dma = rxr->rx_desc_mapping;
+
+               /* Allocate extra rx ring entries for vector rx. */
+               ring->vmem_size = sizeof(struct rte_mbuf *) *
+                                 (ring->ring_size + BNXT_RX_EXTRA_MBUF_ENTRIES);
+
+               ring->vmem = (void **)&rxr->rx_buf_ring;
+               ring->fw_ring_id = INVALID_HW_RING_ID;
+       }
 
-       ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
-                                  sizeof(struct bnxt_ring),
-                                  RTE_CACHE_LINE_SIZE, socket_id);
-       if (ring == NULL)
-               return -ENOMEM;
-       cpr->cp_ring_struct = ring;
+       if (rxq->cp_ring != NULL) {
+               cpr = rxq->cp_ring;
+       } else {
+               cpr = rte_zmalloc_socket("bnxt_rx_ring",
+                                        sizeof(struct bnxt_cp_ring_info),
+                                        RTE_CACHE_LINE_SIZE, socket_id);
+               if (cpr == NULL)
+                       return -ENOMEM;
+               rxq->cp_ring = cpr;
+       }
 
-       /* Allocate two completion slots per entry in desc ring. */
-       ring->ring_size = rxr->rx_ring_struct->ring_size * 2;
-       ring->ring_size *= AGG_RING_SIZE_FACTOR;
+       if (cpr->cp_ring_struct == NULL) {
+               ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
+                                          sizeof(struct bnxt_ring),
+                                          RTE_CACHE_LINE_SIZE, socket_id);
+               if (ring == NULL)
+                       return -ENOMEM;
+               cpr->cp_ring_struct = ring;
+
+               /* Allocate two completion slots per entry in desc ring. */
+               ring->ring_size = rxr->rx_ring_struct->ring_size * 2;
+               if (bnxt_need_agg_ring(rxq->bp->eth_dev))
+                       ring->ring_size *= AGG_RING_SIZE_FACTOR;
+
+               ring->ring_size = rte_align32pow2(ring->ring_size);
+               ring->ring_mask = ring->ring_size - 1;
+               ring->bd = (void *)cpr->cp_desc_ring;
+               ring->bd_dma = cpr->cp_desc_mapping;
+               ring->vmem_size = 0;
+               ring->vmem = NULL;
+               ring->fw_ring_id = INVALID_HW_RING_ID;
+       }
 
-       ring->ring_size = rte_align32pow2(ring->ring_size);
-       ring->ring_mask = ring->ring_size - 1;
-       ring->bd = (void *)cpr->cp_desc_ring;
-       ring->bd_dma = cpr->cp_desc_mapping;
-       ring->vmem_size = 0;
-       ring->vmem = NULL;
-       ring->fw_ring_id = INVALID_HW_RING_ID;
+       if (!bnxt_need_agg_ring(rxq->bp->eth_dev))
+               return 0;
 
+       rxr = rxq->rx_ring;
        /* Allocate Aggregator rings */
        ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
                                   sizeof(struct bnxt_ring),
@@ -1351,6 +1369,9 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
                rxr->rx_buf_ring[i] = &rxq->fake_mbuf;
        }
 
+       if (!bnxt_need_agg_ring(rxq->bp->eth_dev))
+               return 0;
+
        ring = rxr->ag_ring_struct;
        type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG;
        bnxt_init_rxbds(ring, type, size);