net/bnxt: fix Rx completion ring size calculation
Author:     Lance Richardson <lance.richardson@broadcom.com>
AuthorDate: Fri, 29 Jan 2021 18:07:09 +0000 (13:07 -0500)
Commit:     Ferruh Yigit <ferruh.yigit@intel.com>
CommitDate: Tue, 2 Feb 2021 23:48:11 +0000 (00:48 +0100)
The size of the receive completion ring should be recalculated
when MTU is increased to a size that requires scattered receive
or when LRO is enabled. Move logic for this calculation from
the ring configuration path to the device start path.
   - Made size calculation dependent only on scattered_rx
     status.
   - Moved calculation of scattered_rx up in the initialization
     sequence.
    - Made LRO offload status part of scattered_rx calculation
      (see the sketch following this list).
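
A minimal sketch of the scattered-Rx decision after this change is
shown below. It is a standalone paraphrase of the logic in the diff
that follows (the per-queue buffer check is truncated in the first
hunk); the struct and helper names here are illustrative, not the
driver's:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative view of the per-queue fields the decision uses. */
    struct rxq_view {
        uint32_t mbuf_data_room;   /* data room of the queue's mbuf pool */
        uint32_t headroom;         /* reserved mbuf headroom */
    };

    /*
     * Scattered Rx is needed when the SCATTER or TCP_LRO offload is
     * requested, or when any queue's mbuf buffer is too small to hold
     * the maximum Rx packet length in a single segment.
     */
    static bool
    needs_scattered_rx(bool scatter_offload, bool lro_offload,
                       uint32_t max_rx_pkt_len,
                       const struct rxq_view *queues, unsigned int nb_queues)
    {
        unsigned int i;

        if (scatter_offload || lro_offload)
            return true;

        for (i = 0; i < nb_queues; i++) {
            uint32_t buf_size = queues[i].mbuf_data_room -
                                queues[i].headroom;

            if (max_rx_pkt_len > buf_size)
                return true;
        }

        return false;
    }

    int main(void)
    {
        struct rxq_view q = { .mbuf_data_room = 2176, .headroom = 128 };

        /* LRO alone is now enough to force scattered Rx. */
        return needs_scattered_rx(false, true, 1518, &q, 1) ? 0 : 1;
    }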

When the completion ring size is too small, completion overflows
can occur, causing the ring to be disabled in hardware.
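
For reference, both halves of the calculation are sketched below as a
standalone program: worst-case storage allocation at ring-init time
versus the size programmed into hardware at start time.
AGG_RING_SIZE_FACTOR is assumed to be 2 and the helper names are
illustrative; this is a paraphrase of the diff, not the driver code.

    #include <stdbool.h>
    #include <inttypes.h>
    #include <stdio.h>

    #define AGG_RING_SIZE_FACTOR 2   /* assumed value, for illustration */

    /* Round up to the next power of two (stand-in for rte_align32pow2()). */
    static uint32_t align32pow2(uint32_t x)
    {
        uint32_t p = 1;

        while (p < x)
            p <<= 1;
        return p;
    }

    /* Ring-init path: size cp ring storage for the worst case. */
    static uint32_t cp_ring_alloc_size(uint32_t rx_ring_size)
    {
        return align32pow2(rx_ring_size * 2 * AGG_RING_SIZE_FACTOR);
    }

    /* Start path: compute the size actually programmed into hardware. */
    static uint32_t cp_ring_hw_size(uint32_t rx_ring_size, bool scattered_rx)
    {
        uint32_t size = rx_ring_size * 2;

        if (scattered_rx)
            size *= AGG_RING_SIZE_FACTOR;
        return size;   /* ring_mask is then size - 1 */
    }

    int main(void)
    {
        uint32_t rx_ring_size = 1024;

        printf("alloc %" PRIu32 ", hw plain %" PRIu32
               ", hw scattered %" PRIu32 "\n",
               cp_ring_alloc_size(rx_ring_size),
               cp_ring_hw_size(rx_ring_size, false),
               cp_ring_hw_size(rx_ring_size, true));
        /* Prints: alloc 4096, hw plain 2048, hw scattered 4096 */
        return 0;
    }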

Fixes: 04067844a3e9 ("net/bnxt: reduce CQ queue size without aggregation ring")
Cc: stable@dpdk.org
Signed-off-by: Lance Richardson <lance.richardson@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
drivers/net/bnxt/bnxt_ethdev.c
drivers/net/bnxt/bnxt_ring.c
drivers/net/bnxt/bnxt_rxr.c

diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 4b76cd5..22c880c 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -1143,6 +1143,9 @@ static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev)
        if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER)
                return 1;
 
+       if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO)
+               return 1;
+
        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                struct bnxt_rx_queue *rxq = eth_dev->data->rx_queues[i];
 
@@ -1418,11 +1421,12 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
 
        bnxt_enable_int(bp);
 
+       eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev);
+
        rc = bnxt_start_nic(bp);
        if (rc)
                goto error;
 
-       eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev);
        eth_dev->data->dev_started = 1;
 
        bnxt_link_update_op(eth_dev, 1);
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index 4e51324..ba23c1f 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -583,6 +583,17 @@ int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)
        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
        int rc;
 
+       /*
+        * Storage for the cp ring is allocated based on worst-case
+        * usage, the actual size to be used by hw is computed here.
+        */
+       cp_ring->ring_size = rxr->rx_ring_struct->ring_size * 2;
+
+       if (bp->eth_dev->data->scattered_rx)
+               cp_ring->ring_size *= AGG_RING_SIZE_FACTOR;
+
+       cp_ring->ring_mask = cp_ring->ring_size - 1;
+
        rc = bnxt_alloc_cmpl_ring(bp, queue_index, cpr);
        if (rc)
                goto err_out;
@@ -693,6 +704,17 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
                struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
                struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
 
+               /*
+                * Storage for the cp ring is allocated based on worst-case
+                * usage, the actual size to be used by hw is computed here.
+                */
+               cp_ring->ring_size = rxr->rx_ring_struct->ring_size * 2;
+
+               if (bp->eth_dev->data->scattered_rx)
+                       cp_ring->ring_size *= AGG_RING_SIZE_FACTOR;
+
+               cp_ring->ring_mask = cp_ring->ring_size - 1;
+
                if (bnxt_alloc_cmpl_ring(bp, i, cpr))
                        goto err_out;
 
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 8c2781c..4674f7c 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -1116,12 +1116,9 @@ void bnxt_free_rx_rings(struct bnxt *bp)
 
 int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
 {
-       struct rte_eth_dev *eth_dev = rxq->bp->eth_dev;
-       struct rte_eth_rxmode *rxmode;
        struct bnxt_cp_ring_info *cpr;
        struct bnxt_rx_ring_info *rxr;
        struct bnxt_ring *ring;
-       bool use_agg_ring;
 
        rxq->rx_buf_size = BNXT_MAX_PKT_LEN + sizeof(struct rte_mbuf);
 
@@ -1164,19 +1161,9 @@ int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
                return -ENOMEM;
        cpr->cp_ring_struct = ring;
 
-       rxmode = &eth_dev->data->dev_conf.rxmode;
-       use_agg_ring = (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) ||
-                      (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) ||
-                      (rxmode->max_rx_pkt_len >
-                        (uint32_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
-                                   RTE_PKTMBUF_HEADROOM));
-
        /* Allocate two completion slots per entry in desc ring. */
        ring->ring_size = rxr->rx_ring_struct->ring_size * 2;
-
-       /* Allocate additional slots if aggregation ring is in use. */
-       if (use_agg_ring)
-               ring->ring_size *= AGG_RING_SIZE_FACTOR;
+       ring->ring_size *= AGG_RING_SIZE_FACTOR;
 
        ring->ring_size = rte_align32pow2(ring->ring_size);
        ring->ring_mask = ring->ring_size - 1;