The size of the receive completion ring should be recalculated when
the MTU is increased to a size that requires scattered receive, or
when LRO is enabled. Move the logic for this calculation from the
ring configuration path to the device start path.
- Made size calculation dependent only on scattered_rx status.
- Moved calculation of scattered_rx up in the initialization sequence.
- Made LRO offload status part of scattered_rx calculation.
When the completion ring size is too small, completion overflows
can occur, causing the ring to be disabled in hardware.
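For reference, the sizing rule now applied at device start amounts to
the sketch below. The helper name and the agg_factor parameter are
illustrative only; the driver uses its AGG_RING_SIZE_FACTOR constant
and performs this computation inline when configuring the rings.

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Completion (cp) ring size actually programmed into hardware:
     * two completion entries per receive descriptor, scaled up when
     * scattered receive (large MTU or LRO) is in use, since the
     * aggregation ring generates additional completions.
     */
    static uint32_t cq_size_for_rx_ring(uint32_t rx_ring_size,
                                        bool scattered_rx,
                                        uint32_t agg_factor)
    {
            uint32_t cq_size = rx_ring_size * 2;

            if (scattered_rx)
                    cq_size *= agg_factor;

            return cq_size;
    }

The worst-case allocation in bnxt_init_rx_ring_struct() still reserves
the larger size unconditionally; only the size reported to hardware
varies with scattered_rx.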
Fixes: 04067844a3e9 ("net/bnxt: reduce CQ queue size without aggregation ring")
Cc: stable@dpdk.org
Signed-off-by: Lance Richardson <lance.richardson@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER)
return 1;
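+ /*
+ * LRO uses the aggregation ring, so treat it the same as
+ * scattered receive when sizing the completion ring.
+ */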
+ if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO)
+ return 1;
+
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
struct bnxt_rx_queue *rxq = eth_dev->data->rx_queues[i];
bnxt_enable_int(bp);
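+ /*
+ * Compute scattered_rx before bnxt_start_nic() so that the Rx
+ * completion ring is sized for scattered receive / LRO when the
+ * rings are configured in hardware.
+ */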
+ eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev);
+
rc = bnxt_start_nic(bp);
if (rc)
goto error;
- eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev);
eth_dev->data->dev_started = 1;
bnxt_link_update_op(eth_dev, 1);
struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
int rc;
+ /*
+ * Storage for the cp ring is allocated based on worst-case
+ * usage; the actual size to be used by hw is computed here.
+ */
+ cp_ring->ring_size = rxr->rx_ring_struct->ring_size * 2;
+
+ if (bp->eth_dev->data->scattered_rx)
+ cp_ring->ring_size *= AGG_RING_SIZE_FACTOR;
+
+ cp_ring->ring_mask = cp_ring->ring_size - 1;
+
rc = bnxt_alloc_cmpl_ring(bp, queue_index, cpr);
if (rc)
goto err_out;
struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ /*
+ * Storage for the cp ring is allocated based on worst-case
+ * usage; the actual size to be used by hw is computed here.
+ */
+ cp_ring->ring_size = rxr->rx_ring_struct->ring_size * 2;
+
+ if (bp->eth_dev->data->scattered_rx)
+ cp_ring->ring_size *= AGG_RING_SIZE_FACTOR;
+
+ cp_ring->ring_mask = cp_ring->ring_size - 1;
+
if (bnxt_alloc_cmpl_ring(bp, i, cpr))
goto err_out;
int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
{
- struct rte_eth_dev *eth_dev = rxq->bp->eth_dev;
- struct rte_eth_rxmode *rxmode;
struct bnxt_cp_ring_info *cpr;
struct bnxt_rx_ring_info *rxr;
struct bnxt_ring *ring;
- bool use_agg_ring;
rxq->rx_buf_size = BNXT_MAX_PKT_LEN + sizeof(struct rte_mbuf);
return -ENOMEM;
cpr->cp_ring_struct = ring;
- rxmode = ð_dev->data->dev_conf.rxmode;
- use_agg_ring = (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) ||
- (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) ||
- (rxmode->max_rx_pkt_len >
- (uint32_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
- RTE_PKTMBUF_HEADROOM));
-
/* Allocate two completion slots per entry in desc ring. */
ring->ring_size = rxr->rx_ring_struct->ring_size * 2;
-
- /* Allocate additional slots if aggregation ring is in use. */
- if (use_agg_ring)
- ring->ring_size *= AGG_RING_SIZE_FACTOR;
+ ring->ring_size *= AGG_RING_SIZE_FACTOR;
ring->ring_size = rte_align32pow2(ring->ring_size);
ring->ring_mask = ring->ring_size - 1;