}
rte_atomic64_init(&rxq->rx_mbuf_alloc_fail);
- rxq->rx_deferred_start = rx_conf->rx_deferred_start;
- queue_state = rxq->rx_deferred_start ? RTE_ETH_QUEUE_STATE_STOPPED :
- RTE_ETH_QUEUE_STATE_STARTED;
+ /* rxq 0 must not be stopped when used as async CPR */
+ if (!BNXT_NUM_ASYNC_CPR(bp) && queue_idx == 0)
+ rxq->rx_deferred_start = false;
+ else
+ rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+
+ if (rxq->rx_deferred_start) {
+ queue_state = RTE_ETH_QUEUE_STATE_STOPPED;
+ rxq->rx_started = false;
+ } else {
+ queue_state = RTE_ETH_QUEUE_STATE_STARTED;
+ rxq->rx_started = true;
+ }
eth_dev->data->rx_queue_state[queue_idx] = queue_state;
rte_spinlock_init(&rxq->lock);
return -EINVAL;
}
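
For reference, this is how an application exercises deferred start through the ethdev API: the request is recorded at queue setup and the queue is then started and stopped explicitly. A minimal sketch using standard ethdev calls; port_id, mb_pool and the descriptor count are placeholder choices, and queue 1 is used on purpose, since the hunk above shows queue 0 overriding the deferred-start request when it doubles as the async completion ring:

#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_mempool.h>

/* Sketch only: error handling is minimal and the parameters are
 * placeholders, not values the driver requires.
 */
static int deferred_rxq_demo(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_rxconf rx_conf = { .rx_deferred_start = 1 };
	int rc;

	rc = rte_eth_rx_queue_setup(port_id, 1, 512, rte_socket_id(),
				    &rx_conf, mb_pool);
	if (rc != 0)
		return rc;

	rc = rte_eth_dev_start(port_id); /* queue 1 stays STOPPED here */
	if (rc != 0)
		return rc;

	rc = rte_eth_dev_rx_queue_start(port_id, 1); /* now receiving */
	if (rc != 0)
		return rc;

	return rte_eth_dev_rx_queue_stop(port_id, 1);
}
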
+ /* Set the queue state to started here.
+ * We check the status of the queue while posting buffers.
+ * If the queue is not started, we do not post buffers for Rx.
+ */
+ rxq->rx_started = true;
bnxt_free_hwrm_rx_ring(bp, rx_queue_id);
rc = bnxt_alloc_hwrm_rx_ring(bp, rx_queue_id);
if (rc)
	return rc;
vnic->fw_grp_ids[rx_queue_id] =
bp->grp_info[rx_queue_id].fw_grp_id;
+ PMD_DRV_LOG(DEBUG,
+ "vnic = %p fw_grp_id = %d\n",
+ vnic, bp->grp_info[rx_queue_id].fw_grp_id);
}
- PMD_DRV_LOG(DEBUG,
- "vnic = %p fw_grp_id = %d\n",
- vnic, bp->grp_info[rx_queue_id].fw_grp_id);
-
rc = bnxt_vnic_rss_configure(bp, vnic);
}
- if (rc == 0) {
+ if (rc == 0)
dev->data->rx_queue_state[rx_queue_id] =
RTE_ETH_QUEUE_STATE_STARTED;
- rxq->rx_deferred_start = false;
- }
+ else
+ rxq->rx_started = false;
PMD_DRV_LOG(INFO,
	    "queue %d, rx_deferred_start %d, state %d!\n",
	    rx_queue_id, rxq->rx_deferred_start,
	    dev->data->rx_queue_state[rx_queue_id]);
}
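
The heart of the change above is separating what the application configured (rx_deferred_start) from whether the queue is currently live (rx_started), so start/stop no longer rewrites the configuration bit. A hedged sketch of that split; the struct below is a hypothetical stand-in, not the driver's bnxt_rx_queue definition:

#include <stdbool.h>

/* Hypothetical stand-in for the two per-queue flags. */
struct rxq_flags {
	bool rx_deferred_start; /* config: captured once at queue setup */
	bool rx_started;        /* runtime: flipped by queue start/stop */
};

/* Ring refill consults only the runtime flag, which is what the
 * "we check the status of the queue while posting buffers" comment
 * above refers to: a stopped queue is never handed fresh buffers
 * while its HW ring is freed and reallocated.
 */
static bool may_post_rx_buffers(const struct rxq_flags *q)
{
	return q->rx_started;
}
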
dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
- rxq->rx_deferred_start = true;
+ rxq->rx_started = false;
PMD_DRV_LOG(DEBUG, "Rx queue stopped\n");
if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
if (unlikely(is_bnxt_in_error(rxq->bp)))
return 0;
- /* If Rx Q was stopped return. RxQ0 cannot be stopped. */
- if (unlikely(((rxq->rx_deferred_start ||
- !rte_spinlock_trylock(&rxq->lock)) &&
- rxq->queue_id)))
+ /* If Rx Q was stopped return */
+ if (unlikely(!rxq->rx_started ||
+ !rte_spinlock_trylock(&rxq->lock)))
return 0;
/* Handle RX burst request */
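
On the datapath side the same runtime flag gates the burst handler together with a non-blocking lock, so a queue that is stopped, or in the middle of being stopped, is skipped rather than raced. A generic sketch of that pattern, assuming a stand-in struct rather than the driver's real queue type:

#include <stdint.h>
#include <rte_spinlock.h>

/* Hypothetical stand-in for the per-queue state used by the gate. */
struct rx_gate {
	rte_spinlock_t lock; /* initialized with rte_spinlock_init() */
	uint8_t rx_started;  /* cleared by queue stop before teardown */
};

static uint16_t gated_rx_burst(struct rx_gate *q)
{
	uint16_t nb_rx = 0;

	/* Skip stopped queues; trylock avoids blocking the poll loop
	 * while a control-path stop holds the lock.
	 */
	if (!q->rx_started || !rte_spinlock_trylock(&q->lock))
		return 0;

	/* ... receive packets here, counting them in nb_rx ... */

	rte_spinlock_unlock(&q->lock);
	return nb_rx;
}
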