+ rte_atomic64_init(&rxq->rx_mbuf_alloc_fail);
+
+ /* rxq 0 must not be stopped when used as async CPR */
+ if (!BNXT_NUM_ASYNC_CPR(bp) && queue_idx == 0)
+ rxq->rx_deferred_start = false;
+ else
+ rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+
+ if (rxq->rx_deferred_start) {
+ queue_state = RTE_ETH_QUEUE_STATE_STOPPED;
+ rxq->rx_started = false;
+ } else {
+ queue_state = RTE_ETH_QUEUE_STATE_STARTED;
+ rxq->rx_started = true;
+ }
+ eth_dev->data->rx_queue_state[queue_idx] = queue_state;
+
+	/* Reconfigure the MTU if it differs from what was configured before */
+ if (!queue_idx)
+ bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
+
+ return 0;
+err:
+ bnxt_rx_queue_release_op(rxq);
+ return rc;
+}
+
+int
+bnxt_rx_queue_intr_enable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
+{
+ struct bnxt *bp = eth_dev->data->dev_private;
+ struct bnxt_rx_queue *rxq;
+ struct bnxt_cp_ring_info *cpr;
+ int rc = 0;
+
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
+
+ if (eth_dev->data->rx_queues) {
+ rxq = eth_dev->data->rx_queues[queue_id];
+ if (!rxq)
+ return -EINVAL;
+
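+		/* Re-arm the completion ring doorbell to re-enable the interrupt. */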
+ cpr = rxq->cp_ring;
+ B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
+ }
+ return rc;
+}
+
+int
+bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
+{
+ struct bnxt *bp = eth_dev->data->dev_private;
+ struct bnxt_rx_queue *rxq;
+ struct bnxt_cp_ring_info *cpr;
+ int rc = 0;
+
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
+
+ if (eth_dev->data->rx_queues) {
+ rxq = eth_dev->data->rx_queues[queue_id];
+ if (!rxq)
+ return -EINVAL;
+
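+		/* Disarm the completion ring doorbell to mask the interrupt. */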
+ cpr = rxq->cp_ring;
+ B_CP_DB_DISARM(cpr);
+ }
+ return rc;
+}
+
+int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct bnxt *bp = dev->data->dev_private;
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+ struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
+ struct bnxt_vnic_info *vnic = NULL;
+ int rc = 0;
+
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
+
+ if (rxq == NULL) {
+ PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
+ return -EINVAL;
+ }
+
+ /* Set the queue state to started here.
+	 * We check the status of the queue while posting buffers.
+	 * If the queue is not started, we do not post buffers for Rx.
+ */
+ rxq->rx_started = true;
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
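+	/* Release and recreate the Rx ring for this queue via HWRM. */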
+ bnxt_free_hwrm_rx_ring(bp, rx_queue_id);
+ rc = bnxt_alloc_hwrm_rx_ring(bp, rx_queue_id);
+ if (rc)
+ return rc;
+
+ if (BNXT_CHIP_P5(bp)) {
+ /* Reconfigure default receive ring and MRU. */
+ bnxt_hwrm_vnic_cfg(bp, rxq->vnic);
+ }
+ PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);
+
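+	/* If RSS is enabled, restore the ring group for this queue and
+	 * update the RSS table.
+	 */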
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ vnic = rxq->vnic;
+
+ if (BNXT_HAS_RING_GRPS(bp)) {
+ if (vnic->fw_grp_ids[rx_queue_id] != INVALID_HW_RING_ID)
+ return 0;
+
+ vnic->fw_grp_ids[rx_queue_id] =
+ bp->grp_info[rx_queue_id].fw_grp_id;
+ PMD_DRV_LOG(DEBUG,
+ "vnic = %p fw_grp_id = %d\n",
+ vnic, bp->grp_info[rx_queue_id].fw_grp_id);
+ }
+
+ PMD_DRV_LOG(DEBUG, "Rx Queue Count %d\n", vnic->rx_queue_cnt);
+ rc = bnxt_vnic_rss_configure(bp, vnic);
+ }
+
+ if (rc != 0) {
+ dev->data->rx_queue_state[rx_queue_id] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+ rxq->rx_started = false;
+ }
+
+ PMD_DRV_LOG(INFO,
+ "queue %d, rx_deferred_start %d, state %d!\n",
+ rx_queue_id, rxq->rx_deferred_start,
+ bp->eth_dev->data->rx_queue_state[rx_queue_id]);
+
+ return rc;
+}
+
+int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct bnxt *bp = dev->data->dev_private;
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+ struct bnxt_vnic_info *vnic = NULL;
+ struct bnxt_rx_queue *rxq = NULL;
+ int active_queue_cnt = 0;
+ int i, rc = 0;
+
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
+
+	/* For the Stingray platform and other platforms needing tighter
+	 * control of resource utilization, Rx CQ 0 also works as the
+	 * default CQ for async notifications.
+ */
+ if (!BNXT_NUM_ASYNC_CPR(bp) && !rx_queue_id) {
+ PMD_DRV_LOG(ERR, "Cannot stop Rx queue id %d\n", rx_queue_id);
+ return -EINVAL;
+ }
+
+ rxq = bp->rx_queues[rx_queue_id];
+ if (!rxq) {
+ PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
+ return -EINVAL;
+ }
+
+ vnic = rxq->vnic;
+ if (!vnic) {
+ PMD_DRV_LOG(ERR, "VNIC not initialized for RxQ %d\n",
+ rx_queue_id);
+ return -EINVAL;
+ }
+
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ rxq->rx_started = false;
+ PMD_DRV_LOG(DEBUG, "Rx queue stopped\n");
+
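+	/* If RSS is enabled, remove this queue from the VNIC's RSS table. */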
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ if (BNXT_HAS_RING_GRPS(bp))
+ vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;
+
+ PMD_DRV_LOG(DEBUG, "Rx Queue Count %d\n", vnic->rx_queue_cnt);
+ rc = bnxt_vnic_rss_configure(bp, vnic);
+ }
+
+ /* Compute current number of active receive queues. */
+ for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++)
+ if (bp->rx_queues[i]->rx_started)
+ active_queue_cnt++;
+
+ if (BNXT_CHIP_P5(bp)) {
+ /*
+ * For Thor, we need to ensure that the VNIC default receive
+ * ring corresponds to an active receive queue. When no queue
+ * is active, we need to temporarily set the MRU to zero so
+ * that packets are dropped early in the receive pipeline in
+ * order to prevent the VNIC default receive ring from being
+ * accessed.
+ */
+ if (active_queue_cnt == 0) {
+ uint16_t saved_mru = vnic->mru;
+
+ vnic->mru = 0;
+ /* Reconfigure default receive ring and MRU. */
+ bnxt_hwrm_vnic_cfg(bp, vnic);
+ vnic->mru = saved_mru;
+ } else {
+ /* Reconfigure default receive ring. */
+ bnxt_hwrm_vnic_cfg(bp, vnic);
+ }
+ } else if (active_queue_cnt) {
+ /*
+ * If the queue being stopped is the current default queue and
+ * there are other active queues, pick one of them as the
+ * default and reconfigure the vnic.
+ */
+ if (vnic->dflt_ring_grp == bp->grp_info[rx_queue_id].fw_grp_id) {
+ for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++) {
+ if (bp->rx_queues[i]->rx_started) {
+ vnic->dflt_ring_grp =
+ bp->grp_info[i].fw_grp_id;
+ bnxt_hwrm_vnic_cfg(bp, vnic);
+ break;
+ }
+ }
+ }
+ }
+
+ if (rc == 0)
+ bnxt_rx_queue_release_mbufs(rxq);