+
+static int bnxt_alloc_rx_agg_ring(struct bnxt *bp, int queue_index)
+{
+ unsigned int map_idx = queue_index + bp->rx_cp_nr_rings;
+ struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+ struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ struct bnxt_ring *ring = rxr->ag_ring_struct;
+ uint32_t hw_stats_ctx_id = HWRM_NA_SIGNATURE;
+ uint8_t ring_type;
+ int rc = 0;
+
+ ring->fw_rx_ring_id = rxr->rx_ring_struct->fw_ring_id;
+
+ if (BNXT_CHIP_THOR(bp)) {
+ ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG;
+ hw_stats_ctx_id = cpr->hw_stats_ctx_id;
+ } else {
+ ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX;
+ }
+
+ rc = bnxt_hwrm_ring_alloc(bp, ring, ring_type, map_idx,
+ hw_stats_ctx_id, cp_ring->fw_ring_id, 0);
+
+ if (rc)
+ return rc;
+
+ rxr->ag_prod = 0;
+ if (BNXT_HAS_RING_GRPS(bp))
+ bp->grp_info[queue_index].ag_fw_ring_id = ring->fw_ring_id;
+ bnxt_set_db(bp, &rxr->ag_db, ring_type, map_idx, ring->fw_ring_id);
+ bnxt_db_write(&rxr->ag_db, rxr->ag_prod);
+
+ return 0;
+}
+
/*
 * Allocate all firmware rings needed by Rx queue @queue_index, in order:
 * completion ring, Rx ring, Rx aggregation ring.  Also records ring-group
 * bookkeeping and, when no dedicated async completion ring is configured,
 * registers queue 0's completion ring for async event delivery.
 *
 * Returns 0 on success, a negative error code on failure.  On the
 * bnxt_init_one_rx_ring() failure path the queue itself is released
 * before returning.
 */
int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)
{
	struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	int rc;

	/* The completion ring must exist before the Rx rings that use it. */
	rc = bnxt_alloc_cmpl_ring(bp, queue_index, cpr);
	if (rc)
		goto err_out;

	if (BNXT_HAS_RING_GRPS(bp)) {
		bp->grp_info[queue_index].fw_stats_ctx = cpr->hw_stats_ctx_id;
		bp->grp_info[queue_index].cp_fw_ring_id = cp_ring->fw_ring_id;
	}

	if (!BNXT_NUM_ASYNC_CPR(bp) && !queue_index) {
		/*
		 * If a dedicated async event completion ring is not enabled,
		 * use the first completion ring from PF or VF as the default
		 * completion ring for async event handling.
		 */
		bp->async_cp_ring = cpr;
		rc = bnxt_hwrm_set_async_event_cr(bp);
		if (rc)
			goto err_out;
	}

	rc = bnxt_alloc_rx_ring(bp, queue_index);
	if (rc)
		goto err_out;

	rc = bnxt_alloc_rx_agg_ring(bp, queue_index);
	if (rc)
		goto err_out;

	if (rxq->rx_started) {
		if (bnxt_init_one_rx_ring(rxq)) {
			PMD_DRV_LOG(ERR,
				    "bnxt_init_one_rx_ring failed!\n");
			/* rxq is freed here; err_out only logs queue_index. */
			bnxt_rx_queue_release_op(rxq);
			rc = -ENOMEM;
			goto err_out;
		}
		/* Publish initial producer indices to the hardware. */
		bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
		bnxt_db_write(&rxr->ag_db, rxr->ag_prod);
	}
	rxq->index = queue_index;
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
	bnxt_rxq_vec_setup(rxq);
#endif

	return 0;

err_out:
	PMD_DRV_LOG(ERR,
		    "Failed to allocate receive queue %d, rc %d.\n",
		    queue_index, rc);
	return rc;
}
+
+/* Initialise all rings to -1, its used to free rings later if allocation
+ * of few rings fails.
+ */
+static void bnxt_init_all_rings(struct bnxt *bp)
+{
+ unsigned int i = 0;
+ struct bnxt_rx_queue *rxq;
+ struct bnxt_ring *cp_ring;
+ struct bnxt_ring *ring;
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_tx_queue *txq;
+
+ for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+ rxq = bp->rx_queues[i];
+ /* Rx-compl */
+ cp_ring = rxq->cp_ring->cp_ring_struct;
+ cp_ring->fw_ring_id = INVALID_HW_RING_ID;
+ /* Rx-Reg */
+ rxr = rxq->rx_ring;
+ ring = rxr->rx_ring_struct;
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ /* Rx-AGG */
+ ring = rxr->ag_ring_struct;
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ }
+ for (i = 0; i < bp->tx_cp_nr_rings; i++) {
+ txq = bp->tx_queues[i];
+ /* Tx cmpl */
+ cp_ring = txq->cp_ring->cp_ring_struct;
+ cp_ring->fw_ring_id = INVALID_HW_RING_ID;
+ /*Tx Ring */
+ ring = txq->tx_ring->tx_ring_struct;
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ }
+}
+
+/* ring_grp usage:
+ * [0] = default completion ring
+ * [1 -> +rx_cp_nr_rings] = rx_cp, rx rings
+ * [1+rx_cp_nr_rings + 1 -> +tx_cp_nr_rings] = tx_cp, tx rings
+ */
+int bnxt_alloc_hwrm_rings(struct bnxt *bp)
+{
+ struct bnxt_coal coal;
+ unsigned int i;
+ uint8_t ring_type;
+ int rc = 0;
+
+ bnxt_init_dflt_coal(&coal);
+ bnxt_init_all_rings(bp);
+
+ for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+ struct bnxt_rx_queue *rxq = bp->rx_queues[i];
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+ struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+
+ if (bnxt_alloc_cmpl_ring(bp, i, cpr))
+ goto err_out;
+
+ if (BNXT_HAS_RING_GRPS(bp)) {
+ bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
+ bp->grp_info[i].cp_fw_ring_id = cp_ring->fw_ring_id;
+ }
+
+ bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);
+ if (!BNXT_NUM_ASYNC_CPR(bp) && !i) {
+ /*
+ * If a dedicated async event completion ring is not
+ * enabled, use the first completion ring as the default
+ * completion ring for async event handling.
+ */
+ bp->async_cp_ring = cpr;
+ rc = bnxt_hwrm_set_async_event_cr(bp);
+ if (rc)
+ goto err_out;
+ }
+
+ if (bnxt_alloc_rx_ring(bp, i))
+ goto err_out;
+
+ if (bnxt_alloc_rx_agg_ring(bp, i))
+ goto err_out;
+
+ if (bnxt_init_one_rx_ring(rxq)) {
+ PMD_DRV_LOG(ERR, "bnxt_init_one_rx_ring failed!\n");
+ bnxt_rx_queue_release_op(rxq);
+ return -ENOMEM;
+ }
+ bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
+ bnxt_db_write(&rxr->ag_db, rxr->ag_prod);
+ rxq->index = i;
+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
+ bnxt_rxq_vec_setup(rxq);
+#endif
+ }
+
+ for (i = 0; i < bp->tx_cp_nr_rings; i++) {
+ struct bnxt_tx_queue *txq = bp->tx_queues[i];
+ struct bnxt_cp_ring_info *cpr = txq->cp_ring;
+ struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
+ struct bnxt_tx_ring_info *txr = txq->tx_ring;
+ struct bnxt_ring *ring = txr->tx_ring_struct;
+ unsigned int idx = i + bp->rx_cp_nr_rings;
+ uint16_t tx_cosq_id = 0;
+
+ if (bnxt_alloc_cmpl_ring(bp, idx, cpr))
+ goto err_out;
+
+ if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY)
+ tx_cosq_id = bp->tx_cosq_id[i < bp->max_lltc ? i : 0];
+ else
+ tx_cosq_id = bp->tx_cosq_id[0];
+ /* Tx ring */
+ ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_TX;
+ rc = bnxt_hwrm_ring_alloc(bp, ring,
+ ring_type,
+ i, cpr->hw_stats_ctx_id,
+ cp_ring->fw_ring_id,
+ tx_cosq_id);
+ if (rc)
+ goto err_out;
+
+ bnxt_set_db(bp, &txr->tx_db, ring_type, i, ring->fw_ring_id);
+ txq->index = idx;
+ bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);
+ }
+
+err_out:
+ return rc;
+}
+
+/* Allocate dedicated async completion ring. */
+int bnxt_alloc_async_cp_ring(struct bnxt *bp)
+{
+ struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;
+ struct bnxt_ring *cp_ring;
+ uint8_t ring_type;
+ int rc;
+
+ if (BNXT_NUM_ASYNC_CPR(bp) == 0 || cpr == NULL)
+ return 0;
+
+ cp_ring = cpr->cp_ring_struct;
+
+ if (BNXT_HAS_NQ(bp))
+ ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ;
+ else
+ ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL;
+
+ rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, 0,
+ HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE, 0);
+
+ if (rc)
+ return rc;
+
+ cpr->cp_cons = 0;
+ cpr->valid = 0;
+ bnxt_set_db(bp, &cpr->cp_db, ring_type, 0,
+ cp_ring->fw_ring_id);
+
+ if (BNXT_HAS_NQ(bp))
+ bnxt_db_nq(cpr);
+ else
+ bnxt_db_cq(cpr);
+
+ return bnxt_hwrm_set_async_event_cr(bp);
+}
+
+/* Free dedicated async completion ring. */
+void bnxt_free_async_cp_ring(struct bnxt *bp)
+{
+ struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;
+
+ if (BNXT_NUM_ASYNC_CPR(bp) == 0 || cpr == NULL)
+ return;
+
+ if (BNXT_HAS_NQ(bp))
+ bnxt_free_nq_ring(bp, cpr);
+ else
+ bnxt_free_cp_ring(bp, cpr);
+
+ bnxt_free_ring(cpr->cp_ring_struct);
+ rte_free(cpr->cp_ring_struct);
+ cpr->cp_ring_struct = NULL;
+ rte_free(cpr);
+ bp->async_cp_ring = NULL;
+}
+
+int bnxt_alloc_async_ring_struct(struct bnxt *bp)
+{
+ struct bnxt_cp_ring_info *cpr = NULL;
+ struct bnxt_ring *ring = NULL;
+ unsigned int socket_id;
+
+ if (BNXT_NUM_ASYNC_CPR(bp) == 0)
+ return 0;
+
+ socket_id = rte_lcore_to_socket_id(rte_get_master_lcore());
+
+ cpr = rte_zmalloc_socket("cpr",
+ sizeof(struct bnxt_cp_ring_info),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (cpr == NULL)
+ return -ENOMEM;
+
+ ring = rte_zmalloc_socket("bnxt_cp_ring_struct",
+ sizeof(struct bnxt_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (ring == NULL) {
+ rte_free(cpr);
+ return -ENOMEM;
+ }
+
+ ring->bd = (void *)cpr->cp_desc_ring;
+ ring->bd_dma = cpr->cp_desc_mapping;
+ ring->ring_size = rte_align32pow2(DEFAULT_CP_RING_SIZE);
+ ring->ring_mask = ring->ring_size - 1;
+ ring->vmem_size = 0;
+ ring->vmem = NULL;
+
+ bp->async_cp_ring = cpr;
+ cpr->cp_ring_struct = ring;
+
+ return bnxt_alloc_rings(bp, 0, NULL, NULL,
+ bp->async_cp_ring, NULL,
+ "def_cp");
+}