/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2018 Broadcom
+ * Copyright(c) 2014-2021 Broadcom
* All rights reserved.
*/
/*
 * Ring groups
 */
-int bnxt_init_ring_grps(struct bnxt *bp)
+static void bnxt_init_ring_grps(struct bnxt *bp)
{
unsigned int i;
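+ /*
+ * HWRM_NA_SIGNATURE is all-ones, so a byte-wise 0xff memset marks
+ * every ring and context ID in each group as invalid.
+ */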
for (i = 0; i < bp->max_ring_grps; i++)
memset(&bp->grp_info[i], (uint8_t)HWRM_NA_SIGNATURE,
sizeof(struct bnxt_ring_grp_info));
-
- return 0;
}
int bnxt_alloc_ring_grps(struct bnxt *bp)
- /* THOR does not support ring groups.
+ /* P5 does not support ring groups.
* But we will use the array to save RSS context IDs.
*/
- if (BNXT_CHIP_THOR(bp)) {
- bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR;
+ if (BNXT_CHIP_P5(bp)) {
+ bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_P5;
} else if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
/* 1 ring is for default completion ring */
PMD_DRV_LOG(ERR, "Insufficient resource: Ring Group\n");
"Failed to alloc grp info tbl.\n");
return -ENOMEM;
}
+ bnxt_init_ring_grps(bp);
}
return 0;
uint64_t rx_offloads = bp->eth_dev->data->dev_conf.rxmode.offloads;
const struct rte_memzone *mz = NULL;
char mz_name[RTE_MEMZONE_NAMESIZE];
- rte_iova_t mz_phys_addr_base;
rte_iova_t mz_phys_addr;
- int sz;
int stats_len = (tx_ring_info || rx_ring_info) ?
RTE_CACHE_LINE_ROUNDUP(sizeof(struct hwrm_stat_ctx_query_output) -
total_alloc_len += tpa_info_len;
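+ /*
+ * PCI_PRI_FMT renders the standard domain:bus:devid.function
+ * notation, matching the PCI address format used elsewhere in DPDK.
+ */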
snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
- "bnxt_%04x:%02x:%02x:%02x-%04x_%s", pdev->addr.domain,
+ "bnxt_" PCI_PRI_FMT "-%04x_%s", pdev->addr.domain,
pdev->addr.bus, pdev->addr.devid, pdev->addr.function, qidx,
suffix);
mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
return -ENOMEM;
}
memset(mz->addr, 0, mz->len);
- mz_phys_addr_base = mz->iova;
mz_phys_addr = mz->iova;
- if ((unsigned long)mz->addr == mz_phys_addr_base) {
- PMD_DRV_LOG(DEBUG,
- "Memzone physical address same as virtual.\n");
- PMD_DRV_LOG(DEBUG, "Using rte_mem_virt2iova()\n");
- for (sz = 0; sz < total_alloc_len; sz += getpagesize())
- rte_mem_lock_page(((char *)mz->addr) + sz);
- mz_phys_addr_base = rte_mem_virt2iova(mz->addr);
- mz_phys_addr = rte_mem_virt2iova(mz->addr);
- if (mz_phys_addr == RTE_BAD_IOVA) {
- PMD_DRV_LOG(ERR,
- "unable to map ring address to physical memory\n");
- return -ENOMEM;
- }
- }
if (tx_ring_info) {
txq->mz = mz;
rx_ring->vmem =
(void **)((char *)mz->addr + rx_vmem_start);
rx_ring_info->rx_buf_ring =
- (struct bnxt_sw_rx_bd *)rx_ring->vmem;
+ (struct rte_mbuf **)rx_ring->vmem;
}
rx_ring = rx_ring_info->ag_ring_struct;
rx_ring->vmem =
(void **)((char *)mz->addr + ag_vmem_start);
rx_ring_info->ag_buf_ring =
- (struct bnxt_sw_rx_bd *)rx_ring->vmem;
+ (struct rte_mbuf **)rx_ring->vmem;
}
rx_ring_info->ag_bitmap =
struct bnxt_db_info *db,
uint32_t ring_type,
uint32_t map_idx,
- uint32_t fid)
+ uint32_t fid,
+ uint32_t ring_mask)
{
- if (BNXT_CHIP_THOR(bp)) {
- if (BNXT_PF(bp))
- db->doorbell = (char *)bp->doorbell_base + 0x10000;
- else
- db->doorbell = (char *)bp->doorbell_base + 0x4000;
+ if (BNXT_CHIP_P5(bp)) {
+ int db_offset = DB_PF_OFFSET;
switch (ring_type) {
case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
db->db_key64 = DBR_PATH_L2;
break;
}
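+ /*
+ * P5 doorbells sit at a fixed PF or VF offset; SR2 doorbells
+ * require the valid bit and are located past the legacy
+ * doorbell region reported by firmware.
+ */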
+ if (BNXT_CHIP_SR2(bp)) {
+ db->db_key64 |= DBR_VALID;
+ db_offset = bp->legacy_db_size;
+ } else if (BNXT_VF(bp)) {
+ db_offset = DB_VF_OFFSET;
+ }
+
+ db->doorbell = (char *)bp->doorbell_base + db_offset;
db->db_key64 |= (uint64_t)fid << DBR_XID_SFT;
db->db_64 = true;
} else {
}
db->db_64 = false;
}
+ db->db_ring_mask = ring_mask;
+
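+ /*
+ * SR2 doorbells carry an epoch bit that toggles each time the
+ * ring index wraps; precompute the mask that detects a wrap and
+ * the shift that moves the bit into the DBR_EPOCH position.
+ */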
+ if (BNXT_CHIP_SR2(bp)) {
+ db->db_epoch_mask = db->db_ring_mask + 1;
+ db->db_epoch_shift = DBR_EPOCH_SFT -
+ rte_log2_u32(db->db_epoch_mask);
+ }
}
static int bnxt_alloc_cmpl_ring(struct bnxt *bp, int queue_index,
if (rc)
return rc;
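+ /*
+ * Raw indices advance without masking; the ring mask saved in
+ * the doorbell record is applied when the doorbell is written.
+ */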
- cpr->cp_cons = 0;
+ cpr->cp_raw_cons = 0;
bnxt_set_db(bp, &cpr->cp_db, ring_type, cp_ring_index,
- cp_ring->fw_ring_id);
+ cp_ring->fw_ring_id, cp_ring->ring_mask);
bnxt_db_cq(cpr);
return 0;
if (!BNXT_HAS_NQ(bp) || bp->rxtx_nq_ring)
return 0;
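+ /* A single NQ is shared by all Rx/Tx rings and is allocated once. */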
- socket_id = rte_lcore_to_socket_id(rte_get_master_lcore());
+ socket_id = rte_lcore_to_socket_id(rte_get_main_lcore());
nqr = rte_zmalloc_socket("nqr",
sizeof(struct bnxt_cp_ring_info),
ring->ring_mask = ring->ring_size - 1;
ring->vmem_size = 0;
ring->vmem = NULL;
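+ /* No firmware-side ring has been allocated for the NQ yet. */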
+ ring->fw_ring_id = INVALID_HW_RING_ID;
nqr->cp_ring_struct = ring;
rc = bnxt_alloc_rings(bp, 0, NULL, NULL, nqr, NULL, "l2_nqr");
}
bnxt_set_db(bp, &nqr->cp_db, ring_type, ring_index,
- ring->fw_ring_id);
+ ring->fw_ring_id, ring->ring_mask);
bnxt_db_nq(nqr);
bp->rxtx_nq_ring = nqr;
if (rc)
return rc;
- rxr->rx_prod = 0;
+ rxr->rx_raw_prod = 0;
if (BNXT_HAS_RING_GRPS(bp))
bp->grp_info[queue_index].rx_fw_ring_id = ring->fw_ring_id;
- bnxt_set_db(bp, &rxr->rx_db, ring_type, queue_index, ring->fw_ring_id);
- bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
+ bnxt_set_db(bp, &rxr->rx_db, ring_type, queue_index, ring->fw_ring_id,
+ ring->ring_mask);
+ bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
return 0;
}
ring->fw_rx_ring_id = rxr->rx_ring_struct->fw_ring_id;
- if (BNXT_CHIP_THOR(bp)) {
+ if (BNXT_CHIP_P5(bp)) {
ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG;
hw_stats_ctx_id = cpr->hw_stats_ctx_id;
} else {
if (rc)
return rc;
- rxr->ag_prod = 0;
+ rxr->ag_raw_prod = 0;
if (BNXT_HAS_RING_GRPS(bp))
bp->grp_info[queue_index].ag_fw_ring_id = ring->fw_ring_id;
- bnxt_set_db(bp, &rxr->ag_db, ring_type, map_idx, ring->fw_ring_id);
- bnxt_db_write(&rxr->ag_db, rxr->ag_prod);
+ bnxt_set_db(bp, &rxr->ag_db, ring_type, map_idx, ring->fw_ring_id,
+ ring->ring_mask);
+ bnxt_db_write(&rxr->ag_db, rxr->ag_raw_prod);
return 0;
}
struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
int rc;
+ /*
+ * Storage for the cp ring is allocated based on worst-case
+ * usage; the actual size to be used by the hardware is computed
+ * here. Each received packet can consume two completion entries,
+ * so the cp ring is sized at twice the Rx ring.
+ */
+ cp_ring->ring_size = rxr->rx_ring_struct->ring_size * 2;
+
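+ /*
+ * Scattered Rx generates additional completions for the
+ * aggregation buffers attached to each packet.
+ */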
+ if (bp->eth_dev->data->scattered_rx)
+ cp_ring->ring_size *= AGG_RING_SIZE_FACTOR;
+
+ cp_ring->ring_mask = cp_ring->ring_size - 1;
+
rc = bnxt_alloc_cmpl_ring(bp, queue_index, cpr);
if (rc)
goto err_out;
if (rxq->rx_started) {
if (bnxt_init_one_rx_ring(rxq)) {
- RTE_LOG(ERR, PMD,
- "bnxt_init_one_rx_ring failed!\n");
+ PMD_DRV_LOG(ERR, "bnxt_init_one_rx_ring failed!\n");
bnxt_rx_queue_release_op(rxq);
rc = -ENOMEM;
goto err_out;
}
- bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
- bnxt_db_write(&rxr->ag_db, rxr->ag_prod);
+ bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
+ bnxt_db_write(&rxr->ag_db, rxr->ag_raw_prod);
}
rxq->index = queue_index;
-#ifdef RTE_ARCH_X86
+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
bnxt_rxq_vec_setup(rxq);
#endif
struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ /*
+ * Storage for the cp ring is allocated based on worst-case
+ * usage; the actual size to be used by the hardware is computed
+ * here. Each received packet can consume two completion entries,
+ * so the cp ring is sized at twice the Rx ring.
+ */
+ cp_ring->ring_size = rxr->rx_ring_struct->ring_size * 2;
+
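+ /*
+ * Scattered Rx generates additional completions for the
+ * aggregation buffers attached to each packet.
+ */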
+ if (bp->eth_dev->data->scattered_rx)
+ cp_ring->ring_size *= AGG_RING_SIZE_FACTOR;
+
+ cp_ring->ring_mask = cp_ring->ring_size - 1;
+
if (bnxt_alloc_cmpl_ring(bp, i, cpr))
goto err_out;
bnxt_rx_queue_release_op(rxq);
return -ENOMEM;
}
- bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
- bnxt_db_write(&rxr->ag_db, rxr->ag_prod);
+ bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
+ bnxt_db_write(&rxr->ag_db, rxr->ag_raw_prod);
rxq->index = i;
-#ifdef RTE_ARCH_X86
+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
bnxt_rxq_vec_setup(rxq);
#endif
}
if (rc)
goto err_out;
- bnxt_set_db(bp, &txr->tx_db, ring_type, i, ring->fw_ring_id);
+ bnxt_set_db(bp, &txr->tx_db, ring_type, i, ring->fw_ring_id,
+ ring->ring_mask);
txq->index = idx;
bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);
}
if (rc)
return rc;
- cpr->cp_cons = 0;
+ cpr->cp_raw_cons = 0;
cpr->valid = 0;
bnxt_set_db(bp, &cpr->cp_db, ring_type, 0,
- cp_ring->fw_ring_id);
+ cp_ring->fw_ring_id, cp_ring->ring_mask);
if (BNXT_HAS_NQ(bp))
bnxt_db_nq(cpr);
if (BNXT_NUM_ASYNC_CPR(bp) == 0)
return 0;
- socket_id = rte_lcore_to_socket_id(rte_get_master_lcore());
+ socket_id = rte_lcore_to_socket_id(rte_get_main_lcore());
cpr = rte_zmalloc_socket("cpr",
sizeof(struct bnxt_cp_ring_info),