Refactor the code to remove references to Thor.
Use P5 (as in phase 5 of the development cycle) instead, since the
designation applies to boards other than Thor as well.
Signed-off-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Acked-by: Lance Richardson <lance.richardson@broadcom.com>
#define TPA_MAX_SEGS 5 /* 32 segments in log2 units */
#define BNXT_TPA_MAX_AGGS(bp) \
- (BNXT_CHIP_THOR(bp) ? TPA_MAX_AGGS_TH : \
+ (BNXT_CHIP_P5(bp) ? TPA_MAX_AGGS_TH : \
TPA_MAX_AGGS)
#define BNXT_TPA_MAX_SEGS(bp) \
- (BNXT_CHIP_THOR(bp) ? TPA_MAX_SEGS_TH : \
+ (BNXT_CHIP_P5(bp) ? TPA_MAX_SEGS_TH : \
TPA_MAX_SEGS)
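For illustration, a minimal sketch of how a caller resolves these chip-dependent limits (bnxt_log_tpa_limits() is a hypothetical helper, not part of this patch):

/* Hypothetical helper: report the TPA limits the macros select. */
static void bnxt_log_tpa_limits(struct bnxt *bp)
{
	uint32_t max_aggs = BNXT_TPA_MAX_AGGS(bp); /* TPA_MAX_AGGS_TH on P5 */
	uint32_t max_segs = BNXT_TPA_MAX_SEGS(bp); /* log2 units: 5 => 32 */

	PMD_DRV_LOG(INFO, "TPA limits: %u aggs, 2^%u segments\n",
		    max_aggs, max_segs);
}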
/*
#define DBR_TYPE_NQ (0xaULL << 60)
#define DBR_TYPE_NQ_ARM (0xbULL << 60)
-#define BNXT_RSS_TBL_SIZE_THOR 512U
-#define BNXT_RSS_ENTRIES_PER_CTX_THOR 64
-#define BNXT_MAX_RSS_CTXTS_THOR \
- (BNXT_RSS_TBL_SIZE_THOR / BNXT_RSS_ENTRIES_PER_CTX_THOR)
+#define BNXT_RSS_TBL_SIZE_P5 512U
+#define BNXT_RSS_ENTRIES_PER_CTX_P5 64
+#define BNXT_MAX_RSS_CTXTS_P5 \
+ (BNXT_RSS_TBL_SIZE_P5 / BNXT_RSS_ENTRIES_PER_CTX_P5)
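With these values the macro arithmetic is fixed: BNXT_MAX_RSS_CTXTS_P5 evaluates to 512 / 64 = 8 RSS contexts per port on P5 chips.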
#define BNXT_MAX_TC 8
#define BNXT_MAX_QUEUE 8
#define BNXT_FLAG_KONG_MB_EN BIT(10)
#define BNXT_FLAG_TRUSTED_VF_EN BIT(11)
#define BNXT_FLAG_DFLT_VNIC_SET BIT(12)
-#define BNXT_FLAG_THOR_CHIP BIT(13)
+#define BNXT_FLAG_CHIP_P5 BIT(13)
#define BNXT_FLAG_STINGRAY BIT(14)
#define BNXT_FLAG_FW_RESET BIT(15)
#define BNXT_FLAG_FATAL_ERROR BIT(16)
#define BNXT_USE_CHIMP_MB 0 //For non-CFA commands, everything uses Chimp.
#define BNXT_USE_KONG(bp) ((bp)->flags & BNXT_FLAG_KONG_MB_EN)
#define BNXT_VF_IS_TRUSTED(bp) ((bp)->flags & BNXT_FLAG_TRUSTED_VF_EN)
-#define BNXT_CHIP_THOR(bp) ((bp)->flags & BNXT_FLAG_THOR_CHIP)
+#define BNXT_CHIP_P5(bp) ((bp)->flags & BNXT_FLAG_CHIP_P5)
#define BNXT_STINGRAY(bp) ((bp)->flags & BNXT_FLAG_STINGRAY)
-#define BNXT_HAS_NQ(bp) BNXT_CHIP_THOR(bp)
-#define BNXT_HAS_RING_GRPS(bp) (!BNXT_CHIP_THOR(bp))
+#define BNXT_HAS_NQ(bp) BNXT_CHIP_P5(bp)
+#define BNXT_HAS_RING_GRPS(bp) (!BNXT_CHIP_P5(bp))
#define BNXT_FLOW_XSTATS_EN(bp) ((bp)->flags & BNXT_FLAG_FLOW_XSTATS_EN)
#define BNXT_HAS_DFLT_MAC_SET(bp) ((bp)->flags & BNXT_FLAG_DFLT_MAC_SET)
#define BNXT_TRUFLOW_EN(bp) ((bp)->flags & BNXT_FLAG_TRUFLOW_EN)
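The two capability predicates above are complements by construction; a hedged sketch (bnxt_uses_nq() is illustrative only, not a driver function):

/* Illustrative only: NQs exist exactly on P5 chips, and ring
 * groups exist exactly on pre-P5 chips, so the two predicates
 * are logical complements of each other.
 */
static bool bnxt_uses_nq(const struct bnxt *bp)
{
	return BNXT_HAS_NQ(bp); /* equivalent to !BNXT_HAS_RING_GRPS(bp) */
}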
static uint16_t bnxt_rss_ctxts(const struct bnxt *bp)
{
unsigned int num_rss_rings = RTE_MIN(bp->rx_nr_rings,
- BNXT_RSS_TBL_SIZE_THOR);
+ BNXT_RSS_TBL_SIZE_P5);
- if (!BNXT_CHIP_THOR(bp))
+ if (!BNXT_CHIP_P5(bp))
return 1;
return RTE_ALIGN_MUL_CEIL(num_rss_rings,
- BNXT_RSS_ENTRIES_PER_CTX_THOR) /
- BNXT_RSS_ENTRIES_PER_CTX_THOR;
+ BNXT_RSS_ENTRIES_PER_CTX_P5) /
+ BNXT_RSS_ENTRIES_PER_CTX_P5;
}
uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp)
{
- if (!BNXT_CHIP_THOR(bp))
+ if (!BNXT_CHIP_P5(bp))
return HW_HASH_INDEX_SIZE;
- return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_THOR;
+ return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_P5;
}
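A worked example of the sizing math above, assuming a P5 NIC configured with 100 Rx rings (rss_tbl_size_example() is a standalone restatement, not driver code):

/* Sketch: 100 rings -> min(100, 512) = 100; ceil(100 / 64) = 2
 * contexts; hash table size = 2 * 64 = 128 entries.
 */
static uint16_t rss_tbl_size_example(uint16_t rx_nr_rings)
{
	uint16_t rings = RTE_MIN(rx_nr_rings, BNXT_RSS_TBL_SIZE_P5);
	uint16_t ctxts = (rings + BNXT_RSS_ENTRIES_PER_CTX_P5 - 1) /
			 BNXT_RSS_ENTRIES_PER_CTX_P5;

	return ctxts * BNXT_RSS_ENTRIES_PER_CTX_P5;
}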
static void bnxt_free_parent_info(struct bnxt *bp)
if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
int j, nr_ctxs = bnxt_rss_ctxts(bp);
- if (bp->rx_nr_rings > BNXT_RSS_TBL_SIZE_THOR) {
+ if (bp->rx_nr_rings > BNXT_RSS_TBL_SIZE_P5) {
PMD_DRV_LOG(ERR, "RxQ cnt %d > reta_size %d\n",
- bp->rx_nr_rings, BNXT_RSS_TBL_SIZE_THOR);
+ bp->rx_nr_rings, BNXT_RSS_TBL_SIZE_P5);
PMD_DRV_LOG(ERR,
"Only queues 0-%d will be in RSS table\n",
- BNXT_RSS_TBL_SIZE_THOR - 1);
+ BNXT_RSS_TBL_SIZE_P5 - 1);
}
rc = 0;
-/* THOR does not support ring groups.
+/* P5 does not support ring groups.
* But we will use the array to save RSS context IDs.
*/
- if (BNXT_CHIP_THOR(bp))
- bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR;
+ if (BNXT_CHIP_P5(bp))
+ bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_P5;
rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
if (rc) {
return -EINVAL;
}
- if (BNXT_CHIP_THOR(bp)) {
+ if (BNXT_CHIP_P5(bp)) {
vnic->rss_table[i * 2] =
rxq->rx_ring->rx_ring_struct->fw_ring_id;
vnic->rss_table[i * 2 + 1] =
if (reta_conf[idx].mask & (1ULL << sft)) {
uint16_t qid;
- if (BNXT_CHIP_THOR(bp))
+ if (BNXT_CHIP_P5(bp))
qid = bnxt_rss_to_qid(bp,
vnic->rss_table[i * 2]);
else
if (!ptp)
return 0;
- if (BNXT_CHIP_THOR(bp))
+ if (BNXT_CHIP_P5(bp))
rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME,
&systime_cycles);
else
ptp->tx_tstamp_tc.cc_shift = shift;
ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
- if (!BNXT_CHIP_THOR(bp))
+ if (!BNXT_CHIP_P5(bp))
bnxt_map_ptp_regs(bp);
return 0;
bnxt_hwrm_ptp_cfg(bp);
- if (!BNXT_CHIP_THOR(bp))
+ if (!BNXT_CHIP_P5(bp))
bnxt_unmap_ptp_regs(bp);
return 0;
if (!ptp)
return 0;
- if (BNXT_CHIP_THOR(bp))
+ if (BNXT_CHIP_P5(bp))
rx_tstamp_cycles = ptp->rx_timestamp;
else
bnxt_get_rx_ts(bp, &rx_tstamp_cycles);
if (!ptp)
return 0;
- if (BNXT_CHIP_THOR(bp))
+ if (BNXT_CHIP_P5(bp))
rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_PATH_TX,
&tx_tstamp_cycles);
else
}
}
-static bool bnxt_thor_device(uint16_t device_id)
+/* Phase 5 device */
+static bool bnxt_p5_device(uint16_t device_id)
{
switch (device_id) {
case BROADCOM_DEV_ID_57508:
if (bnxt_vf_pciid(pci_dev->id.device_id))
bp->flags |= BNXT_FLAG_VF;
- if (bnxt_thor_device(pci_dev->id.device_id))
- bp->flags |= BNXT_FLAG_THOR_CHIP;
+ if (bnxt_p5_device(pci_dev->id.device_id))
+ bp->flags |= BNXT_FLAG_CHIP_P5;
if (pci_dev->id.device_id == BROADCOM_DEV_ID_58802 ||
pci_dev->id.device_id == BROADCOM_DEV_ID_58804 ||
HWRM_CHECK_RESULT();
- if (!BNXT_CHIP_THOR(bp) &&
+ if (!BNXT_CHIP_P5(bp) &&
!(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))
return 0;
if (!ptp)
return -ENOMEM;
- if (!BNXT_CHIP_THOR(bp)) {
+ if (!BNXT_CHIP_P5(bp)) {
ptp->rx_regs[BNXT_PTP_RX_TS_L] =
rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
ptp->rx_regs[BNXT_PTP_RX_TS_H] =
bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
- if (!BNXT_CHIP_THOR(bp) && !bp->pdev->max_vfs)
+ if (!BNXT_CHIP_P5(bp) && !bp->pdev->max_vfs)
bp->max_l2_ctx += bp->max_rx_em_flows;
/* TODO: For now, do not support VMDq/RFS on VFs. */
if (BNXT_PF(bp)) {
* So use the value provided by func_qcaps.
*/
bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
- if (!BNXT_CHIP_THOR(bp) && !bp->pdev->max_vfs)
+ if (!BNXT_CHIP_P5(bp) && !bp->pdev->max_vfs)
bp->max_l2_ctx += bp->max_rx_em_flows;
bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
req.ring_type = ring_type;
req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
- if (BNXT_CHIP_THOR(bp)) {
+ if (BNXT_CHIP_P5(bp)) {
mb_pool = bp->rx_queues[0]->mb_pool;
rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
RTE_PKTMBUF_HEADROOM;
HWRM_PREP(&req, HWRM_VNIC_CFG, BNXT_USE_CHIMP_MB);
- if (BNXT_CHIP_THOR(bp)) {
+ if (BNXT_CHIP_P5(bp)) {
int dflt_rxq = vnic->start_grp_id;
struct bnxt_rx_ring_info *rxr;
struct bnxt_cp_ring_info *cpr;
{
int rc = 0;
- if (BNXT_CHIP_THOR(bp)) {
+ if (BNXT_CHIP_P5(bp)) {
int j;
for (j = 0; j < vnic->num_lb_ctxts; j++) {
}
static int
-bnxt_hwrm_vnic_rss_cfg_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
int i;
int rc = 0;
if (!vnic->rss_table)
return 0;
- if (BNXT_CHIP_THOR(bp))
- return bnxt_hwrm_vnic_rss_cfg_thor(bp, vnic);
+ if (BNXT_CHIP_P5(bp))
+ return bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
- if (BNXT_CHIP_THOR(bp) && !bp->max_tpa_v2) {
+ if (BNXT_CHIP_P5(bp) && !bp->max_tpa_v2) {
if (enable)
PMD_DRV_LOG(ERR, "No HW support for LRO\n");
return -ENOTSUP;
ring = rxr->ag_ring_struct;
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
bnxt_hwrm_ring_free(bp, ring,
- BNXT_CHIP_THOR(bp) ?
+ BNXT_CHIP_P5(bp) ?
HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
HWRM_RING_FREE_INPUT_RING_TYPE_RX);
if (BNXT_HAS_RING_GRPS(bp))
goto port_phy_cfg;
autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
- if (BNXT_CHIP_THOR(bp) &&
+ if (BNXT_CHIP_P5(bp) &&
dev_conf->link_speeds == ETH_LINK_SPEED_40G) {
/* 40G is not supported as part of media auto detect.
* The speed must be forced to 40G, with autoneg disabled,
* until the link comes up at the new speed.
*/
if (autoneg == 1 &&
- !(!BNXT_CHIP_THOR(bp) &&
+ !(!BNXT_CHIP_P5(bp) &&
(bp->link_info->auto_link_speed ||
bp->link_info->force_link_speed))) {
link_req.phy_flags |=
}
static int
-bnxt_vnic_rss_configure_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+bnxt_vnic_rss_configure_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
req.ring_grp_tbl_addr =
rte_cpu_to_le_64(vnic->rss_table_dma_addr +
- i * BNXT_RSS_ENTRIES_PER_CTX_THOR *
+ i * BNXT_RSS_ENTRIES_PER_CTX_P5 *
2 * sizeof(*ring_tbl));
req.hash_key_tbl_addr =
rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
if (!(vnic->rss_table && vnic->hash_type))
return 0;
- if (BNXT_CHIP_THOR(bp))
- return bnxt_vnic_rss_configure_thor(bp, vnic);
+ if (BNXT_CHIP_P5(bp))
+ return bnxt_vnic_rss_configure_p5(bp, vnic);
if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
return 0;
req->flags = rte_cpu_to_le_16(flags);
}
-static int bnxt_hwrm_set_coal_params_thor(struct bnxt *bp,
+static int bnxt_hwrm_set_coal_params_p5(struct bnxt *bp,
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *agg_req)
{
struct hwrm_ring_aggint_qcaps_input req = {0};
int rc;
/* Set ring coalesce parameters only for 100G NICs */
- if (BNXT_CHIP_THOR(bp)) {
- if (bnxt_hwrm_set_coal_params_thor(bp, &req))
+ if (BNXT_CHIP_P5(bp)) {
+ if (bnxt_hwrm_set_coal_params_p5(bp, &req))
return -1;
} else if (bnxt_stratus_device(bp)) {
bnxt_hwrm_set_coal_params(coal, &req);
int total_alloc_len;
int rc, i, tqm_rings;
- if (!BNXT_CHIP_THOR(bp) ||
+ if (!BNXT_CHIP_P5(bp) ||
bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
BNXT_VF(bp) ||
bp->ctx)
-/* THOR does not support ring groups.
+/* P5 does not support ring groups.
* But we will use the array to save RSS context IDs.
*/
- if (BNXT_CHIP_THOR(bp)) {
- bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR;
+ if (BNXT_CHIP_P5(bp)) {
+ bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_P5;
} else if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
/* 1 ring is for default completion ring */
PMD_DRV_LOG(ERR, "Insufficient resource: Ring Group\n");
uint32_t map_idx,
uint32_t fid)
{
- if (BNXT_CHIP_THOR(bp)) {
+ if (BNXT_CHIP_P5(bp)) {
if (BNXT_PF(bp))
db->doorbell = (char *)bp->doorbell_base + 0x10000;
else
ring->fw_rx_ring_id = rxr->rx_ring_struct->fw_ring_id;
- if (BNXT_CHIP_THOR(bp)) {
+ if (BNXT_CHIP_P5(bp)) {
ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG;
hw_stats_ctx_id = cpr->hw_stats_ctx_id;
} else {
if (rc)
return rc;
- if (BNXT_CHIP_THOR(bp)) {
+ if (BNXT_CHIP_P5(bp)) {
/* Reconfigure default receive ring and MRU. */
bnxt_hwrm_vnic_cfg(bp, rxq->vnic);
}
if (bp->rx_queues[i]->rx_started)
active_queue_cnt++;
- if (BNXT_CHIP_THOR(bp)) {
+ if (BNXT_CHIP_P5(bp)) {
/*
-* For Thor, we need to ensure that the VNIC default receive
+* For P5, we need to ensure that the VNIC default receive
* ring corresponds to an active receive queue. When no queue
uint16_t cp_cons, ag_cons;
struct rx_pkt_cmpl *rxcmp;
struct rte_mbuf *last = mbuf;
- bool is_thor_tpa = tpa_info && BNXT_CHIP_THOR(rxq->bp);
+ bool is_p5_tpa = tpa_info && BNXT_CHIP_P5(rxq->bp);
for (i = 0; i < agg_buf; i++) {
struct rte_mbuf **ag_buf;
struct rte_mbuf *ag_mbuf;
- if (is_thor_tpa) {
+ if (is_p5_tpa) {
rxcmp = (void *)&tpa_info->agg_arr[i];
} else {
*tmp_raw_cons = NEXT_RAW_CMP(*tmp_raw_cons);
uint8_t payload_offset;
struct bnxt_tpa_info *tpa_info;
- if (BNXT_CHIP_THOR(rxq->bp)) {
+ if (BNXT_CHIP_P5(rxq->bp)) {
struct rx_tpa_v2_end_cmpl *th_tpa_end;
struct rx_tpa_v2_end_cmpl_hi *th_tpa_end1;
#ifdef RTE_LIBRTE_IEEE1588
static void
-bnxt_get_rx_ts_thor(struct bnxt *bp, uint32_t rx_ts_cmpl)
+bnxt_get_rx_ts_p5(struct bnxt *bp, uint32_t rx_ts_cmpl)
{
uint64_t systime_cycles = 0;
- if (!BNXT_CHIP_THOR(bp))
+ if (!BNXT_CHIP_P5(bp))
return;
-/* On Thor, Rx timestamps are provided directly in the
+/* On P5, Rx timestamps are provided directly in the
RX_PKT_CMPL_FLAGS_MASK) ==
RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP)) {
mbuf->ol_flags |= PKT_RX_IEEE1588_PTP | PKT_RX_IEEE1588_TMST;
- bnxt_get_rx_ts_thor(rxq->bp, rxcmp1->reorder);
+ bnxt_get_rx_ts_p5(rxq->bp, rxcmp1->reorder);
}
#endif
static inline uint16_t bnxt_tpa_start_agg_id(struct bnxt *bp,
struct rx_tpa_start_cmpl *cmp)
{
- if (BNXT_CHIP_THOR(bp))
+ if (BNXT_CHIP_P5(bp))
return BNXT_TPA_START_AGG_ID_TH(cmp);
else
return BNXT_TPA_START_AGG_ID_PRE_TH(cmp);
entry_length = HW_HASH_KEY_SIZE +
BNXT_MAX_MC_ADDRS * RTE_ETHER_ADDR_LEN;
- if (BNXT_CHIP_THOR(bp))
- rss_table_size = BNXT_RSS_TBL_SIZE_THOR *
+ if (BNXT_CHIP_P5(bp))
+ rss_table_size = BNXT_RSS_TBL_SIZE_P5 *
2 * sizeof(*vnic->rss_table);
else
rss_table_size = HW_HASH_INDEX_SIZE * sizeof(*vnic->rss_table);
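For reference, the two branches work out as follows (assuming 16-bit rss_table entries, per the vnic->rss_table declaration):

/* P5:     512 slots * 2 entries * sizeof(uint16_t) = 2048 bytes
 * pre-P5: HW_HASH_INDEX_SIZE entries * sizeof(uint16_t) bytes
 */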
bnxt_ulp_devid_get(struct bnxt *bp,
enum bnxt_ulp_device_id *ulp_dev_id)
{
- if (BNXT_CHIP_THOR(bp))
+ if (BNXT_CHIP_P5(bp))
return -EINVAL;
/* Assuming Whitney */
*ulp_dev_id = BNXT_ULP_DEVICE_ID_WH_PLUS;