HWRM_CHECK_RESULT();
- if (!BNXT_CHIP_THOR(bp) &&
+ if (!BNXT_CHIP_P5(bp) &&
!(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))
return 0;
if (!ptp)
return -ENOMEM;
- if (!BNXT_CHIP_THOR(bp)) {
+ if (!BNXT_CHIP_P5(bp)) {
ptp->rx_regs[BNXT_PTP_RX_TS_L] =
rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
ptp->rx_regs[BNXT_PTP_RX_TS_H] =
sizeof(bp->pf->vf_info[0]) * new_max_vfs, 0);
if (bp->pf->vf_info == NULL) {
PMD_DRV_LOG(ERR, "Alloc vf info fail\n");
+ HWRM_UNLOCK();
return -ENOMEM;
}
bp->pf->max_vfs = new_max_vfs;
bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
- if (!BNXT_CHIP_THOR(bp) && !bp->pdev->max_vfs)
+ if (!BNXT_CHIP_P5(bp) && !bp->pdev->max_vfs)
bp->max_l2_ctx += bp->max_rx_em_flows;
/* TODO: For now, do not support VMDq/RFS on VFs. */
if (BNXT_PF(bp)) {
* So use the value provided by func_qcaps.
*/
bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
- if (!BNXT_CHIP_THOR(bp) && !bp->pdev->max_vfs)
+ if (!BNXT_CHIP_P5(bp) && !bp->pdev->max_vfs)
bp->max_l2_ctx += bp->max_rx_em_flows;
bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
HWRM_PORT_PHY_CFG_IN_EN_FORCE_PAM4_LINK_SPEED;
req.force_pam4_link_speed =
rte_cpu_to_le_16(conf->link_speed);
- }
- req.force_link_speed =
+ } else {
+ req.force_link_speed =
rte_cpu_to_le_16(conf->link_speed);
+ }
}
/* AutoNeg - Advertise speeds specified. */
if (conf->auto_link_speed_mask &&
link_info->link_speed, link_info->auto_mode,
link_info->auto_link_speed, link_info->auto_link_speed_mask,
link_info->support_speeds, link_info->force_link_speed);
+ PMD_DRV_LOG(DEBUG, "Link Signal:%d,PAM::Auto:%x,Support:%x,Force:%x\n",
+ link_info->link_signal_mode,
+ link_info->auto_pam4_link_speeds,
+ link_info->support_pam4_speeds,
+ link_info->force_pam4_link_speed);
return rc;
}
req.ring_type = ring_type;
req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
- if (BNXT_CHIP_THOR(bp)) {
+ if (BNXT_CHIP_P5(bp)) {
mb_pool = bp->rx_queues[0]->mb_pool;
rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
RTE_PKTMBUF_HEADROOM;
HWRM_PREP(&req, HWRM_VNIC_CFG, BNXT_USE_CHIMP_MB);
- if (BNXT_CHIP_THOR(bp)) {
+ if (BNXT_CHIP_P5(bp)) {
int dflt_rxq = vnic->start_grp_id;
struct bnxt_rx_ring_info *rxr;
struct bnxt_cp_ring_info *cpr;
{
int rc = 0;
- if (BNXT_CHIP_THOR(bp)) {
+ if (BNXT_CHIP_P5(bp)) {
int j;
for (j = 0; j < vnic->num_lb_ctxts; j++) {
}
static int
-bnxt_hwrm_vnic_rss_cfg_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
int i;
int rc = 0;
if (!vnic->rss_table)
return 0;
- if (BNXT_CHIP_THOR(bp))
- return bnxt_hwrm_vnic_rss_cfg_thor(bp, vnic);
+ if (BNXT_CHIP_P5(bp))
+ return bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
- if (BNXT_CHIP_THOR(bp) && !bp->max_tpa_v2) {
+ if (BNXT_CHIP_P5(bp) && !bp->max_tpa_v2) {
if (enable)
PMD_DRV_LOG(ERR, "No HW support for LRO\n");
return -ENOTSUP;
ring = rxr->ag_ring_struct;
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
bnxt_hwrm_ring_free(bp, ring,
- BNXT_CHIP_THOR(bp) ?
+ BNXT_CHIP_P5(bp) ?
HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
HWRM_RING_FREE_INPUT_RING_TYPE_RX);
if (BNXT_HAS_RING_GRPS(bp))
memset(txr->tx_buf_ring, 0,
txr->tx_ring_struct->ring_size *
sizeof(*txr->tx_buf_ring));
- txr->tx_prod = 0;
- txr->tx_cons = 0;
+ txr->tx_raw_prod = 0;
+ txr->tx_raw_cons = 0;
}
if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
bnxt_free_cp_ring(bp, cpr);
goto port_phy_cfg;
autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
- if (BNXT_CHIP_THOR(bp) &&
+ if (BNXT_CHIP_P5(bp) &&
dev_conf->link_speeds == ETH_LINK_SPEED_40G) {
/* 40G is not supported as part of media auto detect.
* The speed should be forced and autoneg disabled
autoneg = 0;
}
+ /* No auto speeds and no auto_pam4_link. Disable autoneg */
+ if (bp->link_info->auto_link_speed == 0 &&
+ bp->link_info->link_signal_mode &&
+ bp->link_info->auto_pam4_link_speeds == 0)
+ autoneg = 0;
+
speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds,
bp->link_info->link_signal_mode);
link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
* to 40G until link comes up at new speed.
*/
if (autoneg == 1 &&
- !(!BNXT_CHIP_THOR(bp) &&
+ !(!BNXT_CHIP_P5(bp) &&
(bp->link_info->auto_link_speed ||
bp->link_info->force_link_speed))) {
link_req.phy_flags |=
link_req.link_speed = bp->link_info->force_link_speed;
else
link_req.link_speed = bp->link_info->auto_link_speed;
+ /* Auto PAM4 link speed is zero, but auto_link_speed is not
+ * zero. Use the auto_link_speed.
+ */
+ if (bp->link_info->auto_link_speed != 0 &&
+ bp->link_info->auto_pam4_link_speeds == 0)
+ link_req.link_speed = bp->link_info->auto_link_speed;
}
link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
link_req.auto_pause = bp->link_info->auto_pause;
return 0;
}
+/* Update the PF resource values based on how many resources
+ * got allocated to it.
+ */
+static int bnxt_update_max_resources_pf_only(struct bnxt *bp)
+{
+ struct hwrm_func_qcfg_input req = {0};
+ struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ /* Get the actual allocated values now */
+ HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
+ /* fid 0xffff queries the requesting function itself (the PF) */
+ req.fid = rte_cpu_to_le_16(0xffff);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+ /* NOTE(review): HWRM_CHECK_RESULT() presumably returns early on a
+  * firmware error while still holding no lock — confirm the macro
+  * releases/retains hwrm_cmd_lock consistently with HWRM_UNLOCK() below.
+  */
+ HWRM_CHECK_RESULT();
+
+ /* Overwrite the max_* caps with the values firmware actually
+  * allocated to this PF, so later ring/ctx sizing uses real limits.
+  */
+ bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
+ bp->max_stat_ctx = rte_le_to_cpu_16(resp->alloc_stat_ctx);
+ bp->max_cp_rings = rte_le_to_cpu_16(resp->alloc_cmpl_rings);
+ bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
+ bp->max_rx_rings = rte_le_to_cpu_16(resp->alloc_rx_rings);
+ bp->max_l2_ctx = rte_le_to_cpu_16(resp->alloc_l2_ctx);
+ bp->max_ring_grps = rte_le_to_cpu_16(resp->alloc_hw_ring_grps);
+ bp->max_vnics = rte_le_to_cpu_16(resp->alloc_vnics);
+
+ HWRM_UNLOCK();
+
+ return 0;
+}
+
int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
{
struct hwrm_func_qcfg_input req = {0};
HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
bp->pf->func_cfg_flags |=
HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
+
rc = bnxt_hwrm_pf_func_cfg(bp, &pf_resc);
- rc = __bnxt_hwrm_func_qcaps(bp);
+ if (rc)
+ return rc;
+
+ rc = bnxt_update_max_resources_pf_only(bp);
+
return rc;
}
return -ENOMEM;
dma_handle = rte_malloc_virt2iova(buf);
if (dma_handle == RTE_BAD_IOVA) {
+ rte_free(buf);
PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
dma_handle = rte_malloc_virt2iova(buf);
if (dma_handle == RTE_BAD_IOVA) {
+ rte_free(buf);
PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
dma_handle = rte_malloc_virt2iova(buf);
if (dma_handle == RTE_BAD_IOVA) {
+ rte_free(buf);
PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
}
static int
-bnxt_vnic_rss_configure_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+bnxt_vnic_rss_configure_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
req.ring_grp_tbl_addr =
rte_cpu_to_le_64(vnic->rss_table_dma_addr +
- i * BNXT_RSS_ENTRIES_PER_CTX_THOR *
+ i * BNXT_RSS_ENTRIES_PER_CTX_P5 *
2 * sizeof(*ring_tbl));
req.hash_key_tbl_addr =
rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
{
unsigned int rss_idx, fw_idx, i;
- if (!(vnic->rss_table && vnic->hash_type))
+ if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
return 0;
- if (BNXT_CHIP_THOR(bp))
- return bnxt_vnic_rss_configure_thor(bp, vnic);
-
- if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+ if (!(vnic->rss_table && vnic->hash_type))
return 0;
- if (vnic->rss_table && vnic->hash_type) {
- /*
- * Fill the RSS hash & redirection table with
- * ring group ids for all VNICs
- */
- for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
- rss_idx++, fw_idx++) {
- for (i = 0; i < bp->rx_cp_nr_rings; i++) {
- fw_idx %= bp->rx_cp_nr_rings;
- if (vnic->fw_grp_ids[fw_idx] !=
- INVALID_HW_RING_ID)
- break;
- fw_idx++;
- }
- if (i == bp->rx_cp_nr_rings)
- return 0;
- vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
+ if (BNXT_CHIP_P5(bp))
+ return bnxt_vnic_rss_configure_p5(bp, vnic);
+
+ /*
+ * Fill the RSS hash & redirection table with
+ * ring group ids for all VNICs
+ */
+ for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
+ rss_idx++, fw_idx++) {
+ for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+ fw_idx %= bp->rx_cp_nr_rings;
+ if (vnic->fw_grp_ids[fw_idx] != INVALID_HW_RING_ID)
+ break;
+ fw_idx++;
}
- return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
+
+ if (i == bp->rx_cp_nr_rings)
+ return 0;
+
+ vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
}
- return 0;
+ return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
}
static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
req->flags = rte_cpu_to_le_16(flags);
}
-static int bnxt_hwrm_set_coal_params_thor(struct bnxt *bp,
+static int bnxt_hwrm_set_coal_params_p5(struct bnxt *bp,
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *agg_req)
{
struct hwrm_ring_aggint_qcaps_input req = {0};
int rc;
/* Set ring coalesce parameters only for 100G NICs */
- if (BNXT_CHIP_THOR(bp)) {
- if (bnxt_hwrm_set_coal_params_thor(bp, &req))
+ if (BNXT_CHIP_P5(bp)) {
+ if (bnxt_hwrm_set_coal_params_p5(bp, &req))
return -1;
} else if (bnxt_stratus_device(bp)) {
bnxt_hwrm_set_coal_params(coal, &req);
int total_alloc_len;
int rc, i, tqm_rings;
- if (!BNXT_CHIP_THOR(bp) ||
+ if (!BNXT_CHIP_P5(bp) ||
bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
BNXT_VF(bp) ||
bp->ctx)
return 0;
}
-int bnxt_hwrm_cfa_vfr_alloc(struct bnxt *bp, uint16_t vf_idx)
-{
- struct hwrm_cfa_vfr_alloc_output *resp = bp->hwrm_cmd_resp_addr;
- struct hwrm_cfa_vfr_alloc_input req = {0};
- int rc;
-
- if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
- PMD_DRV_LOG(DEBUG,
- "Not a PF or trusted VF. Command not supported\n");
- return 0;
- }
-
- HWRM_PREP(&req, HWRM_CFA_VFR_ALLOC, BNXT_USE_CHIMP_MB);
- req.vf_id = rte_cpu_to_le_16(vf_idx);
- snprintf(req.vfr_name, sizeof(req.vfr_name), "%svfr%d",
- bp->eth_dev->data->name, vf_idx);
-
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
- HWRM_CHECK_RESULT();
-
- HWRM_UNLOCK();
- PMD_DRV_LOG(DEBUG, "VFR %d allocated\n", vf_idx);
- return rc;
-}
-
-int bnxt_hwrm_cfa_vfr_free(struct bnxt *bp, uint16_t vf_idx)
-{
- struct hwrm_cfa_vfr_free_output *resp = bp->hwrm_cmd_resp_addr;
- struct hwrm_cfa_vfr_free_input req = {0};
- int rc;
-
- if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
- PMD_DRV_LOG(DEBUG,
- "Not a PF or trusted VF. Command not supported\n");
- return 0;
- }
-
- HWRM_PREP(&req, HWRM_CFA_VFR_FREE, BNXT_USE_CHIMP_MB);
- req.vf_id = rte_cpu_to_le_16(vf_idx);
- snprintf(req.vfr_name, sizeof(req.vfr_name), "%svfr%d",
- bp->eth_dev->data->name, vf_idx);
-
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
- HWRM_CHECK_RESULT();
- HWRM_UNLOCK();
- PMD_DRV_LOG(DEBUG, "VFR %d freed\n", vf_idx);
- return rc;
-}
-
int bnxt_hwrm_first_vf_id_query(struct bnxt *bp, uint16_t fid,
uint16_t *first_vf_id)
{
snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%d",
bp->eth_dev->data->name, rep_bp->vf_id);
- req.pf_b_id = rte_cpu_to_le_32(rep_bp->rep_based_pf);
- req.vf_b_id = rte_cpu_to_le_16(rep_bp->vf_id);
+ req.pf_b_id = rep_bp->parent_pf_idx;
+ req.vf_b_id = BNXT_REP_PF(rep_bp) ? rte_cpu_to_le_16(((uint16_t)-1)) :
+ rte_cpu_to_le_16(rep_bp->vf_id);
req.vf_a_id = rte_cpu_to_le_16(bp->fw_fid);
req.host_b_id = 1; /* TBD - Confirm if this is OK */
HWRM_PREP(&req, HWRM_CFA_PAIR_FREE, BNXT_USE_CHIMP_MB);
snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%d",
bp->eth_dev->data->name, rep_bp->vf_id);
- req.pf_b_id = rte_cpu_to_le_32(rep_bp->rep_based_pf);
- req.vf_id = rte_cpu_to_le_16(rep_bp->vf_id);
+ req.pf_b_id = rep_bp->parent_pf_idx;
req.pair_mode = HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_TRUFLOW;
-
+ req.vf_id = BNXT_REP_PF(rep_bp) ? rte_cpu_to_le_16(((uint16_t)-1)) :
+ rte_cpu_to_le_16(rep_bp->vf_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();