/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2018 Broadcom
+ * Copyright(c) 2014-2021 Broadcom
* All rights reserved.
*/
#define HWRM_SPEC_CODE_1_8_3 0x10803
#define HWRM_VERSION_1_9_1 0x10901
#define HWRM_VERSION_1_9_2 0x10903
-
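+/* HWRM version 1.10.2.13, encoded one byte per component:
+ * 0x01 (major) 0x0a (minor) 0x02 (update) 0x0d (patch).
+ */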
+#define HWRM_VERSION_1_10_2_13 0x10a020d
struct bnxt_plcmodes_cfg {
uint32_t flags;
uint16_t jumbo_thresh;
if (size <= 1 << 30)
return 30;
PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
- return sizeof(void *) * 8 - 1;
+ return sizeof(int) * 8 - 1;
}
static int page_roundup(size_t size)
uint8_t *pg_attr,
uint64_t *pg_dir)
{
+ if (rmem->nr_pages == 0)
+ return;
+
if (rmem->nr_pages > 1) {
*pg_attr = 1;
*pg_dir = rte_cpu_to_le_64(rmem->pg_tbl_map);
}
}
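+
+/* Locate the Tx or Rx/AGG completion ring whose firmware ring id
+ * matches rid. Returns NULL when no queue owns the given ring id.
+ */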
+static struct bnxt_cp_ring_info*
+bnxt_get_ring_info_by_id(struct bnxt *bp, uint16_t rid, uint16_t type)
+{
+ struct bnxt_cp_ring_info *cp_ring = NULL;
+ uint16_t i;
+
+ switch (type) {
+	case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
+		/* FALLTHROUGH */
+	case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG:
+ for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+ struct bnxt_rx_queue *rxq = bp->rx_queues[i];
+
+ if (rxq->cp_ring->cp_ring_struct->fw_ring_id ==
+ rte_cpu_to_le_16(rid)) {
+ return rxq->cp_ring;
+ }
+ }
+ break;
+ case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
+ for (i = 0; i < bp->tx_cp_nr_rings; i++) {
+ struct bnxt_tx_queue *txq = bp->tx_queues[i];
+
+ if (txq->cp_ring->cp_ring_struct->fw_ring_id ==
+ rte_cpu_to_le_16(rid)) {
+ return txq->cp_ring;
+ }
+ }
+ break;
+ default:
+ return cp_ring;
+ }
+ return cp_ring;
+}
+
+/* Complete a sweep of the CQ ring for the corresponding Tx/Rx/AGG ring.
+ * If CMPL_BASE_TYPE_HWRM_DONE is not encountered by the last pass before
+ * the timeout, force the done bit so the cleanup can proceed.
+ * If cpr is NULL, do nothing: the HWRM command is not for a Tx/Rx/AGG
+ * ring cleanup.
+ */
+static int
+bnxt_check_cq_hwrm_done(struct bnxt_cp_ring_info *cpr,
+ bool tx, bool rx, bool timeout)
+{
+ int done = 0;
+
+ if (cpr != NULL) {
+ if (tx)
+ done = bnxt_flush_tx_cmp(cpr);
+
+ if (rx)
+ done = bnxt_flush_rx_cmp(cpr);
+
+ if (done)
+ PMD_DRV_LOG(DEBUG, "HWRM DONE for %s ring\n",
+ rx ? "Rx" : "Tx");
+
+		/* We are about to time out and still have not seen the
+		 * HWRM DONE for the ring free. Force the cleanup.
+		 */
+ if (!done && timeout) {
+ done = 1;
+ PMD_DRV_LOG(DEBUG, "Timing out for %s ring\n",
+ rx ? "Rx" : "Tx");
+ }
+ } else {
+ /* This HWRM command is not for a Tx/Rx/AGG ring cleanup.
+ * Otherwise the cpr would have been valid. So do nothing.
+ */
+ done = 1;
+ }
+
+ return done;
+}
+
/*
* HWRM Functions (sent to HWRM)
* These are named bnxt_hwrm_*() and return 0 on success or -110 if the
GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
uint16_t mb_trigger_offset = use_kong_mb ?
GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;
+ struct bnxt_cp_ring_info *cpr = NULL;
+ bool is_rx = false;
+ bool is_tx = false;
uint32_t timeout;
/* Do not send HWRM commands to firmware in error state */
timeout = bp->hwrm_cmd_timeout;
+ /* Update the message length for backing store config for new FW. */
+ if (bp->fw_ver >= HWRM_VERSION_1_10_2_13 &&
+ rte_cpu_to_le_16(req->req_type) == HWRM_FUNC_BACKING_STORE_CFG)
+ msg_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
+
if (bp->flags & BNXT_FLAG_SHORT_CMD ||
msg_len > bp->max_req_len) {
void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
*/
rte_io_mb();
+ /* Check ring flush is done.
+ * This is valid only for Tx and Rx rings (including AGG rings).
+ * The Tx and Rx rings should be freed once the HW confirms all
+ * the internal buffers and BDs associated with the rings are
+ * consumed and the corresponding DMA is handled.
+ */
+ if (rte_cpu_to_le_16(req->cmpl_ring) != INVALID_HW_RING_ID) {
+		/* Check if the TxCQ matches. If that fails, check if the
+		 * RxCQ matches. If neither matches, is_rx and is_tx remain
+		 * false.
+		 */
+ cpr = bnxt_get_ring_info_by_id(bp, req->cmpl_ring,
+ HWRM_RING_FREE_INPUT_RING_TYPE_TX);
+ if (cpr == NULL) {
+ /* Not a TxCQ. Check if the RxCQ matches. */
+			cpr = bnxt_get_ring_info_by_id(bp, req->cmpl_ring,
+					HWRM_RING_FREE_INPUT_RING_TYPE_RX);
+ if (cpr != NULL)
+ is_rx = true;
+ } else {
+ is_tx = true;
+ }
+ }
+
/* Poll for the valid bit */
for (i = 0; i < timeout; i++) {
+ int done;
+
+ done = bnxt_check_cq_hwrm_done(cpr, is_tx, is_rx,
+ i == timeout - 1);
/* Sanity check on the resp->resp_len */
- rte_cio_rmb();
+ rte_io_rmb();
if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
/* Last byte of resp contains the valid key */
valid = (uint8_t *)resp + resp->resp_len - 1;
- if (*valid == HWRM_RESP_VALID_KEY)
+ if (*valid == HWRM_RESP_VALID_KEY && done)
break;
}
rte_delay_us(1);
HWRM_CHECK_RESULT();
- if (!BNXT_CHIP_THOR(bp) &&
+ if (!BNXT_CHIP_P5(bp) &&
!(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))
return 0;
if (!ptp)
return -ENOMEM;
- if (!BNXT_CHIP_THOR(bp)) {
+ if (!BNXT_CHIP_P5(bp)) {
ptp->rx_regs[BNXT_PTP_RX_TS_L] =
rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
ptp->rx_regs[BNXT_PTP_RX_TS_H] =
return 0;
}
-void bnxt_hwrm_free_vf_info(struct bnxt *bp)
+void bnxt_free_vf_info(struct bnxt *bp)
{
int i;
+ if (bp->pf == NULL)
+ return;
+
+ if (bp->pf->vf_info == NULL)
+ return;
+
for (i = 0; i < bp->pf->max_vfs; i++) {
rte_free(bp->pf->vf_info[i].vlan_table);
bp->pf->vf_info[i].vlan_table = NULL;
bp->pf->vf_info = NULL;
}
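+
+/* Allocate per-VF state (VLAN and VLAN AS tables, filter list) for
+ * max_vfs VFs. On failure, any partially allocated state is freed.
+ */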
+static int bnxt_alloc_vf_info(struct bnxt *bp, uint16_t max_vfs)
+{
+ struct bnxt_child_vf_info *vf_info = bp->pf->vf_info;
+ int i;
+
+ if (vf_info)
+ bnxt_free_vf_info(bp);
+
+ vf_info = rte_zmalloc("bnxt_vf_info", sizeof(*vf_info) * max_vfs, 0);
+ if (vf_info == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc vf info\n");
+ return -ENOMEM;
+ }
+
+ bp->pf->max_vfs = max_vfs;
+ for (i = 0; i < max_vfs; i++) {
+ vf_info[i].fid = bp->pf->first_vf_id + i;
+ vf_info[i].vlan_table = rte_zmalloc("VF VLAN table",
+ getpagesize(), getpagesize());
+ if (vf_info[i].vlan_table == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc VLAN table for VF %d\n", i);
+ goto err;
+ }
+ rte_mem_lock_page(vf_info[i].vlan_table);
+
+ vf_info[i].vlan_as_table = rte_zmalloc("VF VLAN AS table",
+ getpagesize(), getpagesize());
+ if (vf_info[i].vlan_as_table == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc VLAN AS table for VF %d\n", i);
+ goto err;
+ }
+ rte_mem_lock_page(vf_info[i].vlan_as_table);
+
+ STAILQ_INIT(&vf_info[i].filter);
+ }
+
+ bp->pf->vf_info = vf_info;
+
+ return 0;
+err:
+ bnxt_free_vf_info(bp);
+ return -ENOMEM;
+}
+
static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
int rc = 0;
struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
uint16_t new_max_vfs;
uint32_t flags;
- int i;
HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);
bp->pf->total_vfs = rte_le_to_cpu_16(resp->max_vfs);
new_max_vfs = bp->pdev->max_vfs;
if (new_max_vfs != bp->pf->max_vfs) {
- if (bp->pf->vf_info)
- bnxt_hwrm_free_vf_info(bp);
- bp->pf->vf_info = rte_zmalloc("bnxt_vf_info",
- sizeof(bp->pf->vf_info[0]) * new_max_vfs, 0);
- if (bp->pf->vf_info == NULL) {
- PMD_DRV_LOG(ERR, "Alloc vf info fail\n");
- return -ENOMEM;
- }
- bp->pf->max_vfs = new_max_vfs;
- for (i = 0; i < new_max_vfs; i++) {
- bp->pf->vf_info[i].fid =
- bp->pf->first_vf_id + i;
- bp->pf->vf_info[i].vlan_table =
- rte_zmalloc("VF VLAN table",
- getpagesize(),
- getpagesize());
- if (bp->pf->vf_info[i].vlan_table == NULL)
- PMD_DRV_LOG(ERR,
- "Fail to alloc VLAN table for VF %d\n",
- i);
- else
- rte_mem_lock_page(
- bp->pf->vf_info[i].vlan_table);
- bp->pf->vf_info[i].vlan_as_table =
- rte_zmalloc("VF VLAN AS table",
- getpagesize(),
- getpagesize());
- if (bp->pf->vf_info[i].vlan_as_table == NULL)
- PMD_DRV_LOG(ERR,
- "Alloc VLAN AS table for VF %d fail\n",
- i);
- else
- rte_mem_lock_page(
- bp->pf->vf_info[i].vlan_as_table);
- STAILQ_INIT(&bp->pf->vf_info[i].filter);
- }
+ rc = bnxt_alloc_vf_info(bp, new_max_vfs);
+ if (rc)
+ goto unlock;
}
}
bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
- if (!BNXT_CHIP_THOR(bp))
+ if (!BNXT_CHIP_P5(bp) && !bp->pdev->max_vfs)
bp->max_l2_ctx += bp->max_rx_em_flows;
/* TODO: For now, do not support VMDq/RFS on VFs. */
if (BNXT_PF(bp)) {
if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_HOT_RESET_CAPABLE)
bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
+ if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_LINK_ADMIN_STATUS_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_LINK_ADMIN;
+
+unlock:
HWRM_UNLOCK();
return rc;
int rc;
rc = __bnxt_hwrm_func_qcaps(bp);
+ if (rc == -ENOMEM)
+ return rc;
+
if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
rc = bnxt_alloc_ctx_mem(bp);
if (rc)
return rc;
+ /* On older FW,
+ * bnxt_hwrm_func_resc_qcaps can fail and cause init failure.
+ * But the error can be ignored. Return success.
+ */
rc = bnxt_hwrm_func_resc_qcaps(bp);
if (!rc)
bp->flags |= BNXT_FLAG_NEW_RM;
}
- /* On older FW,
- * bnxt_hwrm_func_resc_qcaps can fail and cause init failure.
- * But the error can be ignored. Return success.
- */
-
return 0;
}
int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
{
int rc = 0;
+ uint32_t flags;
struct hwrm_vnic_qcaps_input req = {.req_type = 0 };
struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
HWRM_CHECK_RESULT();
- if (rte_le_to_cpu_32(resp->flags) &
- HWRM_VNIC_QCAPS_OUTPUT_FLAGS_COS_ASSIGNMENT_CAP) {
+ flags = rte_le_to_cpu_32(resp->flags);
+
+ if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_COS_ASSIGNMENT_CAP) {
bp->vnic_cap_flags |= BNXT_VNIC_CAP_COS_CLASSIFY;
PMD_DRV_LOG(INFO, "CoS assignment capability enabled\n");
}
+ if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_OUTERMOST_RSS_CAP)
+ bp->vnic_cap_flags |= BNXT_VNIC_CAP_OUTER_RSS;
+
+ if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RX_CMPL_V2_CAP)
+ bp->vnic_cap_flags |= BNXT_VNIC_CAP_RX_CMPL_V2;
+
bp->max_tpa_v2 = rte_le_to_cpu_16(resp->max_aggs_supported);
HWRM_UNLOCK();
memcpy(req.vf_req_fwd, bp->pf->vf_req_fwd,
RTE_MIN(sizeof(req.vf_req_fwd),
sizeof(bp->pf->vf_req_fwd)));
-
- /*
- * PF can sniff HWRM API issued by VF. This can be set up by
- * linux driver and inherited by the DPDK PF driver. Clear
- * this HWRM sniffer list in FW because DPDK PF driver does
- * not support this.
- */
- flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE;
}
req.flags = rte_cpu_to_le_32(flags);
req.async_event_fwd[1] |=
rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DBG_NOTIFICATION);
- if (BNXT_VF_IS_TRUSTED(bp))
+ if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))
req.async_event_fwd[1] |=
rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE);
+ req.async_event_fwd[2] |=
+ rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ECHO_REQUEST);
+
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_CHECK_RESULT_SILENT();
- if (BNXT_VF(bp)) {
- bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
- bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
- bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
- bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
- bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
- /* func_resource_qcaps does not return max_rx_em_flows.
- * So use the value provided by func_qcaps.
- */
- bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
- if (!BNXT_CHIP_THOR(bp))
- bp->max_l2_ctx += bp->max_rx_em_flows;
- bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
- bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
- }
+ bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
+ bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
+ bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
+ bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
+ bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
+ /* func_resource_qcaps does not return max_rx_em_flows.
+ * So use the value provided by func_qcaps.
+ */
+ bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
+ if (!BNXT_CHIP_P5(bp) && !bp->pdev->max_vfs)
+ bp->max_l2_ctx += bp->max_rx_em_flows;
+ bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
+ bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
bp->max_nq_rings = rte_le_to_cpu_16(resp->max_msix);
bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
if (bp->vf_resv_strategy >
else
HWRM_CHECK_RESULT();
- PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
+ if (resp->flags & HWRM_VER_GET_OUTPUT_FLAGS_DEV_NOT_RDY) {
+ rc = -EAGAIN;
+ goto error;
+ }
+
+ PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d.%d\n",
resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
- resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
+ resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b,
+ resp->hwrm_fw_rsvd_8b);
bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
(resp->hwrm_fw_min_8b << 16) |
(resp->hwrm_fw_bld_8b << 8) |
if (bp->max_req_len > resp->max_req_win_len) {
PMD_DRV_LOG(ERR, "Unsupported request length\n");
rc = -EINVAL;
+ goto error;
}
+
+ bp->chip_num = rte_le_to_cpu_16(resp->chip_num);
+
bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
bp->hwrm_max_ext_req_len = rte_le_to_cpu_16(resp->max_ext_req_len);
if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);
- if (bp->max_resp_len != max_resp_len) {
- sprintf(type, "bnxt_hwrm_" PCI_PRI_FMT,
- bp->pdev->addr.domain, bp->pdev->addr.bus,
- bp->pdev->addr.devid, bp->pdev->addr.function);
-
- rte_free(bp->hwrm_cmd_resp_addr);
-
- bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
- if (bp->hwrm_cmd_resp_addr == NULL) {
- rc = -ENOMEM;
- goto error;
- }
- bp->hwrm_cmd_resp_dma_addr =
- rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
- if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
- PMD_DRV_LOG(ERR,
- "Unable to map response buffer to physical memory.\n");
- rc = -ENOMEM;
- goto error;
- }
- bp->max_resp_len = max_resp_len;
- }
+ RTE_VERIFY(max_resp_len <= bp->max_resp_len);
+ bp->max_resp_len = max_resp_len;
if ((dev_caps_cfg &
HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_COUNTERS;
}
-
error:
HWRM_UNLOCK();
return rc;
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
+ PMD_DRV_LOG(DEBUG, "Port %u: Unregistered with fw\n",
+ bp->eth_dev->data->port_id);
+
return rc;
}
}
req.flags = rte_cpu_to_le_32(conf->phy_flags);
- req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
- enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
/*
* Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
* any auto mode, even "none".
*/
if (!conf->link_speed) {
/* No speeds specified. Enable AutoNeg - all speeds */
+ enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
req.auto_mode =
HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
+ } else {
+ if (bp->link_info->link_signal_mode) {
+ enables |=
+ HWRM_PORT_PHY_CFG_IN_EN_FORCE_PAM4_LINK_SPEED;
+ req.force_pam4_link_speed =
+ rte_cpu_to_le_16(conf->link_speed);
+ } else {
+ req.force_link_speed =
+ rte_cpu_to_le_16(conf->link_speed);
+ }
}
/* AutoNeg - Advertise speeds specified. */
if (conf->auto_link_speed_mask &&
HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
req.auto_link_speed_mask =
conf->auto_link_speed_mask;
- enables |=
- HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
+ if (conf->auto_pam4_link_speeds) {
+ enables |=
+ HWRM_PORT_PHY_CFG_IN_EN_AUTO_PAM4_LINK_SPD_MASK;
+ req.auto_link_pam4_speed_mask =
+ conf->auto_pam4_link_speeds;
+ } else {
+ enables |=
+ HWRM_PORT_PHY_CFG_IN_EN_AUTO_LINK_SPEED_MASK;
+ }
}
+ if (conf->auto_link_speed &&
+ !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE))
+ enables |=
+ HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
req.auto_duplex = conf->duplex;
enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
link_info->phy_ver[0] = resp->phy_maj;
link_info->phy_ver[1] = resp->phy_min;
link_info->phy_ver[2] = resp->phy_bld;
-
+ link_info->link_signal_mode =
+ rte_le_to_cpu_16(resp->active_fec_signal_mode);
+ link_info->force_pam4_link_speed =
+ rte_le_to_cpu_16(resp->force_pam4_link_speed);
+ link_info->support_pam4_speeds =
+ rte_le_to_cpu_16(resp->support_pam4_speeds);
+ link_info->auto_pam4_link_speeds =
+ rte_le_to_cpu_16(resp->auto_pam4_link_speed_mask);
HWRM_UNLOCK();
PMD_DRV_LOG(DEBUG, "Link Speed:%d,Auto:%d:%x:%x,Support:%x,Force:%x\n",
link_info->link_speed, link_info->auto_mode,
link_info->auto_link_speed, link_info->auto_link_speed_mask,
link_info->support_speeds, link_info->force_link_speed);
+	PMD_DRV_LOG(DEBUG, "Link Signal:%d,PAM4:Auto:%x,Support:%x,Force:%x\n",
+ link_info->link_signal_mode,
+ link_info->auto_pam4_link_speeds,
+ link_info->support_pam4_speeds,
+ link_info->force_pam4_link_speed);
return rc;
}
int rc = 0;
struct hwrm_port_phy_qcaps_input req = {0};
struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_link_info *link_info = bp->link_info;
if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
return 0;
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
- HWRM_CHECK_RESULT();
+ HWRM_CHECK_RESULT_SILENT();
bp->port_cnt = resp->port_cnt;
+ if (resp->supported_speeds_auto_mode)
+ link_info->support_auto_speeds =
+ rte_le_to_cpu_16(resp->supported_speeds_auto_mode);
+ if (resp->supported_pam4_speeds_auto_mode)
+ link_info->support_pam4_auto_speeds =
+ rte_le_to_cpu_16(resp->supported_pam4_speeds_auto_mode);
HWRM_UNLOCK();
req.ring_type = ring_type;
req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
- if (BNXT_CHIP_THOR(bp)) {
+ if (BNXT_CHIP_P5(bp)) {
mb_pool = bp->rx_queues[0]->mb_pool;
rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
RTE_PKTMBUF_HEADROOM;
}
int bnxt_hwrm_ring_free(struct bnxt *bp,
- struct bnxt_ring *ring, uint32_t ring_type)
+ struct bnxt_ring *ring, uint32_t ring_type,
+ uint16_t cp_ring_id)
{
int rc;
struct hwrm_ring_free_input req = {.req_type = 0 };
req.ring_type = ring_type;
req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
+ req.cmpl_ring = rte_cpu_to_le_16(cp_ring_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_PREP(&req, HWRM_VNIC_CFG, BNXT_USE_CHIMP_MB);
- if (BNXT_CHIP_THOR(bp)) {
+ if (BNXT_CHIP_P5(bp)) {
int dflt_rxq = vnic->start_grp_id;
struct bnxt_rx_ring_info *rxr;
struct bnxt_cp_ring_info *cpr;
rte_cpu_to_le_16(cpr->cp_ring_struct->fw_ring_id);
enables = HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID |
HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID;
+ if (bp->vnic_cap_flags & BNXT_VNIC_CAP_RX_CMPL_V2) {
+ enables |= HWRM_VNIC_CFG_INPUT_ENABLES_RX_CSUM_V2_MODE;
+ req.rx_csum_v2_mode =
+ HWRM_VNIC_CFG_INPUT_RX_CSUM_V2_MODE_ALL_OK;
+ }
goto config_mru;
}
if (vnic->bd_stall)
req.flags |=
rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
- if (vnic->roce_dual)
- req.flags |= rte_cpu_to_le_32(
- HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
- if (vnic->roce_only)
- req.flags |= rte_cpu_to_le_32(
- HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
if (vnic->rss_dflt_cr)
req.flags |= rte_cpu_to_le_32(
HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
- vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
- HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
- vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
- HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
{
int rc = 0;
- if (BNXT_CHIP_THOR(bp)) {
+ if (BNXT_CHIP_P5(bp)) {
int j;
for (j = 0; j < vnic->num_lb_ctxts; j++) {
}
static int
-bnxt_hwrm_vnic_rss_cfg_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
int i;
int rc = 0;
if (!vnic->rss_table)
return 0;
- if (BNXT_CHIP_THOR(bp))
- return bnxt_hwrm_vnic_rss_cfg_thor(bp, vnic);
+ if (BNXT_CHIP_P5(bp))
+ return bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
- if (BNXT_CHIP_THOR(bp) && !bp->max_tpa_v2) {
+ if (BNXT_CHIP_P5(bp) && !bp->max_tpa_v2) {
if (enable)
PMD_DRV_LOG(ERR, "No HW support for LRO\n");
return -ENOTSUP;
struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
bnxt_hwrm_ring_free(bp, cp_ring,
- HWRM_RING_FREE_INPUT_RING_TYPE_NQ);
+ HWRM_RING_FREE_INPUT_RING_TYPE_NQ,
+ INVALID_HW_RING_ID);
cp_ring->fw_ring_id = INVALID_HW_RING_ID;
memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
sizeof(*cpr->cp_desc_ring));
struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
bnxt_hwrm_ring_free(bp, cp_ring,
- HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
+ HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL,
+ INVALID_HW_RING_ID);
cp_ring->fw_ring_id = INVALID_HW_RING_ID;
memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
sizeof(*cpr->cp_desc_ring));
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
bnxt_hwrm_ring_free(bp, ring,
- HWRM_RING_FREE_INPUT_RING_TYPE_RX);
+ HWRM_RING_FREE_INPUT_RING_TYPE_RX,
+ cpr->cp_ring_struct->fw_ring_id);
ring->fw_ring_id = INVALID_HW_RING_ID;
if (BNXT_HAS_RING_GRPS(bp))
bp->grp_info[queue_index].rx_fw_ring_id =
ring = rxr->ag_ring_struct;
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
bnxt_hwrm_ring_free(bp, ring,
- BNXT_CHIP_THOR(bp) ?
+ BNXT_CHIP_P5(bp) ?
HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
- HWRM_RING_FREE_INPUT_RING_TYPE_RX);
+ HWRM_RING_FREE_INPUT_RING_TYPE_RX,
+ cpr->cp_ring_struct->fw_ring_id);
if (BNXT_HAS_RING_GRPS(bp))
bp->grp_info[queue_index].ag_fw_ring_id =
INVALID_HW_RING_ID;
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
bnxt_hwrm_ring_free(bp, ring,
- HWRM_RING_FREE_INPUT_RING_TYPE_TX);
+ HWRM_RING_FREE_INPUT_RING_TYPE_TX,
+ cpr->cp_ring_struct->fw_ring_id);
ring->fw_ring_id = INVALID_HW_RING_ID;
memset(txr->tx_desc_ring, 0,
txr->tx_ring_struct->ring_size *
memset(txr->tx_buf_ring, 0,
txr->tx_ring_struct->ring_size *
sizeof(*txr->tx_buf_ring));
- txr->tx_prod = 0;
- txr->tx_cons = 0;
+ txr->tx_raw_prod = 0;
+ txr->tx_raw_cons = 0;
}
if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
bnxt_free_cp_ring(bp, cpr);
sprintf(type, "bnxt_hwrm_" PCI_PRI_FMT, pdev->addr.domain,
pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
- bp->max_resp_len = HWRM_MAX_RESP_LEN;
+ bp->max_resp_len = BNXT_PAGE_SIZE;
bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
if (bp->hwrm_cmd_resp_addr == NULL)
return -ENOMEM;
if (bp->vxlan_port_cnt)
bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
- bp->vxlan_port = 0;
+
if (bp->geneve_port_cnt)
bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
- bp->geneve_port = 0;
}
void bnxt_free_all_hwrm_resources(struct bnxt *bp)
static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
{
- return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
+ return !conf_link;
}
-static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
+static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed,
+ uint16_t pam4_link)
{
uint16_t eth_link_speed = 0;
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
break;
case ETH_LINK_SPEED_50G:
- eth_link_speed =
+ eth_link_speed = pam4_link ?
+ HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB :
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
break;
case ETH_LINK_SPEED_100G:
- eth_link_speed =
+ eth_link_speed = pam4_link ?
+ HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB :
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
break;
case ETH_LINK_SPEED_200G:
eth_link_speed =
- HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_200GB;
+ HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
break;
default:
PMD_DRV_LOG(ERR,
if (link_speed & ETH_LINK_SPEED_100G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
if (link_speed & ETH_LINK_SPEED_200G)
- ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_200GB;
+ ret |= HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
return ret;
}
int rc = 0;
struct bnxt_link_info *link_info = bp->link_info;
+ rc = bnxt_hwrm_port_phy_qcaps(bp);
+ if (rc)
+ PMD_DRV_LOG(ERR, "Get link config failed with rc %d\n", rc);
+
rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
if (rc) {
- PMD_DRV_LOG(ERR,
- "Get link config failed with rc %d\n", rc);
+ PMD_DRV_LOG(ERR, "Get link config failed with rc %d\n", rc);
goto exit;
}
+
if (link_info->link_speed)
link->link_speed =
bnxt_parse_hw_link_speed(link_info->link_speed);
goto port_phy_cfg;
autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
- if (BNXT_CHIP_THOR(bp) &&
+ if (BNXT_CHIP_P5(bp) &&
dev_conf->link_speeds == ETH_LINK_SPEED_40G) {
/* 40G is not supported as part of media auto detect.
* The speed should be forced and autoneg disabled
autoneg = 0;
}
- speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
+ /* No auto speeds and no auto_pam4_link. Disable autoneg */
+ if (bp->link_info->auto_link_speed == 0 &&
+ bp->link_info->link_signal_mode &&
+ bp->link_info->auto_pam4_link_speeds == 0)
+ autoneg = 0;
+
+ speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds,
+ bp->link_info->link_signal_mode);
link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
/* Autoneg can be done only when the FW allows.
* When user configures fixed speed of 40G and later changes to
* to 40G until link comes up at new speed.
*/
if (autoneg == 1 &&
- !(!BNXT_CHIP_THOR(bp) &&
+ !(!BNXT_CHIP_P5(bp) &&
(bp->link_info->auto_link_speed ||
bp->link_info->force_link_speed))) {
link_req.phy_flags |=
/* If user wants a particular speed try that first. */
if (speed)
link_req.link_speed = speed;
+ else if (bp->link_info->force_pam4_link_speed)
+ link_req.link_speed =
+ bp->link_info->force_pam4_link_speed;
+ else if (bp->link_info->auto_pam4_link_speeds)
+ link_req.link_speed =
+ bp->link_info->auto_pam4_link_speeds;
+ else if (bp->link_info->support_pam4_speeds)
+ link_req.link_speed =
+ bp->link_info->support_pam4_speeds;
else if (bp->link_info->force_link_speed)
link_req.link_speed = bp->link_info->force_link_speed;
else
link_req.link_speed = bp->link_info->auto_link_speed;
+ /* Auto PAM4 link speed is zero, but auto_link_speed is not
+ * zero. Use the auto_link_speed.
+ */
+ if (bp->link_info->auto_link_speed != 0 &&
+ bp->link_info->auto_pam4_link_speeds == 0)
+ link_req.link_speed = bp->link_info->auto_link_speed;
}
link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
link_req.auto_pause = bp->link_info->auto_pause;
break;
}
+ bp->legacy_db_size =
+ rte_le_to_cpu_16(resp->legacy_l2_db_size_kb) * 1024;
+
HWRM_UNLOCK();
return rc;
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
- HWRM_CHECK_RESULT();
+ HWRM_CHECK_RESULT_SILENT();
memcpy(bp->parent->mac_addr, resp->mac_address, RTE_ETHER_ADDR_LEN);
bp->parent->vnic = rte_le_to_cpu_16(resp->dflt_vnic_id);
/* FIXME: Temporary workaround - remove when firmware issue is fixed. */
if (bp->parent->vnic == 0) {
- PMD_DRV_LOG(ERR, "Error: parent VNIC unavailable.\n");
+ PMD_DRV_LOG(DEBUG, "parent VNIC unavailable.\n");
/* Use hard-coded values appropriate for current Wh+ fw. */
if (bp->parent->fid == 2)
bp->parent->vnic = 0x100;
return 0;
}
-static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
- struct hwrm_func_qcaps_output *qcaps)
-{
- qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
- memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
- sizeof(qcaps->mac_address));
- qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
- qcaps->max_rx_rings = fcfg->num_rx_rings;
- qcaps->max_tx_rings = fcfg->num_tx_rings;
- qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
- qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
- qcaps->max_vfs = 0;
- qcaps->first_vf_id = 0;
- qcaps->max_vnics = fcfg->num_vnics;
- qcaps->max_decap_records = 0;
- qcaps->max_encap_records = 0;
- qcaps->max_tx_wm_flows = 0;
- qcaps->max_tx_em_flows = 0;
- qcaps->max_rx_wm_flows = 0;
- qcaps->max_rx_em_flows = 0;
- qcaps->max_flow_id = 0;
- qcaps->max_mcast_filters = fcfg->num_mcast_filters;
- qcaps->max_sp_tx_rings = 0;
- qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
-}
-
-static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
+static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp,
+ struct bnxt_pf_resource_info *pf_resc)
{
struct hwrm_func_cfg_input req = {0};
struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
if (BNXT_HAS_RING_GRPS(bp)) {
enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
- req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
+ req.num_hw_ring_grps =
+ rte_cpu_to_le_16(pf_resc->num_hw_ring_grps);
} else if (BNXT_HAS_NQ(bp)) {
enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX;
req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);
req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
req.mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
- req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
- req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
- req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
- req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
- req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
- req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
+ req.num_rsscos_ctxs = rte_cpu_to_le_16(pf_resc->num_rsscos_ctxs);
+ req.num_stat_ctxs = rte_cpu_to_le_16(pf_resc->num_stat_ctxs);
+ req.num_cmpl_rings = rte_cpu_to_le_16(pf_resc->num_cp_rings);
+ req.num_tx_rings = rte_cpu_to_le_16(pf_resc->num_tx_rings);
+ req.num_rx_rings = rte_cpu_to_le_16(pf_resc->num_rx_rings);
+ req.num_l2_ctxs = rte_cpu_to_le_16(pf_resc->num_l2_ctxs);
req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
req.fid = rte_cpu_to_le_16(0xffff);
req.enables = rte_cpu_to_le_32(enables);
return rc;
}
-static void populate_vf_func_cfg_req(struct bnxt *bp,
- struct hwrm_func_cfg_input *req,
- int num_vfs)
+/* min values are the guaranteed resources and max values are subject
+ * to availability. The strategy for now is to keep both min & max
+ * values the same.
+ */
+static void
+bnxt_fill_vf_func_cfg_req_new(struct bnxt *bp,
+ struct hwrm_func_vf_resource_cfg_input *req,
+ int num_vfs)
+{
+ req->max_rsscos_ctx = rte_cpu_to_le_16(bp->max_rsscos_ctx /
+ (num_vfs + 1));
+ req->min_rsscos_ctx = req->max_rsscos_ctx;
+ req->max_stat_ctx = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
+ req->min_stat_ctx = req->max_stat_ctx;
+ req->max_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
+ (num_vfs + 1));
+ req->min_cmpl_rings = req->max_cmpl_rings;
+ req->max_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
+ req->min_tx_rings = req->max_tx_rings;
+ req->max_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
+ req->min_rx_rings = req->max_rx_rings;
+ req->max_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
+ req->min_l2_ctxs = req->max_l2_ctxs;
+ /* TODO: For now, do not support VMDq/RFS on VFs. */
+ req->max_vnics = rte_cpu_to_le_16(1);
+ req->min_vnics = req->max_vnics;
+ req->max_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
+ (num_vfs + 1));
+ req->min_hw_ring_grps = req->max_hw_ring_grps;
+ req->flags =
+ rte_cpu_to_le_16(HWRM_FUNC_VF_RESOURCE_CFG_INPUT_FLAGS_MIN_GUARANTEED);
+}
+
+static void
+bnxt_fill_vf_func_cfg_req_old(struct bnxt *bp,
+ struct hwrm_func_cfg_input *req,
+ int num_vfs)
{
req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
(num_vfs + 1));
}
-static void add_random_mac_if_needed(struct bnxt *bp,
- struct hwrm_func_cfg_input *cfg_req,
+/* Update the port wide resource values based on how many resources
+ * got allocated to the VF.
+ */
+static int bnxt_update_max_resources(struct bnxt *bp,
int vf)
{
- struct rte_ether_addr mac;
+ struct hwrm_func_qcfg_input req = {0};
+ struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc;
- if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
- return;
+ /* Get the actual allocated values now */
+ HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
+ req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+ HWRM_CHECK_RESULT();
- if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00", 6) == 0) {
- cfg_req->enables |=
- rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
- rte_eth_random_addr(cfg_req->dflt_mac_addr);
- bp->pf->vf_info[vf].random_mac = true;
- } else {
- memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes,
- RTE_ETHER_ADDR_LEN);
- }
+ bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
+ bp->max_stat_ctx -= rte_le_to_cpu_16(resp->alloc_stat_ctx);
+ bp->max_cp_rings -= rte_le_to_cpu_16(resp->alloc_cmpl_rings);
+ bp->max_tx_rings -= rte_le_to_cpu_16(resp->alloc_tx_rings);
+ bp->max_rx_rings -= rte_le_to_cpu_16(resp->alloc_rx_rings);
+ bp->max_l2_ctx -= rte_le_to_cpu_16(resp->alloc_l2_ctx);
+ bp->max_ring_grps -= rte_le_to_cpu_16(resp->alloc_hw_ring_grps);
+
+ HWRM_UNLOCK();
+
+ return 0;
}
-static int reserve_resources_from_vf(struct bnxt *bp,
- struct hwrm_func_cfg_input *cfg_req,
- int vf)
+/* Update the PF resource values based on how many resources
+ * got allocated to it.
+ */
+static int bnxt_update_max_resources_pf_only(struct bnxt *bp)
{
- struct hwrm_func_qcaps_input req = {0};
- struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_qcfg_input req = {0};
+ struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
int rc;
/* Get the actual allocated values now */
- HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);
- req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
+ HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
+ req.fid = rte_cpu_to_le_16(0xffff);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+ HWRM_CHECK_RESULT();
- if (rc) {
- PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
- copy_func_cfg_to_qcaps(cfg_req, resp);
- } else if (resp->error_code) {
- rc = rte_le_to_cpu_16(resp->error_code);
- PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
- copy_func_cfg_to_qcaps(cfg_req, resp);
- }
-
- bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
- bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
- bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
- bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
- bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
- bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
- /*
- * TODO: While not supporting VMDq with VFs, max_vnics is always
- * forced to 1 in this case
- */
- //bp->max_vnics -= rte_le_to_cpu_16(esp->max_vnics);
- bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
+ bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
+ bp->max_stat_ctx = rte_le_to_cpu_16(resp->alloc_stat_ctx);
+ bp->max_cp_rings = rte_le_to_cpu_16(resp->alloc_cmpl_rings);
+ bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
+ bp->max_rx_rings = rte_le_to_cpu_16(resp->alloc_rx_rings);
+ bp->max_l2_ctx = rte_le_to_cpu_16(resp->alloc_l2_ctx);
+ bp->max_ring_grps = rte_le_to_cpu_16(resp->alloc_hw_ring_grps);
+ bp->max_vnics = rte_le_to_cpu_16(resp->alloc_vnics);
HWRM_UNLOCK();
return rc;
}
-static int update_pf_resource_max(struct bnxt *bp)
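+/* Read back the resource counts currently allocated to the PF via
+ * HWRM_FUNC_QCFG.
+ */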
+static int bnxt_query_pf_resources(struct bnxt *bp,
+ struct bnxt_pf_resource_info *pf_resc)
{
struct hwrm_func_qcfg_input req = {0};
struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
- /* Only TX ring value reflects actual allocation? TODO */
- bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
+ pf_resc->num_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
+ pf_resc->num_rsscos_ctxs = rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
+ pf_resc->num_stat_ctxs = rte_le_to_cpu_16(resp->alloc_stat_ctx);
+ pf_resc->num_cp_rings = rte_le_to_cpu_16(resp->alloc_cmpl_rings);
+ pf_resc->num_rx_rings = rte_le_to_cpu_16(resp->alloc_rx_rings);
+ pf_resc->num_l2_ctxs = rte_le_to_cpu_16(resp->alloc_l2_ctx);
+ pf_resc->num_hw_ring_grps = rte_le_to_cpu_32(resp->alloc_hw_ring_grps);
bp->pf->evb_mode = resp->evb_mode;
HWRM_UNLOCK();
return rc;
}
-int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
-{
- int rc;
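+/* Split the port-wide resources between the PF and num_vfs VFs: each
+ * function gets an equal share, with the remainder going to the PF.
+ * With no VFs, the PF keeps everything.
+ */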
+static void
+bnxt_calculate_pf_resources(struct bnxt *bp,
+ struct bnxt_pf_resource_info *pf_resc,
+ int num_vfs)
+{
+ if (!num_vfs) {
+ pf_resc->num_rsscos_ctxs = bp->max_rsscos_ctx;
+ pf_resc->num_stat_ctxs = bp->max_stat_ctx;
+ pf_resc->num_cp_rings = bp->max_cp_rings;
+ pf_resc->num_tx_rings = bp->max_tx_rings;
+ pf_resc->num_rx_rings = bp->max_rx_rings;
+ pf_resc->num_l2_ctxs = bp->max_l2_ctx;
+ pf_resc->num_hw_ring_grps = bp->max_ring_grps;
- if (!BNXT_PF(bp)) {
- PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
- return -EINVAL;
+ return;
}
- rc = bnxt_hwrm_func_qcaps(bp);
- if (rc)
- return rc;
-
- bp->pf->func_cfg_flags &=
- ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
- HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
- bp->pf->func_cfg_flags |=
- HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
- rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
- rc = __bnxt_hwrm_func_qcaps(bp);
- return rc;
+ pf_resc->num_rsscos_ctxs = bp->max_rsscos_ctx / (num_vfs + 1) +
+ bp->max_rsscos_ctx % (num_vfs + 1);
+ pf_resc->num_stat_ctxs = bp->max_stat_ctx / (num_vfs + 1) +
+ bp->max_stat_ctx % (num_vfs + 1);
+ pf_resc->num_cp_rings = bp->max_cp_rings / (num_vfs + 1) +
+ bp->max_cp_rings % (num_vfs + 1);
+ pf_resc->num_tx_rings = bp->max_tx_rings / (num_vfs + 1) +
+ bp->max_tx_rings % (num_vfs + 1);
+ pf_resc->num_rx_rings = bp->max_rx_rings / (num_vfs + 1) +
+ bp->max_rx_rings % (num_vfs + 1);
+ pf_resc->num_l2_ctxs = bp->max_l2_ctx / (num_vfs + 1) +
+ bp->max_l2_ctx % (num_vfs + 1);
+ pf_resc->num_hw_ring_grps = bp->max_ring_grps / (num_vfs + 1) +
+ bp->max_ring_grps % (num_vfs + 1);
}
-int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
+int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
{
- struct hwrm_func_cfg_input req = {0};
- struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
- int i;
- size_t sz;
- int rc = 0;
- size_t req_buf_sz;
+ struct bnxt_pf_resource_info pf_resc = { 0 };
+ int rc;
if (!BNXT_PF(bp)) {
PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
}
rc = bnxt_hwrm_func_qcaps(bp);
-
if (rc)
return rc;
- bp->pf->active_vfs = num_vfs;
+ bnxt_calculate_pf_resources(bp, &pf_resc, 0);
- /*
- * First, configure the PF to only use one TX ring. This ensures that
- * there are enough rings for all VFs.
- *
- * If we don't do this, when we call func_alloc() later, we will lock
- * extra rings to the PF that won't be available during func_cfg() of
- * the VFs.
- *
- * This has been fixed with firmware versions above 20.6.54
- */
bp->pf->func_cfg_flags &=
~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
bp->pf->func_cfg_flags |=
- HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
- rc = bnxt_hwrm_pf_func_cfg(bp, 1);
+ HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
+
+ rc = bnxt_hwrm_pf_func_cfg(bp, &pf_resc);
if (rc)
return rc;
- /*
- * Now, create and register a buffer to hold forwarded VF requests
- */
+ rc = bnxt_update_max_resources_pf_only(bp);
+
+ return rc;
+}
+
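+/* Allocate, lock and register the buffer used to receive HWRM requests
+ * forwarded from the VFs.
+ */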
+static int
+bnxt_configure_vf_req_buf(struct bnxt *bp, int num_vfs)
+{
+ size_t req_buf_sz, sz;
+ int i, rc;
+
req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
bp->pf->vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
if (bp->pf->vf_req_buf == NULL) {
- rc = -ENOMEM;
- goto error_free;
+ return -ENOMEM;
}
+
for (sz = 0; sz < req_buf_sz; sz += getpagesize())
rte_mem_lock_page(((char *)bp->pf->vf_req_buf) + sz);
+
for (i = 0; i < num_vfs; i++)
bp->pf->vf_info[i].req_buf = ((char *)bp->pf->vf_req_buf) +
- (i * HWRM_MAX_REQ_LEN);
+ (i * HWRM_MAX_REQ_LEN);
- rc = bnxt_hwrm_func_buf_rgtr(bp);
+ rc = bnxt_hwrm_func_buf_rgtr(bp, num_vfs);
if (rc)
- goto error_free;
+ rte_free(bp->pf->vf_req_buf);
- populate_vf_func_cfg_req(bp, &req, num_vfs);
+ return rc;
+}
+
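+/* Configure VF resources using HWRM_FUNC_VF_RESOURCE_CFG, used when the
+ * firmware supports the new resource manager (BNXT_FLAG_NEW_RM).
+ */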
+static int
+bnxt_process_vf_resc_config_new(struct bnxt *bp, int num_vfs)
+{
+ struct hwrm_func_vf_resource_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_vf_resource_cfg_input req = {0};
+ int i, rc = 0;
+ bnxt_fill_vf_func_cfg_req_new(bp, &req, num_vfs);
bp->pf->active_vfs = 0;
for (i = 0; i < num_vfs; i++) {
- add_random_mac_if_needed(bp, &req, i);
+ HWRM_PREP(&req, HWRM_FUNC_VF_RESOURCE_CFG, BNXT_USE_CHIMP_MB);
+ req.vf_id = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
+ rc = bnxt_hwrm_send_message(bp,
+ &req,
+ sizeof(req),
+ BNXT_USE_CHIMP_MB);
+ if (rc || resp->error_code) {
+ PMD_DRV_LOG(ERR,
+ "Failed to initialize VF %d\n", i);
+ PMD_DRV_LOG(ERR,
+ "Not all VFs available. (%d, %d)\n",
+ rc, resp->error_code);
+ HWRM_UNLOCK();
+
+ /* If the first VF configuration itself fails,
+ * unregister the vf_fwd_request buffer.
+ */
+ if (i == 0)
+ bnxt_hwrm_func_buf_unrgtr(bp);
+ break;
+ }
+ HWRM_UNLOCK();
+
+ /* Update the max resource values based on the resource values
+ * allocated to the VF.
+ */
+ bnxt_update_max_resources(bp, i);
+ bp->pf->active_vfs++;
+ bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
+ }
+ return 0;
+}
+
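+/* Configure VF resources using the legacy HWRM_FUNC_CFG path for
+ * firmware without the new resource manager.
+ */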
+static int
+bnxt_process_vf_resc_config_old(struct bnxt *bp, int num_vfs)
+{
+ struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_cfg_input req = {0};
+ int i, rc;
+
+ bnxt_fill_vf_func_cfg_req_old(bp, &req, num_vfs);
+
+ bp->pf->active_vfs = 0;
+ for (i = 0; i < num_vfs; i++) {
HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
req.flags = rte_cpu_to_le_32(bp->pf->vf_info[i].func_cfg_flags);
req.fid = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
if (rc || resp->error_code) {
PMD_DRV_LOG(ERR,
- "Failed to initizlie VF %d\n", i);
+ "Failed to initialize VF %d\n", i);
PMD_DRV_LOG(ERR,
"Not all VFs available. (%d, %d)\n",
rc, resp->error_code);
HWRM_UNLOCK();
+
+ /* If the first VF configuration itself fails,
+ * unregister the vf_fwd_request buffer.
+ */
+ if (i == 0)
+ bnxt_hwrm_func_buf_unrgtr(bp);
break;
}
HWRM_UNLOCK();
- reserve_resources_from_vf(bp, &req, i);
+ /* Update the max resource values based on the resource values
+ * allocated to the VF.
+ */
+ bnxt_update_max_resources(bp, i);
bp->pf->active_vfs++;
bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
}
+ return 0;
+}
+
+static void
+bnxt_configure_vf_resources(struct bnxt *bp, int num_vfs)
+{
+ if (bp->flags & BNXT_FLAG_NEW_RM)
+ bnxt_process_vf_resc_config_new(bp, num_vfs);
+ else
+ bnxt_process_vf_resc_config_old(bp, num_vfs);
+}
+
+static void
+bnxt_update_pf_resources(struct bnxt *bp,
+ struct bnxt_pf_resource_info *pf_resc)
+{
+ bp->max_rsscos_ctx = pf_resc->num_rsscos_ctxs;
+ bp->max_stat_ctx = pf_resc->num_stat_ctxs;
+ bp->max_cp_rings = pf_resc->num_cp_rings;
+ bp->max_tx_rings = pf_resc->num_tx_rings;
+ bp->max_rx_rings = pf_resc->num_rx_rings;
+ bp->max_ring_grps = pf_resc->num_hw_ring_grps;
+}
+
+static int32_t
+bnxt_configure_pf_resources(struct bnxt *bp,
+ struct bnxt_pf_resource_info *pf_resc)
+{
/*
- * Now configure the PF to use "the rest" of the resources
- * We're using STD_TX_RING_MODE here though which will limit the TX
- * rings. This will allow QoS to function properly. Not setting this
+ * We're using STD_TX_RING_MODE here which will limit the TX
+ * rings. This will allow QoS to function properly. Not setting this
* will cause PF rings to break bandwidth settings.
*/
- rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
+ bp->pf->func_cfg_flags &=
+ ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
+ HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
+ bp->pf->func_cfg_flags |=
+ HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
+ return bnxt_hwrm_pf_func_cfg(bp, pf_resc);
+}
+
+int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
+{
+ struct bnxt_pf_resource_info pf_resc = { 0 };
+ int rc;
+
+ if (!BNXT_PF(bp)) {
+ PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
+ return -EINVAL;
+ }
+
+ rc = bnxt_hwrm_func_qcaps(bp);
if (rc)
- goto error_free;
+ return rc;
+
+ bnxt_calculate_pf_resources(bp, &pf_resc, num_vfs);
- rc = update_pf_resource_max(bp);
+ rc = bnxt_configure_pf_resources(bp, &pf_resc);
if (rc)
- goto error_free;
+ return rc;
- return rc;
+ rc = bnxt_query_pf_resources(bp, &pf_resc);
+ if (rc)
+ return rc;
-error_free:
- bnxt_hwrm_func_buf_unrgtr(bp);
- return rc;
+ /*
+ * Now, create and register a buffer to hold forwarded VF requests
+ */
+ rc = bnxt_configure_vf_req_buf(bp, num_vfs);
+ if (rc)
+ return rc;
+
+ bnxt_configure_vf_resources(bp, num_vfs);
+
+ bnxt_update_pf_resources(bp, &pf_resc);
+
+ return 0;
}
int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
+ if (tunnel_type ==
+ HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN) {
+ bp->vxlan_port = 0;
+ bp->vxlan_port_cnt = 0;
+ }
+
+ if (tunnel_type ==
+ HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE) {
+ bp->geneve_port = 0;
+ bp->geneve_port_cnt = 0;
+ }
+
return rc;
}
return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}
-int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
+int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp, int num_vfs)
{
- int rc = 0;
- struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
+ int rc;
HWRM_PREP(&req, HWRM_FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);
req.req_buf_num_pages = rte_cpu_to_le_16(1);
- req.req_buf_page_size = rte_cpu_to_le_16(
- page_getenum(bp->pf->active_vfs * HWRM_MAX_REQ_LEN));
+ req.req_buf_page_size =
+ rte_cpu_to_le_16(page_getenum(num_vfs * HWRM_MAX_REQ_LEN));
req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
req.req_buf_page_addr0 =
rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf->vf_req_buf));
if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
PMD_DRV_LOG(ERR,
"unable to map buffer address to physical memory\n");
+ HWRM_UNLOCK();
return -ENOMEM;
}
stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
- stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
- stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
+ stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_discard_pkts);
+ stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_error_pkts);
} else {
stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
req.port_id = bp->pf->port_id;
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
- HWRM_CHECK_RESULT();
+ HWRM_CHECK_RESULT_SILENT();
if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
unsigned int i;
return -ENOMEM;
dma_handle = rte_malloc_virt2iova(buf);
if (dma_handle == RTE_BAD_IOVA) {
+ rte_free(buf);
PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
dma_handle = rte_malloc_virt2iova(buf);
if (dma_handle == RTE_BAD_IOVA) {
+ rte_free(buf);
PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
dma_handle = rte_malloc_virt2iova(buf);
if (dma_handle == RTE_BAD_IOVA) {
+ rte_free(buf);
PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
}
static int
-bnxt_vnic_rss_configure_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+bnxt_vnic_rss_configure_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
req.ring_grp_tbl_addr =
rte_cpu_to_le_64(vnic->rss_table_dma_addr +
- i * BNXT_RSS_ENTRIES_PER_CTX_THOR *
+ i * BNXT_RSS_ENTRIES_PER_CTX_P5 *
2 * sizeof(*ring_tbl));
req.hash_key_tbl_addr =
rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
{
unsigned int rss_idx, fw_idx, i;
- if (!(vnic->rss_table && vnic->hash_type))
+ if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
return 0;
- if (BNXT_CHIP_THOR(bp))
- return bnxt_vnic_rss_configure_thor(bp, vnic);
-
- if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+ if (!(vnic->rss_table && vnic->hash_type))
return 0;
- if (vnic->rss_table && vnic->hash_type) {
- /*
- * Fill the RSS hash & redirection table with
- * ring group ids for all VNICs
- */
- for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
- rss_idx++, fw_idx++) {
- for (i = 0; i < bp->rx_cp_nr_rings; i++) {
- fw_idx %= bp->rx_cp_nr_rings;
- if (vnic->fw_grp_ids[fw_idx] !=
- INVALID_HW_RING_ID)
- break;
- fw_idx++;
- }
- if (i == bp->rx_cp_nr_rings)
- return 0;
- vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
+ if (BNXT_CHIP_P5(bp))
+ return bnxt_vnic_rss_configure_p5(bp, vnic);
+
+ /*
+ * Fill the RSS hash & redirection table with
+ * ring group ids for all VNICs
+ */
+ for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
+ rss_idx++, fw_idx++) {
+ for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+ fw_idx %= bp->rx_cp_nr_rings;
+ if (vnic->fw_grp_ids[fw_idx] != INVALID_HW_RING_ID)
+ break;
+ fw_idx++;
}
- return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
+
+ if (i == bp->rx_cp_nr_rings)
+ return 0;
+
+ vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
}
- return 0;
+ return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
}
static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
req->flags = rte_cpu_to_le_16(flags);
}
-static int bnxt_hwrm_set_coal_params_thor(struct bnxt *bp,
+static int bnxt_hwrm_set_coal_params_p5(struct bnxt *bp,
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *agg_req)
{
struct hwrm_ring_aggint_qcaps_input req = {0};
int rc;
/* Set ring coalesce parameters only for 100G NICs */
- if (BNXT_CHIP_THOR(bp)) {
- if (bnxt_hwrm_set_coal_params_thor(bp, &req))
+ if (BNXT_CHIP_P5(bp)) {
+ if (bnxt_hwrm_set_coal_params_p5(bp, &req))
return -1;
} else if (bnxt_stratus_device(bp)) {
bnxt_hwrm_set_coal_params(coal, &req);
int total_alloc_len;
int rc, i, tqm_rings;
- if (!BNXT_CHIP_THOR(bp) ||
+ if (!BNXT_CHIP_P5(bp) ||
bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
BNXT_VF(bp) ||
bp->ctx)
ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
- if (!ctx->tqm_fp_rings_count)
- ctx->tqm_fp_rings_count = bp->max_q;
+ ctx->tqm_fp_rings_count = ctx->tqm_fp_rings_count ?
+ RTE_MIN(ctx->tqm_fp_rings_count,
+ BNXT_MAX_TQM_FP_LEGACY_RINGS) :
+ bp->max_q;
+
+	/* Check if the ext ring count needs to be included.
+	 * The ext ring count is available only with newer FW, so we should
+	 * not look at the field on older FW.
+	 */
+ if (ctx->tqm_fp_rings_count == BNXT_MAX_TQM_FP_LEGACY_RINGS &&
+ bp->hwrm_max_ext_req_len >= BNXT_BACKING_STORE_CFG_LEN) {
+ ctx->tqm_fp_rings_count += resp->tqm_fp_rings_count_ext;
+ ctx->tqm_fp_rings_count = RTE_MIN(BNXT_MAX_TQM_FP_RINGS,
+ ctx->tqm_fp_rings_count);
+ }
tqm_rings = ctx->tqm_fp_rings_count + 1;
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
}
+ if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8) {
+		/* DPDK does not need to configure the MRAV and TIM types,
+		 * so skip over MRAV and TIM and configure TQM ring 8
+		 * directly.
+		 */
+ ctx_pg = ctx->tqm_mem[BNXT_MAX_TQM_LEGACY_RINGS];
+ req.tqm_ring8_num_entries = rte_cpu_to_le_16(ctx_pg->entries);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req.tqm_ring8_pg_size_tqm_ring_lvl,
+ &req.tqm_ring8_page_dir);
+ }
+
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
return 0;
}
-int bnxt_hwrm_cfa_vfr_alloc(struct bnxt *bp, uint16_t vf_idx)
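+/* Query HWRM_FUNC_QCAPS for the given fid and return its first VF id. */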
+int bnxt_hwrm_first_vf_id_query(struct bnxt *bp, uint16_t fid,
+ uint16_t *first_vf_id)
{
- struct hwrm_cfa_vfr_alloc_output *resp = bp->hwrm_cmd_resp_addr;
- struct hwrm_cfa_vfr_alloc_input req = {0};
+ int rc = 0;
+ struct hwrm_func_qcaps_input req = {.req_type = 0 };
+ struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);
+
+ req.fid = rte_cpu_to_le_16(fid);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+
+ HWRM_CHECK_RESULT();
+
+ if (first_vf_id)
+ *first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
+
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
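+/* Create a CFA REP2FN pair binding a representor port to its backing
+ * PF/VF function, passing the Rx/Tx queue and flow counter ids when
+ * the corresponding flags are valid.
+ */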
+int bnxt_hwrm_cfa_pair_alloc(struct bnxt *bp, struct bnxt_representor *rep_bp)
+{
+ struct hwrm_cfa_pair_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_cfa_pair_alloc_input req = {0};
int rc;
if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
return 0;
}
- HWRM_PREP(&req, HWRM_CFA_VFR_ALLOC, BNXT_USE_CHIMP_MB);
- req.vf_id = rte_cpu_to_le_16(vf_idx);
- snprintf(req.vfr_name, sizeof(req.vfr_name), "%svfr%d",
- bp->eth_dev->data->name, vf_idx);
+ HWRM_PREP(&req, HWRM_CFA_PAIR_ALLOC, BNXT_USE_CHIMP_MB);
+ req.pair_mode = HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_TRUFLOW;
+ snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%d",
+ bp->eth_dev->data->name, rep_bp->vf_id);
+
+ req.pf_b_id = rep_bp->parent_pf_idx;
+ req.vf_b_id = BNXT_REP_PF(rep_bp) ? rte_cpu_to_le_16(((uint16_t)-1)) :
+ rte_cpu_to_le_16(rep_bp->vf_id);
+ req.vf_a_id = rte_cpu_to_le_16(bp->fw_fid);
+ req.host_b_id = 1; /* TBD - Confirm if this is OK */
+
+ req.enables |= rep_bp->flags & BNXT_REP_Q_R2F_VALID ?
+ HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_AB_VALID : 0;
+ req.enables |= rep_bp->flags & BNXT_REP_Q_F2R_VALID ?
+ HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_BA_VALID : 0;
+ req.enables |= rep_bp->flags & BNXT_REP_FC_R2F_VALID ?
+ HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_AB_VALID : 0;
+ req.enables |= rep_bp->flags & BNXT_REP_FC_F2R_VALID ?
+ HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_BA_VALID : 0;
+
+ req.q_ab = rep_bp->rep_q_r2f;
+ req.q_ba = rep_bp->rep_q_f2r;
+ req.fc_ab = rep_bp->rep_fc_r2f;
+ req.fc_ba = rep_bp->rep_fc_f2r;
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
- PMD_DRV_LOG(DEBUG, "VFR %d allocated\n", vf_idx);
+ PMD_DRV_LOG(DEBUG, "%s %d allocated\n",
+ BNXT_REP_PF(rep_bp) ? "PFR" : "VFR", rep_bp->vf_id);
return rc;
}
-int bnxt_hwrm_cfa_vfr_free(struct bnxt *bp, uint16_t vf_idx)
+int bnxt_hwrm_cfa_pair_free(struct bnxt *bp, struct bnxt_representor *rep_bp)
{
- struct hwrm_cfa_vfr_free_output *resp = bp->hwrm_cmd_resp_addr;
- struct hwrm_cfa_vfr_free_input req = {0};
+ struct hwrm_cfa_pair_free_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_cfa_pair_free_input req = {0};
int rc;
if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
return 0;
}
- HWRM_PREP(&req, HWRM_CFA_VFR_FREE, BNXT_USE_CHIMP_MB);
- req.vf_id = rte_cpu_to_le_16(vf_idx);
- snprintf(req.vfr_name, sizeof(req.vfr_name), "%svfr%d",
- bp->eth_dev->data->name, vf_idx);
+ HWRM_PREP(&req, HWRM_CFA_PAIR_FREE, BNXT_USE_CHIMP_MB);
+ snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%d",
+ bp->eth_dev->data->name, rep_bp->vf_id);
+ req.pf_b_id = rep_bp->parent_pf_idx;
+ req.pair_mode = HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_TRUFLOW;
+ req.vf_id = BNXT_REP_PF(rep_bp) ? rte_cpu_to_le_16(((uint16_t)-1)) :
+ rte_cpu_to_le_16(rep_bp->vf_id);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+ PMD_DRV_LOG(DEBUG, "%s %d freed\n", BNXT_REP_PF(rep_bp) ? "PFR" : "VFR",
+ rep_bp->vf_id);
+ return rc;
+}
+
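+/* Query advanced flow management capabilities and choose between the
+ * RFS ring table index mode and the VNIC-based RFS fallback.
+ */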
+int bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(struct bnxt *bp)
+{
+ struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp =
+ bp->hwrm_cmd_resp_addr;
+ struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
+ uint32_t flags = 0;
+ int rc = 0;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_MGMT))
+ return 0;
+
+ if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
+ PMD_DRV_LOG(DEBUG,
+ "Not a PF or trusted VF. Command not supported\n");
+ return 0;
+ }
+ HWRM_PREP(&req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, BNXT_USE_CHIMP_MB);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+
HWRM_CHECK_RESULT();
+ flags = rte_le_to_cpu_32(resp->flags);
HWRM_UNLOCK();
- PMD_DRV_LOG(DEBUG, "VFR %d freed\n", vf_idx);
+
+ if (flags & HWRM_CFA_ADV_FLOW_MGNT_QCAPS_RFS_RING_TBL_IDX_V2_SUPPORTED)
+ bp->flags |= BNXT_FLAG_FLOW_CFA_RFS_RING_TBL_IDX_V2;
+ else
+ bp->flags |= BNXT_FLAG_RFS_NEEDS_VNIC;
+
+ return rc;
+}
+
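+/* Reply to a firmware ECHO_REQUEST async event by echoing the event
+ * data back through HWRM_FUNC_ECHO_RESPONSE.
+ */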
+int bnxt_hwrm_fw_echo_reply(struct bnxt *bp, uint32_t echo_req_data1,
+ uint32_t echo_req_data2)
+{
+ struct hwrm_func_echo_response_input req = {0};
+ struct hwrm_func_echo_response_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ HWRM_PREP(&req, HWRM_FUNC_ECHO_RESPONSE, BNXT_USE_CHIMP_MB);
+ req.event_data1 = rte_cpu_to_le_32(echo_req_data1);
+ req.event_data2 = rte_cpu_to_le_32(echo_req_data2);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
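+/* Lightweight HWRM_VER_GET used to poll for firmware readiness with a
+ * short timeout; returns -EAGAIN while the device reports DEV_NOT_RDY.
+ */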
+int bnxt_hwrm_poll_ver_get(struct bnxt *bp)
+{
+ struct hwrm_ver_get_input req = {.req_type = 0 };
+ struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc = 0;
+
+ bp->max_req_len = HWRM_MAX_REQ_LEN;
+ bp->max_resp_len = BNXT_PAGE_SIZE;
+ bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
+
+ HWRM_PREP(&req, HWRM_VER_GET, BNXT_USE_CHIMP_MB);
+ req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
+ req.hwrm_intf_min = HWRM_VERSION_MINOR;
+ req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+
+ HWRM_CHECK_RESULT_SILENT();
+
+ if (resp->flags & HWRM_VER_GET_OUTPUT_FLAGS_DEV_NOT_RDY)
+ rc = -EAGAIN;
+
+ HWRM_UNLOCK();
+
return rc;
}