/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2018 Broadcom
+ * Copyright(c) 2014-2021 Broadcom
* All rights reserved.
*/
#define HWRM_SPEC_CODE_1_8_3 0x10803
#define HWRM_VERSION_1_9_1 0x10901
#define HWRM_VERSION_1_9_2 0x10903
-
+#define HWRM_VERSION_1_10_2_13 0x10a020d
struct bnxt_plcmodes_cfg {
uint32_t flags;
uint16_t jumbo_thresh;
uint8_t *pg_attr,
uint64_t *pg_dir)
{
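+	/* An empty ring has no page table; leave pg_attr and pg_dir unset. */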
+ if (rmem->nr_pages == 0)
+ return;
+
if (rmem->nr_pages > 1) {
*pg_attr = 1;
*pg_dir = rte_cpu_to_le_64(rmem->pg_tbl_map);
}
}
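+
+/* Look up the Tx/Rx completion ring that owns FW ring ID "rid". */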
+static struct bnxt_cp_ring_info*
+bnxt_get_ring_info_by_id(struct bnxt *bp, uint16_t rid, uint16_t type)
+{
+ struct bnxt_cp_ring_info *cp_ring = NULL;
+ uint16_t i;
+
+ switch (type) {
+	case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
+		/* FALLTHROUGH */
+	case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG:
+ for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+ struct bnxt_rx_queue *rxq = bp->rx_queues[i];
+
+ if (rxq->cp_ring->cp_ring_struct->fw_ring_id ==
+ rte_cpu_to_le_16(rid)) {
+ return rxq->cp_ring;
+ }
+ }
+ break;
+ case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
+ for (i = 0; i < bp->tx_cp_nr_rings; i++) {
+ struct bnxt_tx_queue *txq = bp->tx_queues[i];
+
+ if (txq->cp_ring->cp_ring_struct->fw_ring_id ==
+ rte_cpu_to_le_16(rid)) {
+ return txq->cp_ring;
+ }
+ }
+ break;
+ default:
+ return cp_ring;
+ }
+ return cp_ring;
+}
+
+/* Complete a sweep of the CQ ring for the corresponding Tx/Rx/AGG ring.
+ * If CMPL_BASE_TYPE_HWRM_DONE has not been seen by the last pass before
+ * the timeout, force the done bit so that cleanup can proceed.
+ * If cpr is NULL, do nothing: the HWRM command is not for a Tx/Rx/AGG
+ * ring cleanup.
+ */
+static int
+bnxt_check_cq_hwrm_done(struct bnxt_cp_ring_info *cpr,
+ bool tx, bool rx, bool timeout)
+{
+ int done = 0;
+
+ if (cpr != NULL) {
+ if (tx)
+ done = bnxt_flush_tx_cmp(cpr);
+
+ if (rx)
+ done = bnxt_flush_rx_cmp(cpr);
+
+ if (done)
+ PMD_DRV_LOG(DEBUG, "HWRM DONE for %s ring\n",
+ rx ? "Rx" : "Tx");
+
+ /* We are about to timeout and still haven't seen the
+ * HWRM done for the Ring free. Force the cleanup.
+ */
+ if (!done && timeout) {
+ done = 1;
+ PMD_DRV_LOG(DEBUG, "Timing out for %s ring\n",
+ rx ? "Rx" : "Tx");
+ }
+ } else {
+		/* This HWRM command is not for a Tx/Rx/AGG ring cleanup;
+		 * otherwise the cpr would have been valid. Nothing to do.
+		 */
+ done = 1;
+ }
+
+ return done;
+}
+
/*
* HWRM Functions (sent to HWRM)
* These are named bnxt_hwrm_*() and return 0 on success or -110 if the
GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
uint16_t mb_trigger_offset = use_kong_mb ?
GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;
+ struct bnxt_cp_ring_info *cpr = NULL;
+ bool is_rx = false;
+ bool is_tx = false;
uint32_t timeout;
/* Do not send HWRM commands to firmware in error state */
timeout = bp->hwrm_cmd_timeout;
+ /* Update the message length for backing store config for new FW. */
+ if (bp->fw_ver >= HWRM_VERSION_1_10_2_13 &&
+ rte_cpu_to_le_16(req->req_type) == HWRM_FUNC_BACKING_STORE_CFG)
+ msg_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
+
if (bp->flags & BNXT_FLAG_SHORT_CMD ||
msg_len > bp->max_req_len) {
void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
*/
rte_io_mb();
+	/* Check that the ring flush is done.
+ * This is valid only for Tx and Rx rings (including AGG rings).
+ * The Tx and Rx rings should be freed once the HW confirms all
+ * the internal buffers and BDs associated with the rings are
+ * consumed and the corresponding DMA is handled.
+ */
+ if (rte_cpu_to_le_16(req->cmpl_ring) != INVALID_HW_RING_ID) {
+		/* Check whether the TxCQ matches; if not, check whether the
+		 * RxCQ matches. If neither matches, is_rx and is_tx both
+		 * stay false.
+		 */
+ cpr = bnxt_get_ring_info_by_id(bp, req->cmpl_ring,
+ HWRM_RING_FREE_INPUT_RING_TYPE_TX);
+ if (cpr == NULL) {
+ /* Not a TxCQ. Check if the RxCQ matches. */
+ cpr =
+ bnxt_get_ring_info_by_id(bp, req->cmpl_ring,
+ HWRM_RING_FREE_INPUT_RING_TYPE_RX);
+ if (cpr != NULL)
+ is_rx = true;
+ } else {
+ is_tx = true;
+ }
+ }
+
/* Poll for the valid bit */
for (i = 0; i < timeout; i++) {
+ int done;
+
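+		/* Flush ring-free completions on each pass; on the final
+		 * pass, force completion so that cleanup can proceed.
+		 */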
+ done = bnxt_check_cq_hwrm_done(cpr, is_tx, is_rx,
+ i == timeout - 1);
/* Sanity check on the resp->resp_len */
rte_io_rmb();
if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
/* Last byte of resp contains the valid key */
valid = (uint8_t *)resp + resp->resp_len - 1;
- if (*valid == HWRM_RESP_VALID_KEY)
+ if (*valid == HWRM_RESP_VALID_KEY && done)
break;
}
rte_delay_us(1);
mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
} else if (vnic->flags & BNXT_VNIC_INFO_MCAST) {
mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
- req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
- req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
+ req.num_mc_entries = rte_cpu_to_le_32(bp->nb_mc_addr);
+ req.mc_tbl_addr = rte_cpu_to_le_64(bp->mc_list_dma_addr);
}
if (vlan_table) {
if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
uint16_t j = dst_id - 1;
//TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
- if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
+ if ((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) &&
conf->pool_map[j].pools & (1UL << j)) {
PMD_DRV_LOG(DEBUG,
"Add vlan %u to vmdq pool %u\n",
HWRM_CHECK_RESULT();
- if (!BNXT_CHIP_THOR(bp) &&
- !(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))
- return 0;
+ if (BNXT_CHIP_P5(bp)) {
+ if (!(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_HWRM_ACCESS))
+ return 0;
+ } else {
+ if (!(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))
+ return 0;
+ }
if (resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_ONE_STEP_TX_TS)
bp->flags |= BNXT_FLAG_FW_CAP_ONE_STEP_TX_TS;
if (!ptp)
return -ENOMEM;
- if (!BNXT_CHIP_THOR(bp)) {
+ if (!BNXT_CHIP_P5(bp)) {
ptp->rx_regs[BNXT_PTP_RX_TS_L] =
rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
ptp->rx_regs[BNXT_PTP_RX_TS_H] =
return 0;
}
-void bnxt_hwrm_free_vf_info(struct bnxt *bp)
+void bnxt_free_vf_info(struct bnxt *bp)
{
int i;
+ if (bp->pf == NULL)
+ return;
+
+ if (bp->pf->vf_info == NULL)
+ return;
+
for (i = 0; i < bp->pf->max_vfs; i++) {
rte_free(bp->pf->vf_info[i].vlan_table);
bp->pf->vf_info[i].vlan_table = NULL;
bp->pf->vf_info = NULL;
}
+static int bnxt_alloc_vf_info(struct bnxt *bp, uint16_t max_vfs)
+{
+ struct bnxt_child_vf_info *vf_info = bp->pf->vf_info;
+ int i;
+
+ if (vf_info)
+ bnxt_free_vf_info(bp);
+
+ vf_info = rte_zmalloc("bnxt_vf_info", sizeof(*vf_info) * max_vfs, 0);
+ if (vf_info == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc vf info\n");
+ return -ENOMEM;
+ }
+
+ bp->pf->max_vfs = max_vfs;
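+	/* Each VF gets page-sized, page-aligned VLAN and VLAN-antispoof
+	 * tables, locked in memory for device access.
+	 */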
+ for (i = 0; i < max_vfs; i++) {
+ vf_info[i].fid = bp->pf->first_vf_id + i;
+ vf_info[i].vlan_table = rte_zmalloc("VF VLAN table",
+ getpagesize(), getpagesize());
+ if (vf_info[i].vlan_table == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc VLAN table for VF %d\n", i);
+ goto err;
+ }
+ rte_mem_lock_page(vf_info[i].vlan_table);
+
+ vf_info[i].vlan_as_table = rte_zmalloc("VF VLAN AS table",
+ getpagesize(), getpagesize());
+ if (vf_info[i].vlan_as_table == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc VLAN AS table for VF %d\n", i);
+ goto err;
+ }
+ rte_mem_lock_page(vf_info[i].vlan_as_table);
+
+ STAILQ_INIT(&vf_info[i].filter);
+ }
+
+ bp->pf->vf_info = vf_info;
+
+ return 0;
+err:
+ bnxt_free_vf_info(bp);
+ return -ENOMEM;
+}
+
static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
int rc = 0;
struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
uint16_t new_max_vfs;
uint32_t flags;
- int i;
HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);
bp->pf->total_vfs = rte_le_to_cpu_16(resp->max_vfs);
new_max_vfs = bp->pdev->max_vfs;
if (new_max_vfs != bp->pf->max_vfs) {
- if (bp->pf->vf_info)
- bnxt_hwrm_free_vf_info(bp);
- bp->pf->vf_info = rte_zmalloc("bnxt_vf_info",
- sizeof(bp->pf->vf_info[0]) * new_max_vfs, 0);
- if (bp->pf->vf_info == NULL) {
- PMD_DRV_LOG(ERR, "Alloc vf info fail\n");
- return -ENOMEM;
- }
- bp->pf->max_vfs = new_max_vfs;
- for (i = 0; i < new_max_vfs; i++) {
- bp->pf->vf_info[i].fid =
- bp->pf->first_vf_id + i;
- bp->pf->vf_info[i].vlan_table =
- rte_zmalloc("VF VLAN table",
- getpagesize(),
- getpagesize());
- if (bp->pf->vf_info[i].vlan_table == NULL)
- PMD_DRV_LOG(ERR,
- "Fail to alloc VLAN table for VF %d\n",
- i);
- else
- rte_mem_lock_page(
- bp->pf->vf_info[i].vlan_table);
- bp->pf->vf_info[i].vlan_as_table =
- rte_zmalloc("VF VLAN AS table",
- getpagesize(),
- getpagesize());
- if (bp->pf->vf_info[i].vlan_as_table == NULL)
- PMD_DRV_LOG(ERR,
- "Alloc VLAN AS table for VF %d fail\n",
- i);
- else
- rte_mem_lock_page(
- bp->pf->vf_info[i].vlan_as_table);
- STAILQ_INIT(&bp->pf->vf_info[i].filter);
- }
+ rc = bnxt_alloc_vf_info(bp, new_max_vfs);
+ if (rc)
+ goto unlock;
}
}
bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
- if (!BNXT_CHIP_THOR(bp) && !bp->pdev->max_vfs)
+ if (!BNXT_CHIP_P5(bp) && !bp->pdev->max_vfs)
bp->max_l2_ctx += bp->max_rx_em_flows;
- /* TODO: For now, do not support VMDq/RFS on VFs. */
- if (BNXT_PF(bp)) {
- if (bp->pf->max_vfs)
- bp->max_vnics = 1;
- else
- bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
- } else {
- bp->max_vnics = 1;
- }
+ bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
PMD_DRV_LOG(DEBUG, "Max l2_cntxts is %d vnics is %d\n",
bp->max_l2_ctx, bp->max_vnics);
bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
+ bp->max_mcast_addr = rte_le_to_cpu_32(resp->max_mcast_filters);
+
if (BNXT_PF(bp)) {
bp->pf->total_vnics = rte_le_to_cpu_16(resp->max_vnics);
if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_LINK_ADMIN_STATUS_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_LINK_ADMIN;
+ if (!(flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VLAN_ACCELERATION_TX_DISABLED)) {
+ bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
+ PMD_DRV_LOG(DEBUG, "VLAN acceleration for TX is enabled\n");
+ }
+unlock:
HWRM_UNLOCK();
return rc;
int rc;
rc = __bnxt_hwrm_func_qcaps(bp);
+ if (rc == -ENOMEM)
+ return rc;
+
if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
rc = bnxt_alloc_ctx_mem(bp);
if (rc)
if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_OUTERMOST_RSS_CAP)
bp->vnic_cap_flags |= BNXT_VNIC_CAP_OUTER_RSS;
+ if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RX_CMPL_V2_CAP)
+ bp->vnic_cap_flags |= BNXT_VNIC_CAP_RX_CMPL_V2;
+
+ if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_VLAN_STRIP_CAP) {
+ bp->vnic_cap_flags |= BNXT_VNIC_CAP_VLAN_RX_STRIP;
+ PMD_DRV_LOG(DEBUG, "Rx VLAN strip capability enabled\n");
+ }
+
bp->max_tpa_v2 = rte_le_to_cpu_16(resp->max_aggs_supported);
HWRM_UNLOCK();
HWRM_PREP(&req, HWRM_FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
- req.ver_maj = RTE_VER_YEAR;
- req.ver_min = RTE_VER_MONTH;
- req.ver_upd = RTE_VER_MINOR;
+ req.ver_maj_8b = RTE_VER_YEAR;
+ req.ver_min_8b = RTE_VER_MONTH;
+ req.ver_upd_8b = RTE_VER_MINOR;
if (BNXT_PF(bp)) {
req.enables |= rte_cpu_to_le_32(
req.async_event_fwd[1] |=
rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE);
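+	/* Also have the FW forward echo-request and error-report async
+	 * events to the driver.
+	 */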
+ req.async_event_fwd[2] |=
+ rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ECHO_REQUEST |
+ ASYNC_CMPL_EVENT_ID_ERROR_REPORT);
+
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
* So use the value provided by func_qcaps.
*/
bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
- if (!BNXT_CHIP_THOR(bp) && !bp->pdev->max_vfs)
+ if (!BNXT_CHIP_P5(bp) && !bp->pdev->max_vfs)
bp->max_l2_ctx += bp->max_rx_em_flows;
bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
else
HWRM_CHECK_RESULT();
- PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
+ if (resp->flags & HWRM_VER_GET_OUTPUT_FLAGS_DEV_NOT_RDY) {
+ rc = -EAGAIN;
+ goto error;
+ }
+
+ PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d.%d\n",
resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
- resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
- bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
- (resp->hwrm_fw_min_8b << 16) |
- (resp->hwrm_fw_bld_8b << 8) |
+ resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b,
+ resp->hwrm_fw_rsvd_8b);
+ bp->fw_ver = ((uint32_t)resp->hwrm_fw_maj_8b << 24) |
+ ((uint32_t)resp->hwrm_fw_min_8b << 16) |
+ ((uint32_t)resp->hwrm_fw_bld_8b << 8) |
resp->hwrm_fw_rsvd_8b;
PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
if (bp->max_req_len > resp->max_req_win_len) {
PMD_DRV_LOG(ERR, "Unsupported request length\n");
rc = -EINVAL;
+ goto error;
}
+
+ bp->chip_num = rte_le_to_cpu_16(resp->chip_num);
+
bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
bp->hwrm_max_ext_req_len = rte_le_to_cpu_16(resp->max_ext_req_len);
if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);
- if (bp->max_resp_len != max_resp_len) {
- sprintf(type, "bnxt_hwrm_" PCI_PRI_FMT,
- bp->pdev->addr.domain, bp->pdev->addr.bus,
- bp->pdev->addr.devid, bp->pdev->addr.function);
-
- rte_free(bp->hwrm_cmd_resp_addr);
-
- bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
- if (bp->hwrm_cmd_resp_addr == NULL) {
- rc = -ENOMEM;
- goto error;
- }
- bp->hwrm_cmd_resp_dma_addr =
- rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
- if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
- PMD_DRV_LOG(ERR,
- "Unable to map response buffer to physical memory.\n");
- rc = -ENOMEM;
- goto error;
- }
- bp->max_resp_len = max_resp_len;
- }
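+	/* The response buffer is pre-allocated with BNXT_PAGE_SIZE at init;
+	 * the FW-advertised maximum response length must fit within it.
+	 */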
+ RTE_VERIFY(max_resp_len <= bp->max_resp_len);
+ bp->max_resp_len = max_resp_len;
if ((dev_caps_cfg &
HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_COUNTERS;
}
+ if (dev_caps_cfg &
+ HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED) {
+ PMD_DRV_LOG(DEBUG, "Host-based truflow feature enabled.\n");
+ bp->fw_cap |= BNXT_FW_CAP_TRUFLOW_EN;
+ }
error:
HWRM_UNLOCK();
return rc;
}
-int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
+int bnxt_hwrm_func_driver_unregister(struct bnxt *bp)
{
int rc;
struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
return 0;
HWRM_PREP(&req, HWRM_FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);
- req.flags = flags;
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
+ PMD_DRV_LOG(DEBUG, "Port %u: Unregistered with fw\n",
+ bp->eth_dev->data->port_id);
+
return rc;
}
!(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
req.auto_mode =
HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
- req.auto_link_speed_mask =
- conf->auto_link_speed_mask;
- if (conf->auto_pam4_link_speeds) {
+ if (conf->auto_pam4_link_speed_mask &&
+ bp->link_info->link_signal_mode) {
enables |=
HWRM_PORT_PHY_CFG_IN_EN_AUTO_PAM4_LINK_SPD_MASK;
req.auto_link_pam4_speed_mask =
- conf->auto_pam4_link_speeds;
+ rte_cpu_to_le_16(conf->auto_pam4_link_speed_mask);
} else {
enables |=
HWRM_PORT_PHY_CFG_IN_EN_AUTO_LINK_SPEED_MASK;
+ req.auto_link_speed_mask =
+ rte_cpu_to_le_16(conf->auto_link_speed_mask);
}
}
if (conf->auto_link_speed &&
link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
+ link_info->auto_link_speed_mask = rte_le_to_cpu_16(resp->auto_link_speed_mask);
link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
link_info->phy_ver[0] = resp->phy_maj;
rte_le_to_cpu_16(resp->force_pam4_link_speed);
link_info->support_pam4_speeds =
rte_le_to_cpu_16(resp->support_pam4_speeds);
- link_info->auto_pam4_link_speeds =
+ link_info->auto_pam4_link_speed_mask =
rte_le_to_cpu_16(resp->auto_pam4_link_speed_mask);
+ link_info->module_status = resp->module_status;
HWRM_UNLOCK();
PMD_DRV_LOG(DEBUG, "Link Speed:%d,Auto:%d:%x:%x,Support:%x,Force:%x\n",
link_info->support_speeds, link_info->force_link_speed);
PMD_DRV_LOG(DEBUG, "Link Signal:%d,PAM::Auto:%x,Support:%x,Force:%x\n",
link_info->link_signal_mode,
- link_info->auto_pam4_link_speeds,
+ link_info->auto_pam4_link_speed_mask,
link_info->support_pam4_speeds,
link_info->force_pam4_link_speed);
return rc;
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
- HWRM_CHECK_RESULT();
+ HWRM_CHECK_RESULT_SILENT();
bp->port_cnt = resp->port_cnt;
if (resp->supported_speeds_auto_mode)
HWRM_UNLOCK();
+ /* Older firmware does not have supported_auto_speeds, so assume
+ * that all supported speeds can be autonegotiated.
+ */
+ if (link_info->auto_link_speed_mask && !link_info->support_auto_speeds)
+ link_info->support_auto_speeds = link_info->support_speeds;
+
return 0;
}
req.ring_type = ring_type;
req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
- if (BNXT_CHIP_THOR(bp)) {
+ if (BNXT_CHIP_P5(bp)) {
mb_pool = bp->rx_queues[0]->mb_pool;
rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
RTE_PKTMBUF_HEADROOM;
}
int bnxt_hwrm_ring_free(struct bnxt *bp,
- struct bnxt_ring *ring, uint32_t ring_type)
+ struct bnxt_ring *ring, uint32_t ring_type,
+ uint16_t cp_ring_id)
{
int rc;
struct hwrm_ring_free_input req = {.req_type = 0 };
struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return -EINVAL;
+
HWRM_PREP(&req, HWRM_RING_FREE, BNXT_USE_CHIMP_MB);
req.ring_type = ring_type;
req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
+ req.cmpl_ring = rte_cpu_to_le_16(cp_ring_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
if (rc || resp->error_code) {
if (rc == 0 && resp->error_code)
struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ /* Don't attempt to re-create the ring group if it is already created */
+ if (bp->grp_info[idx].fw_grp_id != INVALID_HW_RING_ID)
+ return 0;
+
HWRM_PREP(&req, HWRM_RING_GRP_ALLOC, BNXT_USE_CHIMP_MB);
req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
struct hwrm_ring_grp_free_input req = {.req_type = 0 };
struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
+ if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
+ return 0;
+
HWRM_PREP(&req, HWRM_RING_GRP_FREE, BNXT_USE_CHIMP_MB);
req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
- if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
+ if (cpr->hw_stats_ctx_id == HWRM_NA_SIGNATURE)
return rc;
HWRM_PREP(&req, HWRM_STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);
return rc;
}
-int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
- unsigned int idx __rte_unused)
+int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
int rc;
struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE)
+ return 0;
+
HWRM_PREP(&req, HWRM_STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);
req.update_period_ms = rte_cpu_to_le_32(0);
return rc;
}
-int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
- unsigned int idx __rte_unused)
+static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
int rc;
struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
+ if (cpr->hw_stats_ctx_id == HWRM_NA_SIGNATURE)
+ return 0;
+
HWRM_PREP(&req, HWRM_STAT_CTX_FREE, BNXT_USE_CHIMP_MB);
req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
+ cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
+
return rc;
}
HWRM_PREP(&req, HWRM_VNIC_CFG, BNXT_USE_CHIMP_MB);
- if (BNXT_CHIP_THOR(bp)) {
+ if (BNXT_CHIP_P5(bp)) {
int dflt_rxq = vnic->start_grp_id;
struct bnxt_rx_ring_info *rxr;
struct bnxt_cp_ring_info *cpr;
rte_cpu_to_le_16(cpr->cp_ring_struct->fw_ring_id);
enables = HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID |
HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID;
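+		/* If supported, request the simplified "all ok" Rx checksum
+		 * mode used with Rx completion v2 records.
+		 */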
+ if (bp->vnic_cap_flags & BNXT_VNIC_CAP_RX_CMPL_V2) {
+ enables |= HWRM_VNIC_CFG_INPUT_ENABLES_RX_CSUM_V2_MODE;
+ req.rx_csum_v2_mode =
+ HWRM_VNIC_CFG_INPUT_RX_CSUM_V2_MODE_ALL_OK;
+ }
goto config_mru;
}
if (vnic->bd_stall)
req.flags |=
rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
- if (vnic->roce_dual)
- req.flags |= rte_cpu_to_le_32(
- HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
- if (vnic->roce_only)
- req.flags |= rte_cpu_to_le_32(
- HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
if (vnic->rss_dflt_cr)
req.flags |= rte_cpu_to_le_32(
HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
- vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
- HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
- vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
- HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
{
int rc = 0;
- if (BNXT_CHIP_THOR(bp)) {
+ if (BNXT_CHIP_P5(bp)) {
int j;
for (j = 0; j < vnic->num_lb_ctxts; j++) {
}
static int
-bnxt_hwrm_vnic_rss_cfg_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
int i;
int rc = 0;
if (!vnic->rss_table)
return 0;
- if (BNXT_CHIP_THOR(bp))
- return bnxt_hwrm_vnic_rss_cfg_thor(bp, vnic);
+ if (BNXT_CHIP_P5(bp))
+ return bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
- if (BNXT_CHIP_THOR(bp) && !bp->max_tpa_v2) {
+ if (BNXT_CHIP_P5(bp) && !bp->max_tpa_v2) {
if (enable)
PMD_DRV_LOG(ERR, "No HW support for LRO\n");
return -ENOTSUP;
unsigned int i;
struct bnxt_cp_ring_info *cpr;
- for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
+ for (i = 0; i < bp->rx_cp_nr_rings; i++) {
- if (i >= bp->rx_cp_nr_rings) {
- cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
- } else {
- cpr = bp->rx_queues[i]->cp_ring;
- if (BNXT_HAS_RING_GRPS(bp))
- bp->grp_info[i].fw_stats_ctx = -1;
- }
- if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
- rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
- cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
- if (rc)
- return rc;
- }
+ cpr = bp->rx_queues[i]->cp_ring;
+ if (BNXT_HAS_RING_GRPS(bp))
+ bp->grp_info[i].fw_stats_ctx = -1;
+ if (cpr == NULL)
+ continue;
+ rc = bnxt_hwrm_stat_ctx_free(bp, cpr);
+ if (rc)
+ return rc;
}
- return 0;
-}
-
-int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
-{
- unsigned int i;
- int rc = 0;
-
- for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
- struct bnxt_tx_queue *txq;
- struct bnxt_rx_queue *rxq;
- struct bnxt_cp_ring_info *cpr;
-
- if (i >= bp->rx_cp_nr_rings) {
- txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
- cpr = txq->cp_ring;
- } else {
- rxq = bp->rx_queues[i];
- cpr = rxq->cp_ring;
- }
-
- rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
+ for (i = 0; i < bp->tx_cp_nr_rings; i++) {
+ cpr = bp->tx_queues[i]->cp_ring;
+ if (cpr == NULL)
+ continue;
+ rc = bnxt_hwrm_stat_ctx_free(bp, cpr);
if (rc)
return rc;
}
- return rc;
+
+ return 0;
}
static int
struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
bnxt_hwrm_ring_free(bp, cp_ring,
- HWRM_RING_FREE_INPUT_RING_TYPE_NQ);
- cp_ring->fw_ring_id = INVALID_HW_RING_ID;
- memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
- sizeof(*cpr->cp_desc_ring));
+ HWRM_RING_FREE_INPUT_RING_TYPE_NQ,
+ INVALID_HW_RING_ID);
+ memset(cpr->cp_desc_ring, 0,
+ cpr->cp_ring_struct->ring_size * sizeof(*cpr->cp_desc_ring));
cpr->cp_raw_cons = 0;
- cpr->valid = 0;
}
void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
bnxt_hwrm_ring_free(bp, cp_ring,
- HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
- cp_ring->fw_ring_id = INVALID_HW_RING_ID;
- memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
- sizeof(*cpr->cp_desc_ring));
+ HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL,
+ INVALID_HW_RING_ID);
+ memset(cpr->cp_desc_ring, 0,
+ cpr->cp_ring_struct->ring_size * sizeof(*cpr->cp_desc_ring));
cpr->cp_raw_cons = 0;
- cpr->valid = 0;
}
void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
{
struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
- struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
- struct bnxt_ring *ring = rxr->rx_ring_struct;
- struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+ struct bnxt_rx_ring_info *rxr = rxq ? rxq->rx_ring : NULL;
+ struct bnxt_ring *ring = rxr ? rxr->rx_ring_struct : NULL;
+ struct bnxt_cp_ring_info *cpr = rxq ? rxq->cp_ring : NULL;
- if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ if (BNXT_HAS_RING_GRPS(bp))
+ bnxt_hwrm_ring_grp_free(bp, queue_index);
+
+ if (ring != NULL && cpr != NULL)
bnxt_hwrm_ring_free(bp, ring,
- HWRM_RING_FREE_INPUT_RING_TYPE_RX);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- if (BNXT_HAS_RING_GRPS(bp))
- bp->grp_info[queue_index].rx_fw_ring_id =
- INVALID_HW_RING_ID;
- }
- ring = rxr->ag_ring_struct;
- if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ HWRM_RING_FREE_INPUT_RING_TYPE_RX,
+ cpr->cp_ring_struct->fw_ring_id);
+ if (BNXT_HAS_RING_GRPS(bp))
+ bp->grp_info[queue_index].rx_fw_ring_id = INVALID_HW_RING_ID;
+
+	/* Check the agg ring struct explicitly.
+	 * bnxt_need_agg_ring() reflects the current offload flags, but the
+	 * agg ring struct may need to be freed before those flags are
+	 * updated.
+	 */
+ if (!bnxt_need_agg_ring(bp->eth_dev) ||
+ (rxr && rxr->ag_ring_struct == NULL))
+ goto no_agg;
+
+ ring = rxr ? rxr->ag_ring_struct : NULL;
+ if (ring != NULL && cpr != NULL) {
bnxt_hwrm_ring_free(bp, ring,
- BNXT_CHIP_THOR(bp) ?
+ BNXT_CHIP_P5(bp) ?
HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
- HWRM_RING_FREE_INPUT_RING_TYPE_RX);
- if (BNXT_HAS_RING_GRPS(bp))
- bp->grp_info[queue_index].ag_fw_ring_id =
- INVALID_HW_RING_ID;
+ HWRM_RING_FREE_INPUT_RING_TYPE_RX,
+ cpr->cp_ring_struct->fw_ring_id);
}
- if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
+ if (BNXT_HAS_RING_GRPS(bp))
+ bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID;
+
+no_agg:
+ if (cpr != NULL) {
+ bnxt_hwrm_stat_ctx_free(bp, cpr);
bnxt_free_cp_ring(bp, cpr);
+ }
if (BNXT_HAS_RING_GRPS(bp))
bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
}
+int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int queue_index)
+{
+ int rc;
+ struct hwrm_ring_reset_input req = {.req_type = 0 };
+ struct hwrm_ring_reset_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(&req, HWRM_RING_RESET, BNXT_USE_CHIMP_MB);
+
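+	/* For the RX_RING_GRP reset type, the FW expects the ring group
+	 * ID in the ring_id field.
+	 */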
+ req.ring_type = HWRM_RING_RESET_INPUT_RING_TYPE_RX_RING_GRP;
+ req.ring_id = rte_cpu_to_le_16(bp->grp_info[queue_index].fw_grp_id);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+
+ HWRM_CHECK_RESULT();
+
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
static int
bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
unsigned int i;
- for (i = 0; i < bp->tx_cp_nr_rings; i++) {
- struct bnxt_tx_queue *txq = bp->tx_queues[i];
- struct bnxt_tx_ring_info *txr = txq->tx_ring;
- struct bnxt_ring *ring = txr->tx_ring_struct;
- struct bnxt_cp_ring_info *cpr = txq->cp_ring;
-
- if (ring->fw_ring_id != INVALID_HW_RING_ID) {
- bnxt_hwrm_ring_free(bp, ring,
- HWRM_RING_FREE_INPUT_RING_TYPE_TX);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- memset(txr->tx_desc_ring, 0,
- txr->tx_ring_struct->ring_size *
- sizeof(*txr->tx_desc_ring));
- memset(txr->tx_buf_ring, 0,
- txr->tx_ring_struct->ring_size *
- sizeof(*txr->tx_buf_ring));
- txr->tx_prod = 0;
- txr->tx_cons = 0;
- }
- if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
- bnxt_free_cp_ring(bp, cpr);
- cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
- }
- }
+ for (i = 0; i < bp->tx_cp_nr_rings; i++)
+ bnxt_free_hwrm_tx_ring(bp, i);
for (i = 0; i < bp->rx_cp_nr_rings; i++)
bnxt_free_hwrm_rx_ring(bp, i);
sprintf(type, "bnxt_hwrm_" PCI_PRI_FMT, pdev->addr.domain,
pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
- bp->max_resp_len = HWRM_MAX_RESP_LEN;
+ bp->max_resp_len = BNXT_PAGE_SIZE;
bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
if (bp->hwrm_cmd_resp_addr == NULL)
return -ENOMEM;
{
uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
- if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
+ if ((conf_link_speed & RTE_ETH_LINK_SPEED_FIXED) == RTE_ETH_LINK_SPEED_AUTONEG)
return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
switch (conf_link_speed) {
- case ETH_LINK_SPEED_10M_HD:
- case ETH_LINK_SPEED_100M_HD:
+ case RTE_ETH_LINK_SPEED_10M_HD:
+ case RTE_ETH_LINK_SPEED_100M_HD:
/* FALLTHROUGH */
return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
}
{
uint16_t eth_link_speed = 0;
- if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
- return ETH_LINK_SPEED_AUTONEG;
+ if (conf_link_speed == RTE_ETH_LINK_SPEED_AUTONEG)
+ return RTE_ETH_LINK_SPEED_AUTONEG;
- switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
- case ETH_LINK_SPEED_100M:
- case ETH_LINK_SPEED_100M_HD:
+ switch (conf_link_speed & ~RTE_ETH_LINK_SPEED_FIXED) {
+ case RTE_ETH_LINK_SPEED_100M:
+ case RTE_ETH_LINK_SPEED_100M_HD:
/* FALLTHROUGH */
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
break;
- case ETH_LINK_SPEED_1G:
+ case RTE_ETH_LINK_SPEED_1G:
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
break;
- case ETH_LINK_SPEED_2_5G:
+ case RTE_ETH_LINK_SPEED_2_5G:
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
break;
- case ETH_LINK_SPEED_10G:
+ case RTE_ETH_LINK_SPEED_10G:
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
break;
- case ETH_LINK_SPEED_20G:
+ case RTE_ETH_LINK_SPEED_20G:
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
break;
- case ETH_LINK_SPEED_25G:
+ case RTE_ETH_LINK_SPEED_25G:
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
break;
- case ETH_LINK_SPEED_40G:
+ case RTE_ETH_LINK_SPEED_40G:
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
break;
- case ETH_LINK_SPEED_50G:
+ case RTE_ETH_LINK_SPEED_50G:
eth_link_speed = pam4_link ?
HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB :
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
break;
- case ETH_LINK_SPEED_100G:
+ case RTE_ETH_LINK_SPEED_100G:
eth_link_speed = pam4_link ?
HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB :
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
break;
- case ETH_LINK_SPEED_200G:
+ case RTE_ETH_LINK_SPEED_200G:
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
break;
return eth_link_speed;
}
-#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
- ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
- ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
- ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | \
- ETH_LINK_SPEED_100G | ETH_LINK_SPEED_200G)
+#define BNXT_SUPPORTED_SPEEDS (RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_100M_HD | \
+ RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G | \
+ RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_20G | RTE_ETH_LINK_SPEED_25G | \
+ RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_50G | \
+ RTE_ETH_LINK_SPEED_100G | RTE_ETH_LINK_SPEED_200G)
static int bnxt_validate_link_speed(struct bnxt *bp)
{
uint32_t link_speed_capa;
uint32_t one_speed;
- if (link_speed == ETH_LINK_SPEED_AUTONEG)
+ if (link_speed == RTE_ETH_LINK_SPEED_AUTONEG)
return 0;
link_speed_capa = bnxt_get_speed_capabilities(bp);
- if (link_speed & ETH_LINK_SPEED_FIXED) {
- one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
+ if (link_speed & RTE_ETH_LINK_SPEED_FIXED) {
+ one_speed = link_speed & ~RTE_ETH_LINK_SPEED_FIXED;
if (one_speed & (one_speed - 1)) {
PMD_DRV_LOG(ERR,
{
uint16_t ret = 0;
- if (link_speed == ETH_LINK_SPEED_AUTONEG) {
+ if (link_speed == RTE_ETH_LINK_SPEED_AUTONEG) {
if (bp->link_info->support_speeds)
return bp->link_info->support_speeds;
link_speed = BNXT_SUPPORTED_SPEEDS;
}
- if (link_speed & ETH_LINK_SPEED_100M)
+ if (link_speed & RTE_ETH_LINK_SPEED_100M)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
- if (link_speed & ETH_LINK_SPEED_100M_HD)
+ if (link_speed & RTE_ETH_LINK_SPEED_100M_HD)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
- if (link_speed & ETH_LINK_SPEED_1G)
+ if (link_speed & RTE_ETH_LINK_SPEED_1G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
- if (link_speed & ETH_LINK_SPEED_2_5G)
+ if (link_speed & RTE_ETH_LINK_SPEED_2_5G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
- if (link_speed & ETH_LINK_SPEED_10G)
+ if (link_speed & RTE_ETH_LINK_SPEED_10G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
- if (link_speed & ETH_LINK_SPEED_20G)
+ if (link_speed & RTE_ETH_LINK_SPEED_20G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
- if (link_speed & ETH_LINK_SPEED_25G)
+ if (link_speed & RTE_ETH_LINK_SPEED_25G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
- if (link_speed & ETH_LINK_SPEED_40G)
+ if (link_speed & RTE_ETH_LINK_SPEED_40G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
- if (link_speed & ETH_LINK_SPEED_50G)
+ if (link_speed & RTE_ETH_LINK_SPEED_50G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
- if (link_speed & ETH_LINK_SPEED_100G)
+ if (link_speed & RTE_ETH_LINK_SPEED_100G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
- if (link_speed & ETH_LINK_SPEED_200G)
+ if (link_speed & RTE_ETH_LINK_SPEED_200G)
ret |= HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
return ret;
}
static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
{
- uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
+ uint32_t eth_link_speed = RTE_ETH_SPEED_NUM_NONE;
switch (hw_link_speed) {
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
- eth_link_speed = ETH_SPEED_NUM_100M;
+ eth_link_speed = RTE_ETH_SPEED_NUM_100M;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
- eth_link_speed = ETH_SPEED_NUM_1G;
+ eth_link_speed = RTE_ETH_SPEED_NUM_1G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
- eth_link_speed = ETH_SPEED_NUM_2_5G;
+ eth_link_speed = RTE_ETH_SPEED_NUM_2_5G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
- eth_link_speed = ETH_SPEED_NUM_10G;
+ eth_link_speed = RTE_ETH_SPEED_NUM_10G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
- eth_link_speed = ETH_SPEED_NUM_20G;
+ eth_link_speed = RTE_ETH_SPEED_NUM_20G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
- eth_link_speed = ETH_SPEED_NUM_25G;
+ eth_link_speed = RTE_ETH_SPEED_NUM_25G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
- eth_link_speed = ETH_SPEED_NUM_40G;
+ eth_link_speed = RTE_ETH_SPEED_NUM_40G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
- eth_link_speed = ETH_SPEED_NUM_50G;
+ eth_link_speed = RTE_ETH_SPEED_NUM_50G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
- eth_link_speed = ETH_SPEED_NUM_100G;
+ eth_link_speed = RTE_ETH_SPEED_NUM_100G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
- eth_link_speed = ETH_SPEED_NUM_200G;
+ eth_link_speed = RTE_ETH_SPEED_NUM_200G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
default:
static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
{
- uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
+ uint16_t eth_link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
switch (hw_link_duplex) {
case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
/* FALLTHROUGH */
- eth_link_duplex = ETH_LINK_FULL_DUPLEX;
+ eth_link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
break;
case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
- eth_link_duplex = ETH_LINK_HALF_DUPLEX;
+ eth_link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
break;
default:
PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
link->link_speed =
bnxt_parse_hw_link_speed(link_info->link_speed);
else
- link->link_speed = ETH_SPEED_NUM_NONE;
+ link->link_speed = RTE_ETH_SPEED_NUM_NONE;
link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
link->link_status = link_info->link_up;
link->link_autoneg = link_info->auto_mode ==
HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
- ETH_LINK_FIXED : ETH_LINK_AUTONEG;
+ RTE_ETH_LINK_FIXED : RTE_ETH_LINK_AUTONEG;
exit:
return rc;
}
goto port_phy_cfg;
autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
- if (BNXT_CHIP_THOR(bp) &&
- dev_conf->link_speeds == ETH_LINK_SPEED_40G) {
+ if (BNXT_CHIP_P5(bp) &&
+ dev_conf->link_speeds == RTE_ETH_LINK_SPEED_40G) {
/* 40G is not supported as part of media auto detect.
* The speed should be forced and autoneg disabled
* to configure 40G speed.
/* No auto speeds and no auto_pam4_link. Disable autoneg */
if (bp->link_info->auto_link_speed == 0 &&
bp->link_info->link_signal_mode &&
- bp->link_info->auto_pam4_link_speeds == 0)
+ bp->link_info->auto_pam4_link_speed_mask == 0)
autoneg = 0;
speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds,
bp->link_info->link_signal_mode);
link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
- /* Autoneg can be done only when the FW allows.
- * When user configures fixed speed of 40G and later changes to
- * any other speed, auto_link_speed/force_link_speed is still set
- * to 40G until link comes up at new speed.
- */
+ /* Autoneg can be done only when the FW allows. */
if (autoneg == 1 &&
- !(!BNXT_CHIP_THOR(bp) &&
- (bp->link_info->auto_link_speed ||
- bp->link_info->force_link_speed))) {
+ (bp->link_info->support_auto_speeds || bp->link_info->support_pam4_auto_speeds)) {
link_req.phy_flags |=
HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
link_req.auto_link_speed_mask =
bnxt_parse_eth_link_speed_mask(bp,
dev_conf->link_speeds);
+ link_req.auto_pam4_link_speed_mask =
+ bp->link_info->auto_pam4_link_speed_mask;
} else {
if (bp->link_info->phy_type ==
HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
else if (bp->link_info->force_pam4_link_speed)
link_req.link_speed =
bp->link_info->force_pam4_link_speed;
- else if (bp->link_info->auto_pam4_link_speeds)
+ else if (bp->link_info->auto_pam4_link_speed_mask)
link_req.link_speed =
- bp->link_info->auto_pam4_link_speeds;
+ bp->link_info->auto_pam4_link_speed_mask;
else if (bp->link_info->support_pam4_speeds)
link_req.link_speed =
bp->link_info->support_pam4_speeds;
* zero. Use the auto_link_speed.
*/
if (bp->link_info->auto_link_speed != 0 &&
- bp->link_info->auto_pam4_link_speeds == 0)
+ bp->link_info->auto_pam4_link_speed_mask == 0)
link_req.link_speed = bp->link_info->auto_link_speed;
}
link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
return rc;
}
-/* JIRA 22088 */
int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
{
struct hwrm_func_qcfg_input req = {0};
HWRM_CHECK_RESULT();
- /* Hard Coded.. 0xfff VLAN ID mask */
- bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
+ bp->vlan = rte_le_to_cpu_16(resp->vlan) & RTE_ETH_VLAN_ID_MAX;
svif_info = rte_le_to_cpu_16(resp->svif_info);
if (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID)
}
if (mtu)
- *mtu = rte_le_to_cpu_16(resp->mtu);
+ *mtu = rte_le_to_cpu_16(resp->admin_mtu);
switch (resp->port_partition_type) {
case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
break;
}
+ bp->legacy_db_size =
+ rte_le_to_cpu_16(resp->legacy_l2_db_size_kb) * 1024;
+
HWRM_UNLOCK();
return rc;
{
struct hwrm_func_qcfg_input req = {0};
struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ uint16_t flags;
int rc;
if (!BNXT_VF_IS_TRUSTED(bp))
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
- HWRM_CHECK_RESULT();
+ HWRM_CHECK_RESULT_SILENT();
memcpy(bp->parent->mac_addr, resp->mac_address, RTE_ETHER_ADDR_LEN);
bp->parent->vnic = rte_le_to_cpu_16(resp->dflt_vnic_id);
bp->parent->fid = rte_le_to_cpu_16(resp->fid);
bp->parent->port_id = rte_le_to_cpu_16(resp->port_id);
- /* FIXME: Temporary workaround - remove when firmware issue is fixed. */
- if (bp->parent->vnic == 0) {
- PMD_DRV_LOG(ERR, "Error: parent VNIC unavailable.\n");
- /* Use hard-coded values appropriate for current Wh+ fw. */
- if (bp->parent->fid == 2)
- bp->parent->vnic = 0x100;
- else
- bp->parent->vnic = 1;
+ flags = rte_le_to_cpu_16(resp->flags);
+	/* Check for multi-root support. */
+ if (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_ROOT) {
+ bp->flags2 |= BNXT_FLAGS2_MULTIROOT_EN;
+ PMD_DRV_LOG(DEBUG, "PF enabled with multi root capability\n");
}
HWRM_UNLOCK();
uint32_t enables;
int rc;
- enables = HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
+ enables = HWRM_FUNC_CFG_INPUT_ENABLES_ADMIN_MTU |
+ HWRM_FUNC_CFG_INPUT_ENABLES_HOST_MTU |
HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
rte_cpu_to_le_16(pf_resc->num_hw_ring_grps);
} else if (BNXT_HAS_NQ(bp)) {
enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX;
- req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);
+ req.num_msix = rte_cpu_to_le_16(pf_resc->num_nq_rings);
}
req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
- req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
+ req.admin_mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
+ req.host_mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu);
req.mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
req.num_rsscos_ctxs = rte_cpu_to_le_16(pf_resc->num_rsscos_ctxs);
req.num_stat_ctxs = rte_cpu_to_le_16(pf_resc->num_stat_ctxs);
req.num_tx_rings = rte_cpu_to_le_16(pf_resc->num_tx_rings);
req.num_rx_rings = rte_cpu_to_le_16(pf_resc->num_rx_rings);
req.num_l2_ctxs = rte_cpu_to_le_16(pf_resc->num_l2_ctxs);
- req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
+ req.num_vnics = rte_cpu_to_le_16(pf_resc->num_vnics);
req.fid = rte_cpu_to_le_16(0xffff);
req.enables = rte_cpu_to_le_32(enables);
req->min_rx_rings = req->max_rx_rings;
req->max_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
req->min_l2_ctxs = req->max_l2_ctxs;
- /* TODO: For now, do not support VMDq/RFS on VFs. */
- req->max_vnics = rte_cpu_to_le_16(1);
+ req->max_vnics = rte_cpu_to_le_16(bp->max_vnics / (num_vfs + 1));
req->min_vnics = req->max_vnics;
req->max_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
(num_vfs + 1));
req->min_hw_ring_grps = req->max_hw_ring_grps;
- req->flags =
- rte_cpu_to_le_16(HWRM_FUNC_VF_RESOURCE_CFG_INPUT_FLAGS_MIN_GUARANTEED);
+ req->max_msix = rte_cpu_to_le_16(bp->max_nq_rings / (num_vfs + 1));
}
static void
struct hwrm_func_cfg_input *req,
int num_vfs)
{
- req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
+ req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_ADMIN_MTU |
HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
- req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
- RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
- BNXT_NUM_VLANS);
+ req->admin_mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
+ RTE_ETHER_CRC_LEN + RTE_VLAN_HLEN *
+ BNXT_NUM_VLANS);
req->mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
(num_vfs + 1));
bp->max_rx_rings -= rte_le_to_cpu_16(resp->alloc_rx_rings);
bp->max_l2_ctx -= rte_le_to_cpu_16(resp->alloc_l2_ctx);
bp->max_ring_grps -= rte_le_to_cpu_16(resp->alloc_hw_ring_grps);
+ bp->max_nq_rings -= rte_le_to_cpu_16(resp->alloc_msix);
+ bp->max_vnics -= rte_le_to_cpu_16(resp->alloc_vnics);
+
+ HWRM_UNLOCK();
+
+ return 0;
+}
+
+/* Update the PF resource values based on how many resources were
+ * actually allocated to the PF.
+ */
+static int bnxt_update_max_resources_pf_only(struct bnxt *bp)
+{
+ struct hwrm_func_qcfg_input req = {0};
+ struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ /* Get the actual allocated values now */
+ HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
+ req.fid = rte_cpu_to_le_16(0xffff);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+ HWRM_CHECK_RESULT();
+
+ bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
+ bp->max_stat_ctx = rte_le_to_cpu_16(resp->alloc_stat_ctx);
+ bp->max_cp_rings = rte_le_to_cpu_16(resp->alloc_cmpl_rings);
+ bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
+ bp->max_rx_rings = rte_le_to_cpu_16(resp->alloc_rx_rings);
+ bp->max_l2_ctx = rte_le_to_cpu_16(resp->alloc_l2_ctx);
+ bp->max_ring_grps = rte_le_to_cpu_16(resp->alloc_hw_ring_grps);
+ bp->max_vnics = rte_le_to_cpu_16(resp->alloc_vnics);
HWRM_UNLOCK();
pf_resc->num_rx_rings = rte_le_to_cpu_16(resp->alloc_rx_rings);
pf_resc->num_l2_ctxs = rte_le_to_cpu_16(resp->alloc_l2_ctx);
pf_resc->num_hw_ring_grps = rte_le_to_cpu_32(resp->alloc_hw_ring_grps);
+ pf_resc->num_nq_rings = rte_le_to_cpu_32(resp->alloc_msix);
+ pf_resc->num_vnics = rte_le_to_cpu_16(resp->alloc_vnics);
bp->pf->evb_mode = resp->evb_mode;
HWRM_UNLOCK();
pf_resc->num_rx_rings = bp->max_rx_rings;
pf_resc->num_l2_ctxs = bp->max_l2_ctx;
pf_resc->num_hw_ring_grps = bp->max_ring_grps;
+ pf_resc->num_nq_rings = bp->max_nq_rings;
+ pf_resc->num_vnics = bp->max_vnics;
return;
}
bp->max_l2_ctx % (num_vfs + 1);
pf_resc->num_hw_ring_grps = bp->max_ring_grps / (num_vfs + 1) +
bp->max_ring_grps % (num_vfs + 1);
+ pf_resc->num_nq_rings = bp->max_nq_rings / (num_vfs + 1) +
+ bp->max_nq_rings % (num_vfs + 1);
+ pf_resc->num_vnics = bp->max_vnics / (num_vfs + 1) +
+ bp->max_vnics % (num_vfs + 1);
}
int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
int rc;
if (!BNXT_PF(bp)) {
- PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
+ PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
return -EINVAL;
}
HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
bp->pf->func_cfg_flags |=
HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
+
rc = bnxt_hwrm_pf_func_cfg(bp, &pf_resc);
- rc = __bnxt_hwrm_func_qcaps(bp);
+ if (rc)
+ return rc;
+
+ rc = bnxt_update_max_resources_pf_only(bp);
+
return rc;
}
bp->max_tx_rings = pf_resc->num_tx_rings;
bp->max_rx_rings = pf_resc->num_rx_rings;
bp->max_ring_grps = pf_resc->num_hw_ring_grps;
+ bp->max_nq_rings = pf_resc->num_nq_rings;
+ bp->max_vnics = pf_resc->num_vnics;
}
static int32_t
return rc;
}
-int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
- struct rte_eth_stats *stats, uint8_t rx)
+static void bnxt_update_prev_stat(uint64_t *cntr, uint64_t *prev_cntr)
+{
+	/* One of the HW stat values that make up this counter was
+	 * returned as zero by the HW in this iteration, so reuse the
+	 * previous iteration's counter value instead.
+	 */
+ if (*prev_cntr && *cntr == 0)
+ *cntr = *prev_cntr;
+ else
+ *prev_cntr = *cntr;
+}
+
+int bnxt_hwrm_ring_stats(struct bnxt *bp, uint32_t cid, int idx,
+ struct bnxt_ring_stats *ring_stats, bool rx)
{
int rc = 0;
struct hwrm_stat_ctx_query_input req = {.req_type = 0};
HWRM_CHECK_RESULT();
if (rx) {
- stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
- stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
- stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
- stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
- stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
- stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
- stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_discard_pkts);
- stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_error_pkts);
+ struct bnxt_ring_stats *prev_stats = &bp->prev_rx_ring_stats[idx];
+
+ ring_stats->rx_ucast_pkts = rte_le_to_cpu_64(resp->rx_ucast_pkts);
+ bnxt_update_prev_stat(&ring_stats->rx_ucast_pkts,
+ &prev_stats->rx_ucast_pkts);
+
+ ring_stats->rx_mcast_pkts = rte_le_to_cpu_64(resp->rx_mcast_pkts);
+ bnxt_update_prev_stat(&ring_stats->rx_mcast_pkts,
+ &prev_stats->rx_mcast_pkts);
+
+ ring_stats->rx_bcast_pkts = rte_le_to_cpu_64(resp->rx_bcast_pkts);
+ bnxt_update_prev_stat(&ring_stats->rx_bcast_pkts,
+ &prev_stats->rx_bcast_pkts);
+
+ ring_stats->rx_ucast_bytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
+ bnxt_update_prev_stat(&ring_stats->rx_ucast_bytes,
+ &prev_stats->rx_ucast_bytes);
+
+ ring_stats->rx_mcast_bytes = rte_le_to_cpu_64(resp->rx_mcast_bytes);
+ bnxt_update_prev_stat(&ring_stats->rx_mcast_bytes,
+ &prev_stats->rx_mcast_bytes);
+
+ ring_stats->rx_bcast_bytes = rte_le_to_cpu_64(resp->rx_bcast_bytes);
+ bnxt_update_prev_stat(&ring_stats->rx_bcast_bytes,
+ &prev_stats->rx_bcast_bytes);
+
+ ring_stats->rx_discard_pkts = rte_le_to_cpu_64(resp->rx_discard_pkts);
+ bnxt_update_prev_stat(&ring_stats->rx_discard_pkts,
+ &prev_stats->rx_discard_pkts);
+
+ ring_stats->rx_error_pkts = rte_le_to_cpu_64(resp->rx_error_pkts);
+ bnxt_update_prev_stat(&ring_stats->rx_error_pkts,
+ &prev_stats->rx_error_pkts);
+
+ ring_stats->rx_agg_pkts = rte_le_to_cpu_64(resp->rx_agg_pkts);
+ bnxt_update_prev_stat(&ring_stats->rx_agg_pkts,
+ &prev_stats->rx_agg_pkts);
+
+ ring_stats->rx_agg_bytes = rte_le_to_cpu_64(resp->rx_agg_bytes);
+ bnxt_update_prev_stat(&ring_stats->rx_agg_bytes,
+ &prev_stats->rx_agg_bytes);
+
+ ring_stats->rx_agg_events = rte_le_to_cpu_64(resp->rx_agg_events);
+ bnxt_update_prev_stat(&ring_stats->rx_agg_events,
+ &prev_stats->rx_agg_events);
+
+ ring_stats->rx_agg_aborts = rte_le_to_cpu_64(resp->rx_agg_aborts);
+ bnxt_update_prev_stat(&ring_stats->rx_agg_aborts,
+ &prev_stats->rx_agg_aborts);
} else {
- stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
- stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
- stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
- stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
- stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
- stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
+ struct bnxt_ring_stats *prev_stats = &bp->prev_tx_ring_stats[idx];
+
+ ring_stats->tx_ucast_pkts = rte_le_to_cpu_64(resp->tx_ucast_pkts);
+ bnxt_update_prev_stat(&ring_stats->tx_ucast_pkts,
+ &prev_stats->tx_ucast_pkts);
+
+ ring_stats->tx_mcast_pkts = rte_le_to_cpu_64(resp->tx_mcast_pkts);
+ bnxt_update_prev_stat(&ring_stats->tx_mcast_pkts,
+ &prev_stats->tx_mcast_pkts);
+
+ ring_stats->tx_bcast_pkts = rte_le_to_cpu_64(resp->tx_bcast_pkts);
+ bnxt_update_prev_stat(&ring_stats->tx_bcast_pkts,
+ &prev_stats->tx_bcast_pkts);
+
+ ring_stats->tx_ucast_bytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
+ bnxt_update_prev_stat(&ring_stats->tx_ucast_bytes,
+ &prev_stats->tx_ucast_bytes);
+
+ ring_stats->tx_mcast_bytes = rte_le_to_cpu_64(resp->tx_mcast_bytes);
+ bnxt_update_prev_stat(&ring_stats->tx_mcast_bytes,
+ &prev_stats->tx_mcast_bytes);
+
+ ring_stats->tx_bcast_bytes = rte_le_to_cpu_64(resp->tx_bcast_bytes);
+ bnxt_update_prev_stat(&ring_stats->tx_bcast_bytes,
+ &prev_stats->tx_bcast_bytes);
+
+ ring_stats->tx_discard_pkts = rte_le_to_cpu_64(resp->tx_discard_pkts);
+ bnxt_update_prev_stat(&ring_stats->tx_discard_pkts,
+ &prev_stats->tx_discard_pkts);
}
HWRM_UNLOCK();
req.port_id = bp->pf->port_id;
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
- HWRM_CHECK_RESULT();
+ HWRM_CHECK_RESULT_SILENT();
if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
unsigned int i;
return rc;
}
-
int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
uint16_t dir_ordinal, uint16_t dir_ext,
uint16_t dir_attr, const uint8_t *data,
}
static int
-bnxt_vnic_rss_configure_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+bnxt_vnic_rss_configure_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
- uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
+ uint8_t *rxq_state = bp->eth_dev->data->rx_queue_state;
struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
struct bnxt_rx_queue **rxqs = bp->rx_queues;
uint16_t *ring_tbl = vnic->rss_table;
req.ring_grp_tbl_addr =
rte_cpu_to_le_64(vnic->rss_table_dma_addr +
- i * BNXT_RSS_ENTRIES_PER_CTX_THOR *
+ i * BNXT_RSS_ENTRIES_PER_CTX_P5 *
2 * sizeof(*ring_tbl));
req.hash_key_tbl_addr =
rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
/* Find next active ring. */
for (cnt = 0; cnt < max_rings; cnt++) {
- if (rx_queue_state[k] !=
- RTE_ETH_QUEUE_STATE_STOPPED)
+ if (rxq_state[k] != RTE_ETH_QUEUE_STATE_STOPPED)
break;
if (++k == max_rings)
k = 0;
{
unsigned int rss_idx, fw_idx, i;
- if (!(vnic->rss_table && vnic->hash_type))
+ if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
return 0;
- if (BNXT_CHIP_THOR(bp))
- return bnxt_vnic_rss_configure_thor(bp, vnic);
-
- if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+ if (!(vnic->rss_table && vnic->hash_type))
return 0;
- if (vnic->rss_table && vnic->hash_type) {
- /*
- * Fill the RSS hash & redirection table with
- * ring group ids for all VNICs
- */
- for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
- rss_idx++, fw_idx++) {
- for (i = 0; i < bp->rx_cp_nr_rings; i++) {
- fw_idx %= bp->rx_cp_nr_rings;
- if (vnic->fw_grp_ids[fw_idx] !=
- INVALID_HW_RING_ID)
- break;
- fw_idx++;
- }
- if (i == bp->rx_cp_nr_rings)
- return 0;
- vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
+ if (BNXT_CHIP_P5(bp))
+ return bnxt_vnic_rss_configure_p5(bp, vnic);
+
+ /*
+ * Fill the RSS hash & redirection table with
+ * ring group ids for all VNICs
+ */
+ for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
+ rss_idx++, fw_idx++) {
+ for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+ fw_idx %= bp->rx_cp_nr_rings;
+ if (vnic->fw_grp_ids[fw_idx] != INVALID_HW_RING_ID)
+ break;
+ fw_idx++;
}
- return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
+
+ if (i == bp->rx_cp_nr_rings)
+ return 0;
+
+ vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
}
- return 0;
+ return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
}
static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
req->flags = rte_cpu_to_le_16(flags);
}
-static int bnxt_hwrm_set_coal_params_thor(struct bnxt *bp,
+static int bnxt_hwrm_set_coal_params_p5(struct bnxt *bp,
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *agg_req)
{
struct hwrm_ring_aggint_qcaps_input req = {0};
int rc;
/* Set ring coalesce parameters only for 100G NICs */
- if (BNXT_CHIP_THOR(bp)) {
- if (bnxt_hwrm_set_coal_params_thor(bp, &req))
+ if (BNXT_CHIP_P5(bp)) {
+ if (bnxt_hwrm_set_coal_params_p5(bp, &req))
return -1;
} else if (bnxt_stratus_device(bp)) {
bnxt_hwrm_set_coal_params(coal, &req);
int total_alloc_len;
int rc, i, tqm_rings;
- if (!BNXT_CHIP_THOR(bp) ||
+ if (!BNXT_CHIP_P5(bp) ||
bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
BNXT_VF(bp) ||
bp->ctx)
ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
- if (!ctx->tqm_fp_rings_count)
- ctx->tqm_fp_rings_count = bp->max_q;
+ ctx->tqm_fp_rings_count = ctx->tqm_fp_rings_count ?
+ RTE_MIN(ctx->tqm_fp_rings_count,
+ BNXT_MAX_TQM_FP_LEGACY_RINGS) :
+ bp->max_q;
+
+	/* Check whether the extended ring count needs to be added in.
+	 * The extended ring count is available only with newer FW, so do
+	 * not read the field on older FW.
+	 */
+ if (ctx->tqm_fp_rings_count == BNXT_MAX_TQM_FP_LEGACY_RINGS &&
+ bp->hwrm_max_ext_req_len >= BNXT_BACKING_STORE_CFG_LEN) {
+ ctx->tqm_fp_rings_count += resp->tqm_fp_rings_count_ext;
+ ctx->tqm_fp_rings_count = RTE_MIN(BNXT_MAX_TQM_FP_RINGS,
+ ctx->tqm_fp_rings_count);
+ }
tqm_rings = ctx->tqm_fp_rings_count + 1;
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
}
+ if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8) {
+		/* DPDK does not need to configure the MRAV and TIM types, so
+		 * skip over them and configure
+		 * HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8 directly.
+		 */
+ ctx_pg = ctx->tqm_mem[BNXT_MAX_TQM_LEGACY_RINGS];
+ req.tqm_ring8_num_entries = rte_cpu_to_le_16(ctx_pg->entries);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req.tqm_ring8_pg_size_tqm_ring_lvl,
+ &req.tqm_ring8_page_dir);
+ }
+
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
/* FW returned values are in units of 100msec */
info->driver_polling_freq =
rte_le_to_cpu_32(resp->driver_polling_freq) * 100;
- info->master_func_wait_period =
+ info->primary_func_wait_period =
rte_le_to_cpu_32(resp->master_func_wait_period) * 100;
info->normal_func_wait_period =
rte_le_to_cpu_32(resp->normal_func_wait_period) * 100;
- info->master_func_wait_period_after_reset =
+ info->primary_func_wait_period_after_reset =
rte_le_to_cpu_32(resp->master_func_wait_period_after_reset) * 100;
info->max_bailout_time_after_reset =
rte_le_to_cpu_32(resp->max_bailout_time_after_reset) * 100;
return rc;
}
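+
+/* Return 1 if the representor pair already exists, 0 if it does not,
+ * or a negative value on HWRM error.
+ */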
+int bnxt_hwrm_cfa_pair_exists(struct bnxt *bp, struct bnxt_representor *rep_bp)
+{
+ struct hwrm_cfa_pair_info_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_cfa_pair_info_input req = {0};
+ int rc = 0;
+
+ if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
+ PMD_DRV_LOG(DEBUG,
+ "Not a PF or trusted VF. Command not supported\n");
+ return 0;
+ }
+
+ HWRM_PREP(&req, HWRM_CFA_PAIR_INFO, BNXT_USE_CHIMP_MB);
+ snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%d",
+ bp->eth_dev->data->name, rep_bp->vf_id);
+ req.flags =
+ rte_cpu_to_le_32(HWRM_CFA_PAIR_INFO_INPUT_FLAGS_LOOKUP_TYPE);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+ HWRM_CHECK_RESULT();
+ if (rc == HWRM_ERR_CODE_SUCCESS && strlen(resp->pair_name)) {
+ HWRM_UNLOCK();
+ return !rc;
+ }
+ HWRM_UNLOCK();
+ return rc;
+}
+
int bnxt_hwrm_cfa_pair_alloc(struct bnxt *bp, struct bnxt_representor *rep_bp)
{
struct hwrm_cfa_pair_alloc_output *resp = bp->hwrm_cmd_resp_addr;
rep_bp->vf_id);
return rc;
}
+
+int bnxt_hwrm_fw_echo_reply(struct bnxt *bp, uint32_t echo_req_data1,
+ uint32_t echo_req_data2)
+{
+ struct hwrm_func_echo_response_input req = {0};
+ struct hwrm_func_echo_response_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ HWRM_PREP(&req, HWRM_FUNC_ECHO_RESPONSE, BNXT_USE_CHIMP_MB);
+ req.event_data1 = rte_cpu_to_le_32(echo_req_data1);
+ req.event_data2 = rte_cpu_to_le_32(echo_req_data2);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_poll_ver_get(struct bnxt *bp)
+{
+ struct hwrm_ver_get_input req = {.req_type = 0 };
+ struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc = 0;
+
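+	/* Use safe defaults and the short command timeout; the FW may
+	 * still be coming out of reset.
+	 */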
+ bp->max_req_len = HWRM_MAX_REQ_LEN;
+ bp->max_resp_len = BNXT_PAGE_SIZE;
+ bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
+
+ HWRM_PREP(&req, HWRM_VER_GET, BNXT_USE_CHIMP_MB);
+ req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
+ req.hwrm_intf_min = HWRM_VERSION_MINOR;
+ req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+
+ HWRM_CHECK_RESULT_SILENT();
+
+ if (resp->flags & HWRM_VER_GET_OUTPUT_FLAGS_DEV_NOT_RDY)
+ rc = -EAGAIN;
+
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_read_sfp_module_eeprom_info(struct bnxt *bp, uint16_t i2c_addr,
+ uint16_t page_number, uint16_t start_addr,
+ uint16_t data_length, uint8_t *buf)
+{
+ struct hwrm_port_phy_i2c_read_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_port_phy_i2c_read_input req = {0};
+ uint32_t enables = HWRM_PORT_PHY_I2C_READ_INPUT_ENABLES_PAGE_OFFSET;
+ int rc, byte_offset = 0;
+
+ do {
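+	/* Read the EEPROM in chunks of at most BNXT_MAX_PHY_I2C_RESP_SIZE
+	 * bytes per HWRM request.
+	 */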
+ uint16_t xfer_size;
+
+ HWRM_PREP(&req, HWRM_PORT_PHY_I2C_READ, BNXT_USE_CHIMP_MB);
+ req.i2c_slave_addr = i2c_addr;
+ req.page_number = rte_cpu_to_le_16(page_number);
+ req.port_id = rte_cpu_to_le_16(bp->pf->port_id);
+
+ xfer_size = RTE_MIN(data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
+ req.page_offset = rte_cpu_to_le_16(start_addr + byte_offset);
+ req.data_length = xfer_size;
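+		/* PAGE_OFFSET is enabled only when reading from a nonzero
+		 * offset.
+		 */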
+ req.enables = rte_cpu_to_le_32(start_addr + byte_offset ? enables : 0);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+ HWRM_CHECK_RESULT();
+
+ memcpy(buf + byte_offset, resp->data, xfer_size);
+
+ data_length -= xfer_size;
+ byte_offset += xfer_size;
+
+ HWRM_UNLOCK();
+ } while (data_length > 0);
+
+ return rc;
+}
+
+void bnxt_free_hwrm_tx_ring(struct bnxt *bp, int queue_index)
+{
+ struct bnxt_tx_queue *txq = bp->tx_queues[queue_index];
+ struct bnxt_tx_ring_info *txr = txq->tx_ring;
+ struct bnxt_ring *ring = txr->tx_ring_struct;
+ struct bnxt_cp_ring_info *cpr = txq->cp_ring;
+
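+	/* Free the TX ring in FW, clear the SW descriptor and buffer
+	 * rings, then release the stats context and completion ring.
+	 */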
+ bnxt_hwrm_ring_free(bp, ring,
+ HWRM_RING_FREE_INPUT_RING_TYPE_TX,
+ cpr->cp_ring_struct->fw_ring_id);
+ txr->tx_raw_prod = 0;
+ txr->tx_raw_cons = 0;
+ memset(txr->tx_desc_ring, 0,
+ txr->tx_ring_struct->ring_size * sizeof(*txr->tx_desc_ring));
+ memset(txr->tx_buf_ring, 0,
+ txr->tx_ring_struct->ring_size * sizeof(*txr->tx_buf_ring));
+
+ bnxt_hwrm_stat_ctx_free(bp, cpr);
+
+ bnxt_free_cp_ring(bp, cpr);
+}
+
+int bnxt_hwrm_config_host_mtu(struct bnxt *bp)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ if (!BNXT_PF(bp))
+ return 0;
+
+ HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
+
+ req.fid = rte_cpu_to_le_16(0xffff);
+ req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_HOST_MTU);
+ req.host_mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int
+bnxt_vnic_rss_clear_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_vnic_rss_cfg_input req = {0};
+ int nr_ctxs = vnic->num_lb_ctxts;
+ int i, rc = 0;
+
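+	/* A zeroed cfg request (no hash type, no ring table) clears the
+	 * RSS configuration for each context.
+	 */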
+ for (i = 0; i < nr_ctxs; i++) {
+ HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
+
+ req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
+ req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+ }
+
+ return rc;
+}