*/
static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
- uint32_t msg_len)
+ uint32_t msg_len, bool use_kong_mb)
{
unsigned int i;
struct input *req = msg;
uint8_t *valid;
uint16_t max_req_len = bp->max_req_len;
struct hwrm_short_input short_input = { 0 };
+ uint16_t bar_offset = use_kong_mb ?
+ GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
+ uint16_t mb_trigger_offset = use_kong_mb ?
+ GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;
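+ /*
+ * The Kong mailbox has its own request window and doorbell register in
+ * BAR0; select the channel and trigger offsets once so the request copy
+ * loop and the doorbell write below target the chosen mailbox.
+ */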
if (bp->flags & BNXT_FLAG_SHORT_CMD) {
void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
/* Write request msg to hwrm channel */
for (i = 0; i < msg_len; i += 4) {
- bar = (uint8_t *)bp->bar0 + i;
+ bar = (uint8_t *)bp->bar0 + bar_offset + i;
rte_write32(*data, bar);
data++;
}
/* Zero the rest of the request space */
for (; i < max_req_len; i += 4) {
- bar = (uint8_t *)bp->bar0 + i;
+ bar = (uint8_t *)bp->bar0 + bar_offset + i;
rte_write32(0, bar);
}
/* Ring channel doorbell */
- bar = (uint8_t *)bp->bar0 + 0x100;
+ bar = (uint8_t *)bp->bar0 + mb_trigger_offset;
rte_write32(1, bar);
/* Poll for the valid bit */
*
* HWRM_UNLOCK() must be called after all response processing is completed.
*/
-#define HWRM_PREP(req, type) do { \
+#define HWRM_PREP(req, type, kong) do { \
rte_spinlock_lock(&bp->hwrm_lock); \
memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
req.req_type = rte_cpu_to_le_16(HWRM_##type); \
req.cmpl_ring = rte_cpu_to_le_16(-1); \
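+ /* Kong keeps its own HWRM command sequence counter, separate from ChiMP's */ \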
- req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
+ req.seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) : \
+ rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
req.target_id = rte_cpu_to_le_16(0xffff); \
req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)
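The call sites below pass BNXT_USE_CHIMP_MB for commands that stay on the default channel and BNXT_USE_KONG(bp) where a command may be steered to the Kong mailbox. Those helpers are not shown in this excerpt; a minimal sketch of how they are presumably defined in bnxt_hwrm.h (illustrative, not taken from this patch):

#define BNXT_USE_CHIMP_MB	0	/* default (ChiMP) HWRM mailbox */
#define BNXT_USE_KONG(bp)	((bp)->flags & BNXT_FLAG_KONG_MB_EN)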
struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, CFA_L2_SET_RX_MASK);
+ HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
req.mask = 0;
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
return rc;
- HWRM_PREP(req, CFA_L2_SET_RX_MASK);
+ HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
/* FIXME add multicast flag, when multicast adding options is supported
}
req.mask = rte_cpu_to_le_32(mask);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
return 0;
}
}
- HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
+ HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(fid);
req.vlan_tag_mask_tbl_addr =
rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
if (filter->fw_l2_filter_id == UINT64_MAX)
return 0;
- HWRM_PREP(req, CFA_L2_FILTER_FREE);
+ HWRM_PREP(req, CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);
req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
if (filter->fw_l2_filter_id != UINT64_MAX)
bnxt_hwrm_clear_l2_filter(bp, filter);
- HWRM_PREP(req, CFA_L2_FILTER_ALLOC);
+ HWRM_PREP(req, CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
req.flags = rte_cpu_to_le_32(filter->flags);
req.enables = rte_cpu_to_le_32(enables);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
if (!ptp)
return 0;
- HWRM_PREP(req, PORT_MAC_CFG);
+ HWRM_PREP(req, PORT_MAC_CFG, BNXT_USE_CHIMP_MB);
if (ptp->rx_filter)
flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
(HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_UNLOCK();
return rc;
if (ptp)
return 0;
- HWRM_PREP(req, PORT_MAC_PTP_QCFG);
+ HWRM_PREP(req, PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);
req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
uint32_t flags;
int i;
- HWRM_PREP(req, FUNC_QCAPS);
+ HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(0xffff);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
struct hwrm_func_reset_input req = {.req_type = 0 };
struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, FUNC_RESET);
+ HWRM_PREP(req, FUNC_RESET, BNXT_USE_CHIMP_MB);
req.enables = rte_cpu_to_le_32(0);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
if (bp->flags & BNXT_FLAG_REGISTERED)
return 0;
- HWRM_PREP(req, FUNC_DRV_RGTR);
+ HWRM_PREP(req, FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
req.ver_maj = RTE_VER_YEAR;
rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_func_vf_cfg_input req = {0};
- HWRM_PREP(req, FUNC_VF_CFG);
+ HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
req.enables = rte_cpu_to_le_32
(HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS |
req.flags = rte_cpu_to_le_32(flags);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
if (test)
HWRM_CHECK_RESULT_SILENT();
struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_func_resource_qcaps_input req = {0};
- HWRM_PREP(req, FUNC_RESOURCE_QCAPS);
+ HWRM_PREP(req, FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(0xffff);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
uint32_t dev_caps_cfg;
bp->max_req_len = HWRM_MAX_REQ_LEN;
- HWRM_PREP(req, VER_GET);
+ HWRM_PREP(req, VER_GET, BNXT_USE_CHIMP_MB);
req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
req.hwrm_intf_min = HWRM_VERSION_MINOR;
req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
bp->flags |= BNXT_FLAG_SHORT_CMD;
}
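+ /* Check whether the firmware advertises a dedicated Kong mailbox channel */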
+ if (dev_caps_cfg &
+ HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) {
+ bp->flags |= BNXT_FLAG_KONG_MB_EN;
+ PMD_DRV_LOG(DEBUG, "Kong mailbox channel enabled\n");
+ }
error:
HWRM_UNLOCK();
if (!(bp->flags & BNXT_FLAG_REGISTERED))
return 0;
- HWRM_PREP(req, FUNC_DRV_UNRGTR);
+ HWRM_PREP(req, FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);
req.flags = flags;
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
uint32_t enables = 0;
- HWRM_PREP(req, PORT_PHY_CFG);
+ HWRM_PREP(req, PORT_PHY_CFG, BNXT_USE_CHIMP_MB);
if (conf->link_up) {
/* Setting Fixed Speed. But AutoNeg is ON, So disable it */
PMD_DRV_LOG(INFO, "Force Link Down\n");
}
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
struct hwrm_port_phy_qcfg_input req = {0};
struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, PORT_PHY_QCFG);
+ HWRM_PREP(req, PORT_PHY_QCFG, BNXT_USE_CHIMP_MB);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
int i;
- HWRM_PREP(req, QUEUE_QPORTCFG);
+ HWRM_PREP(req, QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);
req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
/* HWRM Version >= 1.9.1 */
if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
req.drv_qmap_cap =
HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
struct hwrm_ring_alloc_input req = {.req_type = 0 };
struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, RING_ALLOC);
+ HWRM_PREP(req, RING_ALLOC, BNXT_USE_CHIMP_MB);
req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
req.fbo = rte_cpu_to_le_32(0);
}
req.enables = rte_cpu_to_le_32(enables);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
if (rc || resp->error_code) {
if (rc == 0 && resp->error_code)
struct hwrm_ring_free_input req = {.req_type = 0 };
struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, RING_FREE);
+ HWRM_PREP(req, RING_FREE, BNXT_USE_CHIMP_MB);
req.ring_type = ring_type;
req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
if (rc || resp->error_code) {
if (rc == 0 && resp->error_code)
struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, RING_GRP_ALLOC);
+ HWRM_PREP(req, RING_GRP_ALLOC, BNXT_USE_CHIMP_MB);
req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
struct hwrm_ring_grp_free_input req = {.req_type = 0 };
struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, RING_GRP_FREE);
+ HWRM_PREP(req, RING_GRP_FREE, BNXT_USE_CHIMP_MB);
req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
return rc;
- HWRM_PREP(req, STAT_CTX_CLR_STATS);
+ HWRM_PREP(req, STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);
req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, STAT_CTX_ALLOC);
+ HWRM_PREP(req, STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);
req.update_period_ms = rte_cpu_to_le_32(0);
req.stats_dma_addr =
rte_cpu_to_le_64(cpr->hw_stats_map);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, STAT_CTX_FREE);
+ HWRM_PREP(req, STAT_CTX_FREE, BNXT_USE_CHIMP_MB);
req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
ETHER_CRC_LEN + VLAN_TAG_SIZE;
- HWRM_PREP(req, VNIC_ALLOC);
+ HWRM_PREP(req, VNIC_ALLOC, BNXT_USE_CHIMP_MB);
if (vnic->func_default)
req.flags =
rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, VNIC_PLCMODES_QCFG);
+ HWRM_PREP(req, VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB);
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, VNIC_PLCMODES_CFG);
+ HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
req.flags = rte_cpu_to_le_32(pmode->flags);
HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
if (rc)
return rc;
- HWRM_PREP(req, VNIC_CFG);
+ HWRM_PREP(req, VNIC_CFG, BNXT_USE_CHIMP_MB);
/* Only RSS support for now TBD: COS & LB */
req.enables =
req.flags |= rte_cpu_to_le_32(
HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
return rc;
}
- HWRM_PREP(req, VNIC_QCFG);
+ HWRM_PREP(req, VNIC_QCFG, BNXT_USE_CHIMP_MB);
req.enables =
rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
req.vf_id = rte_cpu_to_le_16(fw_vf_id);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);
+ HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
return rc;
}
- HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);
+ HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);
req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
return rc;
}
- HWRM_PREP(req, VNIC_FREE);
+ HWRM_PREP(req, VNIC_FREE, BNXT_USE_CHIMP_MB);
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, VNIC_RSS_CFG);
+ HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
req.hash_mode_flags = vnic->hash_mode;
rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
return rc;
}
- HWRM_PREP(req, VNIC_PLCMODES_CFG);
+ HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
req.flags = rte_cpu_to_le_32(
HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
req.jumbo_thresh = rte_cpu_to_le_16(size);
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, VNIC_TPA_CFG);
+ HWRM_PREP(req, VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);
if (enable) {
req.enables = rte_cpu_to_le_32(
}
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
- HWRM_PREP(req, FUNC_CFG);
+ HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
struct hwrm_func_qstats_input req = {.req_type = 0};
struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, FUNC_QSTATS);
+ HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(fid);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
struct hwrm_func_qstats_input req = {.req_type = 0};
struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, FUNC_QSTATS);
+ HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(fid);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
struct hwrm_func_clr_stats_input req = {.req_type = 0};
struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, FUNC_CLR_STATS);
+ HWRM_PREP(req, FUNC_CLR_STATS, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(fid);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
uint16_t flags;
int rc = 0;
- HWRM_PREP(req, FUNC_QCFG);
+ HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(0xffff);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
req.fid = rte_cpu_to_le_16(0xffff);
- HWRM_PREP(req, FUNC_CFG);
+ HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
int rc;
/* Get the actual allocated values now */
- HWRM_PREP(req, FUNC_QCAPS);
+ HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
if (rc) {
PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
int rc;
/* Check for zero MAC address */
- HWRM_PREP(req, FUNC_QCFG);
+ HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
if (rc) {
PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
return -1;
int rc;
/* And copy the allocated numbers into the pf struct */
- HWRM_PREP(req, FUNC_QCFG);
+ HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(0xffff);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
/* Only TX ring value reflects actual allocation? TODO */
for (i = 0; i < num_vfs; i++) {
add_random_mac_if_needed(bp, &req, i);
- HWRM_PREP(req, FUNC_CFG);
+ HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp,
+ &req,
+ sizeof(req),
+ BNXT_USE_CHIMP_MB);
/* Clear enable flag for next pass */
req.enables &= ~rte_cpu_to_le_32(
struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
int rc;
- HWRM_PREP(req, FUNC_CFG);
+ HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(0xffff);
req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
req.evb_mode = bp->pf.evb_mode;
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
int rc = 0;
- HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
+ HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB);
req.tunnel_type = tunnel_type;
req.tunnel_dst_port_val = port;
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
switch (tunnel_type) {
struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
int rc = 0;
- HWRM_PREP(req, TUNNEL_DST_PORT_FREE);
+ HWRM_PREP(req, TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB);
req.tunnel_type = tunnel_type;
req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
struct hwrm_func_cfg_input req = {0};
int rc;
- HWRM_PREP(req, FUNC_CFG);
+ HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
req.flags = rte_cpu_to_le_32(flags);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, FUNC_BUF_RGTR);
+ HWRM_PREP(req, FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);
req.req_buf_num_pages = rte_cpu_to_le_16(1);
req.req_buf_page_size = rte_cpu_to_le_16(
return -ENOMEM;
}
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, FUNC_BUF_UNRGTR);
+ HWRM_PREP(req, FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
struct hwrm_func_cfg_input req = {0};
int rc;
- HWRM_PREP(req, FUNC_CFG);
+ HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(0xffff);
req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
req.async_event_cr = rte_cpu_to_le_16(
bp->def_cp_ring->cp_ring_struct->fw_ring_id);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
struct hwrm_func_vf_cfg_input req = {0};
int rc;
- HWRM_PREP(req, FUNC_VF_CFG);
+ HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
req.enables = rte_cpu_to_le_32(
HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
req.async_event_cr = rte_cpu_to_le_16(
bp->def_cp_ring->cp_ring_struct->fw_ring_id);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
uint32_t func_cfg_flags;
int rc = 0;
- HWRM_PREP(req, FUNC_CFG);
+ HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
if (is_vf) {
dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
struct hwrm_func_cfg_input req = {0};
int rc;
- HWRM_PREP(req, FUNC_CFG);
+ HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
req.enables |= rte_cpu_to_le_32(enables);
req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
req.max_bw = rte_cpu_to_le_32(max_bw);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
int rc = 0;
- HWRM_PREP(req, FUNC_CFG);
+ HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
if (ec_size > sizeof(req.encap_request))
return -1;
- HWRM_PREP(req, REJECT_FWD_RESP);
+ HWRM_PREP(req, REJECT_FWD_RESP, BNXT_USE_CHIMP_MB);
req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
memcpy(req.encap_request, encaped, ec_size);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
int rc;
- HWRM_PREP(req, FUNC_QCFG);
+ HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
if (ec_size > sizeof(req.encap_request))
return -1;
- HWRM_PREP(req, EXEC_FWD_RESP);
+ HWRM_PREP(req, EXEC_FWD_RESP, BNXT_USE_CHIMP_MB);
req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
memcpy(req.encap_request, encaped, ec_size);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
struct hwrm_stat_ctx_query_input req = {.req_type = 0};
struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, STAT_CTX_QUERY);
+ HWRM_PREP(req, STAT_CTX_QUERY, BNXT_USE_CHIMP_MB);
req.stat_ctx_id = rte_cpu_to_le_32(cid);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
struct bnxt_pf_info *pf = &bp->pf;
int rc;
- HWRM_PREP(req, PORT_QSTATS);
+ HWRM_PREP(req, PORT_QSTATS, BNXT_USE_CHIMP_MB);
req.port_id = rte_cpu_to_le_16(pf->port_id);
req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
return 0;
- HWRM_PREP(req, PORT_CLR_STATS);
+ HWRM_PREP(req, PORT_CLR_STATS, BNXT_USE_CHIMP_MB);
req.port_id = rte_cpu_to_le_16(pf->port_id);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
if (BNXT_VF(bp))
return 0;
- HWRM_PREP(req, PORT_LED_QCAPS);
+ HWRM_PREP(req, PORT_LED_QCAPS, BNXT_USE_CHIMP_MB);
req.port_id = bp->pf.port_id;
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
if (!bp->num_leds || BNXT_VF(bp))
return -EOPNOTSUPP;
- HWRM_PREP(req, PORT_LED_CFG);
+ HWRM_PREP(req, PORT_LED_CFG, BNXT_USE_CHIMP_MB);
if (led_on) {
led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
led_cfg->led_group_id = bp->leds[i].led_group_id;
}
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
struct hwrm_nvm_get_dir_info_input req = {0};
struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, NVM_GET_DIR_INFO);
+ HWRM_PREP(req, NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
"unable to map response address to physical memory\n");
return -ENOMEM;
}
- HWRM_PREP(req, NVM_GET_DIR_ENTRIES);
+ HWRM_PREP(req, NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB);
req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
if (rc == 0)
memcpy(data, buf, len > buflen ? buflen : len);
"unable to map response address to physical memory\n");
return -ENOMEM;
}
- HWRM_PREP(req, NVM_READ);
+ HWRM_PREP(req, NVM_READ, BNXT_USE_CHIMP_MB);
req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
req.dir_idx = rte_cpu_to_le_16(index);
req.offset = rte_cpu_to_le_32(offset);
req.len = rte_cpu_to_le_32(length);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
if (rc == 0)
memcpy(data, buf, length);
struct hwrm_nvm_erase_dir_entry_input req = {0};
struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, NVM_ERASE_DIR_ENTRY);
+ HWRM_PREP(req, NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB);
req.dir_idx = rte_cpu_to_le_16(index);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
}
memcpy(buf, data, data_len);
- HWRM_PREP(req, NVM_WRITE);
+ HWRM_PREP(req, NVM_WRITE, BNXT_USE_CHIMP_MB);
req.dir_type = rte_cpu_to_le_16(dir_type);
req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
req.dir_data_length = rte_cpu_to_le_32(data_len);
req.host_src_addr = rte_cpu_to_le_64(dma_handle);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
rte_free(buf);
HWRM_CHECK_RESULT();
int rc;
/* First query all VNIC ids */
- HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);
+ HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB);
req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
"unable to map VNIC ID table address to physical memory\n");
return -ENOMEM;
}
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
if (rc) {
HWRM_UNLOCK();
PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
struct hwrm_func_cfg_input req = {0};
int rc;
- HWRM_PREP(req, FUNC_CFG);
+ HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
req.enables |= rte_cpu_to_le_32(
req.vlan_antispoof_mode = on ?
HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
if (filter->fw_em_filter_id != UINT64_MAX)
bnxt_hwrm_clear_em_filter(bp, filter);
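+ /* CFA EM flow commands are steered to the Kong mailbox when it is enabled */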
- HWRM_PREP(req, CFA_EM_FLOW_ALLOC);
+ HWRM_PREP(req, CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp));
req.flags = rte_cpu_to_le_32(filter->flags);
req.enables = rte_cpu_to_le_32(enables);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
HWRM_CHECK_RESULT();
return 0;
PMD_DRV_LOG(ERR, "Clear EM filter\n");
- HWRM_PREP(req, CFA_EM_FLOW_FREE);
+ HWRM_PREP(req, CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));
req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
if (filter->fw_ntuple_filter_id != UINT64_MAX)
bnxt_hwrm_clear_ntuple_filter(bp, filter);
- HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC);
+ HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
req.flags = rte_cpu_to_le_32(filter->flags);
req.enables = rte_cpu_to_le_32(enables);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
if (filter->fw_ntuple_filter_id == UINT64_MAX)
return 0;
- HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE);
+ HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB);
req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
if (!bnxt_stratus_device(bp))
return 0;
- HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS);
+ HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS, BNXT_USE_CHIMP_MB);
bnxt_hwrm_set_coal_params(coal, &req);
req.ring_id = rte_cpu_to_le_16(ring_id);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
return 0;
bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS))
return 0;
- HWRM_PREP(req, PORT_QSTATS_EXT);
+ HWRM_PREP(req, PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB);
req.port_id = rte_cpu_to_le_16(pf->port_id);
if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) {
req.rx_stat_size =
rte_cpu_to_le_16(sizeof(struct rx_port_stats_ext));
}
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
if (rc) {
bp->fw_rx_port_stats_ext_size = 0;