X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fbnxt%2Fbnxt_hwrm.c;h=93b2ea7a49d8605d064e1a13d3d0c99782e63174;hb=3684541b86f5277d9cd17667c13c08d93180fb04;hp=f94fdde2b12b7e8fa63614de952e40d62ca047d3;hpb=5bee7bef8d6f74d43fe81fae3e786608f3a5d725;p=dpdk.git

diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index f94fdde2b1..93b2ea7a49 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -11,6 +11,7 @@
 #include <rte_malloc.h>
 #include <rte_memzone.h>
 #include <rte_version.h>
+#include <rte_io.h>
 
 #include "bnxt.h"
 #include "bnxt_filter.h"
@@ -23,8 +24,6 @@
 #include "bnxt_vnic.h"
 #include "hsi_struct_def_dpdk.h"
 
-#include <rte_io.h>
-
 #define HWRM_SPEC_CODE_1_8_3 0x10803
 #define HWRM_VERSION_1_9_1 0x10901
 #define HWRM_VERSION_1_9_2 0x10903
@@ -75,9 +74,9 @@ static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem,
 
 /*
  * HWRM Functions (sent to HWRM)
- * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
- * fails (ie: a timeout), and a positive non-zero HWRM error code if the HWRM
- * command was failed by the ChiMP.
+ * These are named bnxt_hwrm_*() and return 0 on success, -ETIMEDOUT (-110)
+ * if the HWRM command times out, or a negative error code if the
+ * command is rejected by the FW.
  */
 
 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
@@ -101,11 +100,7 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
 	if (bp->flags & BNXT_FLAG_FATAL_ERROR)
 		return 0;
 
-	/* For VER_GET command, set timeout as 50ms */
-	if (rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
-		timeout = HWRM_CMD_TIMEOUT;
-	else
-		timeout = bp->hwrm_cmd_timeout;
+	timeout = bp->hwrm_cmd_timeout;
 
 	if (bp->flags & BNXT_FLAG_SHORT_CMD ||
 	    msg_len > bp->max_req_len) {
@@ -177,25 +172,29 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
 }
 
 /*
- * HWRM_PREP() should be used to prepare *ALL* HWRM commands.  It grabs the
+ * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
  * spinlock, and does initial processing.
  *
  * HWRM_CHECK_RESULT() returns errors on failure and may not be used. It
- * releases the spinlock only if it returns.  If the regular int return codes
+ * releases the spinlock only if it returns. If the regular int return codes
  * are not used by the function, HWRM_CHECK_RESULT() should not be used
  * directly, rather it should be copied and modified to suit the function.
  *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
-#define HWRM_PREP(req, type, kong) do {	\
+#define HWRM_PREP(req, type, kong) do { \
 	rte_spinlock_lock(&bp->hwrm_lock); \
+	if (bp->hwrm_cmd_resp_addr == NULL) { \
+		rte_spinlock_unlock(&bp->hwrm_lock); \
+		return -EACCES; \
+	} \
 	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
-	req.req_type = rte_cpu_to_le_16(HWRM_##type); \
-	req.cmpl_ring = rte_cpu_to_le_16(-1); \
-	req.seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) :\
-		rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
-	req.target_id = rte_cpu_to_le_16(0xffff); \
-	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
+	(req)->req_type = rte_cpu_to_le_16(type); \
+	(req)->cmpl_ring = rte_cpu_to_le_16(-1); \
+	(req)->seq_id = kong ? 
rte_cpu_to_le_16(bp->kong_cmd_seq++) :\ + rte_cpu_to_le_16(bp->chimp_cmd_seq++); \ + (req)->target_id = rte_cpu_to_le_16(0xffff); \ + (req)->resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \ } while (0) #define HWRM_CHECK_RESULT_SILENT() do {\ @@ -264,7 +263,7 @@ int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic) struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 }; struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB); req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); req.mask = 0; @@ -289,7 +288,7 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, if (vnic->fw_vnic_id == INVALID_HW_RING_ID) return rc; - HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB); req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); if (vnic->flags & BNXT_VNIC_INFO_BCAST) @@ -310,8 +309,8 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, if (vlan_table) { if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN)) mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY; - req.vlan_tag_tbl_addr = rte_cpu_to_le_64( - rte_mem_virt2iova(vlan_table)); + req.vlan_tag_tbl_addr = + rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table)); req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count); } req.mask = rte_cpu_to_le_32(mask); @@ -348,11 +347,11 @@ int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid, return 0; } } - HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB); req.fid = rte_cpu_to_le_16(fid); req.vlan_tag_mask_tbl_addr = - rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table)); + rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table)); req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); @@ -364,10 +363,11 @@ int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid, } int bnxt_hwrm_clear_l2_filter(struct bnxt *bp, - struct bnxt_filter_info *filter) + struct bnxt_filter_info *filter) { int rc = 0; struct bnxt_filter_info *l2_filter = filter; + struct bnxt_vnic_info *vnic = NULL; struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 }; struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr; @@ -380,13 +380,16 @@ int bnxt_hwrm_clear_l2_filter(struct bnxt *bp, PMD_DRV_LOG(DEBUG, "filter: %p l2_filter: %p ref_cnt: %d\n", filter, l2_filter, l2_filter->l2_ref_cnt); + if (l2_filter->l2_ref_cnt == 0) + return 0; + if (l2_filter->l2_ref_cnt > 0) l2_filter->l2_ref_cnt--; if (l2_filter->l2_ref_cnt > 0) return 0; - HWRM_PREP(req, CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB); req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id); @@ -396,6 +399,14 @@ int bnxt_hwrm_clear_l2_filter(struct bnxt *bp, HWRM_UNLOCK(); filter->fw_l2_filter_id = UINT64_MAX; + if (l2_filter->l2_ref_cnt == 0) { + vnic = l2_filter->vnic; + if (vnic) { + STAILQ_REMOVE(&vnic->filter, l2_filter, + bnxt_filter_info, next); + bnxt_free_filter(bp, l2_filter); + } + } return 0; } @@ -429,7 +440,7 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp, if (filter->fw_l2_filter_id != UINT64_MAX) bnxt_hwrm_clear_l2_filter(bp, filter); - HWRM_PREP(req, CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB); req.flags = 
rte_cpu_to_le_32(filter->flags); @@ -474,8 +485,11 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp, HWRM_CHECK_RESULT(); filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id); + filter->flow_id = rte_le_to_cpu_32(resp->flow_id); HWRM_UNLOCK(); + filter->l2_ref_cnt++; + return rc; } @@ -489,7 +503,7 @@ int bnxt_hwrm_ptp_cfg(struct bnxt *bp) if (!ptp) return 0; - HWRM_PREP(req, PORT_MAC_CFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_PORT_MAC_CFG, BNXT_USE_CHIMP_MB); if (ptp->rx_filter) flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE; @@ -519,11 +533,10 @@ static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp) struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr; struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; -/* if (bp->hwrm_spec_code < 0x10801 || ptp) TBD */ if (ptp) return 0; - HWRM_PREP(req, PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB); req.port_id = rte_cpu_to_le_16(bp->pf.port_id); @@ -578,7 +591,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) uint32_t flags; int i; - HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB); req.fid = rte_cpu_to_le_16(0xffff); @@ -663,16 +676,15 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) bp->flags |= BNXT_FLAG_EXT_STATS_SUPPORTED; if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE) { - bp->flags |= BNXT_FLAG_FW_CAP_ERROR_RECOVERY; + bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY; PMD_DRV_LOG(DEBUG, "Adapter Error recovery SUPPORTED\n"); - } else { - bp->flags &= ~BNXT_FLAG_FW_CAP_ERROR_RECOVERY; } if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD) - bp->flags |= BNXT_FLAG_FW_CAP_ERR_RECOVER_RELOAD; - else - bp->flags &= ~BNXT_FLAG_FW_CAP_ERR_RECOVER_RELOAD; + bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD; + + if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_HOT_RESET_CAPABLE) + bp->fw_cap |= BNXT_FW_CAP_HOT_RESET; HWRM_UNLOCK(); @@ -694,7 +706,12 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp) bp->flags |= BNXT_FLAG_NEW_RM; } - return rc; + /* On older FW, + * bnxt_hwrm_func_resc_qcaps can fail and cause init failure. + * But the error can be ignored. Return success. + */ + + return 0; } /* VNIC cap covers capability of all VNICs. 
So no need to pass vnic_id */ @@ -704,7 +721,7 @@ int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) struct hwrm_vnic_qcaps_input req = {.req_type = 0 }; struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, VNIC_QCAPS, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_VNIC_QCAPS, BNXT_USE_CHIMP_MB); req.target_id = rte_cpu_to_le_16(0xffff); @@ -731,7 +748,7 @@ int bnxt_hwrm_func_reset(struct bnxt *bp) struct hwrm_func_reset_input req = {.req_type = 0 }; struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, FUNC_RESET, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_RESET, BNXT_USE_CHIMP_MB); req.enables = rte_cpu_to_le_32(0); @@ -753,8 +770,9 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp) if (bp->flags & BNXT_FLAG_REGISTERED) return 0; - flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT; - if (bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY) + if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) + flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT; + if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT; /* PFs and trusted VFs should indicate the support of the @@ -763,7 +781,7 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp) if ((BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) && !BNXT_STINGRAY(bp)) flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT; - HWRM_PREP(req, FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB); req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER | HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD); req.ver_maj = RTE_VER_YEAR; @@ -794,12 +812,15 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp) ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE | ASYNC_CMPL_EVENT_ID_LINK_SPEED_CHANGE | ASYNC_CMPL_EVENT_ID_RESET_NOTIFY); - if (bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY) + if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) req.async_event_fwd[0] |= rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ERROR_RECOVERY); req.async_event_fwd[1] |= rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD | ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE); + if (BNXT_PF(bp)) + req.async_event_fwd[1] |= + rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DBG_NOTIFICATION); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); @@ -807,7 +828,7 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp) flags = rte_le_to_cpu_32(resp->flags); if (flags & HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED) - bp->flags |= BNXT_FLAG_FW_CAP_IF_CHANGE; + bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE; HWRM_UNLOCK(); @@ -832,7 +853,7 @@ int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test) struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr; struct hwrm_func_vf_cfg_input req = {0}; - HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB); enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS | HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS | @@ -861,6 +882,10 @@ int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test) req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX); req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX); req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC); + } else if (bp->vf_resv_strategy == + HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MAXIMAL) { + enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS; + req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx); } if (test) @@ -894,12 +919,12 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp) struct 
hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr; struct hwrm_func_resource_qcaps_input req = {0}; - HWRM_PREP(req, FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB); req.fid = rte_cpu_to_le_16(0xffff); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); - HWRM_CHECK_RESULT(); + HWRM_CHECK_RESULT_SILENT(); if (BNXT_VF(bp)) { bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx); @@ -927,7 +952,7 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp) return rc; } -int bnxt_hwrm_ver_get(struct bnxt *bp) +int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout) { int rc = 0; struct hwrm_ver_get_input req = {.req_type = 0 }; @@ -938,7 +963,8 @@ int bnxt_hwrm_ver_get(struct bnxt *bp) uint32_t dev_caps_cfg; bp->max_req_len = HWRM_MAX_REQ_LEN; - HWRM_PREP(req, VER_GET, BNXT_USE_CHIMP_MB); + bp->hwrm_cmd_timeout = timeout; + HWRM_PREP(&req, HWRM_VER_GET, BNXT_USE_CHIMP_MB); req.hwrm_intf_maj = HWRM_VERSION_MAJOR; req.hwrm_intf_min = HWRM_VERSION_MINOR; @@ -972,7 +998,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp) /* convert timeout to usec */ bp->hwrm_cmd_timeout *= 1000; if (!bp->hwrm_cmd_timeout) - bp->hwrm_cmd_timeout = HWRM_CMD_TIMEOUT; + bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) { PMD_DRV_LOG(ERR, "Unsupported firmware API version\n"); @@ -1004,9 +1030,8 @@ int bnxt_hwrm_ver_get(struct bnxt *bp) rc = -ENOMEM; goto error; } - rte_mem_lock_page(bp->hwrm_cmd_resp_addr); bp->hwrm_cmd_resp_dma_addr = - rte_mem_virt2iova(bp->hwrm_cmd_resp_addr); + rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr); if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) { PMD_DRV_LOG(ERR, "Unable to map response buffer to physical memory.\n"); @@ -1041,9 +1066,8 @@ int bnxt_hwrm_ver_get(struct bnxt *bp) rc = -ENOMEM; goto error; } - rte_mem_lock_page(bp->hwrm_short_cmd_req_addr); bp->hwrm_short_cmd_req_dma_addr = - rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr); + rte_malloc_virt2iova(bp->hwrm_short_cmd_req_addr); if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) { rte_free(bp->hwrm_short_cmd_req_addr); PMD_DRV_LOG(ERR, @@ -1080,7 +1104,7 @@ int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags) if (!(bp->flags & BNXT_FLAG_REGISTERED)) return 0; - HWRM_PREP(req, FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB); req.flags = flags; rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); @@ -1098,7 +1122,7 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf) struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr; uint32_t enables = 0; - HWRM_PREP(req, PORT_PHY_CFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_PORT_PHY_CFG, BNXT_USE_CHIMP_MB); if (conf->link_up) { /* Setting Fixed Speed. 
But AutoNeg is ON, So disable it */ @@ -1162,7 +1186,7 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp, struct hwrm_port_phy_qcfg_input req = {0}; struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, PORT_PHY_QCFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_PORT_PHY_QCFG, BNXT_USE_CHIMP_MB); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); @@ -1203,6 +1227,35 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp, return rc; } +static bool bnxt_find_lossy_profile(struct bnxt *bp) +{ + int i = 0; + + for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) { + if (bp->tx_cos_queue[i].profile == + HWRM_QUEUE_SERVICE_PROFILE_LOSSY) { + bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id; + return true; + } + } + return false; +} + +static void bnxt_find_first_valid_profile(struct bnxt *bp) +{ + int i = 0; + + for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) { + if (bp->tx_cos_queue[i].profile != + HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN && + bp->tx_cos_queue[i].id != + HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN) { + bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id; + break; + } + } +} + int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) { int rc = 0; @@ -1212,11 +1265,12 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) int i; get_rx_info: - HWRM_PREP(req, QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB); req.flags = rte_cpu_to_le_32(dir); - /* HWRM Version >= 1.9.1 */ - if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1) + /* HWRM Version >= 1.9.1 only if COS Classification is not required. */ + if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1 && + !(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY)) req.drv_qmap_cap = HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED; rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); @@ -1261,14 +1315,13 @@ get_rx_info: bp->tx_cos_queue[i].id; } } else { - for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) { - if (bp->tx_cos_queue[i].profile == - HWRM_QUEUE_SERVICE_PROFILE_LOSSY) { - bp->tx_cosq_id[0] = - bp->tx_cos_queue[i].id; - break; - } - } + /* When CoS classification is disabled, for normal NIC + * operations, ideally we should look to use LOSSY. 
+ * If not found, fallback to the first valid profile + */ + if (!bnxt_find_lossy_profile(bp)) + bnxt_find_first_valid_profile(bp); + } } @@ -1300,7 +1353,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp, struct rte_mempool *mb_pool; uint16_t rx_buf_size; - HWRM_PREP(req, RING_ALLOC, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_RING_ALLOC, BNXT_USE_CHIMP_MB); req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma); req.fbo = rte_cpu_to_le_32(0); @@ -1424,7 +1477,7 @@ int bnxt_hwrm_ring_free(struct bnxt *bp, struct hwrm_ring_free_input req = {.req_type = 0 }; struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, RING_FREE, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_RING_FREE, BNXT_USE_CHIMP_MB); req.ring_type = ring_type; req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id); @@ -1472,7 +1525,7 @@ int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx) struct hwrm_ring_grp_alloc_input req = {.req_type = 0 }; struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, RING_GRP_ALLOC, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_RING_GRP_ALLOC, BNXT_USE_CHIMP_MB); req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id); req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id); @@ -1483,8 +1536,7 @@ int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx) HWRM_CHECK_RESULT(); - bp->grp_info[idx].fw_grp_id = - rte_le_to_cpu_16(resp->ring_group_id); + bp->grp_info[idx].fw_grp_id = rte_le_to_cpu_16(resp->ring_group_id); HWRM_UNLOCK(); @@ -1497,7 +1549,7 @@ int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx) struct hwrm_ring_grp_free_input req = {.req_type = 0 }; struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, RING_GRP_FREE, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_RING_GRP_FREE, BNXT_USE_CHIMP_MB); req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id); @@ -1519,7 +1571,7 @@ int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE) return rc; - HWRM_PREP(req, STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB); req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id); @@ -1538,12 +1590,11 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 }; struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB); req.update_period_ms = rte_cpu_to_le_32(0); - req.stats_dma_addr = - rte_cpu_to_le_64(cpr->hw_stats_map); + req.stats_dma_addr = rte_cpu_to_le_64(cpr->hw_stats_map); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); @@ -1563,7 +1614,7 @@ int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, struct hwrm_stat_ctx_free_input req = {.req_type = 0 }; struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, STAT_CTX_FREE, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_STAT_CTX_FREE, BNXT_USE_CHIMP_MB); req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id); @@ -1596,9 +1647,8 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic) vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE; skip_ring_grps: - vnic->mru = bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN + - RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE; - HWRM_PREP(req, VNIC_ALLOC, BNXT_USE_CHIMP_MB); + vnic->mru = BNXT_VNIC_MRU(bp->eth_dev->data->mtu); + 
HWRM_PREP(&req, HWRM_VNIC_ALLOC, BNXT_USE_CHIMP_MB); if (vnic->func_default) req.flags = @@ -1621,7 +1671,7 @@ static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp, struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 }; struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB); req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); @@ -1654,7 +1704,7 @@ static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp, return rc; } - HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB); req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); req.flags = rte_cpu_to_le_32(pmode->flags); @@ -1693,13 +1743,32 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic) if (rc) return rc; - HWRM_PREP(req, VNIC_CFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_VNIC_CFG, BNXT_USE_CHIMP_MB); if (BNXT_CHIP_THOR(bp)) { - struct bnxt_rx_queue *rxq = - bp->eth_dev->data->rx_queues[vnic->start_grp_id]; - struct bnxt_rx_ring_info *rxr = rxq->rx_ring; - struct bnxt_cp_ring_info *cpr = rxq->cp_ring; + int dflt_rxq = vnic->start_grp_id; + struct bnxt_rx_ring_info *rxr; + struct bnxt_cp_ring_info *cpr; + struct bnxt_rx_queue *rxq; + int i; + + /* + * The first active receive ring is used as the VNIC + * default receive ring. If there are no active receive + * rings (all corresponding receive queues are stopped), + * the first receive ring is used. + */ + for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++) { + rxq = bp->eth_dev->data->rx_queues[i]; + if (rxq->rx_started) { + dflt_rxq = i; + break; + } + } + + rxq = bp->eth_dev->data->rx_queues[dflt_rxq]; + rxr = rxq->rx_ring; + cpr = rxq->cp_ring; req.default_rx_ring_id = rte_cpu_to_le_16(rxr->rx_ring_struct->fw_ring_id); @@ -1778,7 +1847,7 @@ int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic, PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id); return rc; } - HWRM_PREP(req, VNIC_QCFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_VNIC_QCFG, BNXT_USE_CHIMP_MB); req.enables = rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID); @@ -1821,7 +1890,7 @@ int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); @@ -1850,7 +1919,7 @@ int _bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule); return rc; } - HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB); req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(ctx_idx); @@ -1895,7 +1964,7 @@ int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic) return rc; } - HWRM_PREP(req, VNIC_FREE, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_VNIC_FREE, BNXT_USE_CHIMP_MB); req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); @@ -1922,7 +1991,7 @@ bnxt_hwrm_vnic_rss_cfg_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic) struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr; for (i = 0; i < nr_ctxs; i++) { - HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB); req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); req.hash_type = 
rte_cpu_to_le_32(vnic->hash_type); @@ -1960,7 +2029,7 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp, if (BNXT_CHIP_THOR(bp)) return bnxt_hwrm_vnic_rss_cfg_thor(bp, vnic); - HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB); req.hash_type = rte_cpu_to_le_32(vnic->hash_type); req.hash_mode_flags = vnic->hash_mode; @@ -1993,7 +2062,7 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp, return rc; } - HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB); req.flags = rte_cpu_to_le_32( HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT); @@ -2034,7 +2103,7 @@ int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp, return 0; } - HWRM_PREP(req, VNIC_TPA_CFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_VNIC_TPA_CFG, BNXT_USE_CHIMP_MB); if (enable) { req.enables = rte_cpu_to_le_32( @@ -2074,7 +2143,7 @@ int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr) memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr)); req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); - HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); @@ -2092,7 +2161,7 @@ int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid, struct hwrm_func_qstats_input req = {.req_type = 0}; struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_QSTATS, BNXT_USE_CHIMP_MB); req.fid = rte_cpu_to_le_16(fid); @@ -2115,7 +2184,7 @@ int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid, struct hwrm_func_qstats_input req = {.req_type = 0}; struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_QSTATS, BNXT_USE_CHIMP_MB); req.fid = rte_cpu_to_le_16(fid); @@ -2152,7 +2221,7 @@ int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid) struct hwrm_func_clr_stats_input req = {.req_type = 0}; struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, FUNC_CLR_STATS, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_CLR_STATS, BNXT_USE_CHIMP_MB); req.fid = rte_cpu_to_le_16(fid); @@ -2164,10 +2233,6 @@ int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid) return rc; } -/* - * HWRM utility functions - */ - int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp) { unsigned int i; @@ -2193,7 +2258,8 @@ int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp) return 0; } -int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp) +static int +bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp) { int rc; unsigned int i; @@ -2244,7 +2310,8 @@ int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp) return rc; } -int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp) +static int +bnxt_free_all_hwrm_ring_grps(struct bnxt *bp) { uint16_t idx; uint32_t rc = 0; @@ -2335,7 +2402,8 @@ void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index) bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID; } -int bnxt_free_all_hwrm_rings(struct bnxt *bp) +static int +bnxt_free_all_hwrm_rings(struct bnxt *bp) { unsigned int i; @@ -2386,6 +2454,10 @@ int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp) return rc; } +/* + * HWRM utility functions + */ + void bnxt_free_hwrm_resources(struct bnxt *bp) { /* Release memzone */ @@ -2406,11 +2478,10 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp) pdev->addr.bus, pdev->addr.devid, 
pdev->addr.function); bp->max_resp_len = HWRM_MAX_RESP_LEN; bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0); - rte_mem_lock_page(bp->hwrm_cmd_resp_addr); if (bp->hwrm_cmd_resp_addr == NULL) return -ENOMEM; bp->hwrm_cmd_resp_dma_addr = - rte_mem_virt2iova(bp->hwrm_cmd_resp_addr); + rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr); if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) { PMD_DRV_LOG(ERR, "unable to map response address to physical memory\n"); @@ -2421,7 +2492,8 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp) return 0; } -int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic) +static int +bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic) { struct bnxt_filter_info *filter; int rc = 0; @@ -2431,12 +2503,9 @@ int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic) rc = bnxt_hwrm_clear_em_filter(bp, filter); else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) rc = bnxt_hwrm_clear_ntuple_filter(bp, filter); - else - rc = bnxt_hwrm_clear_l2_filter(bp, filter); + rc = bnxt_hwrm_clear_l2_filter(bp, filter); STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next); bnxt_free_filter(bp, filter); - //if (rc) - //break; } return rc; } @@ -2456,13 +2525,10 @@ bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic) rc = bnxt_hwrm_clear_em_filter(bp, filter); else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) rc = bnxt_hwrm_clear_ntuple_filter(bp, filter); - else - rc = bnxt_hwrm_clear_l2_filter(bp, filter); + rc = bnxt_hwrm_clear_l2_filter(bp, filter); STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next); rte_free(flow); - //if (rc) - //break; } return rc; } @@ -2473,25 +2539,23 @@ int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic) int rc = 0; STAILQ_FOREACH(filter, &vnic->filter, next) { - if (filter->filter_type == HWRM_CFA_EM_FILTER) { + if (filter->filter_type == HWRM_CFA_EM_FILTER) rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter); - } else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) { + else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter); - } else { + else rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); - if (!rc) - filter->dflt = 1; - } if (rc) break; } return rc; } -void bnxt_free_tunnel_ports(struct bnxt *bp) +static void +bnxt_free_tunnel_ports(struct bnxt *bp) { if (bp->vxlan_port_cnt) bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id, @@ -2517,7 +2581,6 @@ void bnxt_free_all_hwrm_resources(struct bnxt *bp) for (i = bp->max_vnics - 1; i >= 0; i--) { struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; - // If the VNIC ID is invalid we are not currently using the VNIC if (vnic->fw_vnic_id == INVALID_HW_RING_ID) continue; @@ -2865,7 +2928,7 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu) uint16_t flags; int rc = 0; - HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB); req.fid = rte_cpu_to_le_16(0xffff); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); @@ -2891,17 +2954,17 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu) } if (mtu) - *mtu = resp->mtu; + *mtu = rte_le_to_cpu_16(resp->mtu); switch (resp->port_partition_type) { case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0: case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5: case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0: /* FALLTHROUGH */ - bp->port_partition_type = resp->port_partition_type; + 
bp->flags |= BNXT_FLAG_NPAR_PF; break; default: - bp->port_partition_type = 0; + bp->flags &= ~BNXT_FLAG_NPAR_PF; break; } @@ -2963,9 +3026,7 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings) req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags); req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU); - req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN + - RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * - BNXT_NUM_VLANS); + req.mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu)); req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx); req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx); req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings); @@ -2976,7 +3037,7 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings) req.fid = rte_cpu_to_le_16(0xffff); req.enables = rte_cpu_to_le_32(enables); - HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); @@ -3004,9 +3065,7 @@ static void populate_vf_func_cfg_req(struct bnxt *bp, req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * BNXT_NUM_VLANS); - req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN + - RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * - BNXT_NUM_VLANS); + req->mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu)); req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx / (num_vfs + 1)); req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1)); @@ -3041,16 +3100,16 @@ static void add_random_mac_if_needed(struct bnxt *bp, } } -static void reserve_resources_from_vf(struct bnxt *bp, - struct hwrm_func_cfg_input *cfg_req, - int vf) +static int reserve_resources_from_vf(struct bnxt *bp, + struct hwrm_func_cfg_input *cfg_req, + int vf) { struct hwrm_func_qcaps_input req = {0}; struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; int rc; /* Get the actual allocated values now */ - HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB); req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); @@ -3077,6 +3136,8 @@ static void reserve_resources_from_vf(struct bnxt *bp, bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps); HWRM_UNLOCK(); + + return 0; } int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf) @@ -3086,7 +3147,7 @@ int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf) int rc; /* Check for zero MAC address */ - HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB); req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); @@ -3104,7 +3165,7 @@ static int update_pf_resource_max(struct bnxt *bp) int rc; /* And copy the allocated numbers into the pf struct */ - HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB); req.fid = rte_cpu_to_le_16(0xffff); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); @@ -3207,7 +3268,7 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs) for (i = 0; i < num_vfs; i++) { add_random_mac_if_needed(bp, &req, i); - HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB); req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags); req.fid = 
rte_cpu_to_le_16(bp->pf.vf_info[i].fid); rc = bnxt_hwrm_send_message(bp, @@ -3263,7 +3324,7 @@ int bnxt_hwrm_pf_evb_mode(struct bnxt *bp) struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr; int rc; - HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB); req.fid = rte_cpu_to_le_16(0xffff); req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE); @@ -3283,7 +3344,7 @@ int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port, struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr; int rc = 0; - HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB); req.tunnel_type = tunnel_type; req.tunnel_dst_port_val = port; rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); @@ -3314,7 +3375,7 @@ int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port, struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr; int rc = 0; - HWRM_PREP(req, TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB); req.tunnel_type = tunnel_type; req.tunnel_dst_port_id = rte_cpu_to_be_16(port); @@ -3333,7 +3394,7 @@ int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf, struct hwrm_func_cfg_input req = {0}; int rc; - HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB); req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); req.flags = rte_cpu_to_le_32(flags); @@ -3363,14 +3424,14 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp) struct hwrm_func_buf_rgtr_input req = {.req_type = 0 }; struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB); req.req_buf_num_pages = rte_cpu_to_le_16(1); req.req_buf_page_size = rte_cpu_to_le_16( page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN)); req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN); req.req_buf_page_addr0 = - rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf)); + rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf.vf_req_buf)); if (req.req_buf_page_addr0 == RTE_BAD_IOVA) { PMD_DRV_LOG(ERR, "unable to map buffer address to physical memory\n"); @@ -3394,7 +3455,7 @@ int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp) if (!(BNXT_PF(bp) && bp->pdev->max_vfs)) return 0; - HWRM_PREP(req, FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); @@ -3410,7 +3471,7 @@ int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp) struct hwrm_func_cfg_input req = {0}; int rc; - HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB); req.fid = rte_cpu_to_le_16(0xffff); req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags); @@ -3432,7 +3493,7 @@ int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp) struct hwrm_func_vf_cfg_input req = {0}; int rc; - HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB); req.enables = rte_cpu_to_le_32( HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR); @@ -3454,7 +3515,7 @@ int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf) uint32_t func_cfg_flags; int rc = 0; - HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB); if (is_vf) { dflt_vlan = bp->pf.vf_info[vf].dflt_vlan; @@ -3486,7 +3547,7 @@ int 
bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf, struct hwrm_func_cfg_input req = {0}; int rc; - HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB); req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); req.enables |= rte_cpu_to_le_32(enables); @@ -3506,7 +3567,7 @@ int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf) struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr; int rc = 0; - HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB); req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags); req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); @@ -3543,7 +3604,7 @@ int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id, if (ec_size > sizeof(req.encap_request)) return -1; - HWRM_PREP(req, REJECT_FWD_RESP, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_REJECT_FWD_RESP, BNXT_USE_CHIMP_MB); req.encap_resp_target_id = rte_cpu_to_le_16(target_id); memcpy(req.encap_request, encaped, ec_size); @@ -3563,7 +3624,7 @@ int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf, struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; int rc; - HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB); req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); @@ -3587,7 +3648,7 @@ int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id, if (ec_size > sizeof(req.encap_request)) return -1; - HWRM_PREP(req, EXEC_FWD_RESP, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_EXEC_FWD_RESP, BNXT_USE_CHIMP_MB); req.encap_resp_target_id = rte_cpu_to_le_16(target_id); memcpy(req.encap_request, encaped, ec_size); @@ -3607,7 +3668,7 @@ int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx, struct hwrm_stat_ctx_query_input req = {.req_type = 0}; struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, STAT_CTX_QUERY, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_STAT_CTX_QUERY, BNXT_USE_CHIMP_MB); req.stat_ctx_id = rte_cpu_to_le_32(cid); @@ -3633,7 +3694,6 @@ int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx, stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes); } - HWRM_UNLOCK(); return rc; @@ -3646,7 +3706,7 @@ int bnxt_hwrm_port_qstats(struct bnxt *bp) struct bnxt_pf_info *pf = &bp->pf; int rc; - HWRM_PREP(req, PORT_QSTATS, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_PORT_QSTATS, BNXT_USE_CHIMP_MB); req.port_id = rte_cpu_to_le_16(pf->port_id); req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map); @@ -3671,7 +3731,7 @@ int bnxt_hwrm_port_clr_stats(struct bnxt *bp) BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp)) return 0; - HWRM_PREP(req, PORT_CLR_STATS, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_PORT_CLR_STATS, BNXT_USE_CHIMP_MB); req.port_id = rte_cpu_to_le_16(pf->port_id); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); @@ -3691,7 +3751,7 @@ int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) if (BNXT_VF(bp)) return 0; - HWRM_PREP(req, PORT_LED_QCAPS, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_PORT_LED_QCAPS, BNXT_USE_CHIMP_MB); req.port_id = bp->pf.port_id; rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); @@ -3733,7 +3793,7 @@ int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on) if (!bp->num_leds || BNXT_VF(bp)) return -EOPNOTSUPP; - HWRM_PREP(req, PORT_LED_CFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_PORT_LED_CFG, BNXT_USE_CHIMP_MB); if (led_on) { led_state = 
HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT; @@ -3766,7 +3826,7 @@ int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries, struct hwrm_nvm_get_dir_info_input req = {0}; struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); @@ -3801,16 +3861,15 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data) buflen = dir_entries * entry_length; buf = rte_malloc("nvm_dir", buflen, 0); - rte_mem_lock_page(buf); if (buf == NULL) return -ENOMEM; - dma_handle = rte_mem_virt2iova(buf); + dma_handle = rte_malloc_virt2iova(buf); if (dma_handle == RTE_BAD_IOVA) { PMD_DRV_LOG(ERR, "unable to map response address to physical memory\n"); return -ENOMEM; } - HWRM_PREP(req, NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB); req.host_dest_addr = rte_cpu_to_le_64(dma_handle); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); @@ -3835,17 +3894,16 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index, struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr; buf = rte_malloc("nvm_item", length, 0); - rte_mem_lock_page(buf); if (!buf) return -ENOMEM; - dma_handle = rte_mem_virt2iova(buf); + dma_handle = rte_malloc_virt2iova(buf); if (dma_handle == RTE_BAD_IOVA) { PMD_DRV_LOG(ERR, "unable to map response address to physical memory\n"); return -ENOMEM; } - HWRM_PREP(req, NVM_READ, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_NVM_READ, BNXT_USE_CHIMP_MB); req.host_dest_addr = rte_cpu_to_le_64(dma_handle); req.dir_idx = rte_cpu_to_le_16(index); req.offset = rte_cpu_to_le_32(offset); @@ -3867,7 +3925,7 @@ int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index) struct hwrm_nvm_erase_dir_entry_input req = {0}; struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB); req.dir_idx = rte_cpu_to_le_16(index); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); @@ -3889,11 +3947,10 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type, uint8_t *buf; buf = rte_malloc("nvm_write", data_len, 0); - rte_mem_lock_page(buf); if (!buf) return -ENOMEM; - dma_handle = rte_mem_virt2iova(buf); + dma_handle = rte_malloc_virt2iova(buf); if (dma_handle == RTE_BAD_IOVA) { PMD_DRV_LOG(ERR, "unable to map response address to physical memory\n"); @@ -3901,7 +3958,7 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type, } memcpy(buf, data, data_len); - HWRM_PREP(req, NVM_WRITE, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_NVM_WRITE, BNXT_USE_CHIMP_MB); req.dir_type = rte_cpu_to_le_16(dir_type); req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal); @@ -3952,11 +4009,11 @@ static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf, int rc; /* First query all VNIC ids */ - HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB); req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf); req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics); - req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids)); + req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_malloc_virt2iova(vnic_ids)); if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) { HWRM_UNLOCK(); @@ -4034,7 +4091,7 @@ 
int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf, struct hwrm_func_cfg_input req = {0}; int rc; - HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB); req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); req.enables |= rte_cpu_to_le_32( @@ -4109,7 +4166,7 @@ int bnxt_hwrm_set_em_filter(struct bnxt *bp, if (filter->fw_em_filter_id != UINT64_MAX) bnxt_hwrm_clear_em_filter(bp, filter); - HWRM_PREP(req, CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp)); + HWRM_PREP(&req, HWRM_CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp)); req.flags = rte_cpu_to_le_32(filter->flags); @@ -4181,8 +4238,7 @@ int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter) if (filter->fw_em_filter_id == UINT64_MAX) return 0; - PMD_DRV_LOG(ERR, "Clear EM filter\n"); - HWRM_PREP(req, CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp)); + HWRM_PREP(&req, HWRM_CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp)); req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id); @@ -4210,7 +4266,7 @@ int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp, if (filter->fw_ntuple_filter_id != UINT64_MAX) bnxt_hwrm_clear_ntuple_filter(bp, filter); - HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB); req.flags = rte_cpu_to_le_32(filter->flags); @@ -4218,7 +4274,6 @@ int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp, HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID; req.dst_id = rte_cpu_to_le_16(dst_id); - if (filter->ip_addr_type) { req.ip_addr_type = filter->ip_addr_type; enables |= @@ -4231,10 +4286,6 @@ int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp, HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR) memcpy(req.src_macaddr, filter->src_macaddr, RTE_ETHER_ADDR_LEN); - //if (enables & - //HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR) - //memcpy(req.dst_macaddr, filter->dst_macaddr, - //RTE_ETHER_ADDR_LEN); if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE) req.ethertype = rte_cpu_to_be_16(filter->ethertype); @@ -4278,6 +4329,7 @@ int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp, HWRM_CHECK_RESULT(); filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id); + filter->flow_id = rte_le_to_cpu_32(resp->flow_id); HWRM_UNLOCK(); return rc; @@ -4294,7 +4346,7 @@ int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp, if (filter->fw_ntuple_filter_id == UINT64_MAX) return 0; - HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB); req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id); @@ -4325,7 +4377,7 @@ bnxt_vnic_rss_configure_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic) struct bnxt_rx_ring_info *rxr; struct bnxt_cp_ring_info *cpr; - HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB); req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); req.hash_type = rte_cpu_to_le_32(vnic->hash_type); @@ -4354,8 +4406,10 @@ bnxt_vnic_rss_configure_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic) } /* Return if no rings are active. */ - if (cnt == max_rings) + if (cnt == max_rings) { + HWRM_UNLOCK(); return 0; + } /* Add rx/cp ring pair to RSS table. 
*/ rxr = rxqs[k]->rx_ring; @@ -4455,7 +4509,7 @@ static int bnxt_hwrm_set_coal_params_thor(struct bnxt *bp, uint16_t flags; int rc; - HWRM_PREP(req, RING_AGGINT_QCAPS, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_RING_AGGINT_QCAPS, BNXT_USE_CHIMP_MB); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); @@ -4492,7 +4546,9 @@ int bnxt_hwrm_set_ring_coal(struct bnxt *bp, return 0; } - HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, + HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, + BNXT_USE_CHIMP_MB); req.ring_id = rte_cpu_to_le_16(ring_id); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); @@ -4517,7 +4573,7 @@ int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) bp->ctx) return 0; - HWRM_PREP(req, FUNC_BACKING_STORE_QCAPS, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_QCAPS, BNXT_USE_CHIMP_MB); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT_SILENT(); @@ -4596,7 +4652,7 @@ int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables) if (!ctx) return 0; - HWRM_PREP(req, FUNC_BACKING_STORE_CFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_CFG, BNXT_USE_CHIMP_MB); req.enables = rte_cpu_to_le_32(enables); if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP) { @@ -4689,7 +4745,7 @@ int bnxt_hwrm_ext_port_qstats(struct bnxt *bp) bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS)) return 0; - HWRM_PREP(req, PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB); req.port_id = rte_cpu_to_le_16(pf->port_id); if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) { @@ -4730,7 +4786,7 @@ bnxt_hwrm_tunnel_redirect(struct bnxt *bp, uint8_t type) bp->hwrm_cmd_resp_addr; int rc = 0; - HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_ALLOC, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC, BNXT_USE_CHIMP_MB); req.tunnel_type = type; req.dest_fid = bp->fw_fid; rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); @@ -4749,7 +4805,7 @@ bnxt_hwrm_tunnel_redirect_free(struct bnxt *bp, uint8_t type) bp->hwrm_cmd_resp_addr; int rc = 0; - HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_FREE, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE, BNXT_USE_CHIMP_MB); req.tunnel_type = type; req.dest_fid = bp->fw_fid; rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); @@ -4767,7 +4823,7 @@ int bnxt_hwrm_tunnel_redirect_query(struct bnxt *bp, uint32_t *type) bp->hwrm_cmd_resp_addr; int rc = 0; - HWRM_PREP(req, CFA_REDIRECT_QUERY_TUNNEL_TYPE, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE, BNXT_USE_CHIMP_MB); req.src_fid = bp->fw_fid; rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); @@ -4788,7 +4844,7 @@ int bnxt_hwrm_tunnel_redirect_info(struct bnxt *bp, uint8_t tun_type, bp->hwrm_cmd_resp_addr; int rc = 0; - HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_INFO, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO, BNXT_USE_CHIMP_MB); req.src_fid = bp->fw_fid; req.tunnel_type = tun_type; rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); @@ -4813,7 +4869,7 @@ int bnxt_hwrm_set_mac(struct bnxt *bp) if (!BNXT_VF(bp)) return 0; - HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB); req.enables = rte_cpu_to_le_32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR); @@ -4836,7 +4892,7 @@ int 
bnxt_hwrm_if_change(struct bnxt *bp, bool up) uint32_t flags; int rc; - if (!(bp->flags & BNXT_FLAG_FW_CAP_IF_CHANGE)) + if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE)) return 0; /* Do not issue FUNC_DRV_IF_CHANGE during reset recovery. @@ -4846,7 +4902,7 @@ int bnxt_hwrm_if_change(struct bnxt *bp, bool up) if (!up && (bp->flags & BNXT_FLAG_FW_RESET)) return 0; - HWRM_PREP(req, FUNC_DRV_IF_CHANGE, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_DRV_IF_CHANGE, BNXT_USE_CHIMP_MB); if (up) req.flags = @@ -4879,7 +4935,7 @@ int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) int rc; /* Older FW does not have error recovery support */ - if (!(bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY)) + if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) return 0; if (!info) { @@ -4892,7 +4948,7 @@ int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) memset(info, 0, sizeof(*info)); } - HWRM_PREP(req, ERROR_RECOVERY_QCFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_ERROR_RECOVERY_QCFG, BNXT_USE_CHIMP_MB); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); @@ -4968,7 +5024,7 @@ int bnxt_hwrm_fw_reset(struct bnxt *bp) if (!BNXT_PF(bp)) return -EOPNOTSUPP; - HWRM_PREP(req, FW_RESET, BNXT_USE_KONG(bp)); + HWRM_PREP(&req, HWRM_FW_RESET, BNXT_USE_KONG(bp)); req.embedded_proc_type = HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP; @@ -4996,7 +5052,7 @@ int bnxt_hwrm_port_ts_query(struct bnxt *bp, uint8_t path, uint64_t *timestamp) if (!ptp) return 0; - HWRM_PREP(req, PORT_TS_QUERY, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_PORT_TS_QUERY, BNXT_USE_CHIMP_MB); switch (path) { case BNXT_PTP_FLAGS_PATH_TX: @@ -5044,7 +5100,7 @@ int bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(struct bnxt *bp) return 0; } - HWRM_PREP(req, CFA_ADV_FLOW_MGNT_QCAPS, BNXT_USE_KONG(bp)); + HWRM_PREP(&req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, BNXT_USE_KONG(bp)); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp)); HWRM_CHECK_RESULT();
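
Two patterns recur throughout the hunks above and are easy to miss in the noise. The first is the DMA-buffer change: every buffer handed to the firmware (HWRM response buffers, VLAN tables, NVM staging buffers, the VF request buffer) is now mapped with rte_malloc_virt2iova() instead of rte_mem_lock_page() plus rte_mem_virt2iova(). rte_malloc_virt2iova() resolves the IOVA directly from the rte_malloc heap, so it is valid only for rte_malloc()-allocated memory, and the page-locking step disappears. A minimal sketch of the pattern, using only the public DPDK API (the helper name is illustrative, not from the driver):

	#include <rte_malloc.h>
	#include <rte_memory.h>

	/* Allocate a firmware-visible buffer from the rte_malloc heap and
	 * resolve its bus address, mirroring the pattern this diff converges
	 * on: rte_malloc() + rte_malloc_virt2iova(), with RTE_BAD_IOVA as
	 * the failure sentinel, exactly as the HWRM call sites check above.
	 */
	static void *
	hwrm_dma_alloc(const char *name, size_t len, rte_iova_t *iova)
	{
		void *buf = rte_malloc(name, len, 0);

		if (buf == NULL)
			return NULL;

		*iova = rte_malloc_virt2iova(buf);
		if (*iova == RTE_BAD_IOVA) {
			rte_free(buf);
			return NULL;
		}
		return buf;
	}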
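
The second recurring change is the MRU computation: the open-coded "mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * BNXT_NUM_VLANS" expressions are folded into BNXT_VNIC_MRU(). The macro itself is defined in drivers/net/bnxt/bnxt.h and is not part of this diff; inferred from the expressions it replaces, it plausibly reduces to:

	/* Sketch inferred from the removed open-coded expressions above; the
	 * authoritative definition lives in bnxt.h, outside this diff.
	 */
	#define BNXT_VNIC_MRU(mtu)	\
		((mtu) + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \
		 VLAN_TAG_SIZE * BNXT_NUM_VLANS)

Centralizing the arithmetic matters because the call sites previously disagreed: the VNIC allocation path added a single VLAN_TAG_SIZE while the PF configuration path used VLAN_TAG_SIZE * BNXT_NUM_VLANS, a discrepancy the single macro removes.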