X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fbnxt%2Fbnxt_hwrm.c;h=a9c9c7297cab3cab94a354d78c1065afc3fb66f4;hb=a6e7cd81fa41a9938d4d056e0b6c18adc0b67546;hp=b5211aea75be9d1f76642364b91417481746cc7e;hpb=683e5cf79249d2628b7370beb7e39e040f5ec356;p=dpdk.git

diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index b5211aea75..a9c9c7297c 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -11,9 +11,9 @@
 #include <rte_malloc.h>
 #include <rte_memzone.h>
 #include <rte_version.h>
+#include <rte_io.h>
 
 #include "bnxt.h"
-#include "bnxt_cpr.h"
 #include "bnxt_filter.h"
 #include "bnxt_hwrm.h"
 #include "bnxt_rxq.h"
@@ -24,10 +24,6 @@
 #include "bnxt_vnic.h"
 #include "hsi_struct_def_dpdk.h"
 
-#include <rte_io.h>
-
-#define HWRM_CMD_TIMEOUT		6000000
-#define HWRM_SHORT_CMD_TIMEOUT		50000
 #define HWRM_SPEC_CODE_1_8_3		0x10803
 #define HWRM_VERSION_1_9_1		0x10901
 #define HWRM_VERSION_1_9_2		0x10903
@@ -78,9 +74,9 @@ static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem,
 
 /*
  * HWRM Functions (sent to HWRM)
- * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
- * fails (ie: a timeout), and a positive non-zero HWRM error code if the HWRM
- * command was failed by the ChiMP.
+ * These are named bnxt_hwrm_*() and return 0 on success or -110 if the
+ * HWRM command times out, or a negative error code if the HWRM
+ * command was failed by the FW.
  */
 
 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
@@ -104,11 +100,7 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
 	if (bp->flags & BNXT_FLAG_FATAL_ERROR)
 		return 0;
 
-	/* For VER_GET command, set timeout as 50ms */
-	if (rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
-		timeout = HWRM_SHORT_CMD_TIMEOUT;
-	else
-		timeout = HWRM_CMD_TIMEOUT;
+	timeout = bp->hwrm_cmd_timeout;
 
 	if (bp->flags & BNXT_FLAG_SHORT_CMD ||
 	    msg_len > bp->max_req_len) {
@@ -180,11 +172,11 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
 }
 
 /*
- * HWRM_PREP() should be used to prepare *ALL* HWRM commands.  It grabs the
+ * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
  * spinlock, and does initial processing.
  *
  * HWRM_CHECK_RESULT() returns errors on failure and may not be used.  It
- * releases the spinlock only if it returns.  If the regular int return codes
+ * releases the spinlock only if it returns. If the regular int return codes
  * are not used by the function, HWRM_CHECK_RESULT() should not be used
  * directly, rather it should be copied and modified to suit the function.
  *
@@ -192,6 +184,10 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
  */
 #define HWRM_PREP(req, type, kong) do { \
 	rte_spinlock_lock(&bp->hwrm_lock); \
+	if (bp->hwrm_cmd_resp_addr == NULL) { \
+		rte_spinlock_unlock(&bp->hwrm_lock); \
+		return -EACCES; \
+	} \
 	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
 	req.req_type = rte_cpu_to_le_16(HWRM_##type); \
 	req.cmpl_ring = rte_cpu_to_le_16(-1); \
@@ -313,8 +309,8 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
 	if (vlan_table) {
 		if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
 			mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
-		req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
-			rte_mem_virt2iova(vlan_table));
+		req.vlan_tag_tbl_addr =
+			rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
 		req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
 	}
 	req.mask = rte_cpu_to_le_32(mask);
@@ -355,7 +351,7 @@ int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
 	req.fid = rte_cpu_to_le_16(fid);
 	req.vlan_tag_mask_tbl_addr =
-		rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
+		rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
 	req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
@@ -367,10 +363,11 @@ int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
 }
 
 int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
-			   struct bnxt_filter_info *filter)
+			      struct bnxt_filter_info *filter)
 {
 	int rc = 0;
 	struct bnxt_filter_info *l2_filter = filter;
+	struct bnxt_vnic_info *vnic = NULL;
 	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
 	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
 
@@ -383,6 +380,9 @@ int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
 	PMD_DRV_LOG(DEBUG, "filter: %p l2_filter: %p ref_cnt: %d\n",
 		    filter, l2_filter, l2_filter->l2_ref_cnt);
 
+	if (l2_filter->l2_ref_cnt == 0)
+		return 0;
+
 	if (l2_filter->l2_ref_cnt > 0)
 		l2_filter->l2_ref_cnt--;
 
@@ -399,6 +399,14 @@ int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
 	HWRM_UNLOCK();
 
 	filter->fw_l2_filter_id = UINT64_MAX;
+	if (l2_filter->l2_ref_cnt == 0) {
+		vnic = l2_filter->vnic;
+		if (vnic) {
+			STAILQ_REMOVE(&vnic->filter, l2_filter,
+				      bnxt_filter_info, next);
+			bnxt_free_filter(bp, l2_filter);
+		}
+	}
 
 	return 0;
 }
@@ -477,8 +485,11 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
 
 	HWRM_CHECK_RESULT();
 
 	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
+	filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
 	HWRM_UNLOCK();
 
+	filter->l2_ref_cnt++;
+
 	return rc;
 }
@@ -522,7 +533,6 @@ static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
 	struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
 
-/*	if (bp->hwrm_spec_code < 0x10801 || ptp)  TBD  */
 	if (ptp)
 		return 0;
 
@@ -666,16 +676,15 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
 		bp->flags |= BNXT_FLAG_EXT_STATS_SUPPORTED;
 
 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE) {
-		bp->flags |= BNXT_FLAG_FW_CAP_ERROR_RECOVERY;
+		bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
 		PMD_DRV_LOG(DEBUG, "Adapter Error recovery SUPPORTED\n");
-	} else {
-		bp->flags &= ~BNXT_FLAG_FW_CAP_ERROR_RECOVERY;
 	}
 
 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD)
-		bp->flags |= BNXT_FLAG_FW_CAP_ERR_RECOVER_RELOAD;
-	else
-		bp->flags &= ~BNXT_FLAG_FW_CAP_ERR_RECOVER_RELOAD;
+		bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
+
+	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_HOT_RESET_CAPABLE)
+		bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
 
 	HWRM_UNLOCK();
 
@@ -697,7 +706,12 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 			bp->flags |= BNXT_FLAG_NEW_RM;
 	}
 
-	return rc;
+	/* On older FW,
+	 * bnxt_hwrm_func_resc_qcaps can fail and cause init failure.
+	 * But the error can be ignored. Return success.
+	 */
+
+	return 0;
 }
 
 /* VNIC cap covers capability of all VNICs. So no need to pass vnic_id */
@@ -756,8 +770,9 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
 	if (bp->flags & BNXT_FLAG_REGISTERED)
 		return 0;
 
-	flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
-	if (bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY)
+	if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
+		flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
+	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
 		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT;
 
 	/* PFs and trusted VFs should indicate the support of the
@@ -797,12 +812,15 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
 				 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE |
 				 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CHANGE |
 				 ASYNC_CMPL_EVENT_ID_RESET_NOTIFY);
-	if (bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY)
+	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
 		req.async_event_fwd[0] |=
 			rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ERROR_RECOVERY);
 	req.async_event_fwd[1] |=
 		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
 				 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
+	if (BNXT_PF(bp))
+		req.async_event_fwd[1] |=
+			rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DBG_NOTIFICATION);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
@@ -810,7 +828,7 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
 
 	flags = rte_le_to_cpu_32(resp->flags);
 	if (flags & HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED)
-		bp->flags |= BNXT_FLAG_FW_CAP_IF_CHANGE;
+		bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
 
 	HWRM_UNLOCK();
 
@@ -851,9 +869,7 @@ int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
 	req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
 	req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
 					    AGG_RING_MULTIPLIER);
-	req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings +
-					     bp->tx_nr_rings +
-					     BNXT_NUM_ASYNC_CPR(bp));
+	req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
 	req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
 					      bp->tx_nr_rings +
 					      BNXT_NUM_ASYNC_CPR(bp));
@@ -866,6 +882,10 @@ int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
 		req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
 		req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
 		req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
+	} else if (bp->vf_resv_strategy ==
+		   HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MAXIMAL) {
+		enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
+		req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
 	}
 
 	if (test)
@@ -904,7 +924,7 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
-	HWRM_CHECK_RESULT();
+	HWRM_CHECK_RESULT_SILENT();
 
 	if (BNXT_VF(bp)) {
 		bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
@@ -932,7 +952,7 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
 	return rc;
 }
 
-int bnxt_hwrm_ver_get(struct bnxt *bp)
+int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout)
 {
 	int rc = 0;
 	struct hwrm_ver_get_input req = {.req_type = 0 };
@@ -943,6 +963,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
 	uint32_t dev_caps_cfg;
 
 	bp->max_req_len = HWRM_MAX_REQ_LEN;
+	bp->hwrm_cmd_timeout = timeout;
 	HWRM_PREP(req, VER_GET, BNXT_USE_CHIMP_MB);
 
 	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
@@ -972,6 +993,13 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
 	fw_version |= resp->hwrm_intf_upd_8b;
 	bp->hwrm_spec_code = fw_version;
 
+	/* def_req_timeout value is in milliseconds */
+	bp->hwrm_cmd_timeout = rte_le_to_cpu_16(resp->def_req_timeout);
+	/* convert timeout to usec */
+	bp->hwrm_cmd_timeout *= 1000;
+	if (!bp->hwrm_cmd_timeout)
+		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
+
 	if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
 		PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
 		rc = -EINVAL;
@@ -1002,9 +1030,8 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
 			rc = -ENOMEM;
 			goto error;
 		}
-		rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
 		bp->hwrm_cmd_resp_dma_addr =
-			rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
+			rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
 		if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
 			PMD_DRV_LOG(ERR,
 				    "Unable to map response buffer to physical memory.\n");
@@ -1039,9 +1066,8 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
 			rc = -ENOMEM;
 			goto error;
 		}
-		rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
 		bp->hwrm_short_cmd_req_dma_addr =
-			rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
+			rte_malloc_virt2iova(bp->hwrm_short_cmd_req_addr);
 		if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) {
 			rte_free(bp->hwrm_short_cmd_req_addr);
 			PMD_DRV_LOG(ERR,
@@ -1201,6 +1227,35 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
 	return rc;
 }
 
+static bool bnxt_find_lossy_profile(struct bnxt *bp)
+{
+	int i = 0;
+
+	for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
+		if (bp->tx_cos_queue[i].profile ==
+		    HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
+			bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
+			return true;
+		}
+	}
+	return false;
+}
+
+static void bnxt_find_first_valid_profile(struct bnxt *bp)
+{
+	int i = 0;
+
+	for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
+		if (bp->tx_cos_queue[i].profile !=
+		    HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN &&
+		    bp->tx_cos_queue[i].id !=
+		    HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN) {
+			bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
+			break;
+		}
+	}
+}
+
 int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
 {
 	int rc = 0;
@@ -1213,8 +1268,9 @@ get_rx_info:
 	HWRM_PREP(req, QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);
 
 	req.flags = rte_cpu_to_le_32(dir);
-	/* HWRM Version >= 1.9.1 */
-	if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
+	/* HWRM Version >= 1.9.1 only if COS Classification is not required. */
+	if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1 &&
+	    !(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY))
 		req.drv_qmap_cap =
 			HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
@@ -1259,14 +1315,13 @@ get_rx_info:
 					bp->tx_cos_queue[i].id;
 			}
 		} else {
-			for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
-				if (bp->tx_cos_queue[i].profile ==
-				    HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
-					bp->tx_cosq_id[0] =
-						bp->tx_cos_queue[i].id;
-					break;
-				}
-			}
+			/* When CoS classification is disabled, for normal NIC
+			 * operations, ideally we should look to use LOSSY.
+			 * If not found, fallback to the first valid profile
+			 */
+			if (!bnxt_find_lossy_profile(bp))
+				bnxt_find_first_valid_profile(bp);
+
 		}
 	}
 
@@ -1481,8 +1536,7 @@ int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
 
 	HWRM_CHECK_RESULT();
 
-	bp->grp_info[idx].fw_grp_id =
-		rte_le_to_cpu_16(resp->ring_group_id);
+	bp->grp_info[idx].fw_grp_id = rte_le_to_cpu_16(resp->ring_group_id);
 
 	HWRM_UNLOCK();
 
@@ -1540,8 +1594,7 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 
 	req.update_period_ms = rte_cpu_to_le_32(0);
 
-	req.stats_dma_addr =
-		rte_cpu_to_le_64(cpr->hw_stats_map);
+	req.stats_dma_addr = rte_cpu_to_le_64(cpr->hw_stats_map);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
@@ -1594,8 +1647,7 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 		vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
 
 skip_ring_grps:
-	vnic->mru = bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
-				RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
+	vnic->mru = BNXT_VNIC_MRU(bp->eth_dev->data->mtu);
 	HWRM_PREP(req, VNIC_ALLOC, BNXT_USE_CHIMP_MB);
 
 	if (vnic->func_default)
@@ -1694,9 +1746,29 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	HWRM_PREP(req, VNIC_CFG, BNXT_USE_CHIMP_MB);
 
 	if (BNXT_CHIP_THOR(bp)) {
-		struct bnxt_rx_queue *rxq = bp->eth_dev->data->rx_queues[0];
-		struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
-		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+		int dflt_rxq = vnic->start_grp_id;
+		struct bnxt_rx_ring_info *rxr;
+		struct bnxt_cp_ring_info *cpr;
+		struct bnxt_rx_queue *rxq;
+		int i;
+
+		/*
+		 * The first active receive ring is used as the VNIC
+		 * default receive ring. If there are no active receive
+		 * rings (all corresponding receive queues are stopped),
+		 * the first receive ring is used.
+		 */
+		for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++) {
+			rxq = bp->eth_dev->data->rx_queues[i];
+			if (rxq->rx_started) {
+				dflt_rxq = i;
+				break;
+			}
+		}
+
+		rxq = bp->eth_dev->data->rx_queues[dflt_rxq];
+		rxr = rxq->rx_ring;
+		cpr = rxq->cp_ring;
 
 		req.default_rx_ring_id =
 			rte_cpu_to_le_16(rxr->rx_ring_struct->fw_ring_id);
@@ -2161,10 +2233,6 @@ int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
 	return rc;
 }
 
-/*
- * HWRM utility functions
- */
-
 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
 {
 	unsigned int i;
@@ -2190,7 +2258,8 @@ int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
 	return 0;
 }
 
-int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
+static int
+bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
 {
 	int rc;
 	unsigned int i;
@@ -2241,7 +2310,8 @@ int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
 	return rc;
 }
 
-int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
+static int
+bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
 {
 	uint16_t idx;
 	uint32_t rc = 0;
@@ -2332,7 +2402,8 @@ void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
 	bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
 }
 
-int bnxt_free_all_hwrm_rings(struct bnxt *bp)
+static int
+bnxt_free_all_hwrm_rings(struct bnxt *bp)
 {
 	unsigned int i;
 
@@ -2383,6 +2454,10 @@ int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
 	return rc;
 }
 
+/*
+ * HWRM utility functions
+ */
+
 void bnxt_free_hwrm_resources(struct bnxt *bp)
 {
 	/* Release memzone */
@@ -2403,11 +2478,10 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
 		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
 	bp->max_resp_len = HWRM_MAX_RESP_LEN;
 	bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
-	rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
 	if (bp->hwrm_cmd_resp_addr == NULL)
 		return -ENOMEM;
 	bp->hwrm_cmd_resp_dma_addr =
-		rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
+		rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
 	if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
 		PMD_DRV_LOG(ERR,
 			"unable to map response address to physical memory\n");
@@ -2418,7 +2492,8 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
 	return 0;
 }
 
-int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+static int
+bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 {
 	struct bnxt_filter_info *filter;
 	int rc = 0;
@@ -2428,12 +2503,9 @@ int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 			rc = bnxt_hwrm_clear_em_filter(bp, filter);
 		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
 			rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
-		else
-			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
+		rc = bnxt_hwrm_clear_l2_filter(bp, filter);
 		STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
 		bnxt_free_filter(bp, filter);
-		//if (rc)
-			//break;
 	}
 	return rc;
 }
@@ -2453,13 +2525,10 @@ bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 			rc = bnxt_hwrm_clear_em_filter(bp, filter);
 		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
 			rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
-		else
-			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
+		rc = bnxt_hwrm_clear_l2_filter(bp, filter);
 
 		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
 		rte_free(flow);
-		//if (rc)
-			//break;
 	}
 	return rc;
 }
@@ -2470,25 +2539,23 @@ int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	int rc = 0;
 
 	STAILQ_FOREACH(filter, &vnic->filter, next) {
-		if (filter->filter_type == HWRM_CFA_EM_FILTER) {
+		if (filter->filter_type == HWRM_CFA_EM_FILTER)
 			rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
 						     filter);
-		} else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
+		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
 			rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
 							 filter);
-		} else {
+		else
 			rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
 						     filter);
-			if (!rc)
-				filter->dflt = 1;
-		}
 		if (rc)
 			break;
 	}
 	return rc;
 }
 
-void bnxt_free_tunnel_ports(struct bnxt *bp)
+static void
+bnxt_free_tunnel_ports(struct bnxt *bp)
 {
 	if (bp->vxlan_port_cnt)
 		bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
@@ -2514,7 +2581,6 @@ void bnxt_free_all_hwrm_resources(struct bnxt *bp)
 	for (i = bp->max_vnics - 1; i >= 0; i--) {
 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
 
-		// If the VNIC ID is invalid we are not currently using the VNIC
 		if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
 			continue;
 
@@ -2875,27 +2941,30 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
 	if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
 		bp->flags |= BNXT_FLAG_MULTI_HOST;
 
-	if (BNXT_VF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
+	if (BNXT_VF(bp) &&
+	    !BNXT_VF_IS_TRUSTED(bp) &&
+	    (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
 		bp->flags |= BNXT_FLAG_TRUSTED_VF_EN;
 		PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n");
 	} else if (BNXT_VF(bp) &&
+		   BNXT_VF_IS_TRUSTED(bp) &&
 		   !(flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
 		bp->flags &= ~BNXT_FLAG_TRUSTED_VF_EN;
 		PMD_DRV_LOG(INFO, "Trusted VF cap disabled\n");
 	}
 
 	if (mtu)
-		*mtu = resp->mtu;
+		*mtu = rte_le_to_cpu_16(resp->mtu);
 
 	switch (resp->port_partition_type) {
 	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
 	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
 	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
 		/* FALLTHROUGH */
-		bp->port_partition_type = resp->port_partition_type;
+		bp->flags |= BNXT_FLAG_NPAR_PF;
 		break;
 	default:
-		bp->port_partition_type = 0;
+		bp->flags &= ~BNXT_FLAG_NPAR_PF;
 		break;
 	}
 
@@ -2957,9 +3026,7 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
 
 	req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
 	req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
-	req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
-				   RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
-				   BNXT_NUM_VLANS);
+	req.mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
 	req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
 	req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
 	req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
@@ -2998,9 +3065,7 @@ static void populate_vf_func_cfg_req(struct bnxt *bp,
 	req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
 				    RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
 				    BNXT_NUM_VLANS);
-	req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
-				    RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
-				    BNXT_NUM_VLANS);
+	req->mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
 	req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
 						(num_vfs + 1));
 	req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
@@ -3035,9 +3100,9 @@ static void add_random_mac_if_needed(struct bnxt *bp,
 	}
 }
 
-static void reserve_resources_from_vf(struct bnxt *bp,
-				      struct hwrm_func_cfg_input *cfg_req,
-				      int vf)
+static int reserve_resources_from_vf(struct bnxt *bp,
+				     struct hwrm_func_cfg_input *cfg_req,
+				     int vf)
 {
 	struct hwrm_func_qcaps_input req = {0};
 	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
@@ -3071,6 +3136,8 @@ static void reserve_resources_from_vf(struct bnxt *bp,
 	bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
 
 	HWRM_UNLOCK();
+
+	return 0;
 }
 
 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
@@ -3364,7 +3431,7 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
 			page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
 	req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
 	req.req_buf_page_addr0 =
-		rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
+		rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf.vf_req_buf));
 	if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
 		PMD_DRV_LOG(ERR,
 			"unable to map buffer address to physical memory\n");
@@ -3627,7 +3694,6 @@ int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
 		stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
 	}
 
-
 	HWRM_UNLOCK();
 
 	return rc;
@@ -3795,10 +3861,9 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
 
 	buflen = dir_entries * entry_length;
 	buf = rte_malloc("nvm_dir", buflen, 0);
-	rte_mem_lock_page(buf);
 	if (buf == NULL)
 		return -ENOMEM;
-	dma_handle = rte_mem_virt2iova(buf);
+	dma_handle = rte_malloc_virt2iova(buf);
 	if (dma_handle == RTE_BAD_IOVA) {
 		PMD_DRV_LOG(ERR,
 			"unable to map response address to physical memory\n");
@@ -3829,11 +3894,10 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
 	struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
 
 	buf = rte_malloc("nvm_item", length, 0);
-	rte_mem_lock_page(buf);
 	if (!buf)
 		return -ENOMEM;
 
-	dma_handle = rte_mem_virt2iova(buf);
+	dma_handle = rte_malloc_virt2iova(buf);
 	if (dma_handle == RTE_BAD_IOVA) {
 		PMD_DRV_LOG(ERR,
 			"unable to map response address to physical memory\n");
@@ -3883,11 +3947,10 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
 	uint8_t *buf;
 
 	buf = rte_malloc("nvm_write", data_len, 0);
-	rte_mem_lock_page(buf);
 	if (!buf)
 		return -ENOMEM;
 
-	dma_handle = rte_mem_virt2iova(buf);
+	dma_handle = rte_malloc_virt2iova(buf);
 	if (dma_handle == RTE_BAD_IOVA) {
 		PMD_DRV_LOG(ERR,
 			"unable to map response address to physical memory\n");
@@ -3950,7 +4013,7 @@ static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
 	req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
 	req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
-	req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
+	req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_malloc_virt2iova(vnic_ids));
 
 	if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) {
 		HWRM_UNLOCK();
@@ -4175,7 +4238,6 @@ int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
 	if (filter->fw_em_filter_id == UINT64_MAX)
 		return 0;
 
-	PMD_DRV_LOG(ERR, "Clear EM filter\n");
 	HWRM_PREP(req, CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));
 
 	req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
@@ -4212,7 +4274,6 @@ int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
 		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
 	req.dst_id = rte_cpu_to_le_16(dst_id);
 
-
 	if (filter->ip_addr_type) {
 		req.ip_addr_type = filter->ip_addr_type;
 		enables |=
@@ -4225,10 +4286,6 @@ int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
 		memcpy(req.src_macaddr, filter->src_macaddr,
 		       RTE_ETHER_ADDR_LEN);
-	//if (enables &
-	    //HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
-		//memcpy(req.dst_macaddr, filter->dst_macaddr,
-		       //RTE_ETHER_ADDR_LEN);
 	if (enables &
 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
 		req.ethertype = rte_cpu_to_be_16(filter->ethertype);
@@ -4272,6 +4329,7 @@ int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
 	HWRM_CHECK_RESULT();
 
 	filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
+	filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
 	HWRM_UNLOCK();
 
 	return rc;
@@ -4348,8 +4406,10 @@ bnxt_vnic_rss_configure_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 		}
 
 		/* Return if no rings are active. */
-		if (cnt == max_rings)
+		if (cnt == max_rings) {
+			HWRM_UNLOCK();
 			return 0;
+		}
 
 		/* Add rx/cp ring pair to RSS table. */
 		rxr = rxqs[k]->rx_ring;
@@ -4830,7 +4890,7 @@ int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
 	uint32_t flags;
 	int rc;
 
-	if (!(bp->flags & BNXT_FLAG_FW_CAP_IF_CHANGE))
+	if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
 		return 0;
 
 	/* Do not issue FUNC_DRV_IF_CHANGE during reset recovery.
@@ -4852,6 +4912,9 @@ int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
 	flags = rte_le_to_cpu_32(resp->flags);
 	HWRM_UNLOCK();
 
+	if (!up)
+		return 0;
+
 	if (flags & HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_HOT_FW_RESET_DONE) {
 		PMD_DRV_LOG(INFO, "FW reset happened while port was down\n");
 		bp->flags |= BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;
@@ -4870,7 +4933,7 @@ int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
 	int rc;
 
 	/* Older FW does not have error recovery support */
-	if (!(bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY))
+	if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
 		return 0;
 
 	if (!info) {