X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fbnxt%2Fbnxt_hwrm.c;h=a9c9c7297cab3cab94a354d78c1065afc3fb66f4;hb=a6e7cd81fa41a9938d4d056e0b6c18adc0b67546;hp=1d3732eee73d3808c94e374040d744f07e3160e1;hpb=89a0deb866dc42ead92b79e6e7159622e1ab8490;p=dpdk.git

diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 1d3732eee7..a9c9c7297c 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -100,11 +100,7 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
 	if (bp->flags & BNXT_FLAG_FATAL_ERROR)
 		return 0;
 
-	/* For VER_GET command, set timeout as 50ms */
-	if (rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
-		timeout = HWRM_CMD_TIMEOUT;
-	else
-		timeout = bp->hwrm_cmd_timeout;
+	timeout = bp->hwrm_cmd_timeout;
 
 	if (bp->flags & BNXT_FLAG_SHORT_CMD ||
 	    msg_len > bp->max_req_len) {
@@ -188,6 +184,10 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
  */
 #define HWRM_PREP(req, type, kong) do {	\
 	rte_spinlock_lock(&bp->hwrm_lock); \
+	if (bp->hwrm_cmd_resp_addr == NULL) { \
+		rte_spinlock_unlock(&bp->hwrm_lock); \
+		return -EACCES; \
+	} \
 	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
 	req.req_type = rte_cpu_to_le_16(HWRM_##type); \
 	req.cmpl_ring = rte_cpu_to_le_16(-1); \
@@ -309,8 +309,8 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
 	if (vlan_table) {
 		if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
 			mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
-		req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
-			rte_mem_virt2iova(vlan_table));
+		req.vlan_tag_tbl_addr =
+			rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
 		req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
 	}
 	req.mask = rte_cpu_to_le_32(mask);
@@ -351,7 +351,7 @@ int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
 
 	req.fid = rte_cpu_to_le_16(fid);
 	req.vlan_tag_mask_tbl_addr =
-		rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
+		rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
 	req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
@@ -363,10 +363,11 @@ int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
 }
 
 int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
-			   struct bnxt_filter_info *filter)
+			      struct bnxt_filter_info *filter)
 {
 	int rc = 0;
 	struct bnxt_filter_info *l2_filter = filter;
+	struct bnxt_vnic_info *vnic = NULL;
 	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
 	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
 
@@ -379,6 +380,9 @@ int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
 	PMD_DRV_LOG(DEBUG, "filter: %p l2_filter: %p ref_cnt: %d\n",
 		    filter, l2_filter, l2_filter->l2_ref_cnt);
 
+	if (l2_filter->l2_ref_cnt == 0)
+		return 0;
+
 	if (l2_filter->l2_ref_cnt > 0)
 		l2_filter->l2_ref_cnt--;
 
@@ -395,6 +399,14 @@ int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
 	HWRM_UNLOCK();
 	filter->fw_l2_filter_id = UINT64_MAX;
 
+	if (l2_filter->l2_ref_cnt == 0) {
+		vnic = l2_filter->vnic;
+		if (vnic) {
+			STAILQ_REMOVE(&vnic->filter, l2_filter,
+				      bnxt_filter_info, next);
+			bnxt_free_filter(bp, l2_filter);
+		}
+	}
 	return 0;
 }
 
@@ -473,8 +485,11 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
 
 	HWRM_CHECK_RESULT();
 	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
+	filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
 	HWRM_UNLOCK();
 
+	filter->l2_ref_cnt++;
+
 	return rc;
 }
 
@@ -661,16 +676,15 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
 		bp->flags |= BNXT_FLAG_EXT_STATS_SUPPORTED;
 	if (flags &
 	    HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE) {
-		bp->flags |= BNXT_FLAG_FW_CAP_ERROR_RECOVERY;
+		bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
 		PMD_DRV_LOG(DEBUG, "Adapter Error recovery SUPPORTED\n");
-	} else {
-		bp->flags &= ~BNXT_FLAG_FW_CAP_ERROR_RECOVERY;
 	}
 
 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD)
-		bp->flags |= BNXT_FLAG_FW_CAP_ERR_RECOVER_RELOAD;
-	else
-		bp->flags &= ~BNXT_FLAG_FW_CAP_ERR_RECOVER_RELOAD;
+		bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
+
+	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_HOT_RESET_CAPABLE)
+		bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
 
 	HWRM_UNLOCK();
 
@@ -756,8 +770,9 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
 	if (bp->flags & BNXT_FLAG_REGISTERED)
 		return 0;
 
-	flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
-	if (bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY)
+	if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
+		flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
+	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
 		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT;
 
 	/* PFs and trusted VFs should indicate the support of the
@@ -797,12 +812,15 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
 				 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE |
 				 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CHANGE |
 				 ASYNC_CMPL_EVENT_ID_RESET_NOTIFY);
-	if (bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY)
+	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
 		req.async_event_fwd[0] |=
 			rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ERROR_RECOVERY);
 	req.async_event_fwd[1] |=
 		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
 				 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
+	if (BNXT_PF(bp))
+		req.async_event_fwd[1] |=
+			rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DBG_NOTIFICATION);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
@@ -810,7 +828,7 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
 
 	flags = rte_le_to_cpu_32(resp->flags);
 	if (flags & HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED)
-		bp->flags |= BNXT_FLAG_FW_CAP_IF_CHANGE;
+		bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
 
 	HWRM_UNLOCK();
 
@@ -934,7 +952,7 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
 	return rc;
 }
 
-int bnxt_hwrm_ver_get(struct bnxt *bp)
+int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout)
 {
 	int rc = 0;
 	struct hwrm_ver_get_input req = {.req_type = 0 };
@@ -945,6 +963,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
 	uint32_t dev_caps_cfg;
 
 	bp->max_req_len = HWRM_MAX_REQ_LEN;
+	bp->hwrm_cmd_timeout = timeout;
 	HWRM_PREP(req, VER_GET, BNXT_USE_CHIMP_MB);
 
 	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
@@ -979,7 +998,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
 	/* convert timeout to usec */
 	bp->hwrm_cmd_timeout *= 1000;
 	if (!bp->hwrm_cmd_timeout)
-		bp->hwrm_cmd_timeout = HWRM_CMD_TIMEOUT;
+		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
 
 	if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
 		PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
@@ -1011,9 +1030,8 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
 			rc = -ENOMEM;
 			goto error;
 		}
-		rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
 		bp->hwrm_cmd_resp_dma_addr =
-			rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
+			rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
 		if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
 			PMD_DRV_LOG(ERR,
 			"Unable to map response buffer to physical memory.\n");
@@ -1048,9 +1066,8 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
 			rc = -ENOMEM;
 			goto error;
 		}
-		rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
 		bp->hwrm_short_cmd_req_dma_addr =
-			rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
+			rte_malloc_virt2iova(bp->hwrm_short_cmd_req_addr);
 		if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) {
 			rte_free(bp->hwrm_short_cmd_req_addr);
 			PMD_DRV_LOG(ERR,
@@ -1210,6 +1227,35 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
 	return rc;
 }
 
+static bool bnxt_find_lossy_profile(struct bnxt *bp)
+{
+	int i = 0;
+
+	for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
+		if (bp->tx_cos_queue[i].profile ==
+		    HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
+			bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
+			return true;
+		}
+	}
+	return false;
+}
+
+static void bnxt_find_first_valid_profile(struct bnxt *bp)
+{
+	int i = 0;
+
+	for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
+		if (bp->tx_cos_queue[i].profile !=
+		    HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN &&
+		    bp->tx_cos_queue[i].id !=
+		    HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN) {
+			bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
+			break;
+		}
+	}
+}
+
 int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
 {
 	int rc = 0;
@@ -1269,14 +1315,13 @@ get_rx_info:
 					bp->tx_cos_queue[i].id;
 			}
 		} else {
-			for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
-				if (bp->tx_cos_queue[i].profile ==
-				    HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
-					bp->tx_cosq_id[0] =
-						bp->tx_cos_queue[i].id;
-					break;
-				}
-			}
+			/* When CoS classification is disabled, for normal NIC
+			 * operations, ideally we should look to use LOSSY.
+			 * If not found, fallback to the first valid profile
+			 */
+			if (!bnxt_find_lossy_profile(bp))
+				bnxt_find_first_valid_profile(bp);
+
 		}
 	}
 
@@ -2213,7 +2258,8 @@ int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
 	return 0;
 }
 
-int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
+static int
+bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
 {
 	int rc;
 	unsigned int i;
@@ -2264,7 +2310,8 @@ int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
 	return rc;
 }
 
-int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
+static int
+bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
 {
 	uint16_t idx;
 	uint32_t rc = 0;
@@ -2355,7 +2402,8 @@ void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
 	bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
 }
 
-int bnxt_free_all_hwrm_rings(struct bnxt *bp)
+static int
+bnxt_free_all_hwrm_rings(struct bnxt *bp)
 {
 	unsigned int i;
 
@@ -2430,11 +2478,10 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
 		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
 	bp->max_resp_len = HWRM_MAX_RESP_LEN;
 	bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
-	rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
 	if (bp->hwrm_cmd_resp_addr == NULL)
 		return -ENOMEM;
 	bp->hwrm_cmd_resp_dma_addr =
-		rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
+		rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
 	if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
 		PMD_DRV_LOG(ERR,
 			"unable to map response address to physical memory\n");
@@ -2445,7 +2492,8 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
 	return 0;
 }
 
-int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+static int
+bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 {
 	struct bnxt_filter_info *filter;
 	int rc = 0;
@@ -2455,8 +2503,7 @@ int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 			rc = bnxt_hwrm_clear_em_filter(bp, filter);
 		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
 			rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
-		else
-			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
+		rc = bnxt_hwrm_clear_l2_filter(bp, filter);
 		STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
 		bnxt_free_filter(bp, filter);
 	}
@@ -2478,8 +2525,7 @@ bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 			rc = bnxt_hwrm_clear_em_filter(bp, filter);
 		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
 			rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
-		else
-			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
+		rc = bnxt_hwrm_clear_l2_filter(bp, filter);
 
 		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
 		rte_free(flow);
@@ -2508,7 +2554,8 @@ int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	return rc;
 }
 
-void bnxt_free_tunnel_ports(struct bnxt *bp)
+static void
+bnxt_free_tunnel_ports(struct bnxt *bp)
 {
 	if (bp->vxlan_port_cnt)
 		bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
@@ -2914,10 +2961,10 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
 	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
 	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
 		/* FALLTHROUGH */
-		bp->port_partition_type = resp->port_partition_type;
+		bp->flags |= BNXT_FLAG_NPAR_PF;
 		break;
 	default:
-		bp->port_partition_type = 0;
+		bp->flags &= ~BNXT_FLAG_NPAR_PF;
 		break;
 	}
 
@@ -2978,8 +3025,7 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
 	}
 
 	req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
-	req.mtu = rte_cpu_to_le_16(RTE_MIN(bp->eth_dev->data->mtu,
-					   BNXT_MAX_MTU)); //FW adds hdr sizes
+	req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
 	req.mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
 	req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
 	req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
@@ -3054,9 +3100,9 @@ static void add_random_mac_if_needed(struct bnxt *bp,
 	}
 }
 
-static void reserve_resources_from_vf(struct bnxt *bp,
-				       struct hwrm_func_cfg_input *cfg_req,
-				       int vf)
+static int reserve_resources_from_vf(struct bnxt *bp,
+				     struct hwrm_func_cfg_input *cfg_req,
+				     int vf)
 {
 	struct hwrm_func_qcaps_input req = {0};
 	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
@@ -3090,6 +3136,8 @@ static void reserve_resources_from_vf(struct bnxt *bp,
 	bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
 
 	HWRM_UNLOCK();
+
+	return 0;
 }
 
 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
@@ -3383,7 +3431,7 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
 			page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
 	req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
 	req.req_buf_page_addr0 =
-		rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
+		rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf.vf_req_buf));
 	if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
 		PMD_DRV_LOG(ERR,
 			"unable to map buffer address to physical memory\n");
@@ -3813,10 +3861,9 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
 	buflen = dir_entries * entry_length;
 	buf = rte_malloc("nvm_dir", buflen, 0);
-	rte_mem_lock_page(buf);
 	if (buf == NULL)
 		return -ENOMEM;
 
-	dma_handle = rte_mem_virt2iova(buf);
+	dma_handle = rte_malloc_virt2iova(buf);
 	if (dma_handle == RTE_BAD_IOVA) {
 		PMD_DRV_LOG(ERR,
 			"unable to map response address to physical memory\n");
@@ -3847,11 +3894,10 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
 	struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
 
 	buf = rte_malloc("nvm_item", length, 0);
-	rte_mem_lock_page(buf);
 	if (!buf)
 		return -ENOMEM;
 
-	dma_handle = rte_mem_virt2iova(buf);
+	dma_handle = rte_malloc_virt2iova(buf);
 	if (dma_handle == RTE_BAD_IOVA) {
 		PMD_DRV_LOG(ERR,
 			"unable to map response address to physical memory\n");
@@ -3901,11 +3947,10 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
 	uint8_t *buf;
 
 	buf = rte_malloc("nvm_write", data_len, 0);
-	rte_mem_lock_page(buf);
 	if (!buf)
 		return -ENOMEM;
 
-	dma_handle = rte_mem_virt2iova(buf);
+	dma_handle = rte_malloc_virt2iova(buf);
 	if (dma_handle == RTE_BAD_IOVA) {
 		PMD_DRV_LOG(ERR,
 			"unable to map response address to physical memory\n");
@@ -3968,7 +4013,7 @@ static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
 
 	req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
 	req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
-	req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
+	req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_malloc_virt2iova(vnic_ids));
 
 	if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) {
 		HWRM_UNLOCK();
@@ -4193,7 +4238,6 @@ int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
 	if (filter->fw_em_filter_id == UINT64_MAX)
 		return 0;
 
-	PMD_DRV_LOG(ERR, "Clear EM filter\n");
 	HWRM_PREP(req, CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));
 
 	req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
@@ -4285,6 +4329,7 @@ int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
 
 	HWRM_CHECK_RESULT();
 	filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
+	filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
 	HWRM_UNLOCK();
 
 	return rc;
@@ -4845,7 +4890,7 @@ int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
 	uint32_t flags;
 	int rc;
 
-	if (!(bp->flags & BNXT_FLAG_FW_CAP_IF_CHANGE))
+	if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
 		return 0;
 
 	/* Do not issue FUNC_DRV_IF_CHANGE during reset recovery.
@@ -4888,7 +4933,7 @@ int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
 	int rc;
 
 	/* Older FW does not have error recovery support */
-	if (!(bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY))
+	if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
 		return 0;
 
 	if (!info) {