net/bnxt: fix bumping of L2 filter reference count
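
The L2 filter reference count is now bumped on every successful
bnxt_hwrm_set_l2_filter() call, and bnxt_hwrm_clear_l2_filter()
returns early when the count is already zero. When the last
reference is dropped, the filter is unlinked from its VNIC's
filter list and freed.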
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 018113c..460cc48 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -309,8 +309,8 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
        if (vlan_table) {
                if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
                        mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
-               req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
-                        rte_mem_virt2iova(vlan_table));
+               req.vlan_tag_tbl_addr =
+                       rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
        req.mask = rte_cpu_to_le_32(mask);
@@ -351,7 +351,7 @@ int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
        req.fid = rte_cpu_to_le_16(fid);
 
        req.vlan_tag_mask_tbl_addr =
-               rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
+               rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
        req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
 
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
@@ -363,10 +363,11 @@ int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
 }
 
 int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
-                          struct bnxt_filter_info *filter)
+                            struct bnxt_filter_info *filter)
 {
        int rc = 0;
        struct bnxt_filter_info *l2_filter = filter;
+       struct bnxt_vnic_info *vnic = NULL;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
 
@@ -379,6 +380,10 @@ int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
        PMD_DRV_LOG(DEBUG, "filter: %p l2_filter: %p ref_cnt: %d\n",
                    filter, l2_filter, l2_filter->l2_ref_cnt);
 
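+       /* Nothing to do if there are no references against this filter. */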
+       if (l2_filter->l2_ref_cnt == 0)
+               return 0;
+
        if (l2_filter->l2_ref_cnt > 0)
                l2_filter->l2_ref_cnt--;
 
@@ -395,6 +399,15 @@ int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
        HWRM_UNLOCK();
 
        filter->fw_l2_filter_id = UINT64_MAX;
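+       /* Last reference gone: unlink the filter from its VNIC and free it. */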
+       if (l2_filter->l2_ref_cnt == 0) {
+               vnic = l2_filter->vnic;
+               if (vnic) {
+                       STAILQ_REMOVE(&vnic->filter, l2_filter,
+                                     bnxt_filter_info, next);
+                       bnxt_free_filter(bp, l2_filter);
+               }
+       }
 
        return 0;
 }
@@ -473,8 +485,12 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
        HWRM_CHECK_RESULT();
 
        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
+       filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
        HWRM_UNLOCK();
 
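+       /* Count the new reference against this L2 filter. */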
+       filter->l2_ref_cnt++;
+
        return rc;
 }
 
@@ -661,16 +676,15 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
                bp->flags |= BNXT_FLAG_EXT_STATS_SUPPORTED;
 
        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE) {
-               bp->flags |= BNXT_FLAG_FW_CAP_ERROR_RECOVERY;
+               bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
                PMD_DRV_LOG(DEBUG, "Adapter Error recovery SUPPORTED\n");
-       } else {
-               bp->flags &= ~BNXT_FLAG_FW_CAP_ERROR_RECOVERY;
        }
 
        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD)
-               bp->flags |= BNXT_FLAG_FW_CAP_ERR_RECOVER_RELOAD;
-       else
-               bp->flags &= ~BNXT_FLAG_FW_CAP_ERR_RECOVER_RELOAD;
+               bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
+
+       if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_HOT_RESET_CAPABLE)
+               bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
 
        HWRM_UNLOCK();
 
@@ -692,7 +706,12 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
                        bp->flags |= BNXT_FLAG_NEW_RM;
        }
 
-       return rc;
+       /* On older FW, bnxt_hwrm_func_resc_qcaps can fail and cause
+        * an init failure. The error can be ignored, so return
+        * success instead of rc.
+        */
+
+       return 0;
 }
 
 /* VNIC cap covers capability of all VNICs. So no need to pass vnic_id */
@@ -751,8 +770,9 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;
 
-       flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
-       if (bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY)
+       if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
+               flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
+       if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
                flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT;
 
        /* PFs and trusted VFs should indicate the support of the
@@ -792,7 +812,7 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
                                 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_RESET_NOTIFY);
-       if (bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY)
+       if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
                req.async_event_fwd[0] |=
                        rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ERROR_RECOVERY);
        req.async_event_fwd[1] |=
@@ -805,7 +825,7 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
 
        flags = rte_le_to_cpu_32(resp->flags);
        if (flags & HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED)
-               bp->flags |= BNXT_FLAG_FW_CAP_IF_CHANGE;
+               bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
 
        HWRM_UNLOCK();
 
@@ -859,6 +879,11 @@ int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
                req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
                req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
                req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
+       } else if (bp->vf_resv_strategy ==
+                  HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MAXIMAL) {
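+               /* Maximal strategy: request all RSS contexts. */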
+               enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
+               req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
        }
 
        if (test)
@@ -897,7 +921,7 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
 
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
-       HWRM_CHECK_RESULT();
+       HWRM_CHECK_RESULT_SILENT();
 
        if (BNXT_VF(bp)) {
                bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
@@ -1002,9 +1026,8 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
                        rc = -ENOMEM;
                        goto error;
                }
-               rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
-                       rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
+                       rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
                        PMD_DRV_LOG(ERR,
                        "Unable to map response buffer to physical memory.\n");
@@ -1039,9 +1062,8 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
                        rc = -ENOMEM;
                        goto error;
                }
-               rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
                bp->hwrm_short_cmd_req_dma_addr =
-                       rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
+                       rte_malloc_virt2iova(bp->hwrm_short_cmd_req_addr);
                if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) {
                        rte_free(bp->hwrm_short_cmd_req_addr);
                        PMD_DRV_LOG(ERR,
@@ -1201,6 +1223,37 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
        return rc;
 }
 
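+/* Pick the first lossy TX CoS queue profile, highest index first. */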
+static bool bnxt_find_lossy_profile(struct bnxt *bp)
+{
+       int i = 0;
+
+       for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
+               if (bp->tx_cos_queue[i].profile ==
+                   HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
+                       bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
+                       return true;
+               }
+       }
+       return false;
+}
+
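+/* Fall back to the first TX CoS queue with a known profile and valid id. */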
+static void bnxt_find_first_valid_profile(struct bnxt *bp)
+{
+       int i = 0;
+
+       for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
+               if (bp->tx_cos_queue[i].profile !=
+                   HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN &&
+                   bp->tx_cos_queue[i].id !=
+                   HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN) {
+                       bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
+                       break;
+               }
+       }
+}
+
 int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
 {
        int rc = 0;
@@ -1213,8 +1264,9 @@ get_rx_info:
        HWRM_PREP(req, QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);
 
        req.flags = rte_cpu_to_le_32(dir);
-       /* HWRM Version >= 1.9.1 */
-       if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
+       /* Only on HWRM >= 1.9.1 and when CoS classification is not required. */
+       if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1 &&
+           !(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY))
                req.drv_qmap_cap =
                        HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
@@ -1259,14 +1311,12 @@ get_rx_info:
                                                bp->tx_cos_queue[i].id;
                        }
                } else {
-                       for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
-                               if (bp->tx_cos_queue[i].profile ==
-                                       HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
-                                       bp->tx_cosq_id[0] =
-                                               bp->tx_cos_queue[i].id;
-                                       break;
-                               }
-                       }
+                       /* When CoS classification is disabled, normal NIC
+                        * operation should prefer a LOSSY profile. If none
+                        * is found, fall back to the first valid profile.
+                        */
+                       if (!bnxt_find_lossy_profile(bp))
+                               bnxt_find_first_valid_profile(bp);
                }
        }
 
@@ -1691,10 +1742,29 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
        HWRM_PREP(req, VNIC_CFG, BNXT_USE_CHIMP_MB);
 
        if (BNXT_CHIP_THOR(bp)) {
-               struct bnxt_rx_queue *rxq =
-                       bp->eth_dev->data->rx_queues[vnic->start_grp_id];
-               struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
-               struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+               int dflt_rxq = vnic->start_grp_id;
+               struct bnxt_rx_ring_info *rxr;
+               struct bnxt_cp_ring_info *cpr;
+               struct bnxt_rx_queue *rxq;
+               int i;
+
+               /*
+                * The first active receive ring is used as the VNIC
+                * default receive ring. If there are no active receive
+                * rings (all corresponding receive queues are stopped),
+                * the first receive ring is used.
+                */
+               for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++) {
+                       rxq = bp->eth_dev->data->rx_queues[i];
+                       if (rxq->rx_started) {
+                               dflt_rxq = i;
+                               break;
+                       }
+               }
+
+               rxq = bp->eth_dev->data->rx_queues[dflt_rxq];
+               rxr = rxq->rx_ring;
+               cpr = rxq->cp_ring;
 
                req.default_rx_ring_id =
                        rte_cpu_to_le_16(rxr->rx_ring_struct->fw_ring_id);
@@ -2401,11 +2471,10 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
                pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
        bp->max_resp_len = HWRM_MAX_RESP_LEN;
        bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
-       rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
        if (bp->hwrm_cmd_resp_addr == NULL)
                return -ENOMEM;
        bp->hwrm_cmd_resp_dma_addr =
-               rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
+               rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
        if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
                PMD_DRV_LOG(ERR,
                        "unable to map response address to physical memory\n");
@@ -2426,8 +2495,7 @@ int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
                        rc = bnxt_hwrm_clear_em_filter(bp, filter);
                else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
                        rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
-               else
-                       rc = bnxt_hwrm_clear_l2_filter(bp, filter);
+               rc = bnxt_hwrm_clear_l2_filter(bp, filter);
                STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
                bnxt_free_filter(bp, filter);
        }
@@ -2449,8 +2517,7 @@ bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
                        rc = bnxt_hwrm_clear_em_filter(bp, filter);
                else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
                        rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
-               else
-                       rc = bnxt_hwrm_clear_l2_filter(bp, filter);
+               rc = bnxt_hwrm_clear_l2_filter(bp, filter);
 
                STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
                rte_free(flow);
@@ -2464,18 +2531,15 @@ int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
        int rc = 0;
 
        STAILQ_FOREACH(filter, &vnic->filter, next) {
-               if (filter->filter_type == HWRM_CFA_EM_FILTER) {
+               if (filter->filter_type == HWRM_CFA_EM_FILTER)
                        rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
                                                     filter);
-               } else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
+               else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
                        rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
                                                         filter);
-               } else {
+               else
                        rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
                                                     filter);
-                       if (!rc)
-                               filter->dflt = 1;
-               }
                if (rc)
                        break;
        }
@@ -3356,7 +3420,7 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
                         page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
        req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
        req.req_buf_page_addr0 =
-               rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
+               rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf.vf_req_buf));
        if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
                PMD_DRV_LOG(ERR,
                        "unable to map buffer address to physical memory\n");
@@ -3786,10 +3850,9 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
 
        buflen = dir_entries * entry_length;
        buf = rte_malloc("nvm_dir", buflen, 0);
-       rte_mem_lock_page(buf);
        if (buf == NULL)
                return -ENOMEM;
-       dma_handle = rte_mem_virt2iova(buf);
+       dma_handle = rte_malloc_virt2iova(buf);
        if (dma_handle == RTE_BAD_IOVA) {
                PMD_DRV_LOG(ERR,
                        "unable to map response address to physical memory\n");
@@ -3820,11 +3883,10 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
        struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
 
        buf = rte_malloc("nvm_item", length, 0);
-       rte_mem_lock_page(buf);
        if (!buf)
                return -ENOMEM;
 
-       dma_handle = rte_mem_virt2iova(buf);
+       dma_handle = rte_malloc_virt2iova(buf);
        if (dma_handle == RTE_BAD_IOVA) {
                PMD_DRV_LOG(ERR,
                        "unable to map response address to physical memory\n");
@@ -3874,11 +3936,10 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
        uint8_t *buf;
 
        buf = rte_malloc("nvm_write", data_len, 0);
-       rte_mem_lock_page(buf);
        if (!buf)
                return -ENOMEM;
 
-       dma_handle = rte_mem_virt2iova(buf);
+       dma_handle = rte_malloc_virt2iova(buf);
        if (dma_handle == RTE_BAD_IOVA) {
                PMD_DRV_LOG(ERR,
                        "unable to map response address to physical memory\n");
@@ -3941,7 +4002,7 @@ static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
 
        req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
        req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
-       req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
+       req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_malloc_virt2iova(vnic_ids));
 
        if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) {
                HWRM_UNLOCK();
@@ -4166,7 +4227,6 @@ int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
        if (filter->fw_em_filter_id == UINT64_MAX)
                return 0;
 
-       PMD_DRV_LOG(ERR, "Clear EM filter\n");
        HWRM_PREP(req, CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));
 
        req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
@@ -4258,6 +4318,7 @@ int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
        HWRM_CHECK_RESULT();
 
        filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
+       filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
        HWRM_UNLOCK();
 
        return rc;
@@ -4334,8 +4395,10 @@ bnxt_vnic_rss_configure_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
                        }
 
                        /* Return if no rings are active. */
-                       if (cnt == max_rings)
+                       if (cnt == max_rings) {
+                               HWRM_UNLOCK();
                                return 0;
+                       }
 
                        /* Add rx/cp ring pair to RSS table. */
                        rxr = rxqs[k]->rx_ring;
@@ -4816,7 +4879,7 @@ int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
        uint32_t flags;
        int rc;
 
-       if (!(bp->flags & BNXT_FLAG_FW_CAP_IF_CHANGE))
+       if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
                return 0;
 
        /* Do not issue FUNC_DRV_IF_CHANGE during reset recovery.
@@ -4859,7 +4922,7 @@ int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
        int rc;
 
        /* Older FW does not have error recovery support */
-       if (!(bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY))
+       if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
                return 0;
 
        if (!info) {