diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index bee4c15..a9c9c72 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -100,11 +100,7 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
        if (bp->flags & BNXT_FLAG_FATAL_ERROR)
                return 0;
 
-       /* For VER_GET command, set timeout as 50ms */
-       if (rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
-               timeout = HWRM_CMD_TIMEOUT;
-       else
-               timeout = bp->hwrm_cmd_timeout;
+       timeout = bp->hwrm_cmd_timeout;
 
        if (bp->flags & BNXT_FLAG_SHORT_CMD ||
            msg_len > bp->max_req_len) {
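
The VER_GET special case is not lost; it moves to the caller. bnxt_hwrm_ver_get() (further down in this patch) now takes the timeout as a parameter and stores it in bp->hwrm_cmd_timeout before issuing the command, so the send path needs only this single assignment. A minimal caller sketch, assuming the probe path simply passes the driver's DFLT_HWRM_CMD_TIMEOUT constant (the actual call site is outside this file):

	/* Illustrative init-time call; the value passed here covers the
	 * VER_GET itself, after which bp->hwrm_cmd_timeout is refreshed
	 * from the firmware's VER_GET response.
	 */
	rc = bnxt_hwrm_ver_get(bp, DFLT_HWRM_CMD_TIMEOUT);
	if (rc)
		return rc;
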
@@ -188,6 +184,10 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
  */
 #define HWRM_PREP(req, type, kong) do { \
        rte_spinlock_lock(&bp->hwrm_lock); \
+       if (bp->hwrm_cmd_resp_addr == NULL) { \
+               rte_spinlock_unlock(&bp->hwrm_lock); \
+               return -EACCES; \
+       } \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        req.req_type = rte_cpu_to_le_16(HWRM_##type); \
        req.cmpl_ring = rte_cpu_to_le_16(-1); \
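
The new NULL check protects against a command being prepared after the response buffer has been freed (e.g. a teardown or error-recovery race); returning from inside the macro is safe because every HWRM command function returns int. A self-contained sketch of the guard pattern, with illustrative names rather than the driver's:

	#include <errno.h>
	#include <pthread.h>
	#include <string.h>

	struct ctx {
		pthread_mutex_t lock;
		char *resp;		/* shared DMA response buffer */
		size_t resp_len;
	};

	/* Take the lock, bail out with -EACCES if the response buffer is
	 * already gone, otherwise clear it while still holding the lock.
	 */
	#define PREP(c) do { \
		pthread_mutex_lock(&(c)->lock); \
		if ((c)->resp == NULL) { \
			pthread_mutex_unlock(&(c)->lock); \
			return -EACCES; \
		} \
		memset((c)->resp, 0, (c)->resp_len); \
	} while (0)

	static int send_cmd(struct ctx *c)
	{
		PREP(c);	/* may return -EACCES on our behalf */
		/* build and send the command, then drop the lock */
		pthread_mutex_unlock(&c->lock);
		return 0;
	}
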
@@ -309,8 +309,8 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
        if (vlan_table) {
                if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
                        mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
-               req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
-                        rte_mem_virt2iova(vlan_table));
+               req.vlan_tag_tbl_addr =
+                       rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
        req.mask = rte_cpu_to_le_32(mask);
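
rte_malloc_virt2iova() is DPDK's dedicated translation helper for buffers obtained from rte_malloc(); this patch converts every such call site away from the generic rte_mem_virt2iova(). A sketch of the allocate/translate/check sequence these call sites depend on (names are illustrative):

	#include <rte_malloc.h>
	#include <rte_memory.h>

	/* Allocate a firmware-visible table from the rte_malloc() heap and
	 * resolve its IOVA, treating RTE_BAD_IOVA as a hard failure before
	 * the address is ever handed to the firmware.
	 */
	static void *alloc_dma_table(size_t len, rte_iova_t *iova)
	{
		void *tbl = rte_malloc("dma_tbl", len, 0);

		if (tbl == NULL)
			return NULL;
		*iova = rte_malloc_virt2iova(tbl);
		if (*iova == RTE_BAD_IOVA) {
			rte_free(tbl);
			return NULL;
		}
		return tbl;
	}
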
@@ -351,7 +351,7 @@ int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
        req.fid = rte_cpu_to_le_16(fid);
 
        req.vlan_tag_mask_tbl_addr =
-               rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
+               rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
        req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
 
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
@@ -363,10 +363,11 @@ int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
 }
 
 int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
-                          struct bnxt_filter_info *filter)
+                            struct bnxt_filter_info *filter)
 {
        int rc = 0;
        struct bnxt_filter_info *l2_filter = filter;
+       struct bnxt_vnic_info *vnic = NULL;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
 
@@ -379,6 +380,9 @@ int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
        PMD_DRV_LOG(DEBUG, "filter: %p l2_filter: %p ref_cnt: %d\n",
                    filter, l2_filter, l2_filter->l2_ref_cnt);
 
+       if (l2_filter->l2_ref_cnt == 0)
+               return 0;
+
        if (l2_filter->l2_ref_cnt > 0)
                l2_filter->l2_ref_cnt--;
 
@@ -395,6 +399,14 @@ int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
        HWRM_UNLOCK();
 
        filter->fw_l2_filter_id = UINT64_MAX;
+       if (l2_filter->l2_ref_cnt == 0) {
+               vnic = l2_filter->vnic;
+               if (vnic) {
+                       STAILQ_REMOVE(&vnic->filter, l2_filter,
+                                     bnxt_filter_info, next);
+                       bnxt_free_filter(bp, l2_filter);
+               }
+       }
 
        return 0;
 }
@@ -473,8 +485,11 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
        HWRM_CHECK_RESULT();
 
        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
+       filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
        HWRM_UNLOCK();
 
+       filter->l2_ref_cnt++;
+
        return rc;
 }
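
Together with the early return and deferred free added to bnxt_hwrm_clear_l2_filter() above, this makes L2 filter lifetime reference-counted: each successful HWRM_CFA_L2_FILTER_ALLOC takes a reference, and the firmware filter is freed and unlinked from its VNIC only when the last reference drops. It is also why the VNIC clear paths below now call bnxt_hwrm_clear_l2_filter() unconditionally: EM and ntuple filters pin an underlying L2 filter whose reference must be released too. A condensed sketch of the pairing (illustrative names):

	#include <stdint.h>

	struct l2_filter {
		int ref_cnt;
		uint64_t fw_id;
	};

	static void l2_filter_get(struct l2_filter *f)
	{
		f->ref_cnt++;		/* set_l2_filter() success path */
	}

	static int l2_filter_put(struct l2_filter *f)
	{
		if (f->ref_cnt == 0)
			return 0;	/* nothing left to release */
		if (--f->ref_cnt > 0)
			return 0;	/* still referenced elsewhere */
		/* last user: free the FW filter and unlink it from the VNIC */
		f->fw_id = UINT64_MAX;
		return 0;
	}
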
 
@@ -803,6 +818,9 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
        req.async_event_fwd[1] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
                                 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
+       if (BNXT_PF(bp))
+               req.async_event_fwd[1] |=
+                       rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DBG_NOTIFICATION);
 
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
@@ -934,7 +952,7 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
        return rc;
 }
 
-int bnxt_hwrm_ver_get(struct bnxt *bp)
+int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout)
 {
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
@@ -945,6 +963,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
        uint32_t dev_caps_cfg;
 
        bp->max_req_len = HWRM_MAX_REQ_LEN;
+       bp->hwrm_cmd_timeout = timeout;
        HWRM_PREP(req, VER_GET, BNXT_USE_CHIMP_MB);
 
        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
@@ -979,7 +998,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
        /* convert timeout to usec */
        bp->hwrm_cmd_timeout *= 1000;
        if (!bp->hwrm_cmd_timeout)
-               bp->hwrm_cmd_timeout = HWRM_CMD_TIMEOUT;
+               bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
 
        if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
                PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
@@ -1011,9 +1030,8 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
                        rc = -ENOMEM;
                        goto error;
                }
-               rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
-                       rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
+                       rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
                        PMD_DRV_LOG(ERR,
                        "Unable to map response buffer to physical memory.\n");
@@ -1048,9 +1066,8 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
                        rc = -ENOMEM;
                        goto error;
                }
-               rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
                bp->hwrm_short_cmd_req_dma_addr =
-                       rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
+                       rte_malloc_virt2iova(bp->hwrm_short_cmd_req_addr);
                if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) {
                        rte_free(bp->hwrm_short_cmd_req_addr);
                        PMD_DRV_LOG(ERR,
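
Also worth noting in this function: the rte_mem_lock_page() calls on the freshly allocated response and short-command buffers are dropped. Memory from rte_malloc() comes out of DPDK's (typically hugepage-backed) heap, so per-page locking adds nothing, and the IOVA translation now goes through rte_malloc_virt2iova() like the other call sites in this patch.
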
@@ -2241,7 +2258,8 @@ int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
        return 0;
 }
 
-int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
+static int
+bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
 {
        int rc;
        unsigned int i;
@@ -2292,7 +2310,8 @@ int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
        return rc;
 }
 
-int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
+static int
+bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
 {
        uint16_t idx;
        uint32_t rc = 0;
@@ -2383,7 +2402,8 @@ void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
                bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
 }
 
-int bnxt_free_all_hwrm_rings(struct bnxt *bp)
+static int
+bnxt_free_all_hwrm_rings(struct bnxt *bp)
 {
        unsigned int i;
 
@@ -2458,11 +2478,10 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
                pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
        bp->max_resp_len = HWRM_MAX_RESP_LEN;
        bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
-       rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
        if (bp->hwrm_cmd_resp_addr == NULL)
                return -ENOMEM;
        bp->hwrm_cmd_resp_dma_addr =
-               rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
+               rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
        if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
                PMD_DRV_LOG(ERR,
                        "unable to map response address to physical memory\n");
@@ -2473,7 +2492,8 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
        return 0;
 }
 
-int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+static int
+bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 {
        struct bnxt_filter_info *filter;
        int rc = 0;
@@ -2483,8 +2503,7 @@ int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
                        rc = bnxt_hwrm_clear_em_filter(bp, filter);
                else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
                        rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
-               else
-                       rc = bnxt_hwrm_clear_l2_filter(bp, filter);
+               rc = bnxt_hwrm_clear_l2_filter(bp, filter);
                STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
                bnxt_free_filter(bp, filter);
        }
@@ -2506,8 +2525,7 @@ bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
                        rc = bnxt_hwrm_clear_em_filter(bp, filter);
                else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
                        rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
-               else
-                       rc = bnxt_hwrm_clear_l2_filter(bp, filter);
+               rc = bnxt_hwrm_clear_l2_filter(bp, filter);
 
                STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
                rte_free(flow);
@@ -2536,7 +2554,8 @@ int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
        return rc;
 }
 
-void bnxt_free_tunnel_ports(struct bnxt *bp)
+static void
+bnxt_free_tunnel_ports(struct bnxt *bp)
 {
        if (bp->vxlan_port_cnt)
                bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
@@ -2942,10 +2961,10 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
        case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
        case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
                /* FALLTHROUGH */
-               bp->port_partition_type = resp->port_partition_type;
+               bp->flags |= BNXT_FLAG_NPAR_PF;
                break;
        default:
-               bp->port_partition_type = 0;
+               bp->flags &= ~BNXT_FLAG_NPAR_PF;
                break;
        }
 
@@ -3081,9 +3100,9 @@ static void add_random_mac_if_needed(struct bnxt *bp,
        }
 }
 
-static void reserve_resources_from_vf(struct bnxt *bp,
-                                     struct hwrm_func_cfg_input *cfg_req,
-                                     int vf)
+static int reserve_resources_from_vf(struct bnxt *bp,
+                                    struct hwrm_func_cfg_input *cfg_req,
+                                    int vf)
 {
        struct hwrm_func_qcaps_input req = {0};
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
@@ -3117,6 +3136,8 @@ static void reserve_resources_from_vf(struct bnxt *bp,
        bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
 
        HWRM_UNLOCK();
+
+       return 0;
 }
 
 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
@@ -3410,7 +3431,7 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
                         page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
        req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
        req.req_buf_page_addr0 =
-               rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
+               rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf.vf_req_buf));
        if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
                PMD_DRV_LOG(ERR,
                        "unable to map buffer address to physical memory\n");
@@ -3840,10 +3861,9 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
 
        buflen = dir_entries * entry_length;
        buf = rte_malloc("nvm_dir", buflen, 0);
-       rte_mem_lock_page(buf);
        if (buf == NULL)
                return -ENOMEM;
-       dma_handle = rte_mem_virt2iova(buf);
+       dma_handle = rte_malloc_virt2iova(buf);
        if (dma_handle == RTE_BAD_IOVA) {
                PMD_DRV_LOG(ERR,
                        "unable to map response address to physical memory\n");
@@ -3874,11 +3894,10 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
        struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
 
        buf = rte_malloc("nvm_item", length, 0);
-       rte_mem_lock_page(buf);
        if (!buf)
                return -ENOMEM;
 
-       dma_handle = rte_mem_virt2iova(buf);
+       dma_handle = rte_malloc_virt2iova(buf);
        if (dma_handle == RTE_BAD_IOVA) {
                PMD_DRV_LOG(ERR,
                        "unable to map response address to physical memory\n");
@@ -3928,11 +3947,10 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
        uint8_t *buf;
 
        buf = rte_malloc("nvm_write", data_len, 0);
-       rte_mem_lock_page(buf);
        if (!buf)
                return -ENOMEM;
 
-       dma_handle = rte_mem_virt2iova(buf);
+       dma_handle = rte_malloc_virt2iova(buf);
        if (dma_handle == RTE_BAD_IOVA) {
                PMD_DRV_LOG(ERR,
                        "unable to map response address to physical memory\n");
@@ -3995,7 +4013,7 @@ static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
 
        req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
        req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
-       req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
+       req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_malloc_virt2iova(vnic_ids));
 
        if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) {
                HWRM_UNLOCK();
@@ -4311,6 +4329,7 @@ int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
        HWRM_CHECK_RESULT();
 
        filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
+       filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
        HWRM_UNLOCK();
 
        return rc;