net/qede/base: changes for 100G
diff --git a/drivers/net/qede/base/ecore_cxt.c b/drivers/net/qede/base/ecore_cxt.c
index 46455ea..5c3370e 100644
--- a/drivers/net/qede/base/ecore_cxt.c
+++ b/drivers/net/qede/base/ecore_cxt.c
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
  * All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
  */
 
 #include "bcm_osal.h"
 #define TM_ELEM_SIZE   4
 
 /* ILT constants */
-/* If for some reason, HW P size is modified to be less than 32K,
- * special handling needs to be made for CDU initialization
- */
-#define ILT_DEFAULT_HW_P_SIZE  3
+#define ILT_DEFAULT_HW_P_SIZE  4
 
 #define ILT_PAGE_IN_BYTES(hw_p_size)   (1U << ((hw_p_size) + 12))
 #define ILT_CFG_REG(cli, reg)          PSWRQ2_REG_##cli##_##reg##_RT_OFFSET
@@ -59,8 +54,8 @@
 
 /* connection context union */
 union conn_context {
-       struct core_conn_context core_ctx;
-       struct eth_conn_context eth_ctx;
+       struct e4_core_conn_context core_ctx;
+       struct e4_eth_conn_context eth_ctx;
 };
 
 /* TYPE-0 task context - iSCSI, FCOE */
@@ -69,6 +64,7 @@ union type0_task_context {
 
 /* TYPE-1 task context - ROCE */
 union type1_task_context {
+       struct regpair reserved; /* @DPDK */
 };
 
 struct src_ent {
@@ -836,7 +832,7 @@ static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
                                 p_mngr->t2_num_pages *
                                 sizeof(struct ecore_dma_mem));
        if (!p_mngr->t2) {
-               DP_NOTICE(p_hwfn, true, "Failed to allocate t2 table\n");
+               DP_NOTICE(p_hwfn, false, "Failed to allocate t2 table\n");
                rc = ECORE_NOMEM;
                goto t2_fail;
        }
@@ -921,6 +917,9 @@ static void ecore_ilt_shadow_free(struct ecore_hwfn *p_hwfn)
        struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        u32 ilt_size, i;
 
+       if (p_mngr->ilt_shadow == OSAL_NULL)
+               return;
+
        ilt_size = ecore_cxt_ilt_shadow_size(p_cli);
 
        for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
@@ -933,6 +932,7 @@ static void ecore_ilt_shadow_free(struct ecore_hwfn *p_hwfn)
                p_dma->p_virt = OSAL_NULL;
        }
        OSAL_FREE(p_hwfn->p_dev, p_mngr->ilt_shadow);
+       p_mngr->ilt_shadow = OSAL_NULL;
 }
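The NULL check at entry and the clearing of ilt_shadow after OSAL_FREE make this teardown safe to run more than once; a small sketch of the same guard-then-clear idiom, using plain free() in place of the OSAL wrappers (hypothetical helper, not part of the driver):

    #include <stdlib.h>

    /* Release a table at most once: bail out if it is already gone and
     * clear the pointer after freeing so a second call is a no-op.
     */
    static void table_free(void **p_table)
    {
            if (*p_table == NULL)
                    return;

            free(*p_table);
            *p_table = NULL;
    }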
 
 static enum _ecore_status_t
@@ -1002,8 +1002,7 @@ static enum _ecore_status_t ecore_ilt_shadow_alloc(struct ecore_hwfn *p_hwfn)
                                         size * sizeof(struct ecore_dma_mem));
 
        if (!p_mngr->ilt_shadow) {
-               DP_NOTICE(p_hwfn, true,
-                         "Failed to allocate ilt shadow table\n");
+               DP_NOTICE(p_hwfn, false, "Failed to allocate ilt shadow table\n");
                rc = ECORE_NOMEM;
                goto ilt_shadow_fail;
        }
@@ -1046,12 +1045,14 @@ static void ecore_cid_map_free(struct ecore_hwfn *p_hwfn)
 
        for (type = 0; type < MAX_CONN_TYPES; type++) {
                OSAL_FREE(p_hwfn->p_dev, p_mngr->acquired[type].cid_map);
+               p_mngr->acquired[type].cid_map = OSAL_NULL;
                p_mngr->acquired[type].max_count = 0;
                p_mngr->acquired[type].start_cid = 0;
 
                for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) {
                        OSAL_FREE(p_hwfn->p_dev,
                                  p_mngr->acquired_vf[type][vf].cid_map);
+                       p_mngr->acquired_vf[type][vf].cid_map = OSAL_NULL;
                        p_mngr->acquired_vf[type][vf].max_count = 0;
                        p_mngr->acquired_vf[type][vf].start_cid = 0;
                }
@@ -1128,11 +1129,13 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
 
        p_mngr = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_mngr));
        if (!p_mngr) {
-               DP_NOTICE(p_hwfn, true,
-                         "Failed to allocate `struct ecore_cxt_mngr'\n");
+               DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_cxt_mngr'\n");
                return ECORE_NOMEM;
        }
 
+       /* Set the cxt manager pointer prior to further allocations */
+       p_hwfn->p_cxt_mngr = p_mngr;
+
        /* Initialize ILT client registers */
        clients = p_mngr->clients;
        clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
@@ -1159,7 +1162,7 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
        clients[ILT_CLI_TSDM].last.reg  = ILT_CFG_REG(TSDM, LAST_ILT);
        clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
 
-       /* default ILT page size for all clients is 32K */
+       /* default ILT page size for all clients is 64K */
        for (i = 0; i < ILT_CLI_MAX; i++)
                p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
 
@@ -1174,13 +1177,13 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
 
        /* Initialize the dynamic ILT allocation mutex */
 #ifdef CONFIG_ECORE_LOCK_ALLOC
-       OSAL_MUTEX_ALLOC(p_hwfn, &p_mngr->mutex);
+       if (OSAL_MUTEX_ALLOC(p_hwfn, &p_mngr->mutex)) {
+               DP_NOTICE(p_hwfn, false, "Failed to alloc p_mngr->mutex\n");
+               return ECORE_NOMEM;
+       }
 #endif
        OSAL_MUTEX_INIT(&p_mngr->mutex);
 
-       /* Set the cxt mangr pointer priori to further allocations */
-       p_hwfn->p_cxt_mngr = p_mngr;
-
        return ECORE_SUCCESS;
 }
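Moving the p_cxt_mngr assignment ahead of the remaining allocations means a later failure (such as the mutex allocation above) can simply return an error and rely on the common free path, which reaches everything through the published pointer; a rough sketch of that shape, with hypothetical mgr_alloc()/mgr_free()/lock_alloc_stub() names standing in for the ecore routines:

    #include <errno.h>
    #include <stdlib.h>

    struct mgr { int placeholder; };
    struct owner { struct mgr *mgr; };

    /* Common teardown: finds whatever was allocated via owner->mgr. */
    static void mgr_free(struct owner *owner)
    {
            free(owner->mgr);
            owner->mgr = NULL;
    }

    static int lock_alloc_stub(void) { return 0; } /* stands in for a later allocation */

    static int mgr_alloc(struct owner *owner)
    {
            struct mgr *m = calloc(1, sizeof(*m));

            if (m == NULL)
                    return -ENOMEM;

            /* Publish the pointer before any further allocation: if a later
             * step fails, the caller's common mgr_free() can still find and
             * release everything through owner->mgr.
             */
            owner->mgr = m;

            if (lock_alloc_stub())          /* a later allocation that may fail */
                    return -ENOMEM;         /* no local unwind needed */

            return 0;
    }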
 
@@ -1191,21 +1194,21 @@ enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn)
        /* Allocate the ILT shadow table */
        rc = ecore_ilt_shadow_alloc(p_hwfn);
        if (rc) {
-               DP_NOTICE(p_hwfn, true, "Failed to allocate ilt memory\n");
+               DP_NOTICE(p_hwfn, false, "Failed to allocate ilt memory\n");
                goto tables_alloc_fail;
        }
 
        /* Allocate the T2  table */
        rc = ecore_cxt_src_t2_alloc(p_hwfn);
        if (rc) {
-               DP_NOTICE(p_hwfn, true, "Failed to allocate T2 memory\n");
+               DP_NOTICE(p_hwfn, false, "Failed to allocate T2 memory\n");
                goto tables_alloc_fail;
        }
 
        /* Allocate and initialize the acquired cids bitmaps */
        rc = ecore_cid_map_alloc(p_hwfn);
        if (rc) {
-               DP_NOTICE(p_hwfn, true, "Failed to allocate cid maps\n");
+               DP_NOTICE(p_hwfn, false, "Failed to allocate cid maps\n");
                goto tables_alloc_fail;
        }
 
@@ -1429,23 +1432,29 @@ static void ecore_cdu_init_pf(struct ecore_hwfn *p_hwfn)
        }
 }
 
-void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+                     bool is_pf_loading)
 {
        struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+       struct ecore_mcp_link_state *p_link;
        struct ecore_qm_iids iids;
 
        OSAL_MEM_ZERO(&iids, sizeof(iids));
        ecore_cxt_qm_iids(p_hwfn, &iids);
 
-       ecore_qm_pf_rt_init(p_hwfn, p_ptt, p_hwfn->port_id,
-                           p_hwfn->rel_pf_id, qm_info->max_phys_tcs_per_port,
+       p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
+
+       ecore_qm_pf_rt_init(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+                           qm_info->max_phys_tcs_per_port,
+                           is_pf_loading,
                            iids.cids, iids.vf_cids, iids.tids,
                            qm_info->start_pq,
                            qm_info->num_pqs - qm_info->num_vf_pqs,
                            qm_info->num_vf_pqs,
                            qm_info->start_vport,
                            qm_info->num_vports, qm_info->pf_wfq,
-                           qm_info->pf_rl, p_hwfn->qm_info.qm_pq_params,
+                           qm_info->pf_rl, p_link->speed,
+                           p_hwfn->qm_info.qm_pq_params,
                            p_hwfn->qm_info.qm_vport_params);
 }
 
@@ -1795,7 +1804,7 @@ void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn)
 
 void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
 {
-       ecore_qm_init_pf(p_hwfn, p_ptt);
+       ecore_qm_init_pf(p_hwfn, p_ptt, true);
        ecore_cm_init_pf(p_hwfn);
        ecore_dq_init_pf(p_hwfn);
        ecore_cdu_init_pf(p_hwfn);
@@ -1987,6 +1996,8 @@ enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn)
        switch (p_hwfn->hw_info.personality) {
        case ECORE_PCI_ETH:
                {
+               u32 count = 0;
+
                struct ecore_eth_pf_params *p_params =
                            &p_hwfn->pf_params.eth_pf_params;
 
@@ -1995,7 +2006,13 @@ enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn)
                ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
                                              p_params->num_cons,
                                              p_params->num_vf_cons);
-               p_hwfn->p_cxt_mngr->arfs_count = p_params->num_arfs_filters;
+
+               count = p_params->num_arfs_filters;
+
+               if (!OSAL_TEST_BIT(ECORE_MF_DISABLE_ARFS,
+                                  &p_hwfn->p_dev->mf_bits))
+                       p_hwfn->p_cxt_mngr->arfs_count = count;
+
                break;
                }
        default:
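The aRFS filter count is now applied only when the multi-function configuration leaves aRFS enabled; a minimal sketch of gating a feature count on a capability bit, with generic names standing in for the OSAL/ecore ones:

    /* Hypothetical illustration: honour the requested count only when the
     * corresponding "feature disabled" bit is clear.
     */
    #define FEATURE_DISABLE_ARFS_BIT   0x1UL

    static void set_arfs_count(unsigned long mf_bits,
                               unsigned int requested, unsigned int *arfs_count)
    {
            if (!(mf_bits & FEATURE_DISABLE_ARFS_BIT))
                    *arfs_count = requested;
    }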
@@ -2097,7 +2114,7 @@ ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
 
        ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)&ilt_hw_entry,
                            reg_offset, sizeof(ilt_hw_entry) / sizeof(u32),
-                           0 /* no flags */);
+                           OSAL_NULL /* default parameters */);
 
        if (elem_type == ECORE_ELEM_CXT) {
                u32 last_cid_allocated = (1 + (iid / elems_per_p)) *
@@ -2204,41 +2221,10 @@ ecore_cxt_free_ilt_range(struct ecore_hwfn *p_hwfn,
                                    (u64)(osal_uintptr_t)&ilt_hw_entry,
                                    reg_offset,
                                    sizeof(ilt_hw_entry) / sizeof(u32),
-                                   0 /* no flags */);
+                                   OSAL_NULL /* default parameters */);
        }
 
        ecore_ptt_release(p_hwfn, p_ptt);
 
        return ECORE_SUCCESS;
 }
-
-enum _ecore_status_t ecore_cxt_free_proto_ilt(struct ecore_hwfn *p_hwfn,
-                                             enum protocol_type proto)
-{
-       enum _ecore_status_t rc;
-       u32 cid;
-
-       /* Free Connection CXT */
-       rc = ecore_cxt_free_ilt_range(p_hwfn, ECORE_ELEM_CXT,
-                                     ecore_cxt_get_proto_cid_start(p_hwfn,
-                                                                   proto),
-                                     ecore_cxt_get_proto_cid_count(p_hwfn,
-                                                                   proto,
-                                                                   &cid));
-
-       if (rc)
-               return rc;
-
-       /* Free Task CXT */
-       rc = ecore_cxt_free_ilt_range(p_hwfn, ECORE_ELEM_TASK, 0,
-                                     ecore_cxt_get_proto_tid_count(p_hwfn,
-                                                                   proto));
-       if (rc)
-               return rc;
-
-       /* Free TSDM CXT */
-       rc = ecore_cxt_free_ilt_range(p_hwfn, ECORE_ELEM_SRQ, 0,
-                                     ecore_cxt_get_srq_count(p_hwfn));
-
-       return rc;
-}