-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#include "bcm_osal.h"
#include "ecore_sriov.h"
#include "ecore_mcp.h"
-/* Max number of connection types in HW (DQ/CDU etc.) */
-#define MAX_CONN_TYPES PROTOCOLID_COMMON
-#define NUM_TASK_TYPES 2
-#define NUM_TASK_PF_SEGMENTS 4
-#define NUM_TASK_VF_SEGMENTS 1
-
/* Doorbell-Queue constants */
#define DQ_RANGE_SHIFT 4
#define DQ_RANGE_ALIGN (1 << DQ_RANGE_SHIFT)
/* Searcher constants */
#define SRC_MIN_NUM_ELEMS 256
+/* GFS constants */
+#define RGFS_MIN_NUM_ELEMS 256
+#define TGFS_MIN_NUM_ELEMS 256
+
/* Timers constants */
#define TM_SHIFT 7
#define TM_ALIGN (1 << TM_SHIFT)
/* connection context union */
union conn_context {
- struct e4_core_conn_context core_ctx;
- struct e4_eth_conn_context eth_ctx;
+ struct core_conn_context core_ctx;
+ struct eth_conn_context eth_ctx;
};
/* TYPE-0 task context - iSCSI, FCOE */
/* Alignment is inherent to the type1_task_context structure */
#define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context)
-/* PF per protocl configuration object */
-#define TASK_SEGMENTS (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
-#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)
-
-struct ecore_tid_seg {
- u32 count;
- u8 type;
- bool has_fl_mem;
-};
-
-struct ecore_conn_type_cfg {
- u32 cid_count;
- u32 cids_per_vf;
- struct ecore_tid_seg tid_seg[TASK_SEGMENTS];
-};
-
-/* ILT Client configuration,
- * Per connection type (protocol) resources (cids, tis, vf cids etc.)
- * 1 - for connection context (CDUC) and for each task context we need two
- * values, for regular task context and for force load memory
- */
-#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2)
-#define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2)
-#define CDUC_BLK (0)
-#define SRQ_BLK (0)
-#define CDUT_SEG_BLK(n) (1 + (u8)(n))
-#define CDUT_FL_SEG_BLK(n, X) (1 + (n) + NUM_TASK_##X##_SEGMENTS)
-
-enum ilt_clients {
- ILT_CLI_CDUC,
- ILT_CLI_CDUT,
- ILT_CLI_QM,
- ILT_CLI_TM,
- ILT_CLI_SRC,
- ILT_CLI_TSDM,
- ILT_CLI_MAX
-};
-
-struct ilt_cfg_pair {
- u32 reg;
- u32 val;
-};
-
-struct ecore_ilt_cli_blk {
- u32 total_size; /* 0 means not active */
- u32 real_size_in_page;
- u32 start_line;
- u32 dynamic_line_cnt;
-};
-
-struct ecore_ilt_client_cfg {
- bool active;
-
- /* ILT boundaries */
- struct ilt_cfg_pair first;
- struct ilt_cfg_pair last;
- struct ilt_cfg_pair p_size;
-
- /* ILT client blocks for PF */
- struct ecore_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
- u32 pf_total_lines;
-
- /* ILT client blocks for VFs */
- struct ecore_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
- u32 vf_total_lines;
-};
-
-/* Per Path -
- * ILT shadow table
- * Protocol acquired CID lists
- * PF start line in ILT
- */
-struct ecore_dma_mem {
- dma_addr_t p_phys;
- void *p_virt;
- osal_size_t size;
-};
-
-#define MAP_WORD_SIZE sizeof(unsigned long)
-#define BITS_PER_MAP_WORD (MAP_WORD_SIZE * 8)
-
-struct ecore_cid_acquired_map {
- u32 start_cid;
- u32 max_count;
- unsigned long *cid_map;
-};
-
-struct ecore_cxt_mngr {
- /* Per protocl configuration */
- struct ecore_conn_type_cfg conn_cfg[MAX_CONN_TYPES];
-
- /* computed ILT structure */
- struct ecore_ilt_client_cfg clients[ILT_CLI_MAX];
-
- /* Task type sizes */
- u32 task_type_size[NUM_TASK_TYPES];
-
- /* total number of VFs for this hwfn -
- * ALL VFs are symmetric in terms of HW resources
- */
- u32 vf_count;
-
- /* Acquired CIDs */
- struct ecore_cid_acquired_map acquired[MAX_CONN_TYPES];
- /* TBD - do we want this allocated to reserve space? */
- struct ecore_cid_acquired_map
- acquired_vf[MAX_CONN_TYPES][COMMON_MAX_NUM_VFS];
-
- /* ILT shadow table */
- struct ecore_dma_mem *ilt_shadow;
- u32 pf_start_line;
-
- /* Mutex for a dynamic ILT allocation */
- osal_mutex_t mutex;
-
- /* SRC T2 */
- struct ecore_dma_mem *t2;
- u32 t2_num_pages;
- u64 first_free;
- u64 last_free;
-
- /* The infrastructure originally was very generic and context/task
- * oriented - per connection-type we would set how many of those
- * are needed, and later when determining how much memory we're
- * needing for a given block we'd iterate over all the relevant
- * connection-types.
- * But since then we've had some additional resources, some of which
- * require memory which is indepent of the general context/task
- * scheme. We add those here explicitly per-feature.
- */
-
- /* total number of SRQ's for this hwfn */
- u32 srq_count;
-
- /* Maximal number of L2 steering filters */
- u32 arfs_count;
-
- /* TODO - VF arfs filters ? */
-};
-
static OSAL_INLINE bool tm_cid_proto(enum protocol_type type)
{
 	return type == PROTOCOLID_TOE;
 }

 	u32 per_vf_tids;
 };
-static void ecore_cxt_tm_iids(struct ecore_cxt_mngr *p_mngr,
+static void ecore_cxt_tm_iids(struct ecore_hwfn *p_hwfn,
+ struct ecore_cxt_mngr *p_mngr,
struct ecore_tm_iids *iids)
{
+ struct ecore_conn_type_cfg *p_cfg;
bool tm_vf_required = false;
bool tm_required = false;
u32 i, j;
for (i = 0; i < MAX_CONN_TYPES; i++) {
- struct ecore_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];
+ p_cfg = &p_mngr->conn_cfg[i];
if (tm_cid_proto(i) || tm_required) {
if (p_cfg->cid_count)
p_blk->start_line);
}
-static u32 ecore_ilt_get_dynamic_line_cnt(struct ecore_hwfn *p_hwfn,
- enum ilt_clients ilt_client)
+static void ecore_ilt_get_dynamic_line_range(struct ecore_hwfn *p_hwfn,
+ enum ilt_clients ilt_client,
+ u32 *dynamic_line_offset,
+ u32 *dynamic_line_cnt)
{
- u32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count;
struct ecore_ilt_client_cfg *p_cli;
- u32 lines_to_skip = 0;
+ struct ecore_conn_type_cfg *p_cfg;
u32 cxts_per_p;
/* TBD MK: ILT code should be simplified once PROTO enum is changed */
+ *dynamic_line_offset = 0;
+ *dynamic_line_cnt = 0;
+
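+	/* Dynamic line ranges are currently computed only for the CDUC
+	 * client, based on the RoCE connection context count.
+	 */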
if (ilt_client == ILT_CLI_CDUC) {
p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+ p_cfg = &p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE];
cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) /
(u32)CONN_CXT_SIZE(p_hwfn);
- lines_to_skip = cid_count / cxts_per_p;
+ *dynamic_line_cnt = p_cfg->cid_count / cxts_per_p;
+ }
+}
+
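+/* Reset an ILT client's bookkeeping before ecore_cxt_cfg_ilt_compute()
+ * refills it; returning the pointer lets callers reset and assign at once.
+ */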
+static struct ecore_ilt_client_cfg *
+ecore_cxt_set_cli(struct ecore_ilt_client_cfg *p_cli)
+{
+ p_cli->active = false;
+ p_cli->first.val = 0;
+ p_cli->last.val = 0;
+ return p_cli;
+}
+
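+/* Reset a client block's total_size (0 marks the block inactive) before
+ * it is refilled.
+ */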
+static struct ecore_ilt_cli_blk *
+ecore_cxt_set_blk(struct ecore_ilt_cli_blk *p_blk)
+{
+ p_blk->total_size = 0;
+ return p_blk;
}
- return lines_to_skip;
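+
+/* Number of searcher (SRC) T2 elements needed by the PF and all its VFs,
+ * raised to the hardware minimum and rounded up to a power of two.
+ */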
+static u32
+ecore_cxt_src_elements(struct ecore_cxt_mngr *p_mngr)
+{
+ struct ecore_src_iids src_iids;
+ u32 elem_num = 0;
+
+ OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
+ ecore_cxt_src_iids(p_mngr, &src_iids);
+
+ /* Both the PF and VFs searcher connections are stored in the per PF
+ * database. Thus sum the PF searcher cids and all the VFs searcher
+ * cids.
+ */
+ elem_num = src_iids.pf_cids +
+ src_iids.per_vf_cids * p_mngr->vf_count;
+ if (elem_num == 0)
+ return elem_num;
+
+ elem_num = OSAL_MAX_T(u32, elem_num, SRC_MIN_NUM_ELEMS);
+ elem_num = OSAL_ROUNDUP_POW_OF_TWO(elem_num);
+
+ return elem_num;
}
enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
{
+ u32 curr_line, total, i, task_size, line, total_size, elem_size;
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- u32 curr_line, total, i, task_size, line;
struct ecore_ilt_client_cfg *p_cli;
struct ecore_ilt_cli_blk *p_blk;
struct ecore_cdu_iids cdu_iids;
- struct ecore_src_iids src_iids;
struct ecore_qm_iids qm_iids;
struct ecore_tm_iids tm_iids;
struct ecore_tid_seg *p_seg;
OSAL_MEM_ZERO(&qm_iids, sizeof(qm_iids));
OSAL_MEM_ZERO(&cdu_iids, sizeof(cdu_iids));
- OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
OSAL_MEM_ZERO(&tm_iids, sizeof(tm_iids));
p_mngr->pf_start_line = RESC_START(p_hwfn, ECORE_ILT);
p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
/* CDUC */
- p_cli = &p_mngr->clients[ILT_CLI_CDUC];
+ p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUC]);
curr_line = p_mngr->pf_start_line;
/* get the counters for the CDUC,CDUC and QM clients */
ecore_cxt_cdu_iids(p_mngr, &cdu_iids);
- p_blk = &p_cli->pf_blks[CDUC_BLK];
+ p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[CDUC_BLK]);
total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);
ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
p_cli->pf_total_lines = curr_line - p_blk->start_line;
- p_blk->dynamic_line_cnt = ecore_ilt_get_dynamic_line_cnt(p_hwfn,
- ILT_CLI_CDUC);
+ ecore_ilt_get_dynamic_line_range(p_hwfn, ILT_CLI_CDUC,
+ &p_blk->dynamic_line_offset,
+ &p_blk->dynamic_line_cnt);
/* CDUC VF */
- p_blk = &p_cli->vf_blks[CDUC_BLK];
+ p_blk = ecore_cxt_set_blk(&p_cli->vf_blks[CDUC_BLK]);
total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);
ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
ILT_CLI_CDUC);
/* CDUT PF */
- p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+ p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUT]);
p_cli->first.val = curr_line;
/* first the 'working' task memory */
if (!p_seg || p_seg->count == 0)
continue;
- p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
+ p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[CDUT_SEG_BLK(i)]);
total = p_seg->count * p_mngr->task_type_size[p_seg->type];
ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
p_mngr->task_type_size[p_seg->type]);
if (!p_seg || p_seg->count == 0)
continue;
- p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];
+ p_blk =
+ ecore_cxt_set_blk(&p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]);
if (!p_seg->has_fl_mem) {
/* The segment is active (total size pf 'working'
ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
ILT_CLI_CDUT);
}
- p_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line;
+ p_cli->pf_total_lines = curr_line - p_cli->first.val;
/* CDUT VF */
p_seg = ecore_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF);
/* 'working' memory */
total = p_seg->count * p_mngr->task_type_size[p_seg->type];
- p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
+ p_blk = ecore_cxt_set_blk(&p_cli->vf_blks[CDUT_SEG_BLK(0)]);
ecore_ilt_cli_blk_fill(p_cli, p_blk,
curr_line, total,
p_mngr->task_type_size[p_seg->type]);
ILT_CLI_CDUT);
/* 'init' memory */
- p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
+ p_blk =
+ ecore_cxt_set_blk(&p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]);
if (!p_seg->has_fl_mem) {
/* see comment above */
line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
ILT_CLI_CDUT);
}
- p_cli->vf_total_lines = curr_line -
- p_cli->vf_blks[0].start_line;
+ p_cli->vf_total_lines = curr_line - (p_cli->first.val +
+ p_cli->pf_total_lines);
/* Now for the rest of the VFs */
for (i = 1; i < p_mngr->vf_count; i++) {
+		/* don't reset the block, i.e. don't clear its total_size */
p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
ILT_CLI_CDUT);
+		/* don't reset the block, i.e. don't clear its total_size */
p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
ILT_CLI_CDUT);
}
/* QM */
- p_cli = &p_mngr->clients[ILT_CLI_QM];
- p_blk = &p_cli->pf_blks[0];
-
+ p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_QM]);
+ p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[0]);
+
+ /* At this stage, after the first QM configuration, the PF PQs amount
+ * is the highest possible. Save this value at qm_info->ilt_pf_pqs to
+ * detect overflows in the future.
+ * Even though VF PQs amount can be larger than VF count, use vf_count
+ * because each VF requires only the full amount of CIDs.
+ */
ecore_cxt_qm_iids(p_hwfn, &qm_iids);
- total = ecore_qm_pf_mem_size(qm_iids.cids,
+ total = ecore_qm_pf_mem_size(p_hwfn, qm_iids.cids,
qm_iids.vf_cids, qm_iids.tids,
- p_hwfn->qm_info.num_pqs,
+ p_hwfn->qm_info.num_pqs + OFLD_GRP_SIZE,
p_hwfn->qm_info.num_vf_pqs);
DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
p_cli->pf_total_lines = curr_line - p_blk->start_line;
- /* SRC */
- p_cli = &p_mngr->clients[ILT_CLI_SRC];
- ecore_cxt_src_iids(p_mngr, &src_iids);
-
- /* Both the PF and VFs searcher connections are stored in the per PF
- * database. Thus sum the PF searcher cids and all the VFs searcher
- * cids.
- */
- total = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
- if (total) {
- u32 local_max = OSAL_MAX_T(u32, total,
- SRC_MIN_NUM_ELEMS);
-
- total = OSAL_ROUNDUP_POW_OF_TWO(local_max);
-
- p_blk = &p_cli->pf_blks[0];
- ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
- total * sizeof(struct src_ent),
- sizeof(struct src_ent));
-
- ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
- ILT_CLI_SRC);
- p_cli->pf_total_lines = curr_line - p_blk->start_line;
- }
-
/* TM PF */
- p_cli = &p_mngr->clients[ILT_CLI_TM];
- ecore_cxt_tm_iids(p_mngr, &tm_iids);
+ p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_TM]);
+ ecore_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
total = tm_iids.pf_cids + tm_iids.pf_tids_total;
if (total) {
- p_blk = &p_cli->pf_blks[0];
+ p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[0]);
ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
- total * TM_ELEM_SIZE, TM_ELEM_SIZE);
+ total * TM_ELEM_SIZE,
+ TM_ELEM_SIZE);
ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
ILT_CLI_TM);
/* TM VF */
total = tm_iids.per_vf_cids + tm_iids.per_vf_tids;
if (total) {
- p_blk = &p_cli->vf_blks[0];
+ p_blk = ecore_cxt_set_blk(&p_cli->vf_blks[0]);
ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
total * TM_ELEM_SIZE, TM_ELEM_SIZE);
}
}
+ /* SRC */
+ p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_SRC]);
+ total = ecore_cxt_src_elements(p_mngr);
+
+ if (total) {
+ total_size = total * sizeof(struct src_ent);
+ elem_size = sizeof(struct src_ent);
+
+ p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[0]);
+ ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total_size, elem_size);
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_SRC);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+ }
+
/* TSDM (SRQ CONTEXT) */
total = ecore_cxt_get_srq_count(p_hwfn);
if (total) {
- p_cli = &p_mngr->clients[ILT_CLI_TSDM];
- p_blk = &p_cli->pf_blks[SRQ_BLK];
+ p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]);
+ p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[SRQ_BLK]);
ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
total * SRQ_CXT_SIZE, SRQ_CXT_SIZE);
static void ecore_cxt_src_t2_free(struct ecore_hwfn *p_hwfn)
{
- struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_src_t2 *p_t2 = &p_hwfn->p_cxt_mngr->src_t2;
u32 i;
- if (!p_mngr->t2)
+ if (!p_t2 || !p_t2->dma_mem)
return;
- for (i = 0; i < p_mngr->t2_num_pages; i++)
- if (p_mngr->t2[i].p_virt)
+ for (i = 0; i < p_t2->num_pages; i++)
+ if (p_t2->dma_mem[i].virt_addr)
OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
- p_mngr->t2[i].p_virt,
- p_mngr->t2[i].p_phys,
- p_mngr->t2[i].size);
+ p_t2->dma_mem[i].virt_addr,
+ p_t2->dma_mem[i].phys_addr,
+ p_t2->dma_mem[i].size);
-	OSAL_FREE(p_hwfn->p_dev, p_mngr->t2);
+
+	OSAL_FREE(p_hwfn->p_dev, p_t2->dma_mem);
+ p_t2->dma_mem = OSAL_NULL;
+}
+
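+/* Allocate and zero the DMA pages backing the SRC T2 table; the final
+ * page may be shorter than page_size when total_size is not a multiple.
+ */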
+static enum _ecore_status_t
+ecore_cxt_t2_alloc_pages(struct ecore_hwfn *p_hwfn,
+ struct ecore_src_t2 *p_t2,
+ u32 total_size, u32 page_size)
+{
+ void **p_virt;
+ u32 size, i;
+
+ if (!p_t2 || !p_t2->dma_mem)
+ return ECORE_INVAL;
+
+ for (i = 0; i < p_t2->num_pages; i++) {
+ size = OSAL_MIN_T(u32, total_size, page_size);
+ p_virt = &p_t2->dma_mem[i].virt_addr;
+
+ *p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+ &p_t2->dma_mem[i].phys_addr,
+ size);
+ if (!p_t2->dma_mem[i].virt_addr)
+ return ECORE_NOMEM;
+
+ OSAL_MEM_ZERO(*p_virt, size);
+ p_t2->dma_mem[i].size = size;
+ total_size -= size;
+ }
+
+ return ECORE_SUCCESS;
}
static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
{
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 conn_num, total_size, ent_per_page, psz, i;
+ struct phys_mem_desc *p_t2_last_page;
struct ecore_ilt_client_cfg *p_src;
struct ecore_src_iids src_iids;
- struct ecore_dma_mem *p_t2;
+ struct ecore_src_t2 *p_t2;
enum _ecore_status_t rc;
OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
/* use the same page size as the SRC ILT client */
psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
- p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);
+ p_t2 = &p_mngr->src_t2;
+ p_t2->num_pages = DIV_ROUND_UP(total_size, psz);
/* allocate t2 */
- p_mngr->t2 = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
- p_mngr->t2_num_pages *
- sizeof(struct ecore_dma_mem));
- if (!p_mngr->t2) {
- DP_NOTICE(p_hwfn, true, "Failed to allocate t2 table\n");
+ p_t2->dma_mem = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ p_t2->num_pages *
+ sizeof(struct phys_mem_desc));
+ if (!p_t2->dma_mem) {
+ DP_NOTICE(p_hwfn, false, "Failed to allocate t2 table\n");
rc = ECORE_NOMEM;
goto t2_fail;
}
- /* allocate t2 pages */
- for (i = 0; i < p_mngr->t2_num_pages; i++) {
- u32 size = OSAL_MIN_T(u32, total_size, psz);
- void **p_virt = &p_mngr->t2[i].p_virt;
-
- *p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
- &p_mngr->t2[i].p_phys, size);
- if (!p_mngr->t2[i].p_virt) {
- rc = ECORE_NOMEM;
- goto t2_fail;
- }
- OSAL_MEM_ZERO(*p_virt, size);
- p_mngr->t2[i].size = size;
- total_size -= size;
- }
+ rc = ecore_cxt_t2_alloc_pages(p_hwfn, p_t2, total_size, psz);
+ if (rc)
+ goto t2_fail;
/* Set the t2 pointers */
/* entries per page - must be a power of two */
ent_per_page = psz / sizeof(struct src_ent);
- p_mngr->first_free = (u64)p_mngr->t2[0].p_phys;
+ p_t2->first_free = (u64)p_t2->dma_mem[0].phys_addr;
- p_t2 = &p_mngr->t2[(conn_num - 1) / ent_per_page];
- p_mngr->last_free = (u64)p_t2->p_phys +
- ((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent);
+ p_t2_last_page = &p_t2->dma_mem[(conn_num - 1) / ent_per_page];
+ p_t2->last_free = (u64)p_t2_last_page->phys_addr +
+ ((conn_num - 1) & (ent_per_page - 1)) *
+ sizeof(struct src_ent);
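+
+	/* String the T2 entries into a free list: each entry stores the
+	 * big-endian physical address of the next one, chained across pages.
+	 */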
- for (i = 0; i < p_mngr->t2_num_pages; i++) {
+ for (i = 0; i < p_t2->num_pages; i++) {
u32 ent_num = OSAL_MIN_T(u32, ent_per_page, conn_num);
- struct src_ent *entries = p_mngr->t2[i].p_virt;
- u64 p_ent_phys = (u64)p_mngr->t2[i].p_phys, val;
+ struct src_ent *entries = p_t2->dma_mem[i].virt_addr;
+ u64 p_ent_phys = (u64)p_t2->dma_mem[i].phys_addr, val;
u32 j;
for (j = 0; j < ent_num - 1; j++) {
entries[j].next = OSAL_CPU_TO_BE64(val);
}
- if (i < p_mngr->t2_num_pages - 1)
- val = (u64)p_mngr->t2[i + 1].p_phys;
+ if (i < p_t2->num_pages - 1)
+ val = (u64)p_t2->dma_mem[i + 1].phys_addr;
else
val = 0;
entries[j].next = OSAL_CPU_TO_BE64(val);
}
#define for_each_ilt_valid_client(pos, clients) \
- for (pos = 0; pos < ILT_CLI_MAX; pos++) \
+ for (pos = 0; pos < MAX_ILT_CLIENTS; pos++) \
if (!clients[pos].active) { \
continue; \
} else \
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 ilt_size, i;
+ if (p_mngr->ilt_shadow == OSAL_NULL)
+ return;
+
ilt_size = ecore_cxt_ilt_shadow_size(p_cli);
for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
- struct ecore_dma_mem *p_dma = &p_mngr->ilt_shadow[i];
+ struct phys_mem_desc *p_dma = &p_mngr->ilt_shadow[i];
- if (p_dma->p_virt)
+ if (p_dma->virt_addr)
OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
-					       p_dma->p_virt,
-					       p_dma->p_phys, p_dma->size);
-		p_dma->p_virt = OSAL_NULL;
+					       p_dma->virt_addr,
+					       p_dma->phys_addr, p_dma->size);
+		p_dma->virt_addr = OSAL_NULL;
}
OSAL_FREE(p_hwfn->p_dev, p_mngr->ilt_shadow);
+ p_mngr->ilt_shadow = OSAL_NULL;
}
static enum _ecore_status_t
struct ecore_ilt_cli_blk *p_blk,
enum ilt_clients ilt_client, u32 start_line_offset)
{
- struct ecore_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
- u32 lines, line, sz_left, lines_to_skip = 0;
+ struct phys_mem_desc *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
+ u32 lines, line, sz_left, lines_to_skip, first_skipped_line;
/* Special handling for RoCE that supports dynamic allocation */
if (ilt_client == ILT_CLI_CDUT || ilt_client == ILT_CLI_TSDM)
return ECORE_SUCCESS;
- lines_to_skip = p_blk->dynamic_line_cnt;
-
if (!p_blk->total_size)
return ECORE_SUCCESS;
sz_left = p_blk->total_size;
+ lines_to_skip = p_blk->dynamic_line_cnt;
lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip;
line = p_blk->start_line + start_line_offset -
- p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip;
+ p_hwfn->p_cxt_mngr->pf_start_line;
+ first_skipped_line = line + p_blk->dynamic_line_offset;
- for (; lines; lines--) {
+ while (lines) {
dma_addr_t p_phys;
void *p_virt;
u32 size;
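+
+		/* Skip the dynamically-allocated line range in one jump;
+		 * those shadow lines are populated on demand at run time.
+		 */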
+ if (lines_to_skip && (line == first_skipped_line)) {
+ line += lines_to_skip;
+ continue;
+ }
+
size = OSAL_MIN_T(u32, sz_left, p_blk->real_size_in_page);
/* @DPDK */
return ECORE_NOMEM;
OSAL_MEM_ZERO(p_virt, size);
- ilt_shadow[line].p_phys = p_phys;
- ilt_shadow[line].p_virt = p_virt;
+ ilt_shadow[line].phys_addr = p_phys;
+ ilt_shadow[line].virt_addr = p_virt;
ilt_shadow[line].size = size;
DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
sz_left -= size;
line++;
+ lines--;
}
return ECORE_SUCCESS;
size = ecore_cxt_ilt_shadow_size(clients);
p_mngr->ilt_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
- size * sizeof(struct ecore_dma_mem));
+ size * sizeof(struct phys_mem_desc));
if (!p_mngr->ilt_shadow) {
- DP_NOTICE(p_hwfn, true,
- "Failed to allocate ilt shadow table\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate ilt shadow table\n");
rc = ECORE_NOMEM;
goto ilt_shadow_fail;
}
DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
"Allocated 0x%x bytes for ilt shadow\n",
- (u32)(size * sizeof(struct ecore_dma_mem)));
+ (u32)(size * sizeof(struct phys_mem_desc)));
for_each_ilt_valid_client(i, clients) {
for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
static void ecore_cid_map_free(struct ecore_hwfn *p_hwfn)
{
+ u32 type, vf, max_num_vfs = NUM_OF_VFS(p_hwfn->p_dev);
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- u32 type, vf;
for (type = 0; type < MAX_CONN_TYPES; type++) {
OSAL_FREE(p_hwfn->p_dev, p_mngr->acquired[type].cid_map);
+ p_mngr->acquired[type].cid_map = OSAL_NULL;
p_mngr->acquired[type].max_count = 0;
p_mngr->acquired[type].start_cid = 0;
- for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) {
+ for (vf = 0; vf < max_num_vfs; vf++) {
OSAL_FREE(p_hwfn->p_dev,
p_mngr->acquired_vf[type][vf].cid_map);
+ p_mngr->acquired_vf[type][vf].cid_map = OSAL_NULL;
p_mngr->acquired_vf[type][vf].max_count = 0;
p_mngr->acquired_vf[type][vf].start_cid = 0;
}
}
static enum _ecore_status_t
-ecore_cid_map_alloc_single(struct ecore_hwfn *p_hwfn, u32 type,
+__ecore_cid_map_alloc_single(struct ecore_hwfn *p_hwfn, u32 type,
u32 cid_start, u32 cid_count,
struct ecore_cid_acquired_map *p_map)
{
return ECORE_SUCCESS;
}
-static enum _ecore_status_t ecore_cid_map_alloc(struct ecore_hwfn *p_hwfn)
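+/* Allocate the PF map and all VF maps for a single connection type */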
+static enum _ecore_status_t
+ecore_cid_map_alloc_single(struct ecore_hwfn *p_hwfn, u32 type, u32 start_cid,
+ u32 vf_start_cid)
{
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- u32 start_cid = 0, vf_start_cid = 0;
- u32 type, vf;
+ u32 vf, max_num_vfs = NUM_OF_VFS(p_hwfn->p_dev);
+ struct ecore_cid_acquired_map *p_map;
+ struct ecore_conn_type_cfg *p_cfg;
+ enum _ecore_status_t rc;
- for (type = 0; type < MAX_CONN_TYPES; type++) {
- struct ecore_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[type];
- struct ecore_cid_acquired_map *p_map;
+ p_cfg = &p_mngr->conn_cfg[type];
/* Handle PF maps */
p_map = &p_mngr->acquired[type];
- if (ecore_cid_map_alloc_single(p_hwfn, type, start_cid,
- p_cfg->cid_count, p_map))
- goto cid_map_fail;
+ rc = __ecore_cid_map_alloc_single(p_hwfn, type, start_cid,
+ p_cfg->cid_count, p_map);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Handle VF maps */
+ for (vf = 0; vf < max_num_vfs; vf++) {
+ p_map = &p_mngr->acquired_vf[type][vf];
+ rc = __ecore_cid_map_alloc_single(p_hwfn, type, vf_start_cid,
+ p_cfg->cids_per_vf, p_map);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ }
- /* Handle VF maps */
- for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) {
- p_map = &p_mngr->acquired_vf[type][vf];
- if (ecore_cid_map_alloc_single(p_hwfn, type,
- vf_start_cid,
- p_cfg->cids_per_vf,
- p_map))
- goto cid_map_fail;
- }
+ return ECORE_SUCCESS;
+}
- start_cid += p_cfg->cid_count;
- vf_start_cid += p_cfg->cids_per_vf;
+static enum _ecore_status_t ecore_cid_map_alloc(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 start_cid = 0, vf_start_cid = 0;
+ u32 type;
+ enum _ecore_status_t rc;
+
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ rc = ecore_cid_map_alloc_single(p_hwfn, type, start_cid,
+ vf_start_cid);
+ if (rc != ECORE_SUCCESS)
+ goto cid_map_fail;
+
+ start_cid += p_mngr->conn_cfg[type].cid_count;
+ vf_start_cid += p_mngr->conn_cfg[type].cids_per_vf;
}
return ECORE_SUCCESS;
cid_map_fail:
ecore_cid_map_free(p_hwfn);
- return ECORE_NOMEM;
+ return rc;
}
enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
{
+ struct ecore_cid_acquired_map *acquired_vf;
struct ecore_ilt_client_cfg *clients;
struct ecore_cxt_mngr *p_mngr;
- u32 i;
+ u32 i, max_num_vfs;
p_mngr = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_mngr));
if (!p_mngr) {
- DP_NOTICE(p_hwfn, true,
- "Failed to allocate `struct ecore_cxt_mngr'\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_cxt_mngr'\n");
return ECORE_NOMEM;
}
clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
/* default ILT page size for all clients is 64K */
- for (i = 0; i < ILT_CLI_MAX; i++)
+ for (i = 0; i < MAX_ILT_CLIENTS; i++)
p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
/* due to removal of ISCSI/FCoE files union type0_task_context
/* Initialize the dynamic ILT allocation mutex */
#ifdef CONFIG_ECORE_LOCK_ALLOC
- OSAL_MUTEX_ALLOC(p_hwfn, &p_mngr->mutex);
+ if (OSAL_MUTEX_ALLOC(p_hwfn, &p_mngr->mutex)) {
+ DP_NOTICE(p_hwfn, false, "Failed to alloc p_mngr->mutex\n");
+ return ECORE_NOMEM;
+ }
#endif
OSAL_MUTEX_INIT(&p_mngr->mutex);
- /* Set the cxt mangr pointer priori to further allocations */
+	/* Set the cxt manager pointer prior to further allocations */
p_hwfn->p_cxt_mngr = p_mngr;
+ max_num_vfs = NUM_OF_VFS(p_hwfn->p_dev);
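+
+	/* Size the per-VF CID map array by the actual number of VFs the
+	 * device supports instead of the COMMON_MAX_NUM_VFS worst case.
+	 */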
+ for (i = 0; i < MAX_CONN_TYPES; i++) {
+ acquired_vf = OSAL_CALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ max_num_vfs, sizeof(*acquired_vf));
+ if (!acquired_vf) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to allocate an array of `struct ecore_cid_acquired_map'\n");
+ return ECORE_NOMEM;
+ }
+
+ p_mngr->acquired_vf[i] = acquired_vf;
+ }
+
return ECORE_SUCCESS;
}
/* Allocate the ILT shadow table */
rc = ecore_ilt_shadow_alloc(p_hwfn);
if (rc) {
- DP_NOTICE(p_hwfn, true, "Failed to allocate ilt memory\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate ilt memory\n");
goto tables_alloc_fail;
}
/* Allocate the T2 table */
rc = ecore_cxt_src_t2_alloc(p_hwfn);
if (rc) {
- DP_NOTICE(p_hwfn, true, "Failed to allocate T2 memory\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate T2 memory\n");
goto tables_alloc_fail;
}
/* Allocate and initialize the acquired cids bitmaps */
rc = ecore_cid_map_alloc(p_hwfn);
if (rc) {
- DP_NOTICE(p_hwfn, true, "Failed to allocate cid maps\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate cid maps\n");
goto tables_alloc_fail;
}
void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn)
{
+ u32 i;
+
if (!p_hwfn->p_cxt_mngr)
return;
#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_MUTEX_DEALLOC(&p_hwfn->p_cxt_mngr->mutex);
#endif
+ for (i = 0; i < MAX_CONN_TYPES; i++)
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_cxt_mngr->acquired_vf[i]);
OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_cxt_mngr);
+
+ p_hwfn->p_cxt_mngr = OSAL_NULL;
}
void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn)
{
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 len, max_num_vfs = NUM_OF_VFS(p_hwfn->p_dev);
struct ecore_cid_acquired_map *p_map;
struct ecore_conn_type_cfg *p_cfg;
int type;
- u32 len;
/* Reset acquired cids */
for (type = 0; type < MAX_CONN_TYPES; type++) {
if (!p_cfg->cids_per_vf)
continue;
- for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) {
+ for (vf = 0; vf < max_num_vfs; vf++) {
p_map = &p_mngr->acquired_vf[type][vf];
len = DIV_ROUND_UP(p_map->max_count,
BITS_PER_MAP_WORD) *
}
}
-void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ bool is_pf_loading)
{
struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
- struct ecore_mcp_link_state *p_link;
struct ecore_qm_iids iids;
OSAL_MEM_ZERO(&iids, sizeof(iids));
ecore_cxt_qm_iids(p_hwfn, &iids);
-
- p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
-
- ecore_qm_pf_rt_init(p_hwfn, p_ptt, p_hwfn->port_id,
- p_hwfn->rel_pf_id, qm_info->max_phys_tcs_per_port,
+ ecore_qm_pf_rt_init(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+ qm_info->max_phys_tcs_per_port,
+ is_pf_loading,
iids.cids, iids.vf_cids, iids.tids,
qm_info->start_pq,
qm_info->num_pqs - qm_info->num_vf_pqs,
qm_info->num_vf_pqs,
qm_info->start_vport,
qm_info->num_vports, qm_info->pf_wfq,
- qm_info->pf_rl, p_link->speed,
+ qm_info->pf_rl,
p_hwfn->qm_info.qm_pq_params,
p_hwfn->qm_info.qm_vport_params);
}
{
struct ecore_ilt_client_cfg *clients;
struct ecore_cxt_mngr *p_mngr;
- struct ecore_dma_mem *p_shdw;
+ struct phys_mem_desc *p_shdw;
u32 line, rt_offst, i;
ecore_ilt_bounds_init(p_hwfn);
/** p_virt could be OSAL_NULL incase of dynamic
* allocation
*/
- if (p_shdw[line].p_virt != OSAL_NULL) {
+ if (p_shdw[line].virt_addr != OSAL_NULL) {
SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
- (p_shdw[line].p_phys >> 12));
+ (p_shdw[line].phys_addr >> 12));
DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
"Setting RT[0x%08x] from"
" Physical addr: 0x%lx\n",
rt_offst, line, i,
(unsigned long)(p_shdw[line].
- p_phys >> 12));
+ phys_addr >> 12));
}
STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
OSAL_LOG2(rounded_conn_num));
STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
- p_hwfn->p_cxt_mngr->first_free);
+ p_hwfn->p_cxt_mngr->src_t2.first_free);
STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
- p_hwfn->p_cxt_mngr->last_free);
+ p_hwfn->p_cxt_mngr->src_t2.last_free);
DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
"Configured SEARCHER for 0x%08x connections\n",
conn_num);
u8 i;
OSAL_MEM_ZERO(&tm_iids, sizeof(tm_iids));
- ecore_cxt_tm_iids(p_mngr, &tm_iids);
+ ecore_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
/* @@@TBD No pre-scan for now */
- /* Note: We assume consecutive VFs for a PF */
- for (i = 0; i < p_mngr->vf_count; i++) {
cfg_word = 0;
SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
- SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
+ SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all */
+ /* Note: We assume consecutive VFs for a PF */
+ for (i = 0; i < p_mngr->vf_count; i++) {
rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
(sizeof(cfg_word) / sizeof(u32)) *
(p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);
(NUM_OF_VFS(p_hwfn->p_dev) + p_hwfn->rel_pf_id);
STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
- /* enale scan */
+ /* enable scan */
STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET,
tm_iids.pf_cids ? 0x1 : 0x0);
void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
- ecore_qm_init_pf(p_hwfn, p_ptt);
+ ecore_qm_init_pf(p_hwfn, p_ptt, true);
ecore_cm_init_pf(p_hwfn);
ecore_dq_init_pf(p_hwfn);
ecore_cdu_init_pf(p_hwfn);
enum protocol_type type,
u32 *p_cid, u8 vfid)
{
+ u32 rel_cid, max_num_vfs = NUM_OF_VFS(p_hwfn->p_dev);
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct ecore_cid_acquired_map *p_map;
- u32 rel_cid;
if (type >= MAX_CONN_TYPES) {
DP_NOTICE(p_hwfn, true, "Invalid protocol type %d", type);
return ECORE_INVAL;
}
- if (vfid >= COMMON_MAX_NUM_VFS && vfid != ECORE_CXT_PF_CID) {
+ if (vfid >= max_num_vfs && vfid != ECORE_CXT_PF_CID) {
DP_NOTICE(p_hwfn, true, "VF [%02x] is out of range\n", vfid);
return ECORE_INVAL;
}
}
rel_cid = cid - (*pp_map)->start_cid;
- if (!OSAL_TEST_BIT(rel_cid, (*pp_map)->cid_map)) {
+ if (!OSAL_GET_BIT(rel_cid, (*pp_map)->cid_map)) {
DP_NOTICE(p_hwfn, true,
"CID %d [vifd %02x] not acquired", cid, vfid);
goto fail;
void _ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid, u8 vfid)
{
+ u32 rel_cid, max_num_vfs = NUM_OF_VFS(p_hwfn->p_dev);
struct ecore_cid_acquired_map *p_map = OSAL_NULL;
enum protocol_type type;
bool b_acquired;
- u32 rel_cid;
- if (vfid != ECORE_CXT_PF_CID && vfid > COMMON_MAX_NUM_VFS) {
+ if (vfid != ECORE_CXT_PF_CID && vfid > max_num_vfs) {
DP_NOTICE(p_hwfn, true,
"Trying to return incorrect CID belonging to VF %02x\n",
vfid);
line = p_info->iid / cxts_per_p;
/* Make sure context is allocated (dynamic allocation) */
- if (!p_mngr->ilt_shadow[line].p_virt)
+ if (!p_mngr->ilt_shadow[line].virt_addr)
return ECORE_INVAL;
- p_info->p_cxt = (u8 *)p_mngr->ilt_shadow[line].p_virt +
+ p_info->p_cxt = (u8 *)p_mngr->ilt_shadow[line].virt_addr +
p_info->iid % cxts_per_p * conn_cxt_size;
DP_VERBOSE(p_hwfn, (ECORE_MSG_ILT | ECORE_MSG_CXT),
count = p_params->num_arfs_filters;
- if (!OSAL_TEST_BIT(ECORE_MF_DISABLE_ARFS,
+ if (!OSAL_GET_BIT(ECORE_MF_DISABLE_ARFS,
&p_hwfn->p_dev->mf_bits))
p_hwfn->p_cxt_mngr->arfs_count = count;
OSAL_MUTEX_ACQUIRE(&p_hwfn->p_cxt_mngr->mutex);
- if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt)
+ if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr)
goto out0;
p_ptt = ecore_ptt_acquire(p_hwfn);
}
OSAL_MEM_ZERO(p_virt, p_blk->real_size_in_page);
- p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt = p_virt;
- p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys = p_phys;
+ p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr = p_virt;
+ p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr = p_phys;
p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size =
p_blk->real_size_in_page;
SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
SET_FIELD(ilt_hw_entry,
ILT_ENTRY_PHY_ADDR,
- (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys >> 12));
+ (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr >> 12));
/* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)&ilt_hw_entry,
reg_offset, sizeof(ilt_hw_entry) / sizeof(u32),
- 0 /* no flags */);
-
- if (elem_type == ECORE_ELEM_CXT) {
- u32 last_cid_allocated = (1 + (iid / elems_per_p)) *
- elems_per_p;
-
- /* Update the relevant register in the parser */
- ecore_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF,
- last_cid_allocated - 1);
-
- if (!p_hwfn->b_rdma_enabled_in_prs) {
- /* Enable RoCE search */
- ecore_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
- p_hwfn->b_rdma_enabled_in_prs = true;
- }
- }
+ OSAL_NULL /* default parameters */);
out1:
ecore_ptt_release(p_hwfn, p_ptt);
}
for (i = shadow_start_line; i < shadow_end_line; i++) {
- if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt)
+ if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr)
continue;
OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
- p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt,
- p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys,
- p_hwfn->p_cxt_mngr->ilt_shadow[i].size);
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr,
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr,
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].size);
- p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt = OSAL_NULL;
- p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys = 0;
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr = OSAL_NULL;
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr = 0;
p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;
/* compute absolute offset */
(u64)(osal_uintptr_t)&ilt_hw_entry,
reg_offset,
sizeof(ilt_hw_entry) / sizeof(u32),
- 0 /* no flags */);
+ OSAL_NULL /* default parameters */);
}
ecore_ptt_release(p_hwfn, p_ptt);
return ECORE_SUCCESS;
}
+
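+/* Number of ILT pages spanned by a client block; 0 if the block is unused */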
+static u16 ecore_blk_calculate_pages(struct ecore_ilt_cli_blk *p_blk)
+{
+ if (p_blk->real_size_in_page == 0)
+ return 0;
+
+ return DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
+}
+
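+/* Total CDUT 'init' (force-load) pages over all PF task segments */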
+u16 ecore_get_cdut_num_pf_init_pages(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ilt_client_cfg *p_cli;
+ struct ecore_ilt_cli_blk *p_blk;
+ u16 i, pages = 0;
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];
+ pages += ecore_blk_calculate_pages(p_blk);
+ }
+
+ return pages;
+}
+
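+/* Total CDUT 'init' (force-load) pages over the VF task segments */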
+u16 ecore_get_cdut_num_vf_init_pages(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ilt_client_cfg *p_cli;
+ struct ecore_ilt_cli_blk *p_blk;
+ u16 i, pages = 0;
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ for (i = 0; i < NUM_TASK_VF_SEGMENTS; i++) {
+ p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(i, VF)];
+ pages += ecore_blk_calculate_pages(p_blk);
+ }
+
+ return pages;
+}
+
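+/* Total CDUT 'working' pages over all PF task segments */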
+u16 ecore_get_cdut_num_pf_work_pages(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ilt_client_cfg *p_cli;
+ struct ecore_ilt_cli_blk *p_blk;
+ u16 i, pages = 0;
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
+ pages += ecore_blk_calculate_pages(p_blk);
+ }
+
+ return pages;
+}
+
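+/* Total CDUT 'working' pages over the VF task segments */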
+u16 ecore_get_cdut_num_vf_work_pages(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ilt_client_cfg *p_cli;
+ struct ecore_ilt_cli_blk *p_blk;
+ u16 pages = 0, i;
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ for (i = 0; i < NUM_TASK_VF_SEGMENTS; i++) {
+ p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(i)];
+ pages += ecore_blk_calculate_pages(p_blk);
+ }
+
+ return pages;
+}