1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2016 - 2018 Cavium Inc.
10 #include "ecore_hsi_common.h"
11 #include "ecore_proto_if.h"
12 #include "ecore_cxt_api.h"
14 /* Tasks segments definitions */
15 #define ECORE_CXT_ISCSI_TID_SEG PROTOCOLID_ISCSI /* 0 */
16 #define ECORE_CXT_FCOE_TID_SEG PROTOCOLID_FCOE /* 1 */
17 #define ECORE_CXT_ROCE_TID_SEG PROTOCOLID_ROCE /* 2 */
19 enum ecore_cxt_elem_type {
37 u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn,
38 enum protocol_type type,
41 u32 ecore_cxt_get_proto_tid_count(struct ecore_hwfn *p_hwfn,
42 enum protocol_type type);
44 u32 ecore_cxt_get_proto_cid_start(struct ecore_hwfn *p_hwfn,
45 enum protocol_type type);
46 u32 ecore_cxt_get_srq_count(struct ecore_hwfn *p_hwfn);
49 * @brief ecore_cxt_set_pf_params - Set the PF params for cxt init
53 * @return enum _ecore_status_t
55 enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn);
58 * @brief ecore_cxt_cfg_ilt_compute - compute ILT init parameters
62 * @return enum _ecore_status_t
64 enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn);
67 * @brief ecore_cxt_mngr_alloc - Allocate and init the context manager struct
71 * @return enum _ecore_status_t
73 enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn);
76 * @brief ecore_cxt_mngr_free
80 void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn);
83 * @brief ecore_cxt_tables_alloc - Allocate ILT shadow, Searcher T2, acquired
88 * @return enum _ecore_status_t
90 enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn);
93 * @brief ecore_cxt_mngr_setup - Reset the acquired CIDs
97 void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn);
100 * @brief ecore_cxt_hw_init_common - Initialize ILT and DQ, common phase, per
105 void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn);
108 * @brief ecore_cxt_hw_init_pf - Initialize ILT and DQ, PF phase, per path.
113 void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
116 * @brief ecore_qm_init_pf - Initialize the QM PF phase, per path
120 * @param is_pf_loading
122 void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
126 * @brief Reconfigures QM pf on the fly
131 * @return enum _ecore_status_t
133 enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
134 struct ecore_ptt *p_ptt);
136 #define ECORE_CXT_PF_CID (0xff)
139 * @brief ecore_cxt_release_cid - Release a cid
144 void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid);
147 * @brief _ecore_cxt_release_cid - Release a cid belonging to a vf-queue
151 * @param vfid - engine relative index. ECORE_CXT_PF_CID if belongs to PF
153 void _ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn,
157 * @brief ecore_cxt_acquire_cid - Acquire a new cid of a specific protocol type
163 * @return enum _ecore_status_t
165 enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
166 enum protocol_type type,
170 * @brief _ecore_cxt_acquire_cid - Acquire a new cid of a specific protocol type
176 * @param vfid - engine relative index. ECORE_CXT_PF_CID if belongs to PF
178 * @return enum _ecore_status_t
180 enum _ecore_status_t _ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
181 enum protocol_type type,
182 u32 *p_cid, u8 vfid);
185 * @brief ecore_cxt_dynamic_ilt_alloc - function checks if the
186 * page containing the iid in the ilt is already
187 * allocated, if it is not it allocates the page.
193 * @return enum _ecore_status_t
196 ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
197 enum ecore_cxt_elem_type elem_type,
201 * @brief ecore_cxt_free_proto_ilt - function frees ilt pages
202 * associated with the protocol passed.
207 * @return enum _ecore_status_t
209 enum _ecore_status_t ecore_cxt_free_proto_ilt(struct ecore_hwfn *p_hwfn,
210 enum protocol_type proto);
212 #define ECORE_CTX_WORKING_MEM 0
213 #define ECORE_CTX_FL_MEM 1
215 /* Max number of connection types in HW (DQ/CDU etc.) */
216 #define MAX_CONN_TYPES PROTOCOLID_COMMON
217 #define NUM_TASK_TYPES 2
218 #define NUM_TASK_PF_SEGMENTS 4
219 #define NUM_TASK_VF_SEGMENTS 1
221 /* PF per protocol configuration object */
222 #define TASK_SEGMENTS (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
223 #define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)
225 struct ecore_tid_seg {
231 struct ecore_conn_type_cfg {
234 struct ecore_tid_seg tid_seg[TASK_SEGMENTS];
237 /* ILT Client configuration,
238 * Per connection type (protocol) resources (cids, tis, vf cids etc.)
239 * 1 - for connection context (CDUC) and for each task context we need two
240 * values, for regular task context and for force load memory
242 #define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2)
243 #define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2)
246 #define CDUT_SEG_BLK(n) (1 + (u8)(n))
247 #define CDUT_FL_SEG_BLK(n, X) (1 + (n) + NUM_TASK_##X##_SEGMENTS)
249 struct ilt_cfg_pair {
254 struct ecore_ilt_cli_blk {
255 u32 total_size; /* 0 means not active */
256 u32 real_size_in_page;
258 u32 dynamic_line_offset;
259 u32 dynamic_line_cnt;
262 struct ecore_ilt_client_cfg {
266 struct ilt_cfg_pair first;
267 struct ilt_cfg_pair last;
268 struct ilt_cfg_pair p_size;
270 /* ILT client blocks for PF */
271 struct ecore_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
274 /* ILT client blocks for VFs */
275 struct ecore_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
279 #define MAP_WORD_SIZE sizeof(unsigned long)
280 #define BITS_PER_MAP_WORD (MAP_WORD_SIZE * 8)
282 struct ecore_cid_acquired_map {
288 struct ecore_src_t2 {
289 struct phys_mem_desc *dma_mem;
295 struct ecore_cxt_mngr {
296 /* Per protocol configuration */
297 struct ecore_conn_type_cfg conn_cfg[MAX_CONN_TYPES];
299 /* computed ILT structure */
300 struct ecore_ilt_client_cfg clients[MAX_ILT_CLIENTS];
302 /* Task type sizes */
303 u32 task_type_size[NUM_TASK_TYPES];
305 /* total number of VFs for this hwfn -
306 * ALL VFs are symmetric in terms of HW resources
312 struct ecore_cid_acquired_map acquired[MAX_CONN_TYPES];
313 struct ecore_cid_acquired_map *acquired_vf[MAX_CONN_TYPES];
315 /* ILT shadow table */
316 struct phys_mem_desc *ilt_shadow;
320 /* Mutex for a dynamic ILT allocation */
324 struct ecore_src_t2 src_t2;
326 /* The infrastructure originally was very generic and context/task
327 * oriented - per connection-type we would set how many of those
328 * are needed, and later when determining how much memory we're
329 * needing for a given block we'd iterate over all the relevant
331 * But since then we've had some additional resources, some of which
332 * require memory which is independent of the general context/task
333 * scheme. We add those here explicitly per-feature.
336 /* total number of SRQ's for this hwfn */
339 /* Maximal number of L2 steering filters */
342 /* TODO - VF arfs filters ? */
349 u16 ecore_get_cdut_num_pf_init_pages(struct ecore_hwfn *p_hwfn);
350 u16 ecore_get_cdut_num_vf_init_pages(struct ecore_hwfn *p_hwfn);
351 u16 ecore_get_cdut_num_pf_work_pages(struct ecore_hwfn *p_hwfn);
352 u16 ecore_get_cdut_num_vf_work_pages(struct ecore_hwfn *p_hwfn);
353 #endif /* _ECORE_CID_ */