1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2016 - 2018 Cavium Inc.
7 #ifndef __ECORE_HSI_COMMON__
8 #define __ECORE_HSI_COMMON__
9 /********************************/
10 /* Add include to common target */
11 /********************************/
12 #include "common_hsi.h"
16 * opcodes for the event ring
18 enum common_event_opcode {
19 COMMON_EVENT_PF_START,
21 COMMON_EVENT_VF_START,
23 COMMON_EVENT_VF_PF_CHANNEL,
25 COMMON_EVENT_PF_UPDATE,
26 COMMON_EVENT_MALICIOUS_VF,
27 COMMON_EVENT_RL_UPDATE,
29 MAX_COMMON_EVENT_OPCODE
34 * Common Ramrod Command IDs
36 enum common_ramrod_cmd_id {
38 COMMON_RAMROD_PF_START /* PF Function Start Ramrod */,
39 COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */,
40 COMMON_RAMROD_VF_START /* VF Function Start */,
41 COMMON_RAMROD_VF_STOP /* VF Function Stop Ramrod */,
42 COMMON_RAMROD_PF_UPDATE /* PF update Ramrod */,
43 COMMON_RAMROD_RL_UPDATE /* QCN/DCQCN RL update Ramrod */,
44 COMMON_RAMROD_EMPTY /* Empty Ramrod */,
45 MAX_COMMON_RAMROD_CMD_ID
50 * The core storm context for the Ystorm
52 struct ystorm_core_conn_st_ctx {
57 * The core storm context for the Pstorm
59 struct pstorm_core_conn_st_ctx {
64 * Core Slowpath Connection storm context of Xstorm
66 struct xstorm_core_conn_st_ctx {
67 __le32 spq_base_lo /* SPQ Ring Base Address low dword */;
68 __le32 spq_base_hi /* SPQ Ring Base Address high dword */;
69 /* Consolidation Ring Base Address */
70 struct regpair consolid_base_addr;
71 __le16 spq_cons /* SPQ Ring Consumer */;
72 __le16 consolid_cons /* Consolidation Ring Consumer */;
73 __le32 reserved0[55] /* Pad to 15 cycles */;
76 struct xstorm_core_conn_ag_ctx {
77 u8 reserved0 /* cdu_validation */;
78 u8 core_state /* state */;
80 #define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
81 #define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
82 #define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1 /* exist_in_qm1 */
83 #define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
84 #define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1 /* exist_in_qm2 */
85 #define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
86 #define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 /* exist_in_qm3 */
87 #define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
88 #define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1 /* bit4 */
89 #define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
91 #define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
92 #define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
93 #define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1 /* bit6 */
94 #define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
95 #define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1 /* bit7 */
96 #define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
98 #define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1 /* bit8 */
99 #define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
100 #define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1 /* bit9 */
101 #define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
102 #define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1 /* bit10 */
103 #define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
104 #define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1 /* bit11 */
105 #define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
106 #define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1 /* bit12 */
107 #define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
108 #define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1 /* bit13 */
109 #define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
110 #define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 /* bit14 */
111 #define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
112 #define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 /* bit15 */
113 #define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
115 #define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
116 #define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
117 #define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
118 #define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
119 #define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
120 #define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
122 #define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
123 #define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
125 #define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
126 #define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
127 #define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
128 #define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
129 #define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
130 #define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
131 #define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
132 #define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
134 #define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
135 #define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
136 #define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
137 #define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
138 #define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
139 #define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
140 #define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3 /* cf11 */
141 #define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
143 #define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3 /* cf12 */
144 #define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
145 #define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3 /* cf13 */
146 #define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
147 #define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3 /* cf14 */
148 #define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
149 #define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3 /* cf15 */
150 #define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
152 #define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3 /* cf16 */
153 #define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
154 #define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3 /* cf_array_cf */
155 #define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
156 #define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3 /* cf18 */
157 #define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
158 #define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 /* cf19 */
159 #define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
161 #define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf20 */
162 #define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
163 #define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3 /* cf21 */
164 #define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
165 #define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 /* cf22 */
166 #define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
167 #define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
168 #define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
169 #define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
170 #define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
172 #define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
173 #define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
174 #define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
175 #define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
176 #define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
177 #define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
178 #define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
179 #define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
180 #define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
181 #define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
182 #define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
183 #define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
184 #define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
185 #define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
186 #define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
187 #define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
189 #define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
190 #define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
191 #define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1 /* cf11en */
192 #define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
193 #define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1 /* cf12en */
194 #define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
195 #define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1 /* cf13en */
196 #define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
197 #define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1 /* cf14en */
198 #define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
199 #define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1 /* cf15en */
200 #define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
201 #define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1 /* cf16en */
202 #define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
204 #define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
205 #define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
207 #define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 /* cf18en */
208 #define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
209 #define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 /* cf19en */
210 #define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
211 #define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 /* cf20en */
212 #define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
213 #define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1 /* cf21en */
214 #define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
215 #define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 /* cf22en */
216 #define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
217 #define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1 /* cf23en */
218 #define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
219 #define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1 /* rule0en */
220 #define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
221 #define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1 /* rule1en */
222 #define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
224 #define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1 /* rule2en */
225 #define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
226 #define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1 /* rule3en */
227 #define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
228 #define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 /* rule4en */
229 #define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
230 #define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
231 #define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
232 #define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
233 #define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
234 #define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
235 #define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
236 #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 /* rule8en */
237 #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
238 #define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1 /* rule9en */
239 #define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
241 #define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1 /* rule10en */
242 #define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
243 #define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1 /* rule11en */
244 #define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
245 #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 /* rule12en */
246 #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
247 #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 /* rule13en */
248 #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
249 #define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1 /* rule14en */
250 #define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
251 #define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1 /* rule15en */
252 #define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
253 #define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1 /* rule16en */
254 #define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
255 #define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1 /* rule17en */
256 #define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
258 #define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1 /* rule18en */
259 #define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
260 #define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1 /* rule19en */
261 #define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
262 #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 /* rule20en */
263 #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
264 #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 /* rule21en */
265 #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
266 #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 /* rule22en */
267 #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
268 #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 /* rule23en */
269 #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
270 #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 /* rule24en */
271 #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
272 #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 /* rule25en */
273 #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
275 #define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1 /* bit16 */
276 #define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
277 #define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1 /* bit17 */
278 #define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
279 #define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1 /* bit18 */
280 #define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
281 #define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1 /* bit19 */
282 #define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
283 #define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1 /* bit20 */
284 #define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
285 #define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1 /* bit21 */
286 #define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
287 #define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3 /* cf23 */
288 #define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
289 u8 byte2 /* byte2 */;
290 __le16 physical_q0 /* physical_q0 */;
291 __le16 consolid_prod /* physical_q1 */;
292 __le16 reserved16 /* physical_q2 */;
293 __le16 tx_bd_cons /* word3 */;
294 __le16 tx_bd_or_spq_prod /* word4 */;
295 __le16 updated_qm_pq_id /* word5 */;
296 __le16 conn_dpi /* conn_dpi */;
297 u8 byte3 /* byte3 */;
298 u8 byte4 /* byte4 */;
299 u8 byte5 /* byte5 */;
300 u8 byte6 /* byte6 */;
301 __le32 reg0 /* reg0 */;
302 __le32 reg1 /* reg1 */;
303 __le32 reg2 /* reg2 */;
304 __le32 reg3 /* reg3 */;
305 __le32 reg4 /* reg4 */;
306 __le32 reg5 /* cf_array0 */;
307 __le32 reg6 /* cf_array1 */;
308 __le16 word7 /* word7 */;
309 __le16 word8 /* word8 */;
310 __le16 word9 /* word9 */;
311 __le16 word10 /* word10 */;
312 __le32 reg7 /* reg7 */;
313 __le32 reg8 /* reg8 */;
314 __le32 reg9 /* reg9 */;
315 u8 byte7 /* byte7 */;
316 u8 byte8 /* byte8 */;
317 u8 byte9 /* byte9 */;
318 u8 byte10 /* byte10 */;
319 u8 byte11 /* byte11 */;
320 u8 byte12 /* byte12 */;
321 u8 byte13 /* byte13 */;
322 u8 byte14 /* byte14 */;
323 u8 byte15 /* byte15 */;
324 u8 e5_reserved /* e5_reserved */;
325 __le16 word11 /* word11 */;
326 __le32 reg10 /* reg10 */;
327 __le32 reg11 /* reg11 */;
328 __le32 reg12 /* reg12 */;
329 __le32 reg13 /* reg13 */;
330 __le32 reg14 /* reg14 */;
331 __le32 reg15 /* reg15 */;
332 __le32 reg16 /* reg16 */;
333 __le32 reg17 /* reg17 */;
334 __le32 reg18 /* reg18 */;
335 __le32 reg19 /* reg19 */;
336 __le16 word12 /* word12 */;
337 __le16 word13 /* word13 */;
338 __le16 word14 /* word14 */;
339 __le16 word15 /* word15 */;
342 struct tstorm_core_conn_ag_ctx {
343 u8 byte0 /* cdu_validation */;
344 u8 byte1 /* state */;
346 #define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
347 #define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
348 #define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
349 #define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
350 #define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
351 #define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
352 #define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
353 #define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
354 #define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
355 #define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
356 #define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
357 #define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
358 #define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
359 #define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
361 #define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
362 #define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
363 #define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
364 #define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
365 #define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
366 #define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
367 #define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
368 #define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
370 #define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
371 #define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
372 #define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
373 #define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
374 #define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
375 #define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
376 #define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
377 #define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
379 #define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
380 #define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
381 #define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
382 #define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
383 #define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
384 #define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
385 #define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
386 #define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
387 #define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
388 #define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
389 #define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
390 #define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
392 #define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
393 #define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
394 #define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
395 #define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
396 #define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
397 #define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
398 #define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
399 #define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
400 #define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
401 #define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
402 #define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
403 #define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
404 #define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
405 #define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
406 #define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
407 #define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
409 #define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
410 #define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
411 #define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
412 #define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
413 #define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
414 #define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
415 #define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
416 #define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
417 #define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
418 #define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
419 #define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
420 #define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
421 #define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
422 #define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
423 #define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
424 #define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
425 __le32 reg0 /* reg0 */;
426 __le32 reg1 /* reg1 */;
427 __le32 reg2 /* reg2 */;
428 __le32 reg3 /* reg3 */;
429 __le32 reg4 /* reg4 */;
430 __le32 reg5 /* reg5 */;
431 __le32 reg6 /* reg6 */;
432 __le32 reg7 /* reg7 */;
433 __le32 reg8 /* reg8 */;
434 u8 byte2 /* byte2 */;
435 u8 byte3 /* byte3 */;
436 __le16 word0 /* word0 */;
437 u8 byte4 /* byte4 */;
438 u8 byte5 /* byte5 */;
439 __le16 word1 /* word1 */;
440 __le16 word2 /* conn_dpi */;
441 __le16 word3 /* word3 */;
442 __le32 reg9 /* reg9 */;
443 __le32 reg10 /* reg10 */;
446 struct ustorm_core_conn_ag_ctx {
447 u8 reserved /* cdu_validation */;
448 u8 byte1 /* state */;
450 #define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
451 #define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
452 #define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
453 #define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
454 #define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
455 #define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
456 #define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
457 #define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
458 #define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
459 #define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
461 #define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
462 #define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
463 #define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
464 #define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
465 #define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
466 #define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
467 #define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
468 #define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
470 #define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
471 #define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
472 #define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
473 #define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
474 #define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
475 #define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
476 #define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
477 #define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
478 #define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
479 #define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
480 #define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
481 #define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
482 #define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
483 #define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
484 #define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
485 #define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
487 #define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
488 #define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
489 #define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
490 #define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
491 #define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
492 #define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
493 #define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
494 #define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
495 #define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
496 #define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
497 #define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
498 #define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
499 #define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
500 #define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
501 #define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
502 #define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
503 u8 byte2 /* byte2 */;
504 u8 byte3 /* byte3 */;
505 __le16 word0 /* conn_dpi */;
506 __le16 word1 /* word1 */;
507 __le32 rx_producers /* reg0 */;
508 __le32 reg1 /* reg1 */;
509 __le32 reg2 /* reg2 */;
510 __le32 reg3 /* reg3 */;
511 __le16 word2 /* word2 */;
512 __le16 word3 /* word3 */;
516 * The core storm context for the Mstorm
518 struct mstorm_core_conn_st_ctx {
523 * The core storm context for the Ustorm
525 struct ustorm_core_conn_st_ctx {
530 * core connection context
532 struct core_conn_context {
533 /* ystorm storm context */
534 struct ystorm_core_conn_st_ctx ystorm_st_context;
535 struct regpair ystorm_st_padding[2] /* padding */;
536 /* pstorm storm context */
537 struct pstorm_core_conn_st_ctx pstorm_st_context;
538 struct regpair pstorm_st_padding[2] /* padding */;
539 /* xstorm storm context */
540 struct xstorm_core_conn_st_ctx xstorm_st_context;
541 /* xstorm aggregative context */
542 struct xstorm_core_conn_ag_ctx xstorm_ag_context;
543 /* tstorm aggregative context */
544 struct tstorm_core_conn_ag_ctx tstorm_ag_context;
545 /* ustorm aggregative context */
546 struct ustorm_core_conn_ag_ctx ustorm_ag_context;
547 /* mstorm storm context */
548 struct mstorm_core_conn_st_ctx mstorm_st_context;
549 /* ustorm storm context */
550 struct ustorm_core_conn_st_ctx ustorm_st_context;
551 struct regpair ustorm_st_padding[2] /* padding */;
556 * How LL2 should deal with a packet upon error
558 enum core_error_handle {
559 LL2_DROP_PACKET /* If error occurs drop packet */,
560 LL2_DO_NOTHING /* If error occurs do nothing */,
561 LL2_ASSERT /* If error occurs assert */,
562 MAX_CORE_ERROR_HANDLE
567 * opcodes for the event ring
569 enum core_event_opcode {
570 CORE_EVENT_TX_QUEUE_START,
571 CORE_EVENT_TX_QUEUE_STOP,
572 CORE_EVENT_RX_QUEUE_START,
573 CORE_EVENT_RX_QUEUE_STOP,
574 CORE_EVENT_RX_QUEUE_FLUSH,
575 CORE_EVENT_TX_QUEUE_UPDATE,
576 MAX_CORE_EVENT_OPCODE
581 * The L4 pseudo checksum mode for Core
583 enum core_l4_pseudo_checksum_mode {
584 /* Pseudo Checksum on packet is calculated with the correct packet length. */
585 CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH,
586 /* Pseudo Checksum on packet is calculated with zero length. */
587 CORE_L4_PSEUDO_CSUM_ZERO_LENGTH,
588 MAX_CORE_L4_PSEUDO_CHECKSUM_MODE
593 * Core LL2 port statistics in Tstorm RAM (GSI error/drop counters)
595 struct core_ll2_port_stats {
596 struct regpair gsi_invalid_hdr;
597 struct regpair gsi_invalid_pkt_length;
598 struct regpair gsi_unsupported_pkt_typ;
599 struct regpair gsi_crcchksm_error;
604 * Ethernet TX Per Queue Stats
606 struct core_ll2_pstorm_per_queue_stat {
607 /* number of total bytes sent without errors */
608 struct regpair sent_ucast_bytes;
609 /* number of total bytes sent without errors */
610 struct regpair sent_mcast_bytes;
611 /* number of total bytes sent without errors */
612 struct regpair sent_bcast_bytes;
613 /* number of total packets sent without errors */
614 struct regpair sent_ucast_pkts;
615 /* number of total packets sent without errors */
616 struct regpair sent_mcast_pkts;
617 /* number of total packets sent without errors */
618 struct regpair sent_bcast_pkts;
623 * Light-L2 RX Producers in Tstorm RAM
625 struct core_ll2_rx_prod {
626 __le16 bd_prod /* BD Producer */;
627 __le16 cqe_prod /* CQE Producer */;
632 struct core_ll2_tstorm_per_queue_stat {
633 /* Number of packets discarded because they are bigger than MTU */
634 struct regpair packet_too_big_discard;
635 /* Number of packets discarded due to lack of host buffers */
636 struct regpair no_buff_discard;
640 struct core_ll2_ustorm_per_queue_stat {
641 struct regpair rcv_ucast_bytes;
642 struct regpair rcv_mcast_bytes;
643 struct regpair rcv_bcast_bytes;
644 struct regpair rcv_ucast_pkts;
645 struct regpair rcv_mcast_pkts;
646 struct regpair rcv_bcast_pkts;
651 * Core Ramrod Command IDs (light L2)
653 enum core_ramrod_cmd_id {
655 CORE_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */,
656 CORE_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */,
657 CORE_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */,
658 CORE_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
659 CORE_RAMROD_RX_QUEUE_FLUSH /* RX Flush queue Ramrod */,
660 CORE_RAMROD_TX_QUEUE_UPDATE /* TX Queue Update Ramrod */,
661 MAX_CORE_RAMROD_CMD_ID
666 * Core RoCE flavor type for Light L2
668 enum core_roce_flavor_type {
671 MAX_CORE_ROCE_FLAVOR_TYPE
676 * Specifies how ll2 should deal with packets errors: packet_too_big and no_buff
678 struct core_rx_action_on_error {
680 /* ll2 how to handle error packet_too_big (use enum core_error_handle) */
681 #define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK 0x3
682 #define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0
683 /* ll2 how to handle error with no_buff (use enum core_error_handle) */
684 #define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK 0x3
685 #define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT 2
686 #define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK 0xF
687 #define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT 4
692 * Core RX BD for Light L2
701 * Core RX CM offload BD for Light L2
703 struct core_rx_bd_with_buff_len {
710 * Core RX BD union (static or dynamic buffer length) for Light L2
712 union core_rx_bd_union {
713 struct core_rx_bd rx_bd /* Core Rx Bd static buffer size */;
714 /* Core Rx Bd with dynamic buffer length */
715 struct core_rx_bd_with_buff_len rx_bd_with_len;
721 * Opaque data for Light L2 RX CQE.
723 struct core_rx_cqe_opaque_data {
724 __le32 data[2] /* Opaque CQE Data */;
729 * Core RX CQE Type for Light L2
731 enum core_rx_cqe_type {
732 CORE_RX_CQE_ILLIGAL_TYPE /* Bad RX Cqe type */,
733 CORE_RX_CQE_TYPE_REGULAR /* Regular Core RX CQE */,
734 CORE_RX_CQE_TYPE_GSI_OFFLOAD /* Fp Gsi offload RX CQE */,
735 CORE_RX_CQE_TYPE_SLOW_PATH /* Slow path Core RX CQE */,
741 * Core RX fast path CQE for Light L2.
743 struct core_rx_fast_path_cqe {
744 u8 type /* CQE type */;
745 /* Offset (in bytes) of the packet from start of the buffer */
747 /* Parsing and error flags from the parser */
748 struct parsing_and_err_flags parse_flags;
749 __le16 packet_length /* Total packet length (from the parser) */;
750 __le16 vlan /* 802.1q VLAN tag */;
751 struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */;
752 /* bit- map: each bit represents a specific error. errors indications are
753 * provided by the cracker. see spec for detailed description
755 struct parsing_err_flags err_flags;
761 * Core RX GSI (RoCE CM) offload CQE.
763 struct core_rx_gsi_offload_cqe {
764 u8 type /* CQE type */;
765 u8 data_length_error /* set if gsi data is bigger than buff */;
766 /* Parsing and error flags from the parser */
767 struct parsing_and_err_flags parse_flags;
768 __le16 data_length /* Total packet length (from the parser) */;
769 __le16 vlan /* 802.1q VLAN tag */;
770 __le32 src_mac_addrhi /* hi 4 bytes source mac address */;
771 __le16 src_mac_addrlo /* lo 2 bytes of source mac address */;
772 /* These are the lower 16 bit of QP id in RoCE BTH header */
774 __le32 src_qp /* Source QP from DETH header */;
779 * Core RX slow path CQE for Light L2.
781 struct core_rx_slow_path_cqe {
782 u8 type /* CQE type */;
785 struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */;
790 * Core RX CQE union (fast path / GSI offload / slow path) for Light L2
792 union core_rx_cqe_union {
793 struct core_rx_fast_path_cqe rx_cqe_fp /* Fast path CQE */;
794 struct core_rx_gsi_offload_cqe rx_cqe_gsi /* GSI offload CQE */;
795 struct core_rx_slow_path_cqe rx_cqe_sp /* Slow path CQE */;
803 * Ramrod data for rx queue start ramrod
805 struct core_rx_start_ramrod_data {
806 struct regpair bd_base /* bd address of the first bd page */;
807 struct regpair cqe_pbl_addr /* Base address on host of CQE PBL */;
808 __le16 mtu /* Maximum transmission unit */;
809 __le16 sb_id /* Status block ID */;
810 u8 sb_index /* index of the protocol index */;
811 u8 complete_cqe_flg /* post completion to the CQE ring if set */;
812 u8 complete_event_flg /* post completion to the event ring if set */;
813 u8 drop_ttl0_flg /* drop packet with ttl0 if set */;
814 __le16 num_of_pbl_pages /* Num of pages in CQE PBL */;
815 /* if set, inner 802.1q tags will be removed and copied to CQE */
817 u8 inner_vlan_stripping_en;
818 /* if set and inner vlan does not exist, the outer vlan will copied to CQE as
819 * inner vlan. should be used in MF_OVLAN mode only.
821 u8 report_outer_vlan;
822 u8 queue_id /* Light L2 RX Queue ID */;
823 u8 main_func_queue /* Is this the main queue for the PF */;
824 /* Duplicate broadcast packets to LL2 main queue in mf_si mode. Valid if
825 * main_func_queue is set.
827 u8 mf_si_bcast_accept_all;
828 /* Duplicate multicast packets to LL2 main queue in mf_si mode. Valid if
829 * main_func_queue is set.
831 u8 mf_si_mcast_accept_all;
832 /* Specifies how ll2 should deal with packets errors: packet_too_big and
835 struct core_rx_action_on_error action_on_error;
836 /* set when in GSI offload mode on ROCE connection */
838 /* If set, the inner vlan (802.1q tag) priority that is written to cqe will be
839 * zero out, used for TenantDcb
841 u8 wipe_inner_vlan_pri_en;
847 * Ramrod data for rx queue stop ramrod
849 struct core_rx_stop_ramrod_data {
850 u8 complete_cqe_flg /* post completion to the CQE ring if set */;
851 u8 complete_event_flg /* post completion to the event ring if set */;
852 u8 queue_id /* Light L2 RX Queue ID */;
859 * Flags for Core TX BD
861 struct core_tx_bd_data {
863 /* Do not allow additional VLAN manipulations on this packet (DCB) */
864 #define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1
865 #define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0
866 /* Insert VLAN into packet. Cannot be set for LB packets
867 * (tx_dst == CORE_TX_DEST_LB)
869 #define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1
870 #define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1
871 /* This is the first BD of the packet (for debug) */
872 #define CORE_TX_BD_DATA_START_BD_MASK 0x1
873 #define CORE_TX_BD_DATA_START_BD_SHIFT 2
874 /* Calculate the IP checksum for the packet */
875 #define CORE_TX_BD_DATA_IP_CSUM_MASK 0x1
876 #define CORE_TX_BD_DATA_IP_CSUM_SHIFT 3
877 /* Calculate the L4 checksum for the packet */
878 #define CORE_TX_BD_DATA_L4_CSUM_MASK 0x1
879 #define CORE_TX_BD_DATA_L4_CSUM_SHIFT 4
880 /* Packet is IPv6 with extensions */
881 #define CORE_TX_BD_DATA_IPV6_EXT_MASK 0x1
882 #define CORE_TX_BD_DATA_IPV6_EXT_SHIFT 5
/* If IPv6+ext, and if l4_csum is 1, then this field indicates L4 protocol:
 * 0-TCP, 1-UDP
 */
886 #define CORE_TX_BD_DATA_L4_PROTOCOL_MASK 0x1
887 #define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT 6
/* The pseudo checksum mode to place in the L4 checksum field. Required only
 * when IPv6+ext and l4_csum is set. (use enum core_l4_pseudo_checksum_mode)
 */
891 #define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK 0x1
892 #define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
893 /* Number of BDs that make up one packet - width wide enough to present
894 * CORE_LL2_TX_MAX_BDS_PER_PACKET
896 #define CORE_TX_BD_DATA_NBDS_MASK 0xF
897 #define CORE_TX_BD_DATA_NBDS_SHIFT 8
898 /* Use roce_flavor enum - Differentiate between Roce flavors is valid when
899 * connType is ROCE (use enum core_roce_flavor_type)
901 #define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1
902 #define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT 12
903 /* Calculate ip length */
904 #define CORE_TX_BD_DATA_IP_LEN_MASK 0x1
905 #define CORE_TX_BD_DATA_IP_LEN_SHIFT 13
906 /* disables the STAG insertion, relevant only in MF OVLAN mode. */
907 #define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_MASK 0x1
908 #define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_SHIFT 14
909 #define CORE_TX_BD_DATA_RESERVED0_MASK 0x1
910 #define CORE_TX_BD_DATA_RESERVED0_SHIFT 15
914 * Core TX BD for Light L2
917 struct regpair addr /* Buffer Address */;
918 __le16 nbytes /* Number of Bytes in Buffer */;
919 /* Network packets: VLAN to insert to packet (if insertion flag set) LoopBack
920 * packets: echo data to pass to Rx
922 __le16 nw_vlan_or_lb_echo;
923 struct core_tx_bd_data bd_data /* BD Flags */;
925 /* L4 Header Offset from start of packet (in Words). This is needed if both
926 * l4_csum and ipv6_ext are set
928 #define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF
929 #define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
930 /* Packet destination - Network, Loopback or Drop (use enum core_tx_dest) */
931 #define CORE_TX_BD_TX_DST_MASK 0x3
932 #define CORE_TX_BD_TX_DST_SHIFT 14
938 * Light L2 TX Destination
941 CORE_TX_DEST_NW /* TX Destination to the Network */,
942 CORE_TX_DEST_LB /* TX Destination to the Loopback */,
943 CORE_TX_DEST_RESERVED,
944 CORE_TX_DEST_DROP /* TX Drop */,
950 * Ramrod data for tx queue start ramrod
952 struct core_tx_start_ramrod_data {
953 struct regpair pbl_base_addr /* Address of the pbl page */;
954 __le16 mtu /* Maximum transmission unit */;
955 __le16 sb_id /* Status block ID */;
956 u8 sb_index /* Status block protocol index */;
957 u8 stats_en /* Statistics Enable */;
958 u8 stats_id /* Statistics Counter ID */;
959 u8 conn_type /* connection type that loaded ll2 */;
960 __le16 pbl_size /* Number of BD pages pointed by PBL */;
961 __le16 qm_pq_id /* QM PQ ID */;
962 /* set when in GSI offload mode on ROCE connection */
964 /* vport id of the current connection, used to access non_rdma_in_to_in_pri_map
973 * Ramrod data for tx queue stop ramrod
975 struct core_tx_stop_ramrod_data {
981 * Ramrod data for tx queue update ramrod
983 struct core_tx_update_ramrod_data {
984 u8 update_qm_pq_id_flg /* Flag to Update QM PQ ID */;
986 __le16 qm_pq_id /* Updated QM PQ ID */;
992 * Enum flag for what type of dcb data to update
enum dcb_dscp_update_mode {
	/* use when no change should be done to DCB data */
	DONT_UPDATE_DCB_DSCP,
	UPDATE_DCB /* use to update only L2 (vlan) priority */,
	UPDATE_DSCP /* use to update only IP DSCP */,
	UPDATE_DCB_DSCP /* update vlan pri and DSCP */,
	MAX_DCB_DSCP_UPDATE_FLAG
};
1004 struct eth_mstorm_per_pf_stat {
1005 struct regpair gre_discard_pkts /* Dropped GRE RX packets */;
1006 struct regpair vxlan_discard_pkts /* Dropped VXLAN RX packets */;
1007 struct regpair geneve_discard_pkts /* Dropped GENEVE RX packets */;
1008 struct regpair lb_discard_pkts /* Dropped Tx switched packets */;
1012 struct eth_mstorm_per_queue_stat {
1013 /* Number of packets discarded because TTL=0 (in IPv4) or hopLimit=0 (IPv6) */
1014 struct regpair ttl0_discard;
1015 /* Number of packets discarded because they are bigger than MTU */
1016 struct regpair packet_too_big_discard;
1017 /* Number of packets discarded due to lack of host buffers (BDs/SGEs/CQEs) */
1018 struct regpair no_buff_discard;
1019 /* Number of packets discarded because of no active Rx connection */
1020 struct regpair not_active_discard;
1021 /* number of coalesced packets in all TPA aggregations */
1022 struct regpair tpa_coalesced_pkts;
1023 /* total number of TPA aggregations */
1024 struct regpair tpa_coalesced_events;
1025 /* number of aggregations, which abnormally ended */
1026 struct regpair tpa_aborts_num;
1027 /* total TCP payload length in all TPA aggregations */
1028 struct regpair tpa_coalesced_bytes;
1033 * Ethernet TX Per PF
1035 struct eth_pstorm_per_pf_stat {
1036 /* number of total ucast bytes sent on loopback port without errors */
1037 struct regpair sent_lb_ucast_bytes;
1038 /* number of total mcast bytes sent on loopback port without errors */
1039 struct regpair sent_lb_mcast_bytes;
1040 /* number of total bcast bytes sent on loopback port without errors */
1041 struct regpair sent_lb_bcast_bytes;
1042 /* number of total ucast packets sent on loopback port without errors */
1043 struct regpair sent_lb_ucast_pkts;
1044 /* number of total mcast packets sent on loopback port without errors */
1045 struct regpair sent_lb_mcast_pkts;
1046 /* number of total bcast packets sent on loopback port without errors */
1047 struct regpair sent_lb_bcast_pkts;
1048 struct regpair sent_gre_bytes /* Sent GRE bytes */;
1049 struct regpair sent_vxlan_bytes /* Sent VXLAN bytes */;
1050 struct regpair sent_geneve_bytes /* Sent GENEVE bytes */;
1051 struct regpair sent_gre_pkts /* Sent GRE packets */;
1052 struct regpair sent_vxlan_pkts /* Sent VXLAN packets */;
1053 struct regpair sent_geneve_pkts /* Sent GENEVE packets */;
1054 struct regpair gre_drop_pkts /* Dropped GRE TX packets */;
1055 struct regpair vxlan_drop_pkts /* Dropped VXLAN TX packets */;
1056 struct regpair geneve_drop_pkts /* Dropped GENEVE TX packets */;
1061 * Ethernet TX Per Queue Stats
1063 struct eth_pstorm_per_queue_stat {
1064 /* number of total bytes sent without errors */
1065 struct regpair sent_ucast_bytes;
1066 /* number of total bytes sent without errors */
1067 struct regpair sent_mcast_bytes;
1068 /* number of total bytes sent without errors */
1069 struct regpair sent_bcast_bytes;
1070 /* number of total packets sent without errors */
1071 struct regpair sent_ucast_pkts;
1072 /* number of total packets sent without errors */
1073 struct regpair sent_mcast_pkts;
1074 /* number of total packets sent without errors */
1075 struct regpair sent_bcast_pkts;
1076 /* number of total packets dropped due to errors */
1077 struct regpair error_drop_pkts;
1082 * ETH Rx producers data
1084 struct eth_rx_rate_limit {
1085 /* Rate Limit Multiplier - (Storm Clock (MHz) * 8 / Desired Bandwidth (MB/s)) */
1087 /* Constant term to add (or subtract from number of cycles) */
1089 u8 add_sub_cnst /* Add (1) or subtract (0) constant term */;
1095 /* Update RSS indirection table entry command. One outstanding command supported
1098 struct eth_tstorm_rss_update_data {
1099 /* Valid flag. Driver must set this flag, FW clear valid flag when ready for new
1100 * RSS update command.
1103 /* Global VPORT ID. If RSS is disable for VPORT, RSS update command will be
1107 u8 ind_table_index /* RSS indirect table index that will be updated. */;
1109 __le16 ind_table_value /* RSS indirect table new value. */;
1110 __le16 reserved1 /* reserved. */;
1114 struct eth_ustorm_per_pf_stat {
1115 /* number of total ucast bytes received on loopback port without errors */
1116 struct regpair rcv_lb_ucast_bytes;
1117 /* number of total mcast bytes received on loopback port without errors */
1118 struct regpair rcv_lb_mcast_bytes;
1119 /* number of total bcast bytes received on loopback port without errors */
1120 struct regpair rcv_lb_bcast_bytes;
1121 /* number of total ucast packets received on loopback port without errors */
1122 struct regpair rcv_lb_ucast_pkts;
1123 /* number of total mcast packets received on loopback port without errors */
1124 struct regpair rcv_lb_mcast_pkts;
1125 /* number of total bcast packets received on loopback port without errors */
1126 struct regpair rcv_lb_bcast_pkts;
1127 struct regpair rcv_gre_bytes /* Received GRE bytes */;
1128 struct regpair rcv_vxlan_bytes /* Received VXLAN bytes */;
1129 struct regpair rcv_geneve_bytes /* Received GENEVE bytes */;
1130 struct regpair rcv_gre_pkts /* Received GRE packets */;
1131 struct regpair rcv_vxlan_pkts /* Received VXLAN packets */;
1132 struct regpair rcv_geneve_pkts /* Received GENEVE packets */;
1136 struct eth_ustorm_per_queue_stat {
1137 struct regpair rcv_ucast_bytes;
1138 struct regpair rcv_mcast_bytes;
1139 struct regpair rcv_bcast_bytes;
1140 struct regpair rcv_ucast_pkts;
1141 struct regpair rcv_mcast_pkts;
1142 struct regpair rcv_bcast_pkts;
1147 * Event Ring VF-PF Channel data
1149 struct vf_pf_channel_eqe_data {
1150 struct regpair msg_addr /* VF-PF message address */;
1154 * Event Ring malicious VF data
1156 struct malicious_vf_eqe_data {
1157 u8 vf_id /* Malicious VF ID */;
1158 u8 err_id /* Malicious VF error (use enum malicious_vf_error_id) */;
1163 * Event Ring initial cleanup data
1165 struct initial_cleanup_eqe_data {
1166 u8 vf_id /* VF ID */;
1173 union event_ring_data {
1174 u8 bytes[8] /* Byte Array */;
1175 struct vf_pf_channel_eqe_data vf_pf_channel /* VF-PF Channel data */;
1176 struct iscsi_eqe_data iscsi_info /* Dedicated fields to iscsi data */;
1177 /* Dedicated fields to iscsi connect done results */
1178 struct iscsi_connect_done_results iscsi_conn_done_info;
1179 struct malicious_vf_eqe_data malicious_vf /* Malicious VF data */;
1180 /* VF Initial Cleanup data */
1181 struct initial_cleanup_eqe_data vf_init_cleanup;
1188 struct event_ring_entry {
1189 u8 protocol_id /* Event Protocol ID (use enum protocol_type) */;
1190 u8 opcode /* Event Opcode */;
1191 __le16 reserved0 /* Reserved */;
1192 __le16 echo /* Echo value from ramrod data on the host */;
1193 u8 fw_return_code /* FW return code for SP ramrods */;
1195 /* 0: synchronous EQE - a completion of SP message. 1: asynchronous EQE */
1196 #define EVENT_RING_ENTRY_ASYNC_MASK 0x1
1197 #define EVENT_RING_ENTRY_ASYNC_SHIFT 0
1198 #define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F
1199 #define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
1200 union event_ring_data data;
1204 * Event Ring Next Page Address
1206 struct event_ring_next_addr {
1207 struct regpair addr /* Next Page Address */;
1208 __le32 reserved[2] /* Reserved */;
1212 * Event Ring Element
1214 union event_ring_element {
1215 struct event_ring_entry entry /* Event Ring Entry */;
1216 /* Event Ring Next Page Address */
1217 struct event_ring_next_addr next_addr;
1225 enum fw_flow_ctrl_mode {
1228 MAX_FW_FLOW_CTRL_MODE
enum gft_profile_type {
	/* tunnel type, inner 4 tuple, IP type and L4 type match. */
	GFT_PROFILE_TYPE_4_TUPLE,
	/* tunnel type, inner L4 destination port, IP type and L4 type match. */
	GFT_PROFILE_TYPE_L4_DST_PORT,
	/* tunnel type, inner IP destination address and IP type match. */
	GFT_PROFILE_TYPE_IP_DST_ADDR,
	/* tunnel type, inner IP source address and IP type match. */
	GFT_PROFILE_TYPE_IP_SRC_ADDR,
	GFT_PROFILE_TYPE_TUNNEL_TYPE /* tunnel type and outer IP type match. */,
	MAX_GFT_PROFILE_TYPE
};
1250 * Major and Minor hsi Versions
1252 struct hsi_fp_ver_struct {
1253 u8 minor_ver_arr[2] /* Minor Version of hsi loading pf */;
1254 u8 major_ver_arr[2] /* Major Version of driver loading pf */;
1262 INTEG_PHASE_BB_A0_LATEST = 3 /* BB A0 latest integration phase */,
1263 INTEG_PHASE_BB_B0_NO_MCP = 10 /* BB B0 without MCP */,
1264 INTEG_PHASE_BB_B0_WITH_MCP = 11 /* BB B0 with MCP */,
enum iwarp_ll2_tx_queues {
	/* LL2 queue for OOO packets sent in-order by the driver */
	IWARP_LL2_IN_ORDER_TX_QUEUE = 1,
	/* LL2 queue for unaligned packets sent aligned by the driver */
	IWARP_LL2_ALIGNED_TX_QUEUE,
	/* LL2 queue for unaligned packets sent aligned and right-trimmed by the
	 * driver
	 */
	IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE,
	IWARP_LL2_ERROR /* Error indication */,
	MAX_IWARP_LL2_TX_QUEUES
};
1287 * Malicious VF error ID
1289 enum malicious_vf_error_id {
1290 MALICIOUS_VF_NO_ERROR /* Zero placeholder value */,
1291 /* Writing to VF/PF channel when it is not ready */
1292 VF_PF_CHANNEL_NOT_READY,
1293 VF_ZONE_MSG_NOT_VALID /* VF channel message is not valid */,
1294 VF_ZONE_FUNC_NOT_ENABLED /* Parent PF of VF channel is not active */,
1295 /* TX packet is shorter then reported on BDs or from minimal size */
1296 ETH_PACKET_TOO_SMALL,
1297 /* Tx packet with marked as insert VLAN when its illegal */
1298 ETH_ILLEGAL_VLAN_MODE,
1299 ETH_MTU_VIOLATION /* TX packet is greater then MTU */,
1300 /* TX packet has illegal inband tags marked */
1301 ETH_ILLEGAL_INBAND_TAGS,
1302 /* Vlan cant be added to inband tag */
1303 ETH_VLAN_INSERT_AND_INBAND_VLAN,
1304 /* indicated number of BDs for the packet is illegal */
1306 ETH_FIRST_BD_WO_SOP /* 1st BD must have start_bd flag set */,
1307 /* There are not enough BDs for transmission of even one packet */
1308 ETH_INSUFFICIENT_BDS,
1309 ETH_ILLEGAL_LSO_HDR_NBDS /* Header NBDs value is illegal */,
1310 ETH_ILLEGAL_LSO_MSS /* LSO MSS value is more than allowed */,
1311 /* empty BD (which not contains control flags) is illegal */
1313 ETH_ILLEGAL_LSO_HDR_LEN /* LSO header size is above the limit */,
1314 /* In LSO its expected that on the local BD ring there will be at least MSS
1317 ETH_INSUFFICIENT_PAYLOAD,
1318 ETH_EDPM_OUT_OF_SYNC /* Valid BDs on local ring after EDPM L2 sync */,
1319 /* Tunneled packet with IPv6+Ext without a proper number of BDs */
1320 ETH_TUNN_IPV6_EXT_NBD_ERR,
1321 ETH_CONTROL_PACKET_VIOLATION /* VF sent control frame such as PFC */,
1322 ETH_ANTI_SPOOFING_ERR /* Anti-Spoofing verification failure */,
1323 MAX_MALICIOUS_VF_ERROR_ID
1329 * Mstorm non-triggering VF zone
1331 struct mstorm_non_trigger_vf_zone {
1332 /* VF statistic bucket */
1333 struct eth_mstorm_per_queue_stat eth_queue_stat;
1334 /* VF RX queues producers */
1335 struct eth_rx_prod_data
1336 eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD];
1343 struct mstorm_vf_zone {
1344 /* non-interrupt-triggering zone */
1345 struct mstorm_non_trigger_vf_zone non_trigger;
1350 * vlan header including TPID and TCI fields
1352 struct vlan_header {
1353 __le16 tpid /* Tag Protocol Identifier */;
1354 __le16 tci /* Tag Control Information */;
1358 * outer tag configurations
1360 struct outer_tag_config_struct {
1361 /* Enables updating S-tag priority from inner tag or DCB. Should be 1 for Bette
1362 * Davis, UFP with Host Control mode, and UFP with DCB over base interface.
1365 u8 enable_stag_pri_change;
1366 /* If inner_to_outer_pri_map is initialize then set pri_map_valid */
1369 /* In case mf_mode is MF_OVLAN, this field specifies the outer tag protocol
1370 * identifier and outer tag control information
1372 struct vlan_header outer_tag;
1373 /* Map from inner to outer priority. Set pri_map_valid when init map */
1374 u8 inner_to_outer_pri_map[8];
1379 * personality per PF
enum personality_type {
	BAD_PERSONALITY_TYP,
	PERSONALITY_ISCSI /* iSCSI and LL2 */,
	PERSONALITY_FCOE /* Fcoe and LL2 */,
	PERSONALITY_RDMA_AND_ETH /* Roce or Iwarp, Eth and LL2 */,
	PERSONALITY_RDMA /* Roce and LL2 */,
	PERSONALITY_CORE /* CORE(LL2) */,
	PERSONALITY_ETH /* Ethernet */,
	PERSONALITY_TOE /* Toe and LL2 */,
	MAX_PERSONALITY_TYPE
};
1395 * tunnel configuration
1397 struct pf_start_tunnel_config {
1398 /* Set VXLAN tunnel UDP destination port to vxlan_udp_port. If not set -
1399 * FW will use a default port
1401 u8 set_vxlan_udp_port_flg;
1402 /* Set GENEVE tunnel UDP destination port to geneve_udp_port. If not set -
1403 * FW will use a default port
1405 u8 set_geneve_udp_port_flg;
1406 /* Set no-innet-L2 VXLAN tunnel UDP destination port to
1407 * no_inner_l2_vxlan_udp_port. If not set - FW will use a default port
1409 u8 set_no_inner_l2_vxlan_udp_port_flg;
1410 u8 tunnel_clss_vxlan /* Rx classification scheme for VXLAN tunnel. */;
1411 /* Rx classification scheme for l2 GENEVE tunnel. */
1412 u8 tunnel_clss_l2geneve;
1413 /* Rx classification scheme for ip GENEVE tunnel. */
1414 u8 tunnel_clss_ipgeneve;
1415 u8 tunnel_clss_l2gre /* Rx classification scheme for l2 GRE tunnel. */;
1416 u8 tunnel_clss_ipgre /* Rx classification scheme for ip GRE tunnel. */;
1417 /* VXLAN tunnel UDP destination port. Valid if set_vxlan_udp_port_flg=1 */
1418 __le16 vxlan_udp_port;
1419 /* GENEVE tunnel UDP destination port. Valid if set_geneve_udp_port_flg=1 */
1420 __le16 geneve_udp_port;
1421 /* no-innet-L2 VXLAN tunnel UDP destination port. Valid if
1422 * set_no_inner_l2_vxlan_udp_port_flg=1
1424 __le16 no_inner_l2_vxlan_udp_port;
1429 * Ramrod data for PF start ramrod
1431 struct pf_start_ramrod_data {
1432 struct regpair event_ring_pbl_addr /* Address of event ring PBL */;
1433 /* PBL address of consolidation queue */
1434 struct regpair consolid_q_pbl_addr;
1435 /* tunnel configuration. */
1436 struct pf_start_tunnel_config tunnel_config;
1437 __le16 event_ring_sb_id /* Status block ID */;
1438 /* All VfIds owned by Pf will be from baseVfId till baseVfId+numVfs */
1440 u8 num_vfs /* Amount of vfs owned by PF */;
1441 u8 event_ring_num_pages /* Number of PBL pages in event ring */;
1442 u8 event_ring_sb_index /* Status block index */;
1443 u8 path_id /* HW path ID (engine ID) */;
1444 u8 warning_as_error /* In FW asserts, treat warning as error */;
1445 /* If not set - throw a warning for each ramrod (for debug) */
1446 u8 dont_log_ramrods;
1447 u8 personality /* define what type of personality is new PF */;
1448 /* Log type mask. Each bit set enables a corresponding event type logging.
1449 * Event types are defined as ASSERT_LOG_TYPE_xxx
1451 __le16 log_type_mask;
1452 u8 mf_mode /* Multi function mode */;
1453 u8 integ_phase /* Integration phase */;
1454 /* If set, inter-pf tx switching is allowed in Switch Independent func mode */
1455 u8 allow_npar_tx_switching;
1457 /* FP HSI version to be used by FW */
1458 struct hsi_fp_ver_struct hsi_fp_ver;
1459 /* Outer tag configurations */
1460 struct outer_tag_config_struct outer_tag_config;
1466 * Per protocol DCB data
1468 struct protocol_dcb_data {
1469 u8 dcb_enable_flag /* Enable DCB */;
1470 u8 dscp_enable_flag /* Enable updating DSCP value */;
1471 u8 dcb_priority /* DCB priority */;
1472 u8 dcb_tc /* DCB TC */;
1473 u8 dscp_val /* DSCP value to write if dscp_enable_flag is set */;
1474 /* When DCB is enabled - if this flag is set, dont add VLAN 0 tag to untagged
1477 u8 dcb_dont_add_vlan0;
1481 * Update tunnel configuration
1483 struct pf_update_tunnel_config {
1484 /* Update RX per PF tunnel classification scheme. */
1485 u8 update_rx_pf_clss;
1486 /* Update per PORT default tunnel RX classification scheme for traffic with
1487 * unknown unicast outer MAC in NPAR mode.
1489 u8 update_rx_def_ucast_clss;
1490 /* Update per PORT default tunnel RX classification scheme for traffic with non
1491 * unicast outer MAC in NPAR mode.
1493 u8 update_rx_def_non_ucast_clss;
1494 /* Update VXLAN tunnel UDP destination port. */
1495 u8 set_vxlan_udp_port_flg;
1496 /* Update GENEVE tunnel UDP destination port. */
1497 u8 set_geneve_udp_port_flg;
1498 /* Update no-innet-L2 VXLAN tunnel UDP destination port. */
1499 u8 set_no_inner_l2_vxlan_udp_port_flg;
1500 u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
1501 /* Classification scheme for l2 GENEVE tunnel. */
1502 u8 tunnel_clss_l2geneve;
1503 /* Classification scheme for ip GENEVE tunnel. */
1504 u8 tunnel_clss_ipgeneve;
1505 u8 tunnel_clss_l2gre /* Classification scheme for l2 GRE tunnel. */;
1506 u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. */;
1508 __le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
1509 __le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */;
1510 /* no-innet-L2 VXLAN tunnel UDP destination port. */
1511 __le16 no_inner_l2_vxlan_udp_port;
1512 __le16 reserved1[3];
1516 * Data for port update ramrod
1518 struct pf_update_ramrod_data {
1519 /* Update Eth DCB data indication (use enum dcb_dscp_update_mode) */
1520 u8 update_eth_dcb_data_mode;
1521 /* Update FCOE DCB data indication (use enum dcb_dscp_update_mode) */
1522 u8 update_fcoe_dcb_data_mode;
1523 /* Update iSCSI DCB data indication (use enum dcb_dscp_update_mode) */
1524 u8 update_iscsi_dcb_data_mode;
1525 u8 update_roce_dcb_data_mode /* Update ROCE DCB data indication */;
1526 /* Update RROCE (RoceV2) DCB data indication */
1527 u8 update_rroce_dcb_data_mode;
1528 u8 update_iwarp_dcb_data_mode /* Update IWARP DCB data indication */;
1529 u8 update_mf_vlan_flag /* Update MF outer vlan Id */;
1530 /* Update Enable STAG Priority Change indication */
1531 u8 update_enable_stag_pri_change;
1532 struct protocol_dcb_data eth_dcb_data /* core eth related fields */;
1533 struct protocol_dcb_data fcoe_dcb_data /* core fcoe related fields */;
1534 /* core iscsi related fields */
1535 struct protocol_dcb_data iscsi_dcb_data;
1536 struct protocol_dcb_data roce_dcb_data /* core roce related fields */;
1537 /* core roce related fields */
1538 struct protocol_dcb_data rroce_dcb_data;
1539 /* core iwarp related fields */
1540 struct protocol_dcb_data iwarp_dcb_data;
1541 __le16 mf_vlan /* new outer vlan id value */;
1542 /* enables updating S-tag priority from inner tag or DCB. Should be 1 for Bette
1543 * Davis, UFP with Host Control mode, and UFP with DCB over base interface.
1546 u8 enable_stag_pri_change;
1548 /* tunnel configuration. */
1549 struct pf_update_tunnel_config tunnel_config;
1558 ENGX2_PORTX1 /* 2 engines x 1 port */,
1559 ENGX2_PORTX2 /* 2 engines x 2 ports */,
1560 ENGX1_PORTX1 /* 1 engine x 1 port */,
1561 ENGX1_PORTX2 /* 1 engine x 2 ports */,
1562 ENGX1_PORTX4 /* 1 engine x 4 ports */,
1569 * use to index in hsi_fp_[major|minor]_ver_arr per protocol
1571 enum protocol_version_array_key {
1574 MAX_PROTOCOL_VERSION_ARRAY_KEY
1582 struct rdma_sent_stats {
1583 struct regpair sent_bytes /* number of total RDMA bytes sent */;
1584 struct regpair sent_pkts /* number of total RDMA packets sent */;
1588 * Pstorm non-triggering VF zone
1590 struct pstorm_non_trigger_vf_zone {
1591 /* VF statistic bucket */
1592 struct eth_pstorm_per_queue_stat eth_queue_stat;
1593 struct rdma_sent_stats rdma_stats /* RoCE sent statistics */;
1600 struct pstorm_vf_zone {
1601 /* non-interrupt-triggering zone */
1602 struct pstorm_non_trigger_vf_zone non_trigger;
1603 struct regpair reserved[7] /* vf_zone size mus be power of 2 */;
1608 * Ramrod Header of SPQE
1610 struct ramrod_header {
1611 __le32 cid /* Slowpath Connection CID */;
1612 u8 cmd_id /* Ramrod Cmd (Per Protocol Type) */;
1613 u8 protocol_id /* Ramrod Protocol ID */;
1614 __le16 echo /* Ramrod echo */;
1621 struct rdma_rcv_stats {
1622 struct regpair rcv_bytes /* number of total RDMA bytes received */;
1623 struct regpair rcv_pkts /* number of total RDMA packets received */;
1629 * Data for update QCN/DCQCN RL ramrod
1631 struct rl_update_ramrod_data {
1632 u8 qcn_update_param_flg /* Update QCN global params: timeout. */;
1633 /* Update DCQCN global params: timeout, g, k. */
1634 u8 dcqcn_update_param_flg;
1635 u8 rl_init_flg /* Init RL parameters, when RL disabled. */;
1636 u8 rl_start_flg /* Start RL in IDLE state. Set rate to maximum. */;
1637 u8 rl_stop_flg /* Stop RL. */;
1638 u8 rl_id_first /* ID of first or single RL, that will be updated. */;
1639 /* ID of last RL, that will be updated. If clear, single RL will updated. */
1641 u8 rl_dc_qcn_flg /* If set, RL will used for DCQCN. */;
1642 /* If set, alpha will be reset to 1 when the state machine is idle. */
1643 u8 dcqcn_reset_alpha_on_idle;
1644 /* Byte counter threshold to change rate increase stage. */
1646 /* Timer threshold to change rate increase stage. */
1647 u8 rl_timer_stage_th;
1649 __le32 rl_bc_rate /* Byte Counter Limit. */;
1650 __le16 rl_max_rate /* Maximum rate in 1.6 Mbps resolution. */;
1651 __le16 rl_r_ai /* Active increase rate. */;
1652 __le16 rl_r_hai /* Hyper active increase rate. */;
1653 __le16 dcqcn_g /* DCQCN Alpha update gain in 1/64K resolution . */;
1654 __le32 dcqcn_k_us /* DCQCN Alpha update interval. */;
1655 __le32 dcqcn_timeuot_us /* DCQCN timeout. */;
1656 __le32 qcn_timeuot_us /* QCN timeout. */;
1662 * Slowpath Element (SPQE)
1664 struct slow_path_element {
1665 struct ramrod_header hdr /* Ramrod Header */;
1666 struct regpair data_ptr /* Pointer to the Ramrod Data on the Host */;
1671 * Tstorm non-triggering VF zone
1673 struct tstorm_non_trigger_vf_zone {
1674 struct rdma_rcv_stats rdma_stats /* RoCE received statistics */;
1678 struct tstorm_per_port_stat {
1679 /* packet is dropped because it was truncated in NIG */
1680 struct regpair trunc_error_discard;
1681 /* packet is dropped because of Ethernet FCS error */
1682 struct regpair mac_error_discard;
1683 /* packet is dropped because classification was unsuccessful */
1684 struct regpair mftag_filter_discard;
1685 /* packet was passed to Ethernet and dropped because of no mac filter match */
1686 struct regpair eth_mac_filter_discard;
1687 /* packet passed to Light L2 and dropped because Light L2 is not configured for
1690 struct regpair ll2_mac_filter_discard;
1691 /* packet passed to Light L2 and dropped because Light L2 is not configured for
1694 struct regpair ll2_conn_disabled_discard;
1695 /* packet is an ISCSI irregular packet */
1696 struct regpair iscsi_irregular_pkt;
1697 /* packet is an FCOE irregular packet */
1698 struct regpair fcoe_irregular_pkt;
1699 /* packet is an ROCE irregular packet */
1700 struct regpair roce_irregular_pkt;
1701 /* packet is an IWARP irregular packet */
1702 struct regpair iwarp_irregular_pkt;
1703 /* packet is an ETH irregular packet */
1704 struct regpair eth_irregular_pkt;
1705 /* packet is an TOE irregular packet */
1706 struct regpair toe_irregular_pkt;
1707 /* packet is an PREROCE irregular packet */
1708 struct regpair preroce_irregular_pkt;
1709 struct regpair eth_gre_tunn_filter_discard /* GRE dropped packets */;
1710 /* VXLAN dropped packets */
1711 struct regpair eth_vxlan_tunn_filter_discard;
1712 /* GENEVE dropped packets */
1713 struct regpair eth_geneve_tunn_filter_discard;
1714 struct regpair eth_gft_drop_pkt /* GFT dropped packets */;
1721 struct tstorm_vf_zone {
1722 /* non-interrupt-triggering zone */
1723 struct tstorm_non_trigger_vf_zone non_trigger;
1728 * Tunnel classification scheme
1731 /* Use MAC and VLAN from first L2 header for vport classification. */
1732 TUNNEL_CLSS_MAC_VLAN = 0,
1733 /* Use MAC from first L2 header and VNI from tunnel header for vport
1736 TUNNEL_CLSS_MAC_VNI,
1737 /* Use MAC and VLAN from last L2 header for vport classification */
1738 TUNNEL_CLSS_INNER_MAC_VLAN,
1739 /* Use MAC from last L2 header and VNI from tunnel header for vport
1742 TUNNEL_CLSS_INNER_MAC_VNI,
1743 /* Use MAC and VLAN from last L2 header for vport classification. If no exact
1744 * match, use MAC and VLAN from first L2 header for classification.
1746 TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE,
1753 * Ustorm non-triggering VF zone
1755 struct ustorm_non_trigger_vf_zone {
1756 /* VF statistic bucket */
1757 struct eth_ustorm_per_queue_stat eth_queue_stat;
1758 struct regpair vf_pf_msg_addr /* VF-PF message address */;
1763 * Ustorm triggering VF zone
1765 struct ustorm_trigger_vf_zone {
1766 u8 vf_pf_msg_valid /* VF-PF message valid flag */;
1774 struct ustorm_vf_zone {
1775 /* non-interrupt-triggering zone */
1776 struct ustorm_non_trigger_vf_zone non_trigger;
1777 struct ustorm_trigger_vf_zone trigger /* interrupt triggering zone */;
1782 * VF-PF channel data
1784 struct vf_pf_channel_data {
1785 /* 0: VF-PF Channel NOT ready. Waiting for ack from PF driver. 1: VF-PF Channel
1786 * is ready for a new transaction.
1789 /* 0: VF-PF Channel is invalid because of malicious VF. 1: VF-PF Channel is
1799 * Ramrod data for VF start ramrod
1801 struct vf_start_ramrod_data {
1802 u8 vf_id /* VF ID */;
1803 /* If set, initial cleanup ack will be sent to parent PF SP event queue */
1805 __le16 opaque_fid /* VF opaque FID */;
1806 u8 personality /* define what type of personality is new VF */;
1808 /* FP HSI version to be used by FW */
1809 struct hsi_fp_ver_struct hsi_fp_ver;
/* Ramrod data for VF stop ramrod */
1816 struct vf_stop_ramrod_data {
1817 u8 vf_id /* VF ID */;
1825 * VF zone size mode.
enum vf_zone_size_mode {
	/* Default VF zone size. Up to 192 VF supported. */
	VF_ZONE_SIZE_MODE_DEFAULT,
	/* Doubled VF zone size. Up to 96 VF supported. */
	VF_ZONE_SIZE_MODE_DOUBLE,
	/* Quad VF zone size. Up to 48 VF supported. */
	VF_ZONE_SIZE_MODE_QUAD,
	MAX_VF_ZONE_SIZE_MODE
};
1842 * Attentions status block
1844 struct atten_status_block {
1848 __le16 sb_index /* status block running index */;
1858 /* DMA Source. 0 - PCIe, 1 - GRC (use enum dmae_cmd_src_enum) */
1859 #define DMAE_CMD_SRC_MASK 0x1
1860 #define DMAE_CMD_SRC_SHIFT 0
1861 /* DMA destination. 0 - None, 1 - PCIe, 2 - GRC, 3 - None
1862 * (use enum dmae_cmd_dst_enum)
1864 #define DMAE_CMD_DST_MASK 0x3
1865 #define DMAE_CMD_DST_SHIFT 1
1866 /* Completion destination. 0 - PCie, 1 - GRC (use enum dmae_cmd_c_dst_enum) */
1867 #define DMAE_CMD_C_DST_MASK 0x1
1868 #define DMAE_CMD_C_DST_SHIFT 3
1869 /* Reset the CRC result (do not use the previous result as the seed) */
1870 #define DMAE_CMD_CRC_RESET_MASK 0x1
1871 #define DMAE_CMD_CRC_RESET_SHIFT 4
1872 /* Reset the source address in the next go to the same source address of the
1875 #define DMAE_CMD_SRC_ADDR_RESET_MASK 0x1
1876 #define DMAE_CMD_SRC_ADDR_RESET_SHIFT 5
1877 /* Reset the destination address in the next go to the same destination address
1878 * of the previous go
1880 #define DMAE_CMD_DST_ADDR_RESET_MASK 0x1
1881 #define DMAE_CMD_DST_ADDR_RESET_SHIFT 6
1882 /* 0 completion function is the same as src function, 1 - 0 completion
1883 * function is the same as dst function (use enum dmae_cmd_comp_func_enum)
1885 #define DMAE_CMD_COMP_FUNC_MASK 0x1
1886 #define DMAE_CMD_COMP_FUNC_SHIFT 7
1887 /* 0 - Do not write a completion word, 1 - Write a completion word
1888 * (use enum dmae_cmd_comp_word_en_enum)
1890 #define DMAE_CMD_COMP_WORD_EN_MASK 0x1
1891 #define DMAE_CMD_COMP_WORD_EN_SHIFT 8
1892 /* 0 - Do not write a CRC word, 1 - Write a CRC word
1893 * (use enum dmae_cmd_comp_crc_en_enum)
1895 #define DMAE_CMD_COMP_CRC_EN_MASK 0x1
1896 #define DMAE_CMD_COMP_CRC_EN_SHIFT 9
1897 /* The CRC word should be taken from the DMAE address space from address 9+X,
1898 * where X is the value in these bits.
1900 #define DMAE_CMD_COMP_CRC_OFFSET_MASK 0x7
1901 #define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10
1902 #define DMAE_CMD_RESERVED1_MASK 0x1
1903 #define DMAE_CMD_RESERVED1_SHIFT 13
1904 #define DMAE_CMD_ENDIANITY_MODE_MASK 0x3
1905 #define DMAE_CMD_ENDIANITY_MODE_SHIFT 14
1906 /* The field specifies how the completion word is affected by PCIe read error. 0
1907 * Send a regular completion, 1 - Send a completion with an error indication,
1908 * 2 do not send a completion (use enum dmae_cmd_error_handling_enum)
1910 #define DMAE_CMD_ERR_HANDLING_MASK 0x3
1911 #define DMAE_CMD_ERR_HANDLING_SHIFT 16
1912 /* The port ID to be placed on the RF FID field of the GRC bus. this field is
1913 * used both when GRC is the destination and when it is the source of the DMAE
1916 #define DMAE_CMD_PORT_ID_MASK 0x3
1917 #define DMAE_CMD_PORT_ID_SHIFT 18
1918 /* Source PCI function number [3:0] */
1919 #define DMAE_CMD_SRC_PF_ID_MASK 0xF
1920 #define DMAE_CMD_SRC_PF_ID_SHIFT 20
1921 /* Destination PCI function number [3:0] */
1922 #define DMAE_CMD_DST_PF_ID_MASK 0xF
1923 #define DMAE_CMD_DST_PF_ID_SHIFT 24
1924 #define DMAE_CMD_SRC_VF_ID_VALID_MASK 0x1 /* Source VFID valid */
1925 #define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28
1926 #define DMAE_CMD_DST_VF_ID_VALID_MASK 0x1 /* Destination VFID valid */
1927 #define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29
1928 #define DMAE_CMD_RESERVED2_MASK 0x3
1929 #define DMAE_CMD_RESERVED2_SHIFT 30
1930 /* PCIe source address low in bytes or GRC source address in DW */
1932 /* PCIe source address high in bytes or reserved (if source is GRC) */
1934 /* PCIe destination address low in bytes or GRC destination address in DW */
1936 /* PCIe destination address high in bytes or reserved (if destination is GRC) */
1938 __le16 length_dw /* Length in DW */;
1940 #define DMAE_CMD_SRC_VF_ID_MASK 0xFF /* Source VF id */
1941 #define DMAE_CMD_SRC_VF_ID_SHIFT 0
1942 #define DMAE_CMD_DST_VF_ID_MASK 0xFF /* Destination VF id */
1943 #define DMAE_CMD_DST_VF_ID_SHIFT 8
1944 /* PCIe completion address low in bytes or GRC completion address in DW */
1945 __le32 comp_addr_lo;
1946 /* PCIe completion address high in bytes or reserved (if completion address is
1949 __le32 comp_addr_hi;
1950 __le32 comp_val /* Value to write to completion address */;
1951 __le32 crc32 /* crc16 result */;
1952 __le32 crc_32_c /* crc32_c result */;
1953 __le16 crc16 /* crc16 result */;
1954 __le16 crc16_c /* crc16_c result */;
1955 __le16 crc10 /* crc_t10 result */;
1957 __le16 xsum16 /* checksum16 result */;
1958 __le16 xsum8 /* checksum8 result */;
/*
 * DMAE completion-CRC enable (value of the DMAE_CMD_COMP_CRC_EN field)
 */
enum dmae_cmd_comp_crc_en_enum {
	dmae_cmd_comp_crc_disabled /* Do not write a CRC word */,
	dmae_cmd_comp_crc_enabled /* Write a CRC word */,
	MAX_DMAE_CMD_COMP_CRC_EN_ENUM
};
/*
 * DMAE completion-function selector (value of the DMAE_CMD_COMP_FUNC field)
 */
enum dmae_cmd_comp_func_enum {
	/* completion word and/or CRC will be sent to SRC-PCI function/SRC VFID */
	dmae_cmd_comp_func_to_src,
	/* completion word and/or CRC will be sent to DST-PCI function/DST VFID */
	dmae_cmd_comp_func_to_dst,
	MAX_DMAE_CMD_COMP_FUNC_ENUM
};
/*
 * DMAE completion-word enable (value of the DMAE_CMD_COMP_WORD_EN field)
 */
enum dmae_cmd_comp_word_en_enum {
	dmae_cmd_comp_word_disabled /* Do not write a completion word */,
	dmae_cmd_comp_word_enabled /* Write the completion word */,
	MAX_DMAE_CMD_COMP_WORD_EN_ENUM
};
/*
 * DMAE completion destination (value of the DMAE_CMD_C_DST field)
 */
enum dmae_cmd_c_dst_enum {
	dmae_cmd_c_dst_pcie,
	/* NOTE(review): member restored — dropped in the mangled paste; verify */
	dmae_cmd_c_dst_grc,
	MAX_DMAE_CMD_C_DST_ENUM
};
/*
 * DMAE destination (value of the DMAE_CMD_DST field)
 */
enum dmae_cmd_dst_enum {
	dmae_cmd_dst_none_0,
	/* NOTE(review): pcie/grc members restored — dropped in the mangled
	 * paste; verify encoding 1 = PCIe, 2 = GRC against the DST field comment
	 */
	dmae_cmd_dst_pcie /* The destination is PCIe */,
	dmae_cmd_dst_grc /* The destination is the GRC */,
	dmae_cmd_dst_none_3,
	MAX_DMAE_CMD_DST_ENUM
};
/*
 * DMAE PCIe-read-error handling (value of the DMAE_CMD_ERR_HANDLING field)
 */
enum dmae_cmd_error_handling_enum {
	/* Send a regular completion (with no error indication) */
	dmae_cmd_error_handling_send_regular_comp,
	/* Send a completion with an error indication (i.e. set bit 31 of the
	 * completion word)
	 */
	dmae_cmd_error_handling_send_comp_with_err,
	dmae_cmd_error_handling_dont_send_comp /* Do not send a completion */,
	MAX_DMAE_CMD_ERROR_HANDLING_ENUM
};
/*
 * DMAE source (value of the DMAE_CMD_SRC field)
 */
enum dmae_cmd_src_enum {
	dmae_cmd_src_pcie /* The source is the PCIe */,
	dmae_cmd_src_grc /* The source is the GRC */,
	MAX_DMAE_CMD_SRC_ENUM
};
2022 struct fw_asserts_ram_section {
2023 /* The offset of the section in the RAM in RAM lines (64-bit units) */
2024 __le16 section_ram_line_offset;
2025 /* The size of the section in RAM lines (64-bit units) */
2026 __le16 section_ram_line_size;
2027 /* The offset of the asserts list within the section in dwords */
2028 u8 list_dword_offset;
2029 /* The size of an assert list element in dwords */
2030 u8 list_element_dword_size;
2031 u8 list_num_elements /* The number of elements in the asserts list */;
2032 /* The offset of the next list index field within the section in dwords */
2033 u8 list_next_index_dword_offset;
2038 u8 major /* Firmware major version number */;
2039 u8 minor /* Firmware minor version number */;
2040 u8 rev /* Firmware revision version number */;
2041 u8 eng /* Firmware engineering version number (for bootleg versions) */;
2044 struct fw_ver_info {
2045 __le16 tools_ver /* Tools version number */;
2046 u8 image_id /* FW image ID (e.g. main, l2b, kuku) */;
2048 struct fw_ver_num num /* FW version number */;
2049 __le32 timestamp /* FW Timestamp in unix time (sec. since 1970) */;
2054 struct fw_ver_info ver /* FW version information */;
2055 /* Info regarding the FW asserts section in the Storm RAM */
2056 struct fw_asserts_ram_section fw_asserts_section;
2060 struct fw_info_location {
2061 __le32 grc_addr /* GRC address where the fw_info struct is located. */;
2062 /* Size of the fw_info structure (thats located at the grc_addr). */
2070 * IGU cleanup command
2072 struct igu_cleanup {
2073 __le32 sb_id_and_flags;
2074 #define IGU_CLEANUP_RESERVED0_MASK 0x7FFFFFF
2075 #define IGU_CLEANUP_RESERVED0_SHIFT 0
2076 /* cleanup clear - 0, set - 1 */
2077 #define IGU_CLEANUP_CLEANUP_SET_MASK 0x1
2078 #define IGU_CLEANUP_CLEANUP_SET_SHIFT 27
2079 #define IGU_CLEANUP_CLEANUP_TYPE_MASK 0x7
2080 #define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28
2081 /* must always be set (use enum command_type_bit) */
2082 #define IGU_CLEANUP_COMMAND_TYPE_MASK 0x1U
2083 #define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31
2089 * IGU firmware driver command
2092 struct igu_prod_cons_update prod_cons_update;
2093 struct igu_cleanup cleanup;
2098 * IGU firmware driver command
2100 struct igu_command_reg_ctrl {
2102 __le16 igu_command_reg_ctrl_fields;
2103 #define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK 0xFFF
2104 #define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0
2105 #define IGU_COMMAND_REG_CTRL_RESERVED_MASK 0x7
2106 #define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT 12
2107 /* command typ: 0 - read, 1 - write */
2108 #define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK 0x1
2109 #define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15
2114 * IGU mapping line structure
2116 struct igu_mapping_line {
2117 __le32 igu_mapping_line_fields;
2118 #define IGU_MAPPING_LINE_VALID_MASK 0x1
2119 #define IGU_MAPPING_LINE_VALID_SHIFT 0
2120 #define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK 0xFF
2121 #define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT 1
2122 /* In BB: VF-0-120, PF-0-7; In K2: VF-0-191, PF-0-15 */
2123 #define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK 0xFF
2124 #define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9
2125 #define IGU_MAPPING_LINE_PF_VALID_MASK 0x1 /* PF-1, VF-0 */
2126 #define IGU_MAPPING_LINE_PF_VALID_SHIFT 17
2127 #define IGU_MAPPING_LINE_IPS_GROUP_MASK 0x3F
2128 #define IGU_MAPPING_LINE_IPS_GROUP_SHIFT 18
2129 #define IGU_MAPPING_LINE_RESERVED_MASK 0xFF
2130 #define IGU_MAPPING_LINE_RESERVED_SHIFT 24
2135 * IGU MSIX line structure
2137 struct igu_msix_vector {
2138 struct regpair address;
2140 __le32 msix_vector_fields;
2141 #define IGU_MSIX_VECTOR_MASK_BIT_MASK 0x1
2142 #define IGU_MSIX_VECTOR_MASK_BIT_SHIFT 0
2143 #define IGU_MSIX_VECTOR_RESERVED0_MASK 0x7FFF
2144 #define IGU_MSIX_VECTOR_RESERVED0_SHIFT 1
2145 #define IGU_MSIX_VECTOR_STEERING_TAG_MASK 0xFF
2146 #define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16
2147 #define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF
2148 #define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24
2153 * per encapsulation type enabling flags
2155 struct prs_reg_encapsulation_type_en {
2157 /* Enable bit for Ethernet-over-GRE (L2 GRE) encapsulation. */
2158 #define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK 0x1
2159 #define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT 0
2160 /* Enable bit for IP-over-GRE (IP GRE) encapsulation. */
2161 #define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK 0x1
2162 #define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT 1
2163 /* Enable bit for VXLAN encapsulation. */
2164 #define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK 0x1
2165 #define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT 2
2166 /* Enable bit for T-Tag encapsulation. */
2167 #define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK 0x1
2168 #define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT 3
2169 /* Enable bit for Ethernet-over-GENEVE (L2 GENEVE) encapsulation. */
2170 #define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK 0x1
2171 #define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4
2172 /* Enable bit for IP-over-GENEVE (IP GENEVE) encapsulation. */
2173 #define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK 0x1
2174 #define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT 5
2175 #define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK 0x3
2176 #define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT 6
/*
 * PXP PCIe TLP Processing Hint (TPH) steering-tag hint
 */
enum pxp_tph_st_hint {
	TPH_ST_HINT_BIDIR /* Read/Write access by Host and Device */,
	TPH_ST_HINT_REQUESTER /* Read/Write access by Device */,
	/* Device Write and Host Read, or Host Write and Device Read */
	TPH_ST_HINT_TARGET,
	/* Device Write and Host Read, or Host Write and Device Read - with
	 * temporal reuse
	 */
	TPH_ST_HINT_TARGET_PRIO,
	MAX_PXP_TPH_ST_HINT
};
2194 * QM hardware structure of enable bypass credit mask
2196 struct qm_rf_bypass_mask {
2198 #define QM_RF_BYPASS_MASK_LINEVOQ_MASK 0x1
2199 #define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT 0
2200 #define QM_RF_BYPASS_MASK_RESERVED0_MASK 0x1
2201 #define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1
2202 #define QM_RF_BYPASS_MASK_PFWFQ_MASK 0x1
2203 #define QM_RF_BYPASS_MASK_PFWFQ_SHIFT 2
2204 #define QM_RF_BYPASS_MASK_VPWFQ_MASK 0x1
2205 #define QM_RF_BYPASS_MASK_VPWFQ_SHIFT 3
2206 #define QM_RF_BYPASS_MASK_PFRL_MASK 0x1
2207 #define QM_RF_BYPASS_MASK_PFRL_SHIFT 4
2208 #define QM_RF_BYPASS_MASK_VPQCNRL_MASK 0x1
2209 #define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT 5
2210 #define QM_RF_BYPASS_MASK_FWPAUSE_MASK 0x1
2211 #define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT 6
2212 #define QM_RF_BYPASS_MASK_RESERVED1_MASK 0x1
2213 #define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7
2218 * QM hardware structure of opportunistic credit mask
2220 struct qm_rf_opportunistic_mask {
2222 #define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK 0x1
2223 #define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT 0
2224 #define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK 0x1
2225 #define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT 1
2226 #define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK 0x1
2227 #define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT 2
2228 #define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK 0x1
2229 #define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT 3
2230 #define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK 0x1
2231 #define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT 4
2232 #define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK 0x1
2233 #define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT 5
2234 #define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK 0x1
2235 #define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT 6
2236 #define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK 0x1
2237 #define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT 7
2238 #define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK 0x1
2239 #define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8
2240 #define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK 0x7F
2241 #define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT 9
2246 * QM hardware structure of QM map memory
2248 struct qm_rf_pq_map {
2250 #define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1 /* PQ active */
2251 #define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0
2252 #define QM_RF_PQ_MAP_RL_ID_MASK 0xFF /* RL ID */
2253 #define QM_RF_PQ_MAP_RL_ID_SHIFT 1
2254 /* the first PQ associated with the VPORT and VOQ of this PQ */
2255 #define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF
2256 #define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9
2257 #define QM_RF_PQ_MAP_VOQ_MASK 0x1F /* VOQ */
2258 #define QM_RF_PQ_MAP_VOQ_SHIFT 18
2259 #define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3 /* WRR weight */
2260 #define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
2261 #define QM_RF_PQ_MAP_RL_VALID_MASK 0x1 /* RL active */
2262 #define QM_RF_PQ_MAP_RL_VALID_SHIFT 25
2263 #define QM_RF_PQ_MAP_RESERVED_MASK 0x3F
2264 #define QM_RF_PQ_MAP_RESERVED_SHIFT 26
2269 * Completion params for aggregated interrupt completion
2271 struct sdm_agg_int_comp_params {
2273 /* the number of aggregated interrupt, 0-31 */
2274 #define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK 0x3F
2275 #define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT 0
2276 /* 1 - set a bit in aggregated vector, 0 - dont set */
2277 #define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK 0x1
2278 #define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6
2279 /* Number of bit in the aggregated vector, 0-279 (TBD) */
2280 #define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK 0x1FF
2281 #define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT 7
2286 * SDM operation gen command (generate aggregative interrupt)
2290 /* completion parameters 0-15 */
2291 #define SDM_OP_GEN_COMP_PARAM_MASK 0xFFFF
2292 #define SDM_OP_GEN_COMP_PARAM_SHIFT 0
2293 #define SDM_OP_GEN_COMP_TYPE_MASK 0xF /* completion type 16-19 */
2294 #define SDM_OP_GEN_COMP_TYPE_SHIFT 16
2295 #define SDM_OP_GEN_RESERVED_MASK 0xFFF /* reserved 20-31 */
2296 #define SDM_OP_GEN_RESERVED_SHIFT 20
2299 struct ystorm_core_conn_ag_ctx {
2300 u8 byte0 /* cdu_validation */;
2301 u8 byte1 /* state */;
2303 #define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
2304 #define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
2305 #define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
2306 #define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
2307 #define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
2308 #define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
2309 #define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
2310 #define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
2311 #define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
2312 #define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
2314 #define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
2315 #define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
2316 #define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
2317 #define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
2318 #define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
2319 #define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
2320 #define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
2321 #define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
2322 #define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
2323 #define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
2324 #define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
2325 #define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
2326 #define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
2327 #define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
2328 #define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
2329 #define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
2330 u8 byte2 /* byte2 */;
2331 u8 byte3 /* byte3 */;
2332 __le16 word0 /* word0 */;
2333 __le32 reg0 /* reg0 */;
2334 __le32 reg1 /* reg1 */;
2335 __le16 word1 /* word1 */;
2336 __le16 word2 /* word2 */;
2337 __le16 word3 /* word3 */;
2338 __le16 word4 /* word4 */;
2339 __le32 reg2 /* reg2 */;
2340 __le32 reg3 /* reg3 */;
2343 #endif /* __ECORE_HSI_COMMON__ */