2 * Copyright (c) 2016 QLogic Corporation.
6 * See LICENSE.qede_pmd for copyright and licensing details.
9 #ifndef __ECORE_HSI_COMMON__
10 #define __ECORE_HSI_COMMON__
11 /********************************/
12 /* Add include to common target */
13 /********************************/
14 #include "common_hsi.h"
18 * opcodes for the event ring
20 enum common_event_opcode {
21 COMMON_EVENT_PF_START, /* PF start ramrod completed */
23 COMMON_EVENT_VF_START, /* VF start ramrod completed */
25 COMMON_EVENT_VF_PF_CHANNEL, /* message arrived on the VF->PF channel */
27 COMMON_EVENT_PF_UPDATE, /* PF update ramrod completed */
28 COMMON_EVENT_MALICIOUS_VF, /* firmware flagged a misbehaving VF */
29 COMMON_EVENT_RL_UPDATE, /* rate-limiter (QCN/DCQCN) update completed */
31 MAX_COMMON_EVENT_OPCODE /* sentinel — not a real opcode */
36 * Common Ramrod Command IDs
38 enum common_ramrod_cmd_id {
40 COMMON_RAMROD_PF_START /* PF Function Start Ramrod */,
41 COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */,
42 COMMON_RAMROD_VF_START /* VF Function Start */,
43 COMMON_RAMROD_VF_STOP /* VF Function Stop Ramrod */,
44 COMMON_RAMROD_PF_UPDATE /* PF update Ramrod */,
45 COMMON_RAMROD_RL_UPDATE /* QCN/DCQCN RL update Ramrod */,
46 COMMON_RAMROD_EMPTY /* Empty Ramrod */,
47 MAX_COMMON_RAMROD_CMD_ID /* sentinel — number of valid command IDs */
52 * The core storm context for the Ystorm
54 struct ystorm_core_conn_st_ctx {
/* NOTE(review): members and closing brace elided from this excerpt (original lines 55-58) */
59 * The core storm context for the Pstorm
61 struct pstorm_core_conn_st_ctx {
/* NOTE(review): members and closing brace elided from this excerpt (original lines 62-65) */
66 * Core Slowpath Connection storm context of Xstorm
68 struct xstorm_core_conn_st_ctx {
69 __le32 spq_base_lo /* SPQ Ring Base Address low dword */;
70 __le32 spq_base_hi /* SPQ Ring Base Address high dword */;
71 /* Consolidation Ring Base Address */
72 struct regpair consolid_base_addr;
73 __le16 spq_cons /* SPQ Ring Consumer */;
74 __le16 consolid_cons /* Consolidation Ring Consumer */;
75 __le32 reserved0[55] /* Pad to 15 cycles */;
/* NOTE(review): closing brace elided from this excerpt */
78 struct xstorm_core_conn_ag_ctx {
/* Xstorm aggregative context: firmware-defined bit layout — field order,
 * masks and shifts are ABI-fixed; do not reorder or rename.
 * NOTE(review): this is an elided listing — the 'u8 flagsN' holder bytes
 * that each group of MASK/SHIFT defines packs into (and the closing brace)
 * are present in the original file but not shown in this excerpt.
 */
79 u8 reserved0 /* cdu_validation */;
80 u8 core_state /* state */;
83 #define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
84 #define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
86 #define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
87 #define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
89 #define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
90 #define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
92 #define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
93 #define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
95 #define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
96 #define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
98 #define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
99 #define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
101 #define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1
102 #define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
104 #define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1
105 #define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
108 #define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1
109 #define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
111 #define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1
112 #define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
114 #define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1
115 #define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
117 #define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1
118 #define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
120 #define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1
121 #define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
123 #define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1
124 #define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
126 #define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
127 #define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
129 #define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
130 #define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
/* CF0..CF23: 2-bit completion-flag fields; the matching *EN defines below
 * are their 1-bit enable switches.
 */
133 #define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
134 #define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
136 #define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
137 #define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
139 #define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
140 #define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
142 #define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
143 #define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
145 #define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
146 #define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
147 #define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
148 #define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
149 #define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
150 #define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
151 #define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
152 #define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
154 #define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
155 #define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
156 #define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
157 #define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
159 #define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
160 #define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
162 #define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3
163 #define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
166 #define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3
167 #define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
169 #define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3
170 #define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
172 #define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3
173 #define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
175 #define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3
176 #define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
179 #define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3
180 #define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
182 #define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
183 #define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
185 #define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3
186 #define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
188 #define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
189 #define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
192 #define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
193 #define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
195 #define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3
196 #define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
198 #define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
199 #define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
201 #define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
202 #define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
204 #define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
205 #define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
208 #define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
209 #define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
211 #define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
212 #define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
214 #define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
215 #define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
217 #define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
218 #define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
220 #define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
221 #define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
223 #define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
224 #define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
226 #define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
227 #define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
229 #define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
230 #define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
233 #define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
234 #define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
236 #define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1
237 #define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
239 #define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1
240 #define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
242 #define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1
243 #define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
245 #define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1
246 #define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
248 #define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1
249 #define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
251 #define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1
252 #define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
254 #define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
255 #define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
258 #define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
259 #define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
261 #define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
262 #define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
264 #define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
265 #define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
267 #define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1
268 #define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
270 #define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
271 #define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
273 #define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1
274 #define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
276 #define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1
277 #define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
279 #define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1
280 #define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
283 #define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1
284 #define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
286 #define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1
287 #define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
289 #define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
290 #define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
292 #define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
293 #define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
295 #define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
296 #define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
298 #define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
299 #define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
301 #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
302 #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
304 #define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1
305 #define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
308 #define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1
309 #define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
311 #define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1
312 #define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
314 #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
315 #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
317 #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
318 #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
320 #define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1
321 #define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
323 #define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1
324 #define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
326 #define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1
327 #define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
329 #define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1
330 #define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
333 #define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1
334 #define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
336 #define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1
337 #define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
339 #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
340 #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
342 #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
343 #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
345 #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
346 #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
348 #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
349 #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
351 #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
352 #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
354 #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
355 #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
358 #define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1
359 #define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
361 #define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1
362 #define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
364 #define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1
365 #define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
367 #define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1
368 #define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
370 #define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1
371 #define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
373 #define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1
374 #define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
376 #define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3
377 #define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
378 u8 byte2 /* byte2 */;
379 __le16 physical_q0 /* physical_q0 */;
380 __le16 consolid_prod /* physical_q1 */;
381 __le16 reserved16 /* physical_q2 */;
382 __le16 tx_bd_cons /* word3 */;
383 __le16 tx_bd_or_spq_prod /* word4 */;
384 __le16 word5 /* word5 */;
385 __le16 conn_dpi /* conn_dpi */;
386 u8 byte3 /* byte3 */;
387 u8 byte4 /* byte4 */;
388 u8 byte5 /* byte5 */;
389 u8 byte6 /* byte6 */;
390 __le32 reg0 /* reg0 */;
391 __le32 reg1 /* reg1 */;
392 __le32 reg2 /* reg2 */;
393 __le32 reg3 /* reg3 */;
394 __le32 reg4 /* reg4 */;
395 __le32 reg5 /* cf_array0 */;
396 __le32 reg6 /* cf_array1 */;
397 __le16 word7 /* word7 */;
398 __le16 word8 /* word8 */;
399 __le16 word9 /* word9 */;
400 __le16 word10 /* word10 */;
401 __le32 reg7 /* reg7 */;
402 __le32 reg8 /* reg8 */;
403 __le32 reg9 /* reg9 */;
404 u8 byte7 /* byte7 */;
405 u8 byte8 /* byte8 */;
406 u8 byte9 /* byte9 */;
407 u8 byte10 /* byte10 */;
408 u8 byte11 /* byte11 */;
409 u8 byte12 /* byte12 */;
410 u8 byte13 /* byte13 */;
411 u8 byte14 /* byte14 */;
412 u8 byte15 /* byte15 */;
413 u8 byte16 /* byte16 */;
414 __le16 word11 /* word11 */;
415 __le32 reg10 /* reg10 */;
416 __le32 reg11 /* reg11 */;
417 __le32 reg12 /* reg12 */;
418 __le32 reg13 /* reg13 */;
419 __le32 reg14 /* reg14 */;
420 __le32 reg15 /* reg15 */;
421 __le32 reg16 /* reg16 */;
422 __le32 reg17 /* reg17 */;
423 __le32 reg18 /* reg18 */;
424 __le32 reg19 /* reg19 */;
425 __le16 word12 /* word12 */;
426 __le16 word13 /* word13 */;
427 __le16 word14 /* word14 */;
428 __le16 word15 /* word15 */;
/* NOTE(review): closing brace elided from this excerpt */
431 struct tstorm_core_conn_ag_ctx {
/* Tstorm aggregative context — firmware ABI layout; do not reorder.
 * NOTE(review): elided listing — the 'u8 flagsN' holder bytes for the
 * MASK/SHIFT groups and the closing brace are not shown in this excerpt.
 */
432 u8 byte0 /* cdu_validation */;
433 u8 byte1 /* state */;
435 #define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
436 #define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
437 #define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
438 #define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
439 #define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
440 #define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
441 #define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
442 #define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
443 #define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
444 #define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
445 #define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
446 #define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
447 #define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
448 #define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
450 #define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
451 #define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
452 #define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
453 #define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
454 #define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
455 #define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
456 #define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
457 #define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
459 #define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
460 #define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
461 #define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
462 #define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
463 #define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
464 #define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
465 #define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
466 #define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
468 #define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
469 #define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
470 #define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
471 #define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
472 #define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
473 #define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
474 #define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
475 #define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
476 #define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
477 #define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
478 #define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
479 #define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
481 #define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
482 #define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
483 #define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
484 #define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
485 #define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
486 #define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
487 #define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
488 #define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
489 #define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
490 #define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
491 #define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
492 #define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
493 #define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
494 #define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
495 #define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
496 #define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
498 #define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
499 #define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
500 #define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
501 #define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
502 #define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
503 #define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
504 #define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
505 #define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
506 #define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
507 #define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
508 #define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
509 #define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
510 #define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
511 #define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
512 #define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
513 #define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
514 __le32 reg0 /* reg0 */;
515 __le32 reg1 /* reg1 */;
516 __le32 reg2 /* reg2 */;
517 __le32 reg3 /* reg3 */;
518 __le32 reg4 /* reg4 */;
519 __le32 reg5 /* reg5 */;
520 __le32 reg6 /* reg6 */;
521 __le32 reg7 /* reg7 */;
522 __le32 reg8 /* reg8 */;
523 u8 byte2 /* byte2 */;
524 u8 byte3 /* byte3 */;
525 __le16 word0 /* word0 */;
526 u8 byte4 /* byte4 */;
527 u8 byte5 /* byte5 */;
528 __le16 word1 /* word1 */;
529 __le16 word2 /* conn_dpi */;
530 __le16 word3 /* word3 */;
531 __le32 reg9 /* reg9 */;
532 __le32 reg10 /* reg10 */;
/* NOTE(review): closing brace elided from this excerpt */
535 struct ustorm_core_conn_ag_ctx {
/* Ustorm aggregative context — firmware ABI layout; do not reorder.
 * NOTE(review): elided listing — the 'u8 flagsN' holder bytes for the
 * MASK/SHIFT groups and the closing brace are not shown in this excerpt.
 */
536 u8 reserved /* cdu_validation */;
537 u8 byte1 /* state */;
539 #define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
540 #define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
541 #define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
542 #define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
543 #define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
544 #define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
545 #define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
546 #define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
547 #define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
548 #define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
550 #define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
551 #define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
552 #define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
553 #define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
554 #define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
555 #define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
556 #define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
557 #define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
559 #define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
560 #define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
561 #define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
562 #define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
563 #define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
564 #define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
565 #define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
566 #define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
567 #define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
568 #define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
569 #define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
570 #define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
571 #define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
572 #define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
573 #define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
574 #define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
576 #define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
577 #define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
578 #define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
579 #define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
580 #define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
581 #define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
582 #define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
583 #define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
584 #define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
585 #define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
586 #define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
587 #define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
588 #define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
589 #define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
590 #define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
591 #define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
592 u8 byte2 /* byte2 */;
593 u8 byte3 /* byte3 */;
594 __le16 word0 /* conn_dpi */;
595 __le16 word1 /* word1 */;
596 __le32 rx_producers /* reg0 */;
597 __le32 reg1 /* reg1 */;
598 __le32 reg2 /* reg2 */;
599 __le32 reg3 /* reg3 */;
600 __le16 word2 /* word2 */;
601 __le16 word3 /* word3 */;
/* NOTE(review): closing brace elided from this excerpt */
605 * The core storm context for the Mstorm
607 struct mstorm_core_conn_st_ctx {
/* NOTE(review): members and closing brace elided from this excerpt */
612 * The core storm context for the Ustorm
614 struct ustorm_core_conn_st_ctx {
/* NOTE(review): members and closing brace elided from this excerpt */
619 * core connection context
621 struct core_conn_context {
/* Full LL2 connection context: one storm context per firmware storm,
 * laid out in the order the hardware CDU expects; padding regpairs keep
 * each sub-context cacheline-aligned. ABI-fixed — do not reorder.
 */
622 /* ystorm storm context */
623 struct ystorm_core_conn_st_ctx ystorm_st_context;
624 struct regpair ystorm_st_padding[2] /* padding */;
625 /* pstorm storm context */
626 struct pstorm_core_conn_st_ctx pstorm_st_context;
627 struct regpair pstorm_st_padding[2] /* padding */;
628 /* xstorm storm context */
629 struct xstorm_core_conn_st_ctx xstorm_st_context;
630 /* xstorm aggregative context */
631 struct xstorm_core_conn_ag_ctx xstorm_ag_context;
632 /* tstorm aggregative context */
633 struct tstorm_core_conn_ag_ctx tstorm_ag_context;
634 /* ustorm aggregative context */
635 struct ustorm_core_conn_ag_ctx ustorm_ag_context;
636 /* mstorm storm context */
637 struct mstorm_core_conn_st_ctx mstorm_st_context;
638 /* ustorm storm context */
639 struct ustorm_core_conn_st_ctx ustorm_st_context;
640 struct regpair ustorm_st_padding[2] /* padding */;
/* NOTE(review): closing brace elided from this excerpt */
645 * How ll2 should deal with packet upon errors
647 enum core_error_handle {
648 LL2_DROP_PACKET /* If error occurs drop packet */,
649 LL2_DO_NOTHING /* If error occurs do nothing */,
650 LL2_ASSERT /* If error occurs assert */,
651 MAX_CORE_ERROR_HANDLE /* sentinel — not a real policy */
656 * opcodes for the event ring
658 enum core_event_opcode {
659 CORE_EVENT_TX_QUEUE_START, /* TX queue start ramrod completed */
660 CORE_EVENT_TX_QUEUE_STOP, /* TX queue stop ramrod completed */
661 CORE_EVENT_RX_QUEUE_START, /* RX queue start ramrod completed */
662 CORE_EVENT_RX_QUEUE_STOP, /* RX queue stop ramrod completed */
663 CORE_EVENT_RX_QUEUE_FLUSH, /* RX queue flush ramrod completed */
664 MAX_CORE_EVENT_OPCODE /* sentinel — not a real opcode */
669 * The L4 pseudo checksum mode for Core
671 enum core_l4_pseudo_checksum_mode {
672 /* Pseudo Checksum on packet is calculated with the correct packet length. */
673 CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH,
674 /* Pseudo Checksum on packet is calculated with zero length. */
675 CORE_L4_PSEUDO_CSUM_ZERO_LENGTH,
676 MAX_CORE_L4_PSEUDO_CHECKSUM_MODE /* sentinel */
681 * Light-L2 per-port GSI error counters (in Tstorm RAM)
683 struct core_ll2_port_stats {
/* Per-port GSI error counters maintained by firmware. */
684 struct regpair gsi_invalid_hdr; /* packets with an invalid GSI header */
685 struct regpair gsi_invalid_pkt_length; /* packets with an invalid length */
686 struct regpair gsi_unsupported_pkt_typ; /* packets of an unsupported type */
687 struct regpair gsi_crcchksm_error; /* packets failing CRC/checksum */
/* NOTE(review): closing brace elided from this excerpt */
692 * Ethernet TX Per Queue Stats
694 struct core_ll2_pstorm_per_queue_stat {
/* Per-TX-queue byte/packet counters, split by cast type. */
695 /* number of total bytes sent without errors */
696 struct regpair sent_ucast_bytes;
697 /* number of total bytes sent without errors */
698 struct regpair sent_mcast_bytes;
699 /* number of total bytes sent without errors */
700 struct regpair sent_bcast_bytes;
701 /* number of total packets sent without errors */
702 struct regpair sent_ucast_pkts;
703 /* number of total packets sent without errors */
704 struct regpair sent_mcast_pkts;
705 /* number of total packets sent without errors */
706 struct regpair sent_bcast_pkts;
/* NOTE(review): closing brace elided from this excerpt */
711 * Light-L2 RX Producers in Tstorm RAM
713 struct core_ll2_rx_prod {
/* Producer indices the host writes to Tstorm RAM to post RX buffers. */
714 __le16 bd_prod /* BD Producer */;
715 __le16 cqe_prod /* CQE Producer */;
/* NOTE(review): trailing member(s) and closing brace elided from this excerpt */
720 struct core_ll2_tstorm_per_queue_stat {
/* Per-RX-queue discard counters maintained by the Tstorm. */
721 /* Number of packets discarded because they are bigger than MTU */
722 struct regpair packet_too_big_discard;
723 /* Number of packets discarded due to lack of host buffers */
724 struct regpair no_buff_discard;
/* NOTE(review): closing brace elided from this excerpt */
728 struct core_ll2_ustorm_per_queue_stat {
/* Per-RX-queue receive byte/packet counters, split by cast type. */
729 struct regpair rcv_ucast_bytes; /* unicast bytes received */
730 struct regpair rcv_mcast_bytes; /* multicast bytes received */
731 struct regpair rcv_bcast_bytes; /* broadcast bytes received */
732 struct regpair rcv_ucast_pkts; /* unicast packets received */
733 struct regpair rcv_mcast_pkts; /* multicast packets received */
734 struct regpair rcv_bcast_pkts; /* broadcast packets received */
/* NOTE(review): closing brace elided from this excerpt */
739 * Core Ramrod Command IDs (light L2)
741 enum core_ramrod_cmd_id {
743 CORE_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */,
744 CORE_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */,
745 CORE_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */,
746 CORE_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
747 CORE_RAMROD_RX_QUEUE_FLUSH /* RX Flush queue Ramrod */,
748 MAX_CORE_RAMROD_CMD_ID /* sentinel — number of valid command IDs */
753 * RoCE flavor type for Light L2
755 enum core_roce_flavor_type {
/* NOTE(review): enumerator lines (RoCE / RRoCE flavors) elided from this excerpt */
758 MAX_CORE_ROCE_FLAVOR_TYPE /* sentinel */
763 * Specifies how ll2 should deal with packets errors: packet_too_big and no_buff
765 struct core_rx_action_on_error {
/* Packed error-handling policy byte for an LL2 RX queue.
 * NOTE(review): the holder field the defines pack into is elided here.
 */
767 /* ll2 how to handle error packet_too_big (use enum core_error_handle) */
768 #define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK 0x3
769 #define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0
770 /* ll2 how to handle error with no_buff (use enum core_error_handle) */
771 #define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK 0x3
772 #define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT 2
773 #define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK 0xF
774 #define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT 4
/* NOTE(review): closing brace elided from this excerpt */
779 * Core RX BD for Light L2
788 * Core RX BD with dynamic buffer length for Light L2
790 struct core_rx_bd_with_buff_len {
/* NOTE(review): members (buffer address + length) and closing brace elided from this excerpt */
797 * Core RX BD union for Light L2
799 union core_rx_bd_union {
/* Either BD form may be posted on the RX ring, selected per queue. */
800 struct core_rx_bd rx_bd /* Core Rx Bd static buffer size */;
801 /* Core Rx Bd with dynamic buffer length */
802 struct core_rx_bd_with_buff_len rx_bd_with_len;
/* NOTE(review): closing brace elided from this excerpt */
808 * Opaque Data for Light L2 RX CQE .
810 struct core_rx_cqe_opaque_data {
811 __le32 data[2] /* Opaque CQE Data */; /* echoed back to the driver, firmware does not interpret it */
/* NOTE(review): closing brace elided from this excerpt */
816 * Core RX CQE Type for Light L2
818 enum core_rx_cqe_type {
819 CORE_RX_CQE_ILLIGAL_TYPE /* Bad RX Cqe type */, /* (sic) misspelling is part of the ABI name */
820 CORE_RX_CQE_TYPE_REGULAR /* Regular Core RX CQE */,
821 CORE_RX_CQE_TYPE_GSI_OFFLOAD /* Fp Gsi offload RX CQE */,
822 CORE_RX_CQE_TYPE_SLOW_PATH /* Slow path Core RX CQE */,
/* NOTE(review): MAX_* sentinel and closing brace elided from this excerpt */
828 * Core RX CQE for Light L2 .
830 struct core_rx_fast_path_cqe {
/* Completion entry for a regular (fast-path) LL2 RX packet. */
831 u8 type /* CQE type */;
832 /* Offset (in bytes) of the packet from start of the buffer */
834 /* Parsing and error flags from the parser */
835 struct parsing_and_err_flags parse_flags;
836 __le16 packet_length /* Total packet length (from the parser) */;
837 __le16 vlan /* 802.1q VLAN tag */;
838 struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */;
/* NOTE(review): offset member, trailing reserved fields and closing brace elided from this excerpt */
843 * Core Rx CM offload CQE .
845 struct core_rx_gsi_offload_cqe {
/* Completion entry for a GSI-offload (RoCE CM) LL2 RX packet. */
846 u8 type /* CQE type */;
847 u8 data_length_error /* set if gsi data is bigger than buff */;
848 /* Parsing and error flags from the parser */
849 struct parsing_and_err_flags parse_flags;
850 __le16 data_length /* Total packet length (from the parser) */;
851 __le16 vlan /* 802.1q VLAN tag */;
852 __le32 src_mac_addrhi /* hi 4 bytes source mac address */;
853 __le16 src_mac_addrlo /* lo 2 bytes of source mac address */;
855 __le32 gid_dst[4] /* Gid destination address */;
/* NOTE(review): intermediate member at original line 854 and closing brace elided from this excerpt */
859 * Core RX CQE for Light L2 .
861 struct core_rx_slow_path_cqe {
/* Completion entry for a slow-path LL2 RX event. */
862 u8 type /* CQE type */;
865 struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */;
/* NOTE(review): reserved members and closing brace elided from this excerpt */
870 * Core RX CQE union for Light L2
872 union core_rx_cqe_union {
/* One RX completion ring entry — interpret via the leading 'type' byte
 * (enum core_rx_cqe_type), which all three variants place first.
 */
873 struct core_rx_fast_path_cqe rx_cqe_fp /* Fast path CQE */;
874 struct core_rx_gsi_offload_cqe rx_cqe_gsi /* GSI offload CQE */;
875 struct core_rx_slow_path_cqe rx_cqe_sp /* Slow path CQE */;
/* NOTE(review): closing brace elided from this excerpt */
883 * Ramrod data for rx queue start ramrod
885 struct core_rx_start_ramrod_data {
/* Parameter block passed with CORE_RAMROD_RX_QUEUE_START. */
886 struct regpair bd_base /* bd address of the first bd page */;
887 struct regpair cqe_pbl_addr /* Base address on host of CQE PBL */;
888 __le16 mtu /* Maximum transmission unit */;
889 __le16 sb_id /* Status block ID */;
890 u8 sb_index /* index of the protocol index */;
891 u8 complete_cqe_flg /* post completion to the CQE ring if set */;
892 u8 complete_event_flg /* post completion to the event ring if set */;
893 u8 drop_ttl0_flg /* drop packet with ttl0 if set */;
894 __le16 num_of_pbl_pages /* Num of pages in CQE PBL */;
895 /* if set, 802.1q tags will be removed and copied to CQE */
896 u8 inner_vlan_removal_en;
897 u8 queue_id /* Light L2 RX Queue ID */;
898 u8 main_func_queue /* Is this the main queue for the PF */;
899 /* Duplicate broadcast packets to LL2 main queue in mf_si mode. Valid if
900 * main_func_queue is set.
902 u8 mf_si_bcast_accept_all;
903 /* Duplicate multicast packets to LL2 main queue in mf_si mode. Valid if
904 * main_func_queue is set.
906 u8 mf_si_mcast_accept_all;
907 /* Specifies how ll2 should deal with packets errors: packet_too_big and
910 struct core_rx_action_on_error action_on_error;
911 /* set when in GSI offload mode on ROCE connection */
/* NOTE(review): the gsi_offload_flag member this comment documents, trailing
 * reserved fields and the closing brace are elided from this excerpt.
 */
918 * Ramrod data for rx queue stop ramrod
920 struct core_rx_stop_ramrod_data {
/* Parameter block passed with CORE_RAMROD_RX_QUEUE_STOP. */
921 u8 complete_cqe_flg /* post completion to the CQE ring if set */;
922 u8 complete_event_flg /* post completion to the event ring if set */;
923 u8 queue_id /* Light L2 RX Queue ID */;
/* NOTE(review): trailing reserved fields and closing brace elided from this excerpt */
930 * Flags for Core TX BD
932 struct core_tx_bd_data {
/* Packed per-BD TX flags word.
 * NOTE(review): the holder field the defines pack into is elided here.
 */
934 /* Do not allow additional VLAN manipulations on this packet (DCB) */
935 #define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1
936 #define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0
937 /* Insert VLAN into packet */
938 #define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1
939 #define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1
940 /* This is the first BD of the packet (for debug) */
941 #define CORE_TX_BD_DATA_START_BD_MASK 0x1
942 #define CORE_TX_BD_DATA_START_BD_SHIFT 2
943 /* Calculate the IP checksum for the packet */
944 #define CORE_TX_BD_DATA_IP_CSUM_MASK 0x1
945 #define CORE_TX_BD_DATA_IP_CSUM_SHIFT 3
946 /* Calculate the L4 checksum for the packet */
947 #define CORE_TX_BD_DATA_L4_CSUM_MASK 0x1
948 #define CORE_TX_BD_DATA_L4_CSUM_SHIFT 4
949 /* Packet is IPv6 with extensions */
950 #define CORE_TX_BD_DATA_IPV6_EXT_MASK 0x1
951 #define CORE_TX_BD_DATA_IPV6_EXT_SHIFT 5
952 /* If IPv6+ext, and if l4_csum is 1, than this field indicates L4 protocol:
955 #define CORE_TX_BD_DATA_L4_PROTOCOL_MASK 0x1
956 #define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT 6
957 /* The pseudo checksum mode to place in the L4 checksum field. Required only
958 * when IPv6+ext and l4_csum is set. (use enum core_l4_pseudo_checksum_mode)
960 #define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK 0x1
961 #define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
962 /* Number of BDs that make up one packet - width wide enough to present
963 * CORE_LL2_TX_MAX_BDS_PER_PACKET
965 #define CORE_TX_BD_DATA_NBDS_MASK 0xF
966 #define CORE_TX_BD_DATA_NBDS_SHIFT 8
967 /* Use roce_flavor enum - Differentiate between Roce flavors is valid when
968 * connType is ROCE (use enum core_roce_flavor_type)
970 #define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1
971 #define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT 12
972 /* Calculate ip length */
973 #define CORE_TX_BD_DATA_IP_LEN_MASK 0x1
974 #define CORE_TX_BD_DATA_IP_LEN_SHIFT 13
975 #define CORE_TX_BD_DATA_RESERVED0_MASK 0x3
976 #define CORE_TX_BD_DATA_RESERVED0_SHIFT 14
/* NOTE(review): closing brace elided from this excerpt */
980 * Core TX BD for Light L2
/* NOTE(review): the opening 'struct core_tx_bd {' line (original line 982)
 * is elided from this excerpt; the members below belong to that struct.
 */
983 struct regpair addr /* Buffer Address */;
984 __le16 nbytes /* Number of Bytes in Buffer */;
985 /* Network packets: VLAN to insert to packet (if insertion flag set) LoopBack
986 * packets: echo data to pass to Rx
988 __le16 nw_vlan_or_lb_echo;
989 struct core_tx_bd_data bd_data /* BD Flags */;
991 /* L4 Header Offset from start of packet (in Words). This is needed if both
992 * l4_csum and ipv6_ext are set
994 #define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF
995 #define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
996 /* Packet destination - Network, LB (use enum core_tx_dest) */
997 #define CORE_TX_BD_TX_DST_MASK 0x1
998 #define CORE_TX_BD_TX_DST_SHIFT 14
999 #define CORE_TX_BD_RESERVED_MASK 0x1
1000 #define CORE_TX_BD_RESERVED_SHIFT 15
/* NOTE(review): holder field for the defines and the closing brace are elided */
1006 * Light L2 TX Destination
1009 CORE_TX_DEST_NW /* Light L2 TX Destination to the Network */,
1010 CORE_TX_DEST_LB /* Light L2 TX Destination to the Loopback */,
1016 * Ramrod data for tx queue start ramrod
1018 struct core_tx_start_ramrod_data {
1019 struct regpair pbl_base_addr /* Address of the pbl page */;
1020 __le16 mtu /* Maximum transmission unit */;
1021 __le16 sb_id /* Status block ID */;
1022 u8 sb_index /* Status block protocol index */;
1023 u8 stats_en /* Statistics Enable */;
1024 u8 stats_id /* Statistics Counter ID */;
1025 u8 conn_type /* connection type that loaded ll2 */;
1026 __le16 pbl_size /* Number of BD pages pointed by PBL */;
1027 __le16 qm_pq_id /* QM PQ ID */;
1028 /* set when in GSI offload mode on ROCE connection */
1029 u8 gsi_offload_flag;
1035 * Ramrod data for tx queue stop ramrod
1037 struct core_tx_stop_ramrod_data {
1038 __le32 reserved0[2];
1043 * Enum flag for what type of dcb data to update (dhcp here is a typo for dscp)
1045 enum dcb_dhcp_update_flag {
1046 /* use when no change should be done to dcb data */
1047 DONT_UPDATE_DCB_DHCP,
1048 UPDATE_DCB /* use to update only l2 (vlan) priority */,
1049 UPDATE_DSCP /* use to update only l3 dscp */,
1050 UPDATE_DCB_DSCP /* update vlan pri and dscp */,
1051 MAX_DCB_DHCP_UPDATE_FLAG
1055 struct eth_mstorm_per_pf_stat {
1056 struct regpair gre_discard_pkts /* Dropped GRE RX packets */;
1057 struct regpair vxlan_discard_pkts /* Dropped VXLAN RX packets */;
1058 struct regpair geneve_discard_pkts /* Dropped GENEVE RX packets */;
1059 struct regpair lb_discard_pkts /* Dropped Tx switched packets */;
1063 struct eth_mstorm_per_queue_stat {
1064 /* Number of packets discarded because TTL=0 (in IPv4) or hopLimit=0 (IPv6) */
1065 struct regpair ttl0_discard;
1066 /* Number of packets discarded because they are bigger than MTU */
1067 struct regpair packet_too_big_discard;
1068 /* Number of packets discarded due to lack of host buffers (BDs/SGEs/CQEs) */
1069 struct regpair no_buff_discard;
1070 /* Number of packets discarded because of no active Rx connection */
1071 struct regpair not_active_discard;
1072 /* number of coalesced packets in all TPA aggregations */
1073 struct regpair tpa_coalesced_pkts;
1074 /* total number of TPA aggregations */
1075 struct regpair tpa_coalesced_events;
1076 /* number of aggregations, which abnormally ended */
1077 struct regpair tpa_aborts_num;
1078 /* total TCP payload length in all TPA aggregations */
1079 struct regpair tpa_coalesced_bytes;
1084 * Ethernet TX Per PF
1086 struct eth_pstorm_per_pf_stat {
1087 /* number of total ucast bytes sent on loopback port without errors */
1088 struct regpair sent_lb_ucast_bytes;
1089 /* number of total mcast bytes sent on loopback port without errors */
1090 struct regpair sent_lb_mcast_bytes;
1091 /* number of total bcast bytes sent on loopback port without errors */
1092 struct regpair sent_lb_bcast_bytes;
1093 /* number of total ucast packets sent on loopback port without errors */
1094 struct regpair sent_lb_ucast_pkts;
1095 /* number of total mcast packets sent on loopback port without errors */
1096 struct regpair sent_lb_mcast_pkts;
1097 /* number of total bcast packets sent on loopback port without errors */
1098 struct regpair sent_lb_bcast_pkts;
1099 struct regpair sent_gre_bytes /* Sent GRE bytes */;
1100 struct regpair sent_vxlan_bytes /* Sent VXLAN bytes */;
1101 struct regpair sent_geneve_bytes /* Sent GENEVE bytes */;
1102 struct regpair sent_gre_pkts /* Sent GRE packets */;
1103 struct regpair sent_vxlan_pkts /* Sent VXLAN packets */;
1104 struct regpair sent_geneve_pkts /* Sent GENEVE packets */;
1105 struct regpair gre_drop_pkts /* Dropped GRE TX packets */;
1106 struct regpair vxlan_drop_pkts /* Dropped VXLAN TX packets */;
1107 struct regpair geneve_drop_pkts /* Dropped GENEVE TX packets */;
1112 * Ethernet TX Per Queue Stats
1114 struct eth_pstorm_per_queue_stat {
1115 /* number of total bytes sent without errors */
1116 struct regpair sent_ucast_bytes;
1117 /* number of total bytes sent without errors */
1118 struct regpair sent_mcast_bytes;
1119 /* number of total bytes sent without errors */
1120 struct regpair sent_bcast_bytes;
1121 /* number of total packets sent without errors */
1122 struct regpair sent_ucast_pkts;
1123 /* number of total packets sent without errors */
1124 struct regpair sent_mcast_pkts;
1125 /* number of total packets sent without errors */
1126 struct regpair sent_bcast_pkts;
1127 /* number of total packets dropped due to errors */
1128 struct regpair error_drop_pkts;
1133 * ETH Rx producers data
1135 struct eth_rx_rate_limit {
1136 /* Rate Limit Multiplier - (Storm Clock (MHz) * 8 / Desired Bandwidth (MB/s)) */
1138 /* Constant term to add (or subtract from number of cycles) */
1140 u8 add_sub_cnst /* Add (1) or subtract (0) constant term */;
1146 struct eth_ustorm_per_pf_stat {
1147 /* number of total ucast bytes received on loopback port without errors */
1148 struct regpair rcv_lb_ucast_bytes;
1149 /* number of total mcast bytes received on loopback port without errors */
1150 struct regpair rcv_lb_mcast_bytes;
1151 /* number of total bcast bytes received on loopback port without errors */
1152 struct regpair rcv_lb_bcast_bytes;
1153 /* number of total ucast packets received on loopback port without errors */
1154 struct regpair rcv_lb_ucast_pkts;
1155 /* number of total mcast packets received on loopback port without errors */
1156 struct regpair rcv_lb_mcast_pkts;
1157 /* number of total bcast packets received on loopback port without errors */
1158 struct regpair rcv_lb_bcast_pkts;
1159 struct regpair rcv_gre_bytes /* Received GRE bytes */;
1160 struct regpair rcv_vxlan_bytes /* Received VXLAN bytes */;
1161 struct regpair rcv_geneve_bytes /* Received GENEVE bytes */;
1162 struct regpair rcv_gre_pkts /* Received GRE packets */;
1163 struct regpair rcv_vxlan_pkts /* Received VXLAN packets */;
1164 struct regpair rcv_geneve_pkts /* Received GENEVE packets */;
1168 struct eth_ustorm_per_queue_stat {
1169 struct regpair rcv_ucast_bytes;
1170 struct regpair rcv_mcast_bytes;
1171 struct regpair rcv_bcast_bytes;
1172 struct regpair rcv_ucast_pkts;
1173 struct regpair rcv_mcast_pkts;
1174 struct regpair rcv_bcast_pkts;
1179 * Event Ring Next Page Address
1181 struct event_ring_next_addr {
1182 struct regpair addr /* Next Page Address */;
1183 __le32 reserved[2] /* Reserved */;
1187 * Event Ring Element
1189 union event_ring_element {
1190 struct event_ring_entry entry /* Event Ring Entry */;
1191 /* Event Ring Next Page Address */
1192 struct event_ring_next_addr next_addr;
1200 enum fw_flow_ctrl_mode {
1203 MAX_FW_FLOW_CTRL_MODE
1208 * Major and Minor hsi Versions
1210 struct hsi_fp_ver_struct {
1211 u8 minor_ver_arr[2] /* Minor Version of hsi loading pf */;
1212 u8 major_ver_arr[2] /* Major Version of driver loading pf */;
1220 INTEG_PHASE_BB_A0_LATEST = 3 /* BB A0 latest integration phase */,
1221 INTEG_PHASE_BB_B0_NO_MCP = 10 /* BB B0 without MCP */,
1222 INTEG_PHASE_BB_B0_WITH_MCP = 11 /* BB B0 with MCP */,
1230 enum iwarp_ll2_tx_queues {
1231 /* LL2 queue for OOO packets sent in-order by the driver */
1232 IWARP_LL2_IN_ORDER_TX_QUEUE = 1,
1233 /* LL2 queue for unaligned packets sent aligned by the driver */
1234 IWARP_LL2_ALIGNED_TX_QUEUE,
1235 IWARP_LL2_ERROR /* Error indication */,
1236 MAX_IWARP_LL2_TX_QUEUES
1241 * Malicious VF error ID
1243 enum malicious_vf_error_id {
1244 MALICIOUS_VF_NO_ERROR /* Zero placeholder value */,
1245 /* Writing to VF/PF channel when it is not ready */
1246 VF_PF_CHANNEL_NOT_READY,
1247 VF_ZONE_MSG_NOT_VALID /* VF channel message is not valid */,
1248 VF_ZONE_FUNC_NOT_ENABLED /* Parent PF of VF channel is not active */,
1249 /* TX packet is shorter than reported on BDs or than minimal size */
1250 ETH_PACKET_TOO_SMALL,
1251 /* Tx packet marked as insert VLAN when it is illegal */
1252 ETH_ILLEGAL_VLAN_MODE,
1253 ETH_MTU_VIOLATION /* TX packet is greater than MTU */,
1254 /* TX packet has illegal inband tags marked */
1255 ETH_ILLEGAL_INBAND_TAGS,
1256 /* Vlan cannot be added to inband tag */
1257 ETH_VLAN_INSERT_AND_INBAND_VLAN,
1258 /* indicated number of BDs for the packet is illegal */
1260 ETH_FIRST_BD_WO_SOP /* 1st BD must have start_bd flag set */,
1261 /* There are not enough BDs for transmission of even one packet */
1262 ETH_INSUFFICIENT_BDS,
1263 ETH_ILLEGAL_LSO_HDR_NBDS /* Header NBDs value is illegal */,
1264 ETH_ILLEGAL_LSO_MSS /* LSO MSS value is more than allowed */,
1265 /* empty BD (which does not contain control flags) is illegal */
1267 ETH_ILLEGAL_LSO_HDR_LEN /* LSO header size is above the limit */,
1268 /* In LSO it is expected that on the local BD ring there will be at least MSS
 * bytes of payload data */
1271 ETH_INSUFFICIENT_PAYLOAD,
1272 ETH_EDPM_OUT_OF_SYNC /* Valid BDs on local ring after EDPM L2 sync */,
1273 /* Tunneled packet with IPv6+Ext without a proper number of BDs */
1274 ETH_TUNN_IPV6_EXT_NBD_ERR,
1275 ETH_CONTROL_PACKET_VIOLATION /* VF sent control frame such as PFC */,
1276 ETH_ANTI_SPOOFING_ERR /* Anti-Spoofing verification failure */,
1277 MAX_MALICIOUS_VF_ERROR_ID
1283 * Mstorm non-triggering VF zone
1285 struct mstorm_non_trigger_vf_zone {
1286 /* VF statistic bucket */
1287 struct eth_mstorm_per_queue_stat eth_queue_stat;
1288 /* VF RX queues producers */
1289 struct eth_rx_prod_data
1290 eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD];
1297 struct mstorm_vf_zone {
1298 /* non-interrupt-triggering zone */
1299 struct mstorm_non_trigger_vf_zone non_trigger;
1304 * personality per PF
1306 enum personality_type {
1307 BAD_PERSONALITY_TYP,
1308 PERSONALITY_ISCSI /* iSCSI and LL2 */,
1309 PERSONALITY_FCOE /* Fcoe and LL2 */,
1310 PERSONALITY_RDMA_AND_ETH /* Roce or Iwarp, Eth and LL2 */,
1311 PERSONALITY_RDMA /* Roce and LL2 */,
1312 PERSONALITY_CORE /* CORE(LL2) */,
1313 PERSONALITY_ETH /* Ethernet */,
1314 PERSONALITY_TOE /* Toe and LL2 */,
1315 MAX_PERSONALITY_TYPE
1320 * tunnel configuration
1322 struct pf_start_tunnel_config {
1323 /* Set VXLAN tunnel UDP destination port. */
1324 u8 set_vxlan_udp_port_flg;
1325 /* Set GENEVE tunnel UDP destination port. */
1326 u8 set_geneve_udp_port_flg;
1327 u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */;
1328 /* If set, enable l2 GENEVE tunnel in TX path. */
1329 u8 tx_enable_l2geneve;
1330 /* If set, enable IP GENEVE tunnel in TX path. */
1331 u8 tx_enable_ipgeneve;
1332 u8 tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */;
1333 u8 tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */;
1334 u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
1335 /* Classification scheme for l2 GENEVE tunnel. */
1336 u8 tunnel_clss_l2geneve;
1337 /* Classification scheme for ip GENEVE tunnel. */
1338 u8 tunnel_clss_ipgeneve;
1339 u8 tunnel_clss_l2gre /* Classification scheme for l2 GRE tunnel. */;
1340 u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. */;
1341 __le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
1342 __le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */;
1346 * Ramrod data for PF start ramrod
1348 struct pf_start_ramrod_data {
1349 struct regpair event_ring_pbl_addr /* Address of event ring PBL */;
1350 /* PBL address of consolidation queue */
1351 struct regpair consolid_q_pbl_addr;
1352 /* tunnel configuration. */
1353 struct pf_start_tunnel_config tunnel_config;
1354 __le16 event_ring_sb_id /* Status block ID */;
1355 /* All VfIds owned by Pf will be from baseVfId till baseVfId+numVfs */
1357 u8 num_vfs /* Amount of vfs owned by PF */;
1358 u8 event_ring_num_pages /* Number of PBL pages in event ring */;
1359 u8 event_ring_sb_index /* Status block index */;
1360 u8 path_id /* HW path ID (engine ID) */;
1361 u8 warning_as_error /* In FW asserts, treat warning as error */;
1362 /* If not set - throw a warning for each ramrod (for debug) */
1363 u8 dont_log_ramrods;
1364 u8 personality /* defines what type of personality the new PF is */;
1365 /* Log type mask. Each bit set enables a corresponding event type logging.
1366 * Event types are defined as ASSERT_LOG_TYPE_xxx */
1368 __le16 log_type_mask;
1369 u8 mf_mode /* Multi function mode */;
1370 u8 integ_phase /* Integration phase */;
1371 /* If set, inter-pf tx switching is allowed in Switch Independent func mode */
1372 u8 allow_npar_tx_switching;
1373 /* Map from inner to outer priority. Set pri_map_valid when init map */
1374 u8 inner_to_outer_pri_map[8];
1375 /* If inner_to_outer_pri_map is initialized, then set pri_map_valid */
1377 /* In case mf_mode is MF_OVLAN, this field specifies the outer vlan
1378 * (lower 16 bits) and ethType to use (higher 16 bits) */
1381 /* FP HSI version to be used by FW */
1382 struct hsi_fp_ver_struct hsi_fp_ver;
1388 * Data for port update ramrod
1390 struct protocol_dcb_data {
1391 u8 dcb_enable_flag /* dcbEnable flag value */;
1392 u8 dscp_enable_flag /* If set use dscp value */;
1393 u8 dcb_priority /* dcbPri flag value */;
1394 u8 dcb_tc /* dcb TC value */;
1395 u8 dscp_val /* dscp value to write if dscp_enable_flag is set */;
1400 * Update tunnel configuration
1402 struct pf_update_tunnel_config {
1403 /* Update RX per PF tunnel classification scheme. */
1404 u8 update_rx_pf_clss;
1405 /* Update per PORT default tunnel RX classification scheme for traffic with
1406 * unknown unicast outer MAC in NPAR mode. */
1408 u8 update_rx_def_ucast_clss;
1409 /* Update per PORT default tunnel RX classification scheme for traffic with non
1410 * unicast outer MAC in NPAR mode. */
1412 u8 update_rx_def_non_ucast_clss;
1413 /* Update TX per PF tunnel classification scheme. Used by PF update. */
1414 u8 update_tx_pf_clss;
1415 /* Update VXLAN tunnel UDP destination port. */
1416 u8 set_vxlan_udp_port_flg;
1417 /* Update GENEVE tunnel UDP destination port. */
1418 u8 set_geneve_udp_port_flg;
1419 u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */;
1420 /* If set, enable l2 GENEVE tunnel in TX path. */
1421 u8 tx_enable_l2geneve;
1422 /* If set, enable IP GENEVE tunnel in TX path. */
1423 u8 tx_enable_ipgeneve;
1424 u8 tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */;
1425 u8 tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */;
1426 u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
1427 /* Classification scheme for l2 GENEVE tunnel. */
1428 u8 tunnel_clss_l2geneve;
1429 /* Classification scheme for ip GENEVE tunnel. */
1430 u8 tunnel_clss_ipgeneve;
1431 u8 tunnel_clss_l2gre /* Classification scheme for l2 GRE tunnel. */;
1432 u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. */;
1433 __le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
1434 __le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */;
1439 * Data for port update ramrod
1441 struct pf_update_ramrod_data {
1443 u8 update_eth_dcb_data_flag /* Update Eth DCB data indication */;
1444 u8 update_fcoe_dcb_data_flag /* Update FCOE DCB data indication */;
1445 u8 update_iscsi_dcb_data_flag /* Update iSCSI DCB data indication */;
1446 u8 update_roce_dcb_data_flag /* Update ROCE DCB data indication */;
1447 /* Update RROCE (RoceV2) DCB data indication */
1448 u8 update_rroce_dcb_data_flag;
1449 u8 update_iwarp_dcb_data_flag /* Update IWARP DCB data indication */;
1450 u8 update_mf_vlan_flag /* Update MF outer vlan Id */;
1451 struct protocol_dcb_data eth_dcb_data /* core eth related fields */;
1452 struct protocol_dcb_data fcoe_dcb_data /* core fcoe related fields */;
1453 /* core iscsi related fields */
1454 struct protocol_dcb_data iscsi_dcb_data;
1455 struct protocol_dcb_data roce_dcb_data /* core roce related fields */;
1456 /* core rroce (RoceV2) related fields */
1457 struct protocol_dcb_data rroce_dcb_data;
1458 /* core iwarp related fields */
1459 struct protocol_dcb_data iwarp_dcb_data;
1460 __le16 mf_vlan /* new outer vlan id value */;
1462 /* tunnel configuration. */
1463 struct pf_update_tunnel_config tunnel_config;
1472 ENGX2_PORTX1 /* 2 engines x 1 port */,
1473 ENGX2_PORTX2 /* 2 engines x 2 ports */,
1474 ENGX1_PORTX1 /* 1 engine x 1 port */,
1475 ENGX1_PORTX2 /* 1 engine x 2 ports */,
1476 ENGX1_PORTX4 /* 1 engine x 4 ports */,
1483 * use to index in hsi_fp_[major|minor]_ver_arr per protocol
1485 enum protocol_version_array_key {
1488 MAX_PROTOCOL_VERSION_ARRAY_KEY
1496 struct rdma_sent_stats {
1497 struct regpair sent_bytes /* number of total RDMA bytes sent */;
1498 struct regpair sent_pkts /* number of total RDMA packets sent */;
1502 * Pstorm non-triggering VF zone
1504 struct pstorm_non_trigger_vf_zone {
1505 /* VF statistic bucket */
1506 struct eth_pstorm_per_queue_stat eth_queue_stat;
1507 struct rdma_sent_stats rdma_stats /* RoCE sent statistics */;
1514 struct pstorm_vf_zone {
1515 /* non-interrupt-triggering zone */
1516 struct pstorm_non_trigger_vf_zone non_trigger;
1517 struct regpair reserved[7] /* vf_zone size must be a power of 2 */;
1522 * Ramrod Header of SPQE
1524 struct ramrod_header {
1525 __le32 cid /* Slowpath Connection CID */;
1526 u8 cmd_id /* Ramrod Cmd (Per Protocol Type) */;
1527 u8 protocol_id /* Ramrod Protocol ID */;
1528 __le16 echo /* Ramrod echo */;
1535 struct rdma_rcv_stats {
1536 struct regpair rcv_bytes /* number of total RDMA bytes received */;
1537 struct regpair rcv_pkts /* number of total RDMA packets received */;
1543 * Data for update QCN/DCQCN RL ramrod
1545 struct rl_update_ramrod_data {
1546 u8 qcn_update_param_flg /* Update QCN global params: timeout. */;
1547 /* Update DCQCN global params: timeout, g, k. */
1548 u8 dcqcn_update_param_flg;
1549 u8 rl_init_flg /* Init RL parameters, when RL disabled. */;
1550 u8 rl_start_flg /* Start RL in IDLE state. Set rate to maximum. */;
1551 u8 rl_stop_flg /* Stop RL. */;
1552 u8 rl_id_first /* ID of first or single RL, that will be updated. */;
1553 /* ID of last RL, that will be updated. If clear, a single RL will be updated. */
1555 u8 rl_dc_qcn_flg /* If set, RL will be used for DCQCN. */;
1556 __le32 rl_bc_rate /* Byte Counter Limit. */;
1557 __le16 rl_max_rate /* Maximum rate in 1.6 Mbps resolution. */;
1558 __le16 rl_r_ai /* Active increase rate. */;
1559 __le16 rl_r_hai /* Hyper active increase rate. */;
1560 __le16 dcqcn_g /* DCQCN Alpha update gain in 1/64K resolution. */;
1561 __le32 dcqcn_k_us /* DCQCN Alpha update interval. */;
1562 __le32 dcqcn_timeuot_us /* DCQCN timeout. (field name misspells timeout) */;
1563 __le32 qcn_timeuot_us /* QCN timeout. (field name misspells timeout) */;
1569 * Slowpath Element (SPQE)
1571 struct slow_path_element {
1572 struct ramrod_header hdr /* Ramrod Header */;
1573 struct regpair data_ptr /* Pointer to the Ramrod Data on the Host */;
1578 * Tstorm non-triggering VF zone
1580 struct tstorm_non_trigger_vf_zone {
1581 struct rdma_rcv_stats rdma_stats /* RoCE received statistics */;
1585 struct tstorm_per_port_stat {
1586 /* packet is dropped because it was truncated in NIG */
1587 struct regpair trunc_error_discard;
1588 /* packet is dropped because of Ethernet FCS error */
1589 struct regpair mac_error_discard;
1590 /* packet is dropped because classification was unsuccessful */
1591 struct regpair mftag_filter_discard;
1592 /* packet was passed to Ethernet and dropped because of no mac filter match */
1593 struct regpair eth_mac_filter_discard;
1594 /* packet passed to Light L2 and dropped because Light L2 is not configured for this MAC */
1597 struct regpair ll2_mac_filter_discard;
1598 /* packet passed to Light L2 and dropped because Light L2 is not configured for this connection */
1601 struct regpair ll2_conn_disabled_discard;
1602 /* packet is an ISCSI irregular packet */
1603 struct regpair iscsi_irregular_pkt;
1604 /* packet is an FCOE irregular packet */
1605 struct regpair fcoe_irregular_pkt;
1606 /* packet is a ROCE irregular packet */
1607 struct regpair roce_irregular_pkt;
1608 /* packet is an ETH irregular packet */
1609 struct regpair eth_irregular_pkt;
1610 /* packet is a TOE irregular packet */
1611 struct regpair toe_irregular_pkt;
1612 /* packet is a PREROCE irregular packet */
1613 struct regpair preroce_irregular_pkt;
1614 struct regpair eth_gre_tunn_filter_discard /* GRE dropped packets */;
1615 /* VXLAN dropped packets */
1616 struct regpair eth_vxlan_tunn_filter_discard;
1617 /* GENEVE dropped packets */
1618 struct regpair eth_geneve_tunn_filter_discard;
1625 struct tstorm_vf_zone {
1626 /* non-interrupt-triggering zone */
1627 struct tstorm_non_trigger_vf_zone non_trigger;
1632 * Tunnel classification scheme
1635 /* Use MAC and VLAN from first L2 header for vport classification. */
1636 TUNNEL_CLSS_MAC_VLAN = 0,
1637 /* Use MAC from first L2 header and VNI from tunnel header for vport
1640 TUNNEL_CLSS_MAC_VNI,
1641 /* Use MAC and VLAN from last L2 header for vport classification */
1642 TUNNEL_CLSS_INNER_MAC_VLAN,
1643 /* Use MAC from last L2 header and VNI from tunnel header for vport
1646 TUNNEL_CLSS_INNER_MAC_VNI,
1647 /* Use MAC and VLAN from last L2 header for vport classification. If no exact
1648 * match, use MAC and VLAN from first L2 header for classification.
1650 TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE,
1657 * Ustorm non-triggering VF zone
1659 struct ustorm_non_trigger_vf_zone {
1660 /* VF statistic bucket */
1661 struct eth_ustorm_per_queue_stat eth_queue_stat;
1662 struct regpair vf_pf_msg_addr /* VF-PF message address */;
1667 * Ustorm triggering VF zone
1669 struct ustorm_trigger_vf_zone {
1670 u8 vf_pf_msg_valid /* VF-PF message valid flag */;
1678 struct ustorm_vf_zone {
1679 /* non-interrupt-triggering zone */
1680 struct ustorm_non_trigger_vf_zone non_trigger;
1681 struct ustorm_trigger_vf_zone trigger /* interrupt triggering zone */;
1686 * VF-PF channel data
1688 struct vf_pf_channel_data {
1689 /* 0: VF-PF Channel NOT ready. Waiting for ack from PF driver. 1: VF-PF Channel
1690 * is ready for a new transaction.
1693 /* 0: VF-PF Channel is invalid because of malicious VF. 1: VF-PF Channel is
1703 * Ramrod data for VF start ramrod
1705 struct vf_start_ramrod_data {
1706 u8 vf_id /* VF ID */;
1707 /* If set, initial cleanup ack will be sent to parent PF SP event queue */
1709 __le16 opaque_fid /* VF opaque FID */;
1710 u8 personality /* define what type of personality is new VF */;
1712 /* FP HSI version to be used by FW */
1713 struct hsi_fp_ver_struct hsi_fp_ver;
1718 * Ramrod data for VF start ramrod
1720 struct vf_stop_ramrod_data {
1721 u8 vf_id /* VF ID */;
1729 * VF zone size mode.
1731 enum vf_zone_size_mode {
1732 /* Default VF zone size. Up to 192 VF supported. */
1733 VF_ZONE_SIZE_MODE_DEFAULT,
1734 /* Doubled VF zone size. Up to 96 VF supported. */
1735 VF_ZONE_SIZE_MODE_DOUBLE,
1736 /* Quad VF zone size. Up to 48 VF supported. */
1737 VF_ZONE_SIZE_MODE_QUAD,
1738 MAX_VF_ZONE_SIZE_MODE
1745 * Attentions status block
1747 struct atten_status_block {
1751 __le16 sb_index /* status block running index */;
1757 * Igu cleanup bit values to distinguish between clean or producer consumer
1760 enum command_type_bit {
1761 IGU_COMMAND_TYPE_NOP = 0,
1762 IGU_COMMAND_TYPE_SET = 1,
1763 MAX_COMMAND_TYPE_BIT
1772 /* DMA Source. 0 - PCIe, 1 - GRC (use enum dmae_cmd_src_enum) */
1773 #define DMAE_CMD_SRC_MASK 0x1
1774 #define DMAE_CMD_SRC_SHIFT 0
1775 /* DMA destination. 0 - None, 1 - PCIe, 2 - GRC, 3 - None
1776 * (use enum dmae_cmd_dst_enum)
1778 #define DMAE_CMD_DST_MASK 0x3
1779 #define DMAE_CMD_DST_SHIFT 1
1780 /* Completion destination. 0 - PCie, 1 - GRC (use enum dmae_cmd_c_dst_enum) */
1781 #define DMAE_CMD_C_DST_MASK 0x1
1782 #define DMAE_CMD_C_DST_SHIFT 3
1783 /* Reset the CRC result (do not use the previous result as the seed) */
1784 #define DMAE_CMD_CRC_RESET_MASK 0x1
1785 #define DMAE_CMD_CRC_RESET_SHIFT 4
1786 /* Reset the source address in the next go to the same source address of the
1789 #define DMAE_CMD_SRC_ADDR_RESET_MASK 0x1
1790 #define DMAE_CMD_SRC_ADDR_RESET_SHIFT 5
1791 /* Reset the destination address in the next go to the same destination address
1792 * of the previous go
1794 #define DMAE_CMD_DST_ADDR_RESET_MASK 0x1
1795 #define DMAE_CMD_DST_ADDR_RESET_SHIFT 6
1796 /* 0 completion function is the same as src function, 1 - 0 completion
1797 * function is the same as dst function (use enum dmae_cmd_comp_func_enum)
1799 #define DMAE_CMD_COMP_FUNC_MASK 0x1
1800 #define DMAE_CMD_COMP_FUNC_SHIFT 7
1801 /* 0 - Do not write a completion word, 1 - Write a completion word
1802 * (use enum dmae_cmd_comp_word_en_enum)
1804 #define DMAE_CMD_COMP_WORD_EN_MASK 0x1
1805 #define DMAE_CMD_COMP_WORD_EN_SHIFT 8
1806 /* 0 - Do not write a CRC word, 1 - Write a CRC word
1807 * (use enum dmae_cmd_comp_crc_en_enum)
1809 #define DMAE_CMD_COMP_CRC_EN_MASK 0x1
1810 #define DMAE_CMD_COMP_CRC_EN_SHIFT 9
1811 /* The CRC word should be taken from the DMAE address space from address 9+X,
1812 * where X is the value in these bits.
1814 #define DMAE_CMD_COMP_CRC_OFFSET_MASK 0x7
1815 #define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10
1816 #define DMAE_CMD_RESERVED1_MASK 0x1
1817 #define DMAE_CMD_RESERVED1_SHIFT 13
1818 #define DMAE_CMD_ENDIANITY_MODE_MASK 0x3
1819 #define DMAE_CMD_ENDIANITY_MODE_SHIFT 14
1820 /* The field specifies how the completion word is affected by PCIe read error. 0
1821 * Send a regular completion, 1 - Send a completion with an error indication,
1822 * 2 do not send a completion (use enum dmae_cmd_error_handling_enum)
1824 #define DMAE_CMD_ERR_HANDLING_MASK 0x3
1825 #define DMAE_CMD_ERR_HANDLING_SHIFT 16
1826 /* The port ID to be placed on the RF FID field of the GRC bus. this field is
1827 * used both when GRC is the destination and when it is the source of the DMAE
1830 #define DMAE_CMD_PORT_ID_MASK 0x3
1831 #define DMAE_CMD_PORT_ID_SHIFT 18
1832 /* Source PCI function number [3:0] */
1833 #define DMAE_CMD_SRC_PF_ID_MASK 0xF
1834 #define DMAE_CMD_SRC_PF_ID_SHIFT 20
1835 /* Destination PCI function number [3:0] */
1836 #define DMAE_CMD_DST_PF_ID_MASK 0xF
1837 #define DMAE_CMD_DST_PF_ID_SHIFT 24
1838 #define DMAE_CMD_SRC_VF_ID_VALID_MASK 0x1 /* Source VFID valid */
1839 #define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28
1840 #define DMAE_CMD_DST_VF_ID_VALID_MASK 0x1 /* Destination VFID valid */
1841 #define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29
1842 #define DMAE_CMD_RESERVED2_MASK 0x3
1843 #define DMAE_CMD_RESERVED2_SHIFT 30
1844 /* PCIe source address low in bytes or GRC source address in DW */
1846 /* PCIe source address high in bytes or reserved (if source is GRC) */
1848 /* PCIe destination address low in bytes or GRC destination address in DW */
1850 /* PCIe destination address high in bytes or reserved (if destination is GRC) */
1852 __le16 length_dw /* Length in DW */;
1854 #define DMAE_CMD_SRC_VF_ID_MASK 0xFF /* Source VF id */
1855 #define DMAE_CMD_SRC_VF_ID_SHIFT 0
1856 #define DMAE_CMD_DST_VF_ID_MASK 0xFF /* Destination VF id */
1857 #define DMAE_CMD_DST_VF_ID_SHIFT 8
1858 __le32 comp_addr_lo /* PCIe completion address low or grc address */;
1859 /* PCIe completion address high or reserved (if completion address is in GRC) */
1860 __le32 comp_addr_hi;
1861 __le32 comp_val /* Value to write to completion address */;
1862 __le32 crc32 /* crc32 result */;
1863 __le32 crc_32_c /* crc32_c result */;
1864 __le16 crc16 /* crc16 result */;
1865 __le16 crc16_c /* crc16_c result */;
1866 __le16 crc10 /* crc_t10 result */;
1868 __le16 xsum16 /* checksum16 result */;
1869 __le16 xsum8 /* checksum8 result */;
1873 enum dmae_cmd_comp_crc_en_enum {
1874 dmae_cmd_comp_crc_disabled /* Do not write a CRC word */,
1875 dmae_cmd_comp_crc_enabled /* Write a CRC word */,
1876 MAX_DMAE_CMD_COMP_CRC_EN_ENUM
1880 enum dmae_cmd_comp_func_enum {
1881 /* completion word and/or CRC will be sent to SRC-PCI function/SRC VFID */
1882 dmae_cmd_comp_func_to_src,
1883 /* completion word and/or CRC will be sent to DST-PCI function/DST VFID */
1884 dmae_cmd_comp_func_to_dst,
1885 MAX_DMAE_CMD_COMP_FUNC_ENUM
1889 enum dmae_cmd_comp_word_en_enum {
1890 dmae_cmd_comp_word_disabled /* Do not write a completion word */,
1891 dmae_cmd_comp_word_enabled /* Write the completion word */,
1892 MAX_DMAE_CMD_COMP_WORD_EN_ENUM
1896 enum dmae_cmd_c_dst_enum {
1897 dmae_cmd_c_dst_pcie,
1899 MAX_DMAE_CMD_C_DST_ENUM
1903 enum dmae_cmd_dst_enum {
1904 dmae_cmd_dst_none_0,
1907 dmae_cmd_dst_none_3,
1908 MAX_DMAE_CMD_DST_ENUM
1912 enum dmae_cmd_error_handling_enum {
1913 /* Send a regular completion (with no error indication) */
1914 dmae_cmd_error_handling_send_regular_comp,
1915 /* Send a completion with an error indication (i.e. set bit 31 of the completion word) */
1918 dmae_cmd_error_handling_send_comp_with_err,
1919 dmae_cmd_error_handling_dont_send_comp /* Do not send a completion */,
1920 MAX_DMAE_CMD_ERROR_HANDLING_ENUM
1924 enum dmae_cmd_src_enum {
1925 dmae_cmd_src_pcie /* The source is the PCIe */,
1926 dmae_cmd_src_grc /* The source is the GRC */,
1927 MAX_DMAE_CMD_SRC_ENUM
1932 * IGU cleanup command
1934 struct igu_cleanup {
1935 __le32 sb_id_and_flags;
1936 #define IGU_CLEANUP_RESERVED0_MASK 0x7FFFFFF
1937 #define IGU_CLEANUP_RESERVED0_SHIFT 0
1938 /* cleanup clear - 0, set - 1 */
1939 #define IGU_CLEANUP_CLEANUP_SET_MASK 0x1
1940 #define IGU_CLEANUP_CLEANUP_SET_SHIFT 27
1941 #define IGU_CLEANUP_CLEANUP_TYPE_MASK 0x7
1942 #define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28
1943 /* must always be set (use enum command_type_bit) */
1944 #define IGU_CLEANUP_COMMAND_TYPE_MASK 0x1
1945 #define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31
1951 * IGU firmware driver command
1954 struct igu_prod_cons_update prod_cons_update;
1955 struct igu_cleanup cleanup;
1960 * IGU firmware driver command
1962 struct igu_command_reg_ctrl {
1964 __le16 igu_command_reg_ctrl_fields;
1965 #define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK 0xFFF
1966 #define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0
1967 #define IGU_COMMAND_REG_CTRL_RESERVED_MASK 0x7
1968 #define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT 12
1969 /* command type: 0 - read, 1 - write */
1970 #define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK 0x1
1971 #define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15
1976 * IGU mapping line structure
1978 struct igu_mapping_line {
1979 __le32 igu_mapping_line_fields;
1980 #define IGU_MAPPING_LINE_VALID_MASK 0x1
1981 #define IGU_MAPPING_LINE_VALID_SHIFT 0
1982 #define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK 0xFF
1983 #define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT 1
1984 /* In BB: VF-0-120, PF-0-7; In K2: VF-0-191, PF-0-15 */
1985 #define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK 0xFF
1986 #define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9
1987 #define IGU_MAPPING_LINE_PF_VALID_MASK 0x1 /* PF-1, VF-0 */
1988 #define IGU_MAPPING_LINE_PF_VALID_SHIFT 17
1989 #define IGU_MAPPING_LINE_IPS_GROUP_MASK 0x3F
1990 #define IGU_MAPPING_LINE_IPS_GROUP_SHIFT 18
1991 #define IGU_MAPPING_LINE_RESERVED_MASK 0xFF
1992 #define IGU_MAPPING_LINE_RESERVED_SHIFT 24
1997 * IGU MSIX line structure
1999 struct igu_msix_vector {
2000 struct regpair address;
2002 __le32 msix_vector_fields;
2003 #define IGU_MSIX_VECTOR_MASK_BIT_MASK 0x1
2004 #define IGU_MSIX_VECTOR_MASK_BIT_SHIFT 0
2005 #define IGU_MSIX_VECTOR_RESERVED0_MASK 0x7FFF
2006 #define IGU_MSIX_VECTOR_RESERVED0_SHIFT 1
2007 #define IGU_MSIX_VECTOR_STEERING_TAG_MASK 0xFF
2008 #define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16
2009 #define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF
2010 #define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24
2014 struct mstorm_core_conn_ag_ctx {
2015 u8 byte0 /* cdu_validation */;
2016 u8 byte1 /* state */;
2018 #define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
2019 #define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
2020 #define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
2021 #define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
2022 #define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
2023 #define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
2024 #define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
2025 #define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
2026 #define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
2027 #define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
2029 #define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
2030 #define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
2031 #define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
2032 #define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
2033 #define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
2034 #define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
2035 #define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
2036 #define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
2037 #define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
2038 #define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
2039 #define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
2040 #define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
2041 #define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
2042 #define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
2043 #define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
2044 #define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
2045 __le16 word0 /* word0 */;
2046 __le16 word1 /* word1 */;
2047 __le32 reg0 /* reg0 */;
2048 __le32 reg1 /* reg1 */;
2053 * per encapsulation type enabling flags
2055 struct prs_reg_encapsulation_type_en {
2057 /* Enable bit for Ethernet-over-GRE (L2 GRE) encapsulation. */
2058 #define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK 0x1
2059 #define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT 0
2060 /* Enable bit for IP-over-GRE (IP GRE) encapsulation. */
2061 #define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK 0x1
2062 #define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT 1
2063 /* Enable bit for VXLAN encapsulation. */
2064 #define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK 0x1
2065 #define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT 2
2066 /* Enable bit for T-Tag encapsulation. */
2067 #define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK 0x1
2068 #define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT 3
2069 /* Enable bit for Ethernet-over-GENEVE (L2 GENEVE) encapsulation. */
2070 #define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK 0x1
2071 #define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4
2072 /* Enable bit for IP-over-GENEVE (IP GENEVE) encapsulation. */
2073 #define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK 0x1
2074 #define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT 5
2075 #define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK 0x3
2076 #define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT 6
/* PCIe TPH (TLP Processing Hints) steering-tag hint values. */
enum pxp_tph_st_hint {
	TPH_ST_HINT_BIDIR /* Read/Write access by Host and Device */,
	TPH_ST_HINT_REQUESTER /* Read/Write access by Device */,
	/* Device Write and Host Read, or Host Write and Device Read */
	/* NOTE(review): TPH_ST_HINT_TARGET restored — the enumerator this
	 * comment describes was lost in extraction; confirm against canonical
	 * ecore_hsi_common.h
	 */
	TPH_ST_HINT_TARGET,
	/* Device Write and Host Read, or Host Write and Device Read - with temporal
	 * reuse
	 */
	TPH_ST_HINT_TARGET_PRIO,
	/* NOTE(review): sentinel and closing brace restored per the
	 * MAX_<enum-name> convention used throughout this header
	 */
	MAX_PXP_TPH_ST_HINT
};
2094 * QM hardware structure of enable bypass credit mask
2096 struct qm_rf_bypass_mask {
2098 #define QM_RF_BYPASS_MASK_LINEVOQ_MASK 0x1
2099 #define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT 0
2100 #define QM_RF_BYPASS_MASK_RESERVED0_MASK 0x1
2101 #define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1
2102 #define QM_RF_BYPASS_MASK_PFWFQ_MASK 0x1
2103 #define QM_RF_BYPASS_MASK_PFWFQ_SHIFT 2
2104 #define QM_RF_BYPASS_MASK_VPWFQ_MASK 0x1
2105 #define QM_RF_BYPASS_MASK_VPWFQ_SHIFT 3
2106 #define QM_RF_BYPASS_MASK_PFRL_MASK 0x1
2107 #define QM_RF_BYPASS_MASK_PFRL_SHIFT 4
2108 #define QM_RF_BYPASS_MASK_VPQCNRL_MASK 0x1
2109 #define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT 5
2110 #define QM_RF_BYPASS_MASK_FWPAUSE_MASK 0x1
2111 #define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT 6
2112 #define QM_RF_BYPASS_MASK_RESERVED1_MASK 0x1
2113 #define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7
2118 * QM hardware structure of opportunistic credit mask
2120 struct qm_rf_opportunistic_mask {
2122 #define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK 0x1
2123 #define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT 0
2124 #define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK 0x1
2125 #define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT 1
2126 #define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK 0x1
2127 #define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT 2
2128 #define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK 0x1
2129 #define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT 3
2130 #define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK 0x1
2131 #define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT 4
2132 #define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK 0x1
2133 #define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT 5
2134 #define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK 0x1
2135 #define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT 6
2136 #define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK 0x1
2137 #define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT 7
2138 #define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK 0x1
2139 #define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8
2140 #define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK 0x7F
2141 #define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT 9
2146 * QM hardware structure of QM map memory
2148 struct qm_rf_pq_map {
2150 #define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1 /* PQ active */
2151 #define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0
2152 #define QM_RF_PQ_MAP_RL_ID_MASK 0xFF /* RL ID */
2153 #define QM_RF_PQ_MAP_RL_ID_SHIFT 1
2154 /* the first PQ associated with the VPORT and VOQ of this PQ */
2155 #define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF
2156 #define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9
2157 #define QM_RF_PQ_MAP_VOQ_MASK 0x1F /* VOQ */
2158 #define QM_RF_PQ_MAP_VOQ_SHIFT 18
2159 #define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3 /* WRR weight */
2160 #define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
2161 #define QM_RF_PQ_MAP_RL_VALID_MASK 0x1 /* RL active */
2162 #define QM_RF_PQ_MAP_RL_VALID_SHIFT 25
2163 #define QM_RF_PQ_MAP_RESERVED_MASK 0x3F
2164 #define QM_RF_PQ_MAP_RESERVED_SHIFT 26
2169 * Completion params for aggregated interrupt completion
2171 struct sdm_agg_int_comp_params {
2173 /* the number of aggregated interrupt, 0-31 */
2174 #define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK 0x3F
2175 #define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT 0
2176 /* 1 - set a bit in aggregated vector, 0 - dont set */
2177 #define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK 0x1
2178 #define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6
2179 /* Number of bit in the aggregated vector, 0-279 (TBD) */
2180 #define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK 0x1FF
2181 #define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT 7
2186 * SDM operation gen command (generate aggregative interrupt)
2190 /* completion parameters 0-15 */
2191 #define SDM_OP_GEN_COMP_PARAM_MASK 0xFFFF
2192 #define SDM_OP_GEN_COMP_PARAM_SHIFT 0
2193 #define SDM_OP_GEN_COMP_TYPE_MASK 0xF /* completion type 16-19 */
2194 #define SDM_OP_GEN_COMP_TYPE_SHIFT 16
2195 #define SDM_OP_GEN_RESERVED_MASK 0xFFF /* reserved 20-31 */
2196 #define SDM_OP_GEN_RESERVED_SHIFT 20
2203 struct ystorm_core_conn_ag_ctx {
2204 u8 byte0 /* cdu_validation */;
2205 u8 byte1 /* state */;
2207 #define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
2208 #define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
2209 #define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
2210 #define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
2211 #define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
2212 #define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
2213 #define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
2214 #define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
2215 #define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
2216 #define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
2218 #define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
2219 #define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
2220 #define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
2221 #define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
2222 #define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
2223 #define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
2224 #define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
2225 #define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
2226 #define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
2227 #define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
2228 #define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
2229 #define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
2230 #define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
2231 #define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
2232 #define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
2233 #define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
2234 u8 byte2 /* byte2 */;
2235 u8 byte3 /* byte3 */;
2236 __le16 word0 /* word0 */;
2237 __le32 reg0 /* reg0 */;
2238 __le32 reg1 /* reg1 */;
2239 __le16 word1 /* word1 */;
2240 __le16 word2 /* word2 */;
2241 __le16 word3 /* word3 */;
2242 __le16 word4 /* word4 */;
2243 __le32 reg2 /* reg2 */;
2244 __le32 reg3 /* reg3 */;
2247 #endif /* __ECORE_HSI_COMMON__ */