1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2021 Intel Corporation
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
/* Byte offsets of fields within the dummy Ethernet header below */
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
/* Largest valid 12-bit VLAN identifier */
12 #define ICE_MAX_VLAN_ID 0xFFF
/* IP protocol numbers / EtherType values used when building rule headers */
13 #define ICE_IPV4_NVGRE_PROTO_ID 0x002F
14 #define ICE_PPP_IPV6_PROTO_ID 0x0057
15 #define ICE_IPV6_ETHER_ID 0x86DD
16 #define ICE_TCP_PROTO_ID 0x06
17 #define ICE_ETH_P_8021Q 0x8100
19 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
20 * struct to configure any switch filter rules.
21 * {DA (6 bytes), SA(6 bytes),
22 * Ether type (2 bytes for header without VLAN tag) OR
23 * VLAN tag (4 bytes for header with VLAN tag) }
25 * Word on Hardcoded values
26 * byte 0 = 0x2: to identify it as locally administered DA MAC
27 * byte 6 = 0x2: to identify it as locally administered SA MAC
28 * byte 12 = 0x81 & byte 13 = 0x00:
29 * In case of VLAN filter first two bytes defines ether type (0x8100)
30 * and remaining two bytes are placeholder for programming a given VLAN ID
31 * In case of Ether type filter it is treated as header without VLAN tag
32 * and byte 12 and 13 is used to program a given Ether type instead
34 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
38 struct ice_dummy_pkt_offsets {
39 enum ice_protocol_type type;
40 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
43 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
46 { ICE_IPV4_OFOS, 14 },
51 { ICE_PROTOCOL_LAST, 0 },
54 static const u8 dummy_gre_tcp_packet[] = {
55 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
56 0x00, 0x00, 0x00, 0x00,
57 0x00, 0x00, 0x00, 0x00,
59 0x08, 0x00, /* ICE_ETYPE_OL 12 */
61 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
62 0x00, 0x00, 0x00, 0x00,
63 0x00, 0x2F, 0x00, 0x00,
64 0x00, 0x00, 0x00, 0x00,
65 0x00, 0x00, 0x00, 0x00,
67 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
68 0x00, 0x00, 0x00, 0x00,
70 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
71 0x00, 0x00, 0x00, 0x00,
72 0x00, 0x00, 0x00, 0x00,
75 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
76 0x00, 0x00, 0x00, 0x00,
77 0x00, 0x06, 0x00, 0x00,
78 0x00, 0x00, 0x00, 0x00,
79 0x00, 0x00, 0x00, 0x00,
81 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
82 0x00, 0x00, 0x00, 0x00,
83 0x00, 0x00, 0x00, 0x00,
84 0x50, 0x02, 0x20, 0x00,
85 0x00, 0x00, 0x00, 0x00
88 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
91 { ICE_IPV4_OFOS, 14 },
96 { ICE_PROTOCOL_LAST, 0 },
99 static const u8 dummy_gre_udp_packet[] = {
100 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
101 0x00, 0x00, 0x00, 0x00,
102 0x00, 0x00, 0x00, 0x00,
104 0x08, 0x00, /* ICE_ETYPE_OL 12 */
106 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
107 0x00, 0x00, 0x00, 0x00,
108 0x00, 0x2F, 0x00, 0x00,
109 0x00, 0x00, 0x00, 0x00,
110 0x00, 0x00, 0x00, 0x00,
112 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
113 0x00, 0x00, 0x00, 0x00,
115 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
116 0x00, 0x00, 0x00, 0x00,
117 0x00, 0x00, 0x00, 0x00,
120 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
121 0x00, 0x00, 0x00, 0x00,
122 0x00, 0x11, 0x00, 0x00,
123 0x00, 0x00, 0x00, 0x00,
124 0x00, 0x00, 0x00, 0x00,
126 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
127 0x00, 0x08, 0x00, 0x00,
130 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
132 { ICE_ETYPE_OL, 12 },
133 { ICE_IPV4_OFOS, 14 },
137 { ICE_VXLAN_GPE, 42 },
141 { ICE_PROTOCOL_LAST, 0 },
144 static const u8 dummy_udp_tun_tcp_packet[] = {
145 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
146 0x00, 0x00, 0x00, 0x00,
147 0x00, 0x00, 0x00, 0x00,
149 0x08, 0x00, /* ICE_ETYPE_OL 12 */
151 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
152 0x00, 0x01, 0x00, 0x00,
153 0x40, 0x11, 0x00, 0x00,
154 0x00, 0x00, 0x00, 0x00,
155 0x00, 0x00, 0x00, 0x00,
157 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
158 0x00, 0x46, 0x00, 0x00,
160 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
161 0x00, 0x00, 0x00, 0x00,
163 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
164 0x00, 0x00, 0x00, 0x00,
165 0x00, 0x00, 0x00, 0x00,
168 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
169 0x00, 0x01, 0x00, 0x00,
170 0x40, 0x06, 0x00, 0x00,
171 0x00, 0x00, 0x00, 0x00,
172 0x00, 0x00, 0x00, 0x00,
174 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
175 0x00, 0x00, 0x00, 0x00,
176 0x00, 0x00, 0x00, 0x00,
177 0x50, 0x02, 0x20, 0x00,
178 0x00, 0x00, 0x00, 0x00
181 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
183 { ICE_ETYPE_OL, 12 },
184 { ICE_IPV4_OFOS, 14 },
188 { ICE_VXLAN_GPE, 42 },
191 { ICE_UDP_ILOS, 84 },
192 { ICE_PROTOCOL_LAST, 0 },
195 static const u8 dummy_udp_tun_udp_packet[] = {
196 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
197 0x00, 0x00, 0x00, 0x00,
198 0x00, 0x00, 0x00, 0x00,
200 0x08, 0x00, /* ICE_ETYPE_OL 12 */
202 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
203 0x00, 0x01, 0x00, 0x00,
204 0x00, 0x11, 0x00, 0x00,
205 0x00, 0x00, 0x00, 0x00,
206 0x00, 0x00, 0x00, 0x00,
208 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
209 0x00, 0x3a, 0x00, 0x00,
211 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
212 0x00, 0x00, 0x00, 0x00,
214 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
215 0x00, 0x00, 0x00, 0x00,
216 0x00, 0x00, 0x00, 0x00,
219 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
220 0x00, 0x01, 0x00, 0x00,
221 0x00, 0x11, 0x00, 0x00,
222 0x00, 0x00, 0x00, 0x00,
223 0x00, 0x00, 0x00, 0x00,
225 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
226 0x00, 0x08, 0x00, 0x00,
229 /* offset info for MAC + IPv4 + UDP dummy packet */
230 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
232 { ICE_ETYPE_OL, 12 },
233 { ICE_IPV4_OFOS, 14 },
234 { ICE_UDP_ILOS, 34 },
235 { ICE_PROTOCOL_LAST, 0 },
238 /* Dummy packet for MAC + IPv4 + UDP */
239 static const u8 dummy_udp_packet[] = {
240 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
241 0x00, 0x00, 0x00, 0x00,
242 0x00, 0x00, 0x00, 0x00,
244 0x08, 0x00, /* ICE_ETYPE_OL 12 */
246 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
247 0x00, 0x01, 0x00, 0x00,
248 0x00, 0x11, 0x00, 0x00,
249 0x00, 0x00, 0x00, 0x00,
250 0x00, 0x00, 0x00, 0x00,
252 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
253 0x00, 0x08, 0x00, 0x00,
255 0x00, 0x00, /* 2 bytes for 4 byte alignment */
258 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
259 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
261 { ICE_ETYPE_OL, 12 },
262 { ICE_VLAN_OFOS, 14 },
263 { ICE_IPV4_OFOS, 18 },
264 { ICE_UDP_ILOS, 38 },
265 { ICE_PROTOCOL_LAST, 0 },
268 /* C-tag (802.1Q), IPv4:UDP dummy packet */
269 static const u8 dummy_vlan_udp_packet[] = {
270 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
271 0x00, 0x00, 0x00, 0x00,
272 0x00, 0x00, 0x00, 0x00,
274 0x81, 0x00, /* ICE_ETYPE_OL 12 */
276 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
278 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
279 0x00, 0x01, 0x00, 0x00,
280 0x00, 0x11, 0x00, 0x00,
281 0x00, 0x00, 0x00, 0x00,
282 0x00, 0x00, 0x00, 0x00,
284 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
285 0x00, 0x08, 0x00, 0x00,
287 0x00, 0x00, /* 2 bytes for 4 byte alignment */
290 /* offset info for MAC + IPv4 + TCP dummy packet */
291 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
293 { ICE_ETYPE_OL, 12 },
294 { ICE_IPV4_OFOS, 14 },
296 { ICE_PROTOCOL_LAST, 0 },
299 /* Dummy packet for MAC + IPv4 + TCP */
300 static const u8 dummy_tcp_packet[] = {
301 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
302 0x00, 0x00, 0x00, 0x00,
303 0x00, 0x00, 0x00, 0x00,
305 0x08, 0x00, /* ICE_ETYPE_OL 12 */
307 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
308 0x00, 0x01, 0x00, 0x00,
309 0x00, 0x06, 0x00, 0x00,
310 0x00, 0x00, 0x00, 0x00,
311 0x00, 0x00, 0x00, 0x00,
313 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
314 0x00, 0x00, 0x00, 0x00,
315 0x00, 0x00, 0x00, 0x00,
316 0x50, 0x00, 0x00, 0x00,
317 0x00, 0x00, 0x00, 0x00,
319 0x00, 0x00, /* 2 bytes for 4 byte alignment */
322 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
323 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
325 { ICE_ETYPE_OL, 12 },
326 { ICE_VLAN_OFOS, 14 },
327 { ICE_IPV4_OFOS, 18 },
329 { ICE_PROTOCOL_LAST, 0 },
332 /* C-tag (802.1Q), IPv4:TCP dummy packet */
333 static const u8 dummy_vlan_tcp_packet[] = {
334 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
335 0x00, 0x00, 0x00, 0x00,
336 0x00, 0x00, 0x00, 0x00,
338 0x81, 0x00, /* ICE_ETYPE_OL 12 */
340 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
342 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
343 0x00, 0x01, 0x00, 0x00,
344 0x00, 0x06, 0x00, 0x00,
345 0x00, 0x00, 0x00, 0x00,
346 0x00, 0x00, 0x00, 0x00,
348 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
349 0x00, 0x00, 0x00, 0x00,
350 0x00, 0x00, 0x00, 0x00,
351 0x50, 0x00, 0x00, 0x00,
352 0x00, 0x00, 0x00, 0x00,
354 0x00, 0x00, /* 2 bytes for 4 byte alignment */
357 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
359 { ICE_ETYPE_OL, 12 },
360 { ICE_IPV6_OFOS, 14 },
362 { ICE_PROTOCOL_LAST, 0 },
365 static const u8 dummy_tcp_ipv6_packet[] = {
366 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
367 0x00, 0x00, 0x00, 0x00,
368 0x00, 0x00, 0x00, 0x00,
370 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
372 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
373 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00,
379 0x00, 0x00, 0x00, 0x00,
380 0x00, 0x00, 0x00, 0x00,
381 0x00, 0x00, 0x00, 0x00,
383 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
384 0x00, 0x00, 0x00, 0x00,
385 0x00, 0x00, 0x00, 0x00,
386 0x50, 0x00, 0x00, 0x00,
387 0x00, 0x00, 0x00, 0x00,
389 0x00, 0x00, /* 2 bytes for 4 byte alignment */
392 /* C-tag (802.1Q): IPv6 + TCP */
393 static const struct ice_dummy_pkt_offsets
394 dummy_vlan_tcp_ipv6_packet_offsets[] = {
396 { ICE_ETYPE_OL, 12 },
397 { ICE_VLAN_OFOS, 14 },
398 { ICE_IPV6_OFOS, 18 },
400 { ICE_PROTOCOL_LAST, 0 },
403 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
404 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
405 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
406 0x00, 0x00, 0x00, 0x00,
407 0x00, 0x00, 0x00, 0x00,
409 0x81, 0x00, /* ICE_ETYPE_OL 12 */
411 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
413 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
414 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
415 0x00, 0x00, 0x00, 0x00,
416 0x00, 0x00, 0x00, 0x00,
417 0x00, 0x00, 0x00, 0x00,
418 0x00, 0x00, 0x00, 0x00,
419 0x00, 0x00, 0x00, 0x00,
420 0x00, 0x00, 0x00, 0x00,
421 0x00, 0x00, 0x00, 0x00,
422 0x00, 0x00, 0x00, 0x00,
424 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
425 0x00, 0x00, 0x00, 0x00,
426 0x00, 0x00, 0x00, 0x00,
427 0x50, 0x00, 0x00, 0x00,
428 0x00, 0x00, 0x00, 0x00,
430 0x00, 0x00, /* 2 bytes for 4 byte alignment */
434 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
436 { ICE_ETYPE_OL, 12 },
437 { ICE_IPV6_OFOS, 14 },
438 { ICE_UDP_ILOS, 54 },
439 { ICE_PROTOCOL_LAST, 0 },
442 /* IPv6 + UDP dummy packet */
443 static const u8 dummy_udp_ipv6_packet[] = {
444 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
445 0x00, 0x00, 0x00, 0x00,
446 0x00, 0x00, 0x00, 0x00,
448 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
450 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
451 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
452 0x00, 0x00, 0x00, 0x00,
453 0x00, 0x00, 0x00, 0x00,
454 0x00, 0x00, 0x00, 0x00,
455 0x00, 0x00, 0x00, 0x00,
456 0x00, 0x00, 0x00, 0x00,
457 0x00, 0x00, 0x00, 0x00,
458 0x00, 0x00, 0x00, 0x00,
459 0x00, 0x00, 0x00, 0x00,
461 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
462 0x00, 0x10, 0x00, 0x00,
464 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
465 0x00, 0x00, 0x00, 0x00,
467 0x00, 0x00, /* 2 bytes for 4 byte alignment */
470 /* C-tag (802.1Q): IPv6 + UDP */
471 static const struct ice_dummy_pkt_offsets
472 dummy_vlan_udp_ipv6_packet_offsets[] = {
474 { ICE_ETYPE_OL, 12 },
475 { ICE_VLAN_OFOS, 14 },
476 { ICE_IPV6_OFOS, 18 },
477 { ICE_UDP_ILOS, 58 },
478 { ICE_PROTOCOL_LAST, 0 },
481 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
482 static const u8 dummy_vlan_udp_ipv6_packet[] = {
483 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
484 0x00, 0x00, 0x00, 0x00,
485 0x00, 0x00, 0x00, 0x00,
487 0x81, 0x00, /* ICE_ETYPE_OL 12 */
489 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
491 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
492 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
493 0x00, 0x00, 0x00, 0x00,
494 0x00, 0x00, 0x00, 0x00,
495 0x00, 0x00, 0x00, 0x00,
496 0x00, 0x00, 0x00, 0x00,
497 0x00, 0x00, 0x00, 0x00,
498 0x00, 0x00, 0x00, 0x00,
499 0x00, 0x00, 0x00, 0x00,
500 0x00, 0x00, 0x00, 0x00,
502 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
503 0x00, 0x08, 0x00, 0x00,
505 0x00, 0x00, /* 2 bytes for 4 byte alignment */
508 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
509 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_tcp_packet_offsets[] = {
511 { ICE_IPV4_OFOS, 14 },
516 { ICE_PROTOCOL_LAST, 0 },
519 static const u8 dummy_ipv4_gtpu_ipv4_tcp_packet[] = {
520 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
521 0x00, 0x00, 0x00, 0x00,
522 0x00, 0x00, 0x00, 0x00,
525 0x45, 0x00, 0x00, 0x58, /* IP 14 */
526 0x00, 0x00, 0x00, 0x00,
527 0x00, 0x11, 0x00, 0x00,
528 0x00, 0x00, 0x00, 0x00,
529 0x00, 0x00, 0x00, 0x00,
531 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
532 0x00, 0x44, 0x00, 0x00,
534 0x34, 0xff, 0x00, 0x34, /* GTP-U Header 42 */
535 0x00, 0x00, 0x00, 0x00,
536 0x00, 0x00, 0x00, 0x85,
538 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
539 0x00, 0x00, 0x00, 0x00,
541 0x45, 0x00, 0x00, 0x28, /* IP 62 */
542 0x00, 0x00, 0x00, 0x00,
543 0x00, 0x06, 0x00, 0x00,
544 0x00, 0x00, 0x00, 0x00,
545 0x00, 0x00, 0x00, 0x00,
547 0x00, 0x00, 0x00, 0x00, /* TCP 82 */
548 0x00, 0x00, 0x00, 0x00,
549 0x00, 0x00, 0x00, 0x00,
550 0x50, 0x00, 0x00, 0x00,
551 0x00, 0x00, 0x00, 0x00,
553 0x00, 0x00, /* 2 bytes for 4 byte alignment */
556 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
557 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_udp_packet_offsets[] = {
559 { ICE_IPV4_OFOS, 14 },
563 { ICE_UDP_ILOS, 82 },
564 { ICE_PROTOCOL_LAST, 0 },
567 static const u8 dummy_ipv4_gtpu_ipv4_udp_packet[] = {
568 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
569 0x00, 0x00, 0x00, 0x00,
570 0x00, 0x00, 0x00, 0x00,
573 0x45, 0x00, 0x00, 0x4c, /* IP 14 */
574 0x00, 0x00, 0x00, 0x00,
575 0x00, 0x11, 0x00, 0x00,
576 0x00, 0x00, 0x00, 0x00,
577 0x00, 0x00, 0x00, 0x00,
579 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
580 0x00, 0x38, 0x00, 0x00,
582 0x34, 0xff, 0x00, 0x28, /* GTP-U Header 42 */
583 0x00, 0x00, 0x00, 0x00,
584 0x00, 0x00, 0x00, 0x85,
586 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
587 0x00, 0x00, 0x00, 0x00,
589 0x45, 0x00, 0x00, 0x1c, /* IP 62 */
590 0x00, 0x00, 0x00, 0x00,
591 0x00, 0x11, 0x00, 0x00,
592 0x00, 0x00, 0x00, 0x00,
593 0x00, 0x00, 0x00, 0x00,
595 0x00, 0x00, 0x00, 0x00, /* UDP 82 */
596 0x00, 0x08, 0x00, 0x00,
598 0x00, 0x00, /* 2 bytes for 4 byte alignment */
601 /* Outer IPv4 + Outer UDP + GTP + Inner IPv6 + Inner TCP */
602 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_tcp_packet_offsets[] = {
604 { ICE_IPV4_OFOS, 14 },
609 { ICE_PROTOCOL_LAST, 0 },
612 static const u8 dummy_ipv4_gtpu_ipv6_tcp_packet[] = {
613 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
614 0x00, 0x00, 0x00, 0x00,
615 0x00, 0x00, 0x00, 0x00,
618 0x45, 0x00, 0x00, 0x6c, /* IP 14 */
619 0x00, 0x00, 0x00, 0x00,
620 0x00, 0x11, 0x00, 0x00,
621 0x00, 0x00, 0x00, 0x00,
622 0x00, 0x00, 0x00, 0x00,
624 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
625 0x00, 0x58, 0x00, 0x00,
627 0x34, 0xff, 0x00, 0x48, /* GTP-U Header 42 */
628 0x00, 0x00, 0x00, 0x00,
629 0x00, 0x00, 0x00, 0x85,
631 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
632 0x00, 0x00, 0x00, 0x00,
634 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
635 0x00, 0x14, 0x06, 0x00,
636 0x00, 0x00, 0x00, 0x00,
637 0x00, 0x00, 0x00, 0x00,
638 0x00, 0x00, 0x00, 0x00,
639 0x00, 0x00, 0x00, 0x00,
640 0x00, 0x00, 0x00, 0x00,
641 0x00, 0x00, 0x00, 0x00,
642 0x00, 0x00, 0x00, 0x00,
643 0x00, 0x00, 0x00, 0x00,
645 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
646 0x00, 0x00, 0x00, 0x00,
647 0x00, 0x00, 0x00, 0x00,
648 0x50, 0x00, 0x00, 0x00,
649 0x00, 0x00, 0x00, 0x00,
651 0x00, 0x00, /* 2 bytes for 4 byte alignment */
654 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_udp_packet_offsets[] = {
656 { ICE_IPV4_OFOS, 14 },
660 { ICE_UDP_ILOS, 102 },
661 { ICE_PROTOCOL_LAST, 0 },
664 static const u8 dummy_ipv4_gtpu_ipv6_udp_packet[] = {
665 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
666 0x00, 0x00, 0x00, 0x00,
667 0x00, 0x00, 0x00, 0x00,
670 0x45, 0x00, 0x00, 0x60, /* IP 14 */
671 0x00, 0x00, 0x00, 0x00,
672 0x00, 0x11, 0x00, 0x00,
673 0x00, 0x00, 0x00, 0x00,
674 0x00, 0x00, 0x00, 0x00,
676 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
677 0x00, 0x4c, 0x00, 0x00,
679 0x34, 0xff, 0x00, 0x3c, /* GTP-U Header 42 */
680 0x00, 0x00, 0x00, 0x00,
681 0x00, 0x00, 0x00, 0x85,
683 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
684 0x00, 0x00, 0x00, 0x00,
686 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
687 0x00, 0x08, 0x11, 0x00,
688 0x00, 0x00, 0x00, 0x00,
689 0x00, 0x00, 0x00, 0x00,
690 0x00, 0x00, 0x00, 0x00,
691 0x00, 0x00, 0x00, 0x00,
692 0x00, 0x00, 0x00, 0x00,
693 0x00, 0x00, 0x00, 0x00,
694 0x00, 0x00, 0x00, 0x00,
695 0x00, 0x00, 0x00, 0x00,
697 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
698 0x00, 0x08, 0x00, 0x00,
700 0x00, 0x00, /* 2 bytes for 4 byte alignment */
703 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_tcp_packet_offsets[] = {
705 { ICE_IPV6_OFOS, 14 },
710 { ICE_PROTOCOL_LAST, 0 },
713 static const u8 dummy_ipv6_gtpu_ipv4_tcp_packet[] = {
714 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
715 0x00, 0x00, 0x00, 0x00,
716 0x00, 0x00, 0x00, 0x00,
719 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
720 0x00, 0x44, 0x11, 0x00,
721 0x00, 0x00, 0x00, 0x00,
722 0x00, 0x00, 0x00, 0x00,
723 0x00, 0x00, 0x00, 0x00,
724 0x00, 0x00, 0x00, 0x00,
725 0x00, 0x00, 0x00, 0x00,
726 0x00, 0x00, 0x00, 0x00,
727 0x00, 0x00, 0x00, 0x00,
728 0x00, 0x00, 0x00, 0x00,
730 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
731 0x00, 0x44, 0x00, 0x00,
733 0x34, 0xff, 0x00, 0x34, /* GTP-U Header 62 */
734 0x00, 0x00, 0x00, 0x00,
735 0x00, 0x00, 0x00, 0x85,
737 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
738 0x00, 0x00, 0x00, 0x00,
740 0x45, 0x00, 0x00, 0x28, /* IP 82 */
741 0x00, 0x00, 0x00, 0x00,
742 0x00, 0x06, 0x00, 0x00,
743 0x00, 0x00, 0x00, 0x00,
744 0x00, 0x00, 0x00, 0x00,
746 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
747 0x00, 0x00, 0x00, 0x00,
748 0x00, 0x00, 0x00, 0x00,
749 0x50, 0x00, 0x00, 0x00,
750 0x00, 0x00, 0x00, 0x00,
752 0x00, 0x00, /* 2 bytes for 4 byte alignment */
755 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_udp_packet_offsets[] = {
757 { ICE_IPV6_OFOS, 14 },
761 { ICE_UDP_ILOS, 102 },
762 { ICE_PROTOCOL_LAST, 0 },
765 static const u8 dummy_ipv6_gtpu_ipv4_udp_packet[] = {
766 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
767 0x00, 0x00, 0x00, 0x00,
768 0x00, 0x00, 0x00, 0x00,
771 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
772 0x00, 0x38, 0x11, 0x00,
773 0x00, 0x00, 0x00, 0x00,
774 0x00, 0x00, 0x00, 0x00,
775 0x00, 0x00, 0x00, 0x00,
776 0x00, 0x00, 0x00, 0x00,
777 0x00, 0x00, 0x00, 0x00,
778 0x00, 0x00, 0x00, 0x00,
779 0x00, 0x00, 0x00, 0x00,
780 0x00, 0x00, 0x00, 0x00,
782 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
783 0x00, 0x38, 0x00, 0x00,
785 0x34, 0xff, 0x00, 0x28, /* GTP-U Header 62 */
786 0x00, 0x00, 0x00, 0x00,
787 0x00, 0x00, 0x00, 0x85,
789 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
790 0x00, 0x00, 0x00, 0x00,
792 0x45, 0x00, 0x00, 0x1c, /* IP 82 */
793 0x00, 0x00, 0x00, 0x00,
794 0x00, 0x11, 0x00, 0x00,
795 0x00, 0x00, 0x00, 0x00,
796 0x00, 0x00, 0x00, 0x00,
798 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
799 0x00, 0x08, 0x00, 0x00,
801 0x00, 0x00, /* 2 bytes for 4 byte alignment */
804 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_tcp_packet_offsets[] = {
806 { ICE_IPV6_OFOS, 14 },
811 { ICE_PROTOCOL_LAST, 0 },
814 static const u8 dummy_ipv6_gtpu_ipv6_tcp_packet[] = {
815 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
816 0x00, 0x00, 0x00, 0x00,
817 0x00, 0x00, 0x00, 0x00,
820 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
821 0x00, 0x58, 0x11, 0x00,
822 0x00, 0x00, 0x00, 0x00,
823 0x00, 0x00, 0x00, 0x00,
824 0x00, 0x00, 0x00, 0x00,
825 0x00, 0x00, 0x00, 0x00,
826 0x00, 0x00, 0x00, 0x00,
827 0x00, 0x00, 0x00, 0x00,
828 0x00, 0x00, 0x00, 0x00,
829 0x00, 0x00, 0x00, 0x00,
831 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
832 0x00, 0x58, 0x00, 0x00,
834 0x34, 0xff, 0x00, 0x48, /* GTP-U Header 62 */
835 0x00, 0x00, 0x00, 0x00,
836 0x00, 0x00, 0x00, 0x85,
838 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
839 0x00, 0x00, 0x00, 0x00,
841 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
842 0x00, 0x14, 0x06, 0x00,
843 0x00, 0x00, 0x00, 0x00,
844 0x00, 0x00, 0x00, 0x00,
845 0x00, 0x00, 0x00, 0x00,
846 0x00, 0x00, 0x00, 0x00,
847 0x00, 0x00, 0x00, 0x00,
848 0x00, 0x00, 0x00, 0x00,
849 0x00, 0x00, 0x00, 0x00,
850 0x00, 0x00, 0x00, 0x00,
852 0x00, 0x00, 0x00, 0x00, /* TCP 122 */
853 0x00, 0x00, 0x00, 0x00,
854 0x00, 0x00, 0x00, 0x00,
855 0x50, 0x00, 0x00, 0x00,
856 0x00, 0x00, 0x00, 0x00,
858 0x00, 0x00, /* 2 bytes for 4 byte alignment */
861 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_udp_packet_offsets[] = {
863 { ICE_IPV6_OFOS, 14 },
867 { ICE_UDP_ILOS, 102 },
868 { ICE_PROTOCOL_LAST, 0 },
871 static const u8 dummy_ipv6_gtpu_ipv6_udp_packet[] = {
872 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
873 0x00, 0x00, 0x00, 0x00,
874 0x00, 0x00, 0x00, 0x00,
877 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
878 0x00, 0x4c, 0x11, 0x00,
879 0x00, 0x00, 0x00, 0x00,
880 0x00, 0x00, 0x00, 0x00,
881 0x00, 0x00, 0x00, 0x00,
882 0x00, 0x00, 0x00, 0x00,
883 0x00, 0x00, 0x00, 0x00,
884 0x00, 0x00, 0x00, 0x00,
885 0x00, 0x00, 0x00, 0x00,
886 0x00, 0x00, 0x00, 0x00,
888 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
889 0x00, 0x4c, 0x00, 0x00,
891 0x34, 0xff, 0x00, 0x3c, /* GTP-U Header 62 */
892 0x00, 0x00, 0x00, 0x00,
893 0x00, 0x00, 0x00, 0x85,
895 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
896 0x00, 0x00, 0x00, 0x00,
898 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
899 0x00, 0x08, 0x11, 0x00,
900 0x00, 0x00, 0x00, 0x00,
901 0x00, 0x00, 0x00, 0x00,
902 0x00, 0x00, 0x00, 0x00,
903 0x00, 0x00, 0x00, 0x00,
904 0x00, 0x00, 0x00, 0x00,
905 0x00, 0x00, 0x00, 0x00,
906 0x00, 0x00, 0x00, 0x00,
907 0x00, 0x00, 0x00, 0x00,
909 0x00, 0x00, 0x00, 0x00, /* UDP 122 */
910 0x00, 0x08, 0x00, 0x00,
912 0x00, 0x00, /* 2 bytes for 4 byte alignment */
915 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_packet_offsets[] = {
917 { ICE_IPV4_OFOS, 14 },
921 { ICE_PROTOCOL_LAST, 0 },
924 static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
925 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
926 0x00, 0x00, 0x00, 0x00,
927 0x00, 0x00, 0x00, 0x00,
930 0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
931 0x00, 0x00, 0x40, 0x00,
932 0x40, 0x11, 0x00, 0x00,
933 0x00, 0x00, 0x00, 0x00,
934 0x00, 0x00, 0x00, 0x00,
936 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
937 0x00, 0x00, 0x00, 0x00,
939 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
940 0x00, 0x00, 0x00, 0x00,
941 0x00, 0x00, 0x00, 0x85,
943 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
944 0x00, 0x00, 0x00, 0x00,
946 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
947 0x00, 0x00, 0x40, 0x00,
948 0x40, 0x00, 0x00, 0x00,
949 0x00, 0x00, 0x00, 0x00,
950 0x00, 0x00, 0x00, 0x00,
955 struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_packet_offsets[] = {
957 { ICE_IPV4_OFOS, 14 },
961 { ICE_PROTOCOL_LAST, 0 },
964 static const u8 dummy_ipv4_gtpu_ipv6_packet[] = {
965 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
966 0x00, 0x00, 0x00, 0x00,
967 0x00, 0x00, 0x00, 0x00,
970 0x45, 0x00, 0x00, 0x58, /* ICE_IPV4_OFOS 14 */
971 0x00, 0x00, 0x40, 0x00,
972 0x40, 0x11, 0x00, 0x00,
973 0x00, 0x00, 0x00, 0x00,
974 0x00, 0x00, 0x00, 0x00,
976 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
977 0x00, 0x00, 0x00, 0x00,
979 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
980 0x00, 0x00, 0x00, 0x00,
981 0x00, 0x00, 0x00, 0x85,
983 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
984 0x00, 0x00, 0x00, 0x00,
986 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 62 */
987 0x00, 0x00, 0x3b, 0x00,
988 0x00, 0x00, 0x00, 0x00,
989 0x00, 0x00, 0x00, 0x00,
990 0x00, 0x00, 0x00, 0x00,
991 0x00, 0x00, 0x00, 0x00,
992 0x00, 0x00, 0x00, 0x00,
993 0x00, 0x00, 0x00, 0x00,
994 0x00, 0x00, 0x00, 0x00,
995 0x00, 0x00, 0x00, 0x00,
1001 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_packet_offsets[] = {
1002 { ICE_MAC_OFOS, 0 },
1003 { ICE_IPV6_OFOS, 14 },
1006 { ICE_IPV4_IL, 82 },
1007 { ICE_PROTOCOL_LAST, 0 },
1010 static const u8 dummy_ipv6_gtpu_ipv4_packet[] = {
1011 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1012 0x00, 0x00, 0x00, 0x00,
1013 0x00, 0x00, 0x00, 0x00,
1016 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1017 0x00, 0x58, 0x11, 0x00, /* Next header UDP*/
1018 0x00, 0x00, 0x00, 0x00,
1019 0x00, 0x00, 0x00, 0x00,
1020 0x00, 0x00, 0x00, 0x00,
1021 0x00, 0x00, 0x00, 0x00,
1022 0x00, 0x00, 0x00, 0x00,
1023 0x00, 0x00, 0x00, 0x00,
1024 0x00, 0x00, 0x00, 0x00,
1025 0x00, 0x00, 0x00, 0x00,
1027 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1028 0x00, 0x00, 0x00, 0x00,
1030 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 62 */
1031 0x00, 0x00, 0x00, 0x00,
1032 0x00, 0x00, 0x00, 0x85,
1034 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1035 0x00, 0x00, 0x00, 0x00,
1037 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 82 */
1038 0x00, 0x00, 0x40, 0x00,
1039 0x40, 0x00, 0x00, 0x00,
1040 0x00, 0x00, 0x00, 0x00,
1041 0x00, 0x00, 0x00, 0x00,
1047 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_packet_offsets[] = {
1048 { ICE_MAC_OFOS, 0 },
1049 { ICE_IPV6_OFOS, 14 },
1052 { ICE_IPV6_IL, 82 },
1053 { ICE_PROTOCOL_LAST, 0 },
1056 static const u8 dummy_ipv6_gtpu_ipv6_packet[] = {
1057 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1058 0x00, 0x00, 0x00, 0x00,
1059 0x00, 0x00, 0x00, 0x00,
1062 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1063 0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
1064 0x00, 0x00, 0x00, 0x00,
1065 0x00, 0x00, 0x00, 0x00,
1066 0x00, 0x00, 0x00, 0x00,
1067 0x00, 0x00, 0x00, 0x00,
1068 0x00, 0x00, 0x00, 0x00,
1069 0x00, 0x00, 0x00, 0x00,
1070 0x00, 0x00, 0x00, 0x00,
1071 0x00, 0x00, 0x00, 0x00,
1073 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1074 0x00, 0x00, 0x00, 0x00,
1076 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 62 */
1077 0x00, 0x00, 0x00, 0x00,
1078 0x00, 0x00, 0x00, 0x85,
1080 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1081 0x00, 0x00, 0x00, 0x00,
1083 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFIL 82 */
1084 0x00, 0x00, 0x3b, 0x00,
1085 0x00, 0x00, 0x00, 0x00,
1086 0x00, 0x00, 0x00, 0x00,
1087 0x00, 0x00, 0x00, 0x00,
1088 0x00, 0x00, 0x00, 0x00,
1089 0x00, 0x00, 0x00, 0x00,
1090 0x00, 0x00, 0x00, 0x00,
1091 0x00, 0x00, 0x00, 0x00,
1092 0x00, 0x00, 0x00, 0x00,
1097 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
1098 { ICE_MAC_OFOS, 0 },
1099 { ICE_IPV4_OFOS, 14 },
1102 { ICE_PROTOCOL_LAST, 0 },
1105 static const u8 dummy_udp_gtp_packet[] = {
1106 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1107 0x00, 0x00, 0x00, 0x00,
1108 0x00, 0x00, 0x00, 0x00,
1111 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
1112 0x00, 0x00, 0x00, 0x00,
1113 0x00, 0x11, 0x00, 0x00,
1114 0x00, 0x00, 0x00, 0x00,
1115 0x00, 0x00, 0x00, 0x00,
1117 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
1118 0x00, 0x1c, 0x00, 0x00,
1120 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
1121 0x00, 0x00, 0x00, 0x00,
1122 0x00, 0x00, 0x00, 0x85,
1124 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1125 0x00, 0x00, 0x00, 0x00,
1128 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
1129 { ICE_MAC_OFOS, 0 },
1130 { ICE_IPV4_OFOS, 14 },
1132 { ICE_GTP_NO_PAY, 42 },
1133 { ICE_PROTOCOL_LAST, 0 },
1137 struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
1138 { ICE_MAC_OFOS, 0 },
1139 { ICE_IPV6_OFOS, 14 },
1141 { ICE_GTP_NO_PAY, 62 },
1142 { ICE_PROTOCOL_LAST, 0 },
1145 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
1146 { ICE_MAC_OFOS, 0 },
1147 { ICE_ETYPE_OL, 12 },
1148 { ICE_VLAN_OFOS, 14},
1150 { ICE_PROTOCOL_LAST, 0 },
1153 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv4_offsets[] = {
1154 { ICE_MAC_OFOS, 0 },
1155 { ICE_ETYPE_OL, 12 },
1156 { ICE_VLAN_OFOS, 14},
1158 { ICE_IPV4_OFOS, 26 },
1159 { ICE_PROTOCOL_LAST, 0 },
1162 static const u8 dummy_pppoe_ipv4_packet[] = {
1163 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1164 0x00, 0x00, 0x00, 0x00,
1165 0x00, 0x00, 0x00, 0x00,
1167 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1169 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1171 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1174 0x00, 0x21, /* PPP Link Layer 24 */
1176 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
1177 0x00, 0x00, 0x00, 0x00,
1178 0x00, 0x00, 0x00, 0x00,
1179 0x00, 0x00, 0x00, 0x00,
1180 0x00, 0x00, 0x00, 0x00,
1182 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1186 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_tcp_packet_offsets[] = {
1187 { ICE_MAC_OFOS, 0 },
1188 { ICE_ETYPE_OL, 12 },
1189 { ICE_VLAN_OFOS, 14},
1191 { ICE_IPV4_OFOS, 26 },
1193 { ICE_PROTOCOL_LAST, 0 },
1196 static const u8 dummy_pppoe_ipv4_tcp_packet[] = {
1197 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1198 0x00, 0x00, 0x00, 0x00,
1199 0x00, 0x00, 0x00, 0x00,
1201 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1203 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1205 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1208 0x00, 0x21, /* PPP Link Layer 24 */
1210 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 26 */
1211 0x00, 0x01, 0x00, 0x00,
1212 0x00, 0x06, 0x00, 0x00,
1213 0x00, 0x00, 0x00, 0x00,
1214 0x00, 0x00, 0x00, 0x00,
1216 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 46 */
1217 0x00, 0x00, 0x00, 0x00,
1218 0x00, 0x00, 0x00, 0x00,
1219 0x50, 0x00, 0x00, 0x00,
1220 0x00, 0x00, 0x00, 0x00,
1222 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1226 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_udp_packet_offsets[] = {
1227 { ICE_MAC_OFOS, 0 },
1228 { ICE_ETYPE_OL, 12 },
1229 { ICE_VLAN_OFOS, 14},
1231 { ICE_IPV4_OFOS, 26 },
1232 { ICE_UDP_ILOS, 46 },
1233 { ICE_PROTOCOL_LAST, 0 },
1236 static const u8 dummy_pppoe_ipv4_udp_packet[] = {
1237 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1238 0x00, 0x00, 0x00, 0x00,
1239 0x00, 0x00, 0x00, 0x00,
1241 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1243 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1245 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1248 0x00, 0x21, /* PPP Link Layer 24 */
1250 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 26 */
1251 0x00, 0x01, 0x00, 0x00,
1252 0x00, 0x11, 0x00, 0x00,
1253 0x00, 0x00, 0x00, 0x00,
1254 0x00, 0x00, 0x00, 0x00,
1256 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 46 */
1257 0x00, 0x08, 0x00, 0x00,
1259 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1262 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_offsets[] = {
1263 { ICE_MAC_OFOS, 0 },
1264 { ICE_ETYPE_OL, 12 },
1265 { ICE_VLAN_OFOS, 14},
1267 { ICE_IPV6_OFOS, 26 },
1268 { ICE_PROTOCOL_LAST, 0 },
1271 static const u8 dummy_pppoe_ipv6_packet[] = {
1272 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1273 0x00, 0x00, 0x00, 0x00,
1274 0x00, 0x00, 0x00, 0x00,
1276 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1278 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1280 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1283 0x00, 0x57, /* PPP Link Layer 24 */
1285 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
1286 0x00, 0x00, 0x3b, 0x00,
1287 0x00, 0x00, 0x00, 0x00,
1288 0x00, 0x00, 0x00, 0x00,
1289 0x00, 0x00, 0x00, 0x00,
1290 0x00, 0x00, 0x00, 0x00,
1291 0x00, 0x00, 0x00, 0x00,
1292 0x00, 0x00, 0x00, 0x00,
1293 0x00, 0x00, 0x00, 0x00,
1294 0x00, 0x00, 0x00, 0x00,
1296 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Offsets for the PPPoE IPv6/TCP dummy packet below.
 * NOTE(review): the ICE_TCP_IL entry (offset 66, matching the packet's
 * TCP header) appears to have been dropped by the extraction --
 * confirm against the pristine source.
 */
1300 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_tcp_offsets[] = {
1301 { ICE_MAC_OFOS, 0 },
1302 { ICE_ETYPE_OL, 12 },
1303 { ICE_VLAN_OFOS, 14},
1305 { ICE_IPV6_OFOS, 26 },
1307 { ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for PPPoE carrying IPv6/TCP: PPP protocol 0x0057
 * (IPv6), IPv6 payload length 0x14 with next-header 0x06 (TCP),
 * TCP data offset nibble 0x5 (20-byte header).
 */
1310 static const u8 dummy_pppoe_ipv6_tcp_packet[] = {
1311 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1312 0x00, 0x00, 0x00, 0x00,
1313 0x00, 0x00, 0x00, 0x00,
1315 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1317 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1319 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1322 0x00, 0x57, /* PPP Link Layer 24 */
1324 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
1325 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
1326 0x00, 0x00, 0x00, 0x00,
1327 0x00, 0x00, 0x00, 0x00,
1328 0x00, 0x00, 0x00, 0x00,
1329 0x00, 0x00, 0x00, 0x00,
1330 0x00, 0x00, 0x00, 0x00,
1331 0x00, 0x00, 0x00, 0x00,
1332 0x00, 0x00, 0x00, 0x00,
1333 0x00, 0x00, 0x00, 0x00,
1335 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 66 */
1336 0x00, 0x00, 0x00, 0x00,
1337 0x00, 0x00, 0x00, 0x00,
1338 0x50, 0x00, 0x00, 0x00,
1339 0x00, 0x00, 0x00, 0x00,
1341 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Offsets for the PPPoE IPv6/UDP dummy packet below; terminated by
 * ICE_PROTOCOL_LAST.
 */
1345 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_udp_offsets[] = {
1346 { ICE_MAC_OFOS, 0 },
1347 { ICE_ETYPE_OL, 12 },
1348 { ICE_VLAN_OFOS, 14},
1350 { ICE_IPV6_OFOS, 26 },
1351 { ICE_UDP_ILOS, 66 },
1352 { ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for PPPoE carrying IPv6/UDP: PPP protocol 0x0057
 * (IPv6), IPv6 next-header 0x11 (UDP), UDP length 8 (header only).
 */
1355 static const u8 dummy_pppoe_ipv6_udp_packet[] = {
1356 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1357 0x00, 0x00, 0x00, 0x00,
1358 0x00, 0x00, 0x00, 0x00,
1360 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1362 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1364 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1367 0x00, 0x57, /* PPP Link Layer 24 */
1369 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
1370 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
1371 0x00, 0x00, 0x00, 0x00,
1372 0x00, 0x00, 0x00, 0x00,
1373 0x00, 0x00, 0x00, 0x00,
1374 0x00, 0x00, 0x00, 0x00,
1375 0x00, 0x00, 0x00, 0x00,
1376 0x00, 0x00, 0x00, 0x00,
1377 0x00, 0x00, 0x00, 0x00,
1378 0x00, 0x00, 0x00, 0x00,
1380 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 66 */
1381 0x00, 0x08, 0x00, 0x00,
1383 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Offsets for the IPv4/ESP dummy packet below. */
1386 static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
1387 { ICE_MAC_OFOS, 0 },
1388 { ICE_IPV4_OFOS, 14 },
1390 { ICE_PROTOCOL_LAST, 0 },

/* Dummy IPv4 packet with ESP payload: IPv4 protocol 0x32 (ESP),
 * followed by an 8-byte ESP header placeholder (SPI + sequence).
 */
1393 static const u8 dummy_ipv4_esp_pkt[] = {
1394 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1395 0x00, 0x00, 0x00, 0x00,
1396 0x00, 0x00, 0x00, 0x00,
1399 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 14 */
1400 0x00, 0x00, 0x40, 0x00,
1401 0x40, 0x32, 0x00, 0x00,
1402 0x00, 0x00, 0x00, 0x00,
1403 0x00, 0x00, 0x00, 0x00,
1405 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
1406 0x00, 0x00, 0x00, 0x00,
1407 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Offsets for the IPv6/ESP dummy packet below. */
1410 static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
1411 { ICE_MAC_OFOS, 0 },
1412 { ICE_IPV6_OFOS, 14 },
1414 { ICE_PROTOCOL_LAST, 0 },

/* Dummy IPv6 packet with ESP payload: next-header 0x32 (ESP),
 * payload length 8 covering the ESP header placeholder.
 */
1417 static const u8 dummy_ipv6_esp_pkt[] = {
1418 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1419 0x00, 0x00, 0x00, 0x00,
1420 0x00, 0x00, 0x00, 0x00,
1423 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1424 0x00, 0x08, 0x32, 0x00, /* Next header ESP */
1425 0x00, 0x00, 0x00, 0x00,
1426 0x00, 0x00, 0x00, 0x00,
1427 0x00, 0x00, 0x00, 0x00,
1428 0x00, 0x00, 0x00, 0x00,
1429 0x00, 0x00, 0x00, 0x00,
1430 0x00, 0x00, 0x00, 0x00,
1431 0x00, 0x00, 0x00, 0x00,
1432 0x00, 0x00, 0x00, 0x00,
1434 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
1435 0x00, 0x00, 0x00, 0x00,
1436 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Offsets for the IPv4/AH dummy packet below. */
1439 static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
1440 { ICE_MAC_OFOS, 0 },
1441 { ICE_IPV4_OFOS, 14 },
1443 { ICE_PROTOCOL_LAST, 0 },

/* Dummy IPv4 packet with Authentication Header: IPv4 protocol 0x33
 * (AH), followed by a 12-byte AH placeholder.
 */
1446 static const u8 dummy_ipv4_ah_pkt[] = {
1447 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1448 0x00, 0x00, 0x00, 0x00,
1449 0x00, 0x00, 0x00, 0x00,
1452 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1453 0x00, 0x00, 0x40, 0x00,
1454 0x40, 0x33, 0x00, 0x00,
1455 0x00, 0x00, 0x00, 0x00,
1456 0x00, 0x00, 0x00, 0x00,
1458 0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
1459 0x00, 0x00, 0x00, 0x00,
1460 0x00, 0x00, 0x00, 0x00,
1461 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Offsets for the IPv6/AH dummy packet below. */
1464 static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
1465 { ICE_MAC_OFOS, 0 },
1466 { ICE_IPV6_OFOS, 14 },
1468 { ICE_PROTOCOL_LAST, 0 },

/* Dummy IPv6 packet with Authentication Header: next-header 0x33
 * (AH), payload length 0x0c covering the AH placeholder.
 */
1471 static const u8 dummy_ipv6_ah_pkt[] = {
1472 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1473 0x00, 0x00, 0x00, 0x00,
1474 0x00, 0x00, 0x00, 0x00,
1477 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1478 0x00, 0x0c, 0x33, 0x00, /* Next header AH */
1479 0x00, 0x00, 0x00, 0x00,
1480 0x00, 0x00, 0x00, 0x00,
1481 0x00, 0x00, 0x00, 0x00,
1482 0x00, 0x00, 0x00, 0x00,
1483 0x00, 0x00, 0x00, 0x00,
1484 0x00, 0x00, 0x00, 0x00,
1485 0x00, 0x00, 0x00, 0x00,
1486 0x00, 0x00, 0x00, 0x00,
1488 0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
1489 0x00, 0x00, 0x00, 0x00,
1490 0x00, 0x00, 0x00, 0x00,
1491 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Offsets for the IPv4 NAT-T (UDP-encapsulated ESP) dummy packet. */
1494 static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
1495 { ICE_MAC_OFOS, 0 },
1496 { ICE_IPV4_OFOS, 14 },
1497 { ICE_UDP_ILOS, 34 },
1499 { ICE_PROTOCOL_LAST, 0 },

/* Dummy IPv4 NAT-T packet: IPv4 proto 0x11 (UDP) with UDP
 * destination port 0x1194 (4500, IPsec NAT traversal), followed by
 * an ESP placeholder.
 */
1502 static const u8 dummy_ipv4_nat_pkt[] = {
1503 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1504 0x00, 0x00, 0x00, 0x00,
1505 0x00, 0x00, 0x00, 0x00,
1508 0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_IL 14 */
1509 0x00, 0x00, 0x40, 0x00,
1510 0x40, 0x11, 0x00, 0x00,
1511 0x00, 0x00, 0x00, 0x00,
1512 0x00, 0x00, 0x00, 0x00,
1514 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34 */
1515 0x00, 0x00, 0x00, 0x00,
1517 0x00, 0x00, 0x00, 0x00,
1518 0x00, 0x00, 0x00, 0x00,
1519 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Offsets for the IPv6 NAT-T (UDP-encapsulated ESP) dummy packet. */
1522 static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
1523 { ICE_MAC_OFOS, 0 },
1524 { ICE_IPV6_OFOS, 14 },
1525 { ICE_UDP_ILOS, 54 },
1527 { ICE_PROTOCOL_LAST, 0 },

/* Dummy IPv6 NAT-T packet: next-header 0x11 (UDP) with UDP
 * destination port 0x1194 (4500), followed by an ESP placeholder.
 */
1530 static const u8 dummy_ipv6_nat_pkt[] = {
1531 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1532 0x00, 0x00, 0x00, 0x00,
1533 0x00, 0x00, 0x00, 0x00,
1536 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1537 0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */
1538 0x00, 0x00, 0x00, 0x00,
1539 0x00, 0x00, 0x00, 0x00,
1540 0x00, 0x00, 0x00, 0x00,
1541 0x00, 0x00, 0x00, 0x00,
1542 0x00, 0x00, 0x00, 0x00,
1543 0x00, 0x00, 0x00, 0x00,
1544 0x00, 0x00, 0x00, 0x00,
1545 0x00, 0x00, 0x00, 0x00,
1547 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54 */
1548 0x00, 0x00, 0x00, 0x00,
1550 0x00, 0x00, 0x00, 0x00,
1551 0x00, 0x00, 0x00, 0x00,
1552 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Offsets for the IPv4/L2TPv3 dummy packet below. */
1556 static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
1557 { ICE_MAC_OFOS, 0 },
1558 { ICE_IPV4_OFOS, 14 },
1560 { ICE_PROTOCOL_LAST, 0 },

/* Dummy IPv4 packet carrying L2TPv3 over IP: IPv4 protocol 0x73
 * (L2TP), followed by a 12-byte L2TPv3 placeholder.
 */
1563 static const u8 dummy_ipv4_l2tpv3_pkt[] = {
1564 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1565 0x00, 0x00, 0x00, 0x00,
1566 0x00, 0x00, 0x00, 0x00,
1569 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1570 0x00, 0x00, 0x40, 0x00,
1571 0x40, 0x73, 0x00, 0x00,
1572 0x00, 0x00, 0x00, 0x00,
1573 0x00, 0x00, 0x00, 0x00,
1575 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
1576 0x00, 0x00, 0x00, 0x00,
1577 0x00, 0x00, 0x00, 0x00,
1578 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Offsets for the IPv6/L2TPv3 dummy packet below. */
1581 static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
1582 { ICE_MAC_OFOS, 0 },
1583 { ICE_IPV6_OFOS, 14 },
1585 { ICE_PROTOCOL_LAST, 0 },

/* Dummy IPv6 packet carrying L2TPv3 over IP: next-header 0x73
 * (L2TP), followed by a 12-byte L2TPv3 placeholder.
 */
1588 static const u8 dummy_ipv6_l2tpv3_pkt[] = {
1589 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1590 0x00, 0x00, 0x00, 0x00,
1591 0x00, 0x00, 0x00, 0x00,
1594 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
1595 0x00, 0x0c, 0x73, 0x40,
1596 0x00, 0x00, 0x00, 0x00,
1597 0x00, 0x00, 0x00, 0x00,
1598 0x00, 0x00, 0x00, 0x00,
1599 0x00, 0x00, 0x00, 0x00,
1600 0x00, 0x00, 0x00, 0x00,
1601 0x00, 0x00, 0x00, 0x00,
1602 0x00, 0x00, 0x00, 0x00,
1603 0x00, 0x00, 0x00, 0x00,
1605 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
1606 0x00, 0x00, 0x00, 0x00,
1607 0x00, 0x00, 0x00, 0x00,
1608 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Offsets for the double-VLAN (QinQ) IPv4 dummy packet below:
 * outer tag at ICE_VLAN_EX, inner tag at ICE_VLAN_OFOS.
 */
1611 static const struct ice_dummy_pkt_offsets dummy_qinq_ipv4_packet_offsets[] = {
1612 { ICE_MAC_OFOS, 0 },
1613 { ICE_ETYPE_OL, 12 },
1614 { ICE_VLAN_EX, 14 },
1615 { ICE_VLAN_OFOS, 18 },
1616 { ICE_IPV4_OFOS, 22 },
1617 { ICE_PROTOCOL_LAST, 0 },

/* Dummy QinQ IPv4/UDP packet: outer ethertype 0x9100, inner VLAN tag
 * with ethertype 0x0800 (IPv4), IPv4 proto 0x11 (UDP).
 */
1620 static const u8 dummy_qinq_ipv4_pkt[] = {
1621 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1622 0x00, 0x00, 0x00, 0x00,
1623 0x00, 0x00, 0x00, 0x00,
1625 0x91, 0x00, /* ICE_ETYPE_OL 12 */
1627 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1628 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 18 */
1630 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
1631 0x00, 0x01, 0x00, 0x00,
1632 0x00, 0x11, 0x00, 0x00,
1633 0x00, 0x00, 0x00, 0x00,
1634 0x00, 0x00, 0x00, 0x00,
1636 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
1637 0x00, 0x08, 0x00, 0x00,
1639 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Offsets for the double-VLAN (QinQ) IPv6 dummy packet below. */
1642 static const struct ice_dummy_pkt_offsets dummy_qinq_ipv6_packet_offsets[] = {
1643 { ICE_MAC_OFOS, 0 },
1644 { ICE_ETYPE_OL, 12 },
1645 { ICE_VLAN_EX, 14 },
1646 { ICE_VLAN_OFOS, 18 },
1647 { ICE_IPV6_OFOS, 22 },
1648 { ICE_PROTOCOL_LAST, 0 },

/* Dummy QinQ IPv6/UDP packet: outer ethertype 0x9100, inner tag with
 * ethertype 0x86DD (IPv6), next-header 0x11 (UDP); extra trailing
 * bytes are reserved for ESP use.
 */
1651 static const u8 dummy_qinq_ipv6_pkt[] = {
1652 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1653 0x00, 0x00, 0x00, 0x00,
1654 0x00, 0x00, 0x00, 0x00,
1656 0x91, 0x00, /* ICE_ETYPE_OL 12 */
1658 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1659 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 18 */
1661 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
1662 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
1663 0x00, 0x00, 0x00, 0x00,
1664 0x00, 0x00, 0x00, 0x00,
1665 0x00, 0x00, 0x00, 0x00,
1666 0x00, 0x00, 0x00, 0x00,
1667 0x00, 0x00, 0x00, 0x00,
1668 0x00, 0x00, 0x00, 0x00,
1669 0x00, 0x00, 0x00, 0x00,
1670 0x00, 0x00, 0x00, 0x00,
1672 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
1673 0x00, 0x10, 0x00, 0x00,
1675 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
1676 0x00, 0x00, 0x00, 0x00,
1678 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Offsets for the QinQ PPPoE (no inner IP match) dummy packet:
 * outer tag at ICE_VLAN_EX, inner tag at ICE_VLAN_OFOS; list ends at
 * ICE_PROTOCOL_LAST.
 */
1681 static const struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_offsets[] = {
1682 { ICE_MAC_OFOS, 0 },
1683 { ICE_ETYPE_OL, 12 },
1684 { ICE_VLAN_EX, 14 },
1685 { ICE_VLAN_OFOS, 18 },
1687 { ICE_PROTOCOL_LAST, 0 },
/* Offsets for the QinQ PPPoE IPv4 dummy packet below. */
1691 struct ice_dummy_pkt_offsets dummy_qinq_pppoe_ipv4_packet_offsets[] = {
1692 { ICE_MAC_OFOS, 0 },
1693 { ICE_ETYPE_OL, 12 },
1694 { ICE_VLAN_EX, 14 },
1695 { ICE_VLAN_OFOS, 18 },
1697 { ICE_IPV4_OFOS, 30 },
1698 { ICE_PROTOCOL_LAST, 0 },

/* Dummy QinQ PPPoE packet carrying IPv4: outer ethertype 0x9100,
 * inner tag ethertype 0x8864 (PPPoE session), PPP protocol 0x0021
 * (IPv4).
 */
1701 static const u8 dummy_qinq_pppoe_ipv4_pkt[] = {
1702 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1703 0x00, 0x00, 0x00, 0x00,
1704 0x00, 0x00, 0x00, 0x00,
1706 0x91, 0x00, /* ICE_ETYPE_OL 12 */
1708 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1709 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 18 */
1711 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
1714 0x00, 0x21, /* PPP Link Layer 28 */
1716 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 30 */
1717 0x00, 0x00, 0x00, 0x00,
1718 0x00, 0x00, 0x00, 0x00,
1719 0x00, 0x00, 0x00, 0x00,
1720 0x00, 0x00, 0x00, 0x00,
1722 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Offsets for the QinQ PPPoE IPv6 dummy packet below.
 * NOTE(review): the ICE_VLAN_EX entry (offset 14) appears to have
 * been dropped by the extraction -- the packet bytes below do carry
 * an outer tag at 14; confirm against the pristine source.
 */
1726 struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_ipv6_offsets[] = {
1727 { ICE_MAC_OFOS, 0 },
1728 { ICE_ETYPE_OL, 12 },
1730 { ICE_VLAN_OFOS, 18 },
1732 { ICE_IPV6_OFOS, 30 },
1733 { ICE_PROTOCOL_LAST, 0 },

/* Dummy QinQ PPPoE packet carrying plain IPv6: PPP protocol 0x0057
 * (IPv6), IPv6 next-header 0x3b (No Next Header).
 */
1736 static const u8 dummy_qinq_pppoe_ipv6_packet[] = {
1737 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1738 0x00, 0x00, 0x00, 0x00,
1739 0x00, 0x00, 0x00, 0x00,
1741 0x91, 0x00, /* ICE_ETYPE_OL 12 */
1743 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1744 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 18 */
1746 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
1749 0x00, 0x57, /* PPP Link Layer 28*/
1751 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 30 */
1752 0x00, 0x00, 0x3b, 0x00,
1753 0x00, 0x00, 0x00, 0x00,
1754 0x00, 0x00, 0x00, 0x00,
1755 0x00, 0x00, 0x00, 0x00,
1756 0x00, 0x00, 0x00, 0x00,
1757 0x00, 0x00, 0x00, 0x00,
1758 0x00, 0x00, 0x00, 0x00,
1759 0x00, 0x00, 0x00, 0x00,
1760 0x00, 0x00, 0x00, 0x00,
1762 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Module-level association state shared by the recipe-management code
 * below: recipe_to_profile[rid] holds the set of profiles a recipe is
 * mapped to, profile_to_recipe[prof] the inverse.  Refreshed from FW
 * by ice_get_recp_to_prof_map() (forward-declared here).
 */
1765 /* this is a recipe to profile association bitmap */
1766 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
1767 ICE_MAX_NUM_PROFILES);
1769 /* this is a profile to recipe association bitmap */
1770 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
1771 ICE_MAX_NUM_RECIPES);
1773 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
/* If the recipe element carries a valid result index (flagged by
 * ICE_AQ_RECIPE_RESULT_EN), record the raw index -- with the enable
 * flag masked off -- in the recipe's result-index bitmap res_idxs.
 */
1776 * ice_collect_result_idx - copy result index values
1777 * @buf: buffer that contains the result index
1778 * @recp: the recipe struct to copy data into
1780 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
1781 struct ice_sw_recipe *recp)
1783 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1784 ice_set_bit(buf->content.result_indx &
1785 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
/* Derive a tunnel type for recipe @rid from which profiles the recipe
 * is associated with (per the recipe_to_profile bitmap), then promote
 * the result to a QINQ variant when @vlan is set.
 * NOTE(review): this chunk appears extraction-mangled -- braces,
 * `break;`/`continue;` statements and some assignments inside the
 * loops and switch below were dropped; do not build from this text.
 */
1789 * ice_get_tun_type_for_recipe - get tunnel type for the recipe
1790 * @rid: recipe ID that we are populating
1792 static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid, bool vlan)
/* Hard-coded profile-ID groups used to classify each associated profile. */
1794 u8 vxlan_profile[12] = {10, 11, 12, 16, 17, 18, 22, 23, 24, 25, 26, 27};
1795 u8 gre_profile[12] = {13, 14, 15, 19, 20, 21, 28, 29, 30, 31, 32, 33};
1796 u8 pppoe_profile[7] = {34, 35, 36, 37, 38, 39, 40};
1797 u8 non_tun_profile[6] = {4, 5, 6, 7, 8, 9};
1798 enum ice_sw_tunnel_type tun_type;
1799 u16 i, j, profile_num = 0;
1800 bool non_tun_valid = false;
1801 bool pppoe_valid = false;
1802 bool vxlan_valid = false;
1803 bool gre_valid = false;
1804 bool gtp_valid = false;
1805 bool flag_valid = false;
/* Pass 1: classify every profile associated with this recipe. */
1807 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1808 if (!ice_is_bit_set(recipe_to_profile[rid], j))
1813 for (i = 0; i < 12; i++) {
1814 if (gre_profile[i] == j)
1818 for (i = 0; i < 12; i++) {
1819 if (vxlan_profile[i] == j)
1823 for (i = 0; i < 7; i++) {
1824 if (pppoe_profile[i] == j)
1828 for (i = 0; i < 6; i++) {
1829 if (non_tun_profile[i] == j)
1830 non_tun_valid = true;
1833 if (j >= ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER &&
1834 j <= ICE_PROFID_IPV6_GTPU_IPV6_TCP)
1837 if ((j >= ICE_PROFID_IPV4_ESP &&
1838 j <= ICE_PROFID_IPV6_PFCP_SESSION) ||
1839 (j >= ICE_PROFID_IPV4_GTPC_TEID &&
1840 j <= ICE_PROFID_IPV6_GTPU_TEID))
/* Pass 2: collapse the classification flags into one tunnel type;
 * a mix of tunneled and non-tunneled profiles becomes AND_NON_TUN.
 */
1844 if (!non_tun_valid && vxlan_valid)
1845 tun_type = ICE_SW_TUN_VXLAN;
1846 else if (!non_tun_valid && gre_valid)
1847 tun_type = ICE_SW_TUN_NVGRE;
1848 else if (!non_tun_valid && pppoe_valid)
1849 tun_type = ICE_SW_TUN_PPPOE;
1850 else if (!non_tun_valid && gtp_valid)
1851 tun_type = ICE_SW_TUN_GTP;
1852 else if (non_tun_valid &&
1853 (vxlan_valid || gre_valid || gtp_valid || pppoe_valid))
1854 tun_type = ICE_SW_TUN_AND_NON_TUN;
1855 else if (non_tun_valid && !vxlan_valid && !gre_valid && !gtp_valid &&
1857 tun_type = ICE_NON_TUN;
1859 tun_type = ICE_NON_TUN;
/* PPPoE with multiple profiles: disambiguate IPv4-only vs IPv6-only. */
1861 if (profile_num > 1 && tun_type == ICE_SW_TUN_PPPOE) {
1862 i = ice_is_bit_set(recipe_to_profile[rid],
1863 ICE_PROFID_PPPOE_IPV4_OTHER);
1864 j = ice_is_bit_set(recipe_to_profile[rid],
1865 ICE_PROFID_PPPOE_IPV6_OTHER);
1867 tun_type = ICE_SW_TUN_PPPOE_IPV4;
1869 tun_type = ICE_SW_TUN_PPPOE_IPV6;
/* GTP: refine into the inner IPv4/IPv6 over outer IPv4/IPv6 variants. */
1872 if (tun_type == ICE_SW_TUN_GTP) {
1873 if (ice_is_bit_set(recipe_to_profile[rid],
1874 ICE_PROFID_IPV4_GTPU_IPV4_OTHER))
1875 tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4;
1876 else if (ice_is_bit_set(recipe_to_profile[rid],
1877 ICE_PROFID_IPV4_GTPU_IPV6_OTHER))
1878 tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6;
1879 else if (ice_is_bit_set(recipe_to_profile[rid],
1880 ICE_PROFID_IPV6_GTPU_IPV4_OTHER))
1881 tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4;
1882 else if (ice_is_bit_set(recipe_to_profile[rid],
1883 ICE_PROFID_IPV6_GTPU_IPV6_OTHER))
1884 tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6;
/* Exactly one associated profile: map the profile ID directly to a
 * precise tunnel type.  NOTE(review): the per-case `break;` lines
 * were dropped by the extraction.
 */
1887 if (profile_num == 1 && (flag_valid || non_tun_valid || pppoe_valid)) {
1888 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1889 if (ice_is_bit_set(recipe_to_profile[rid], j)) {
1891 case ICE_PROFID_IPV4_TCP:
1892 tun_type = ICE_SW_IPV4_TCP;
1894 case ICE_PROFID_IPV4_UDP:
1895 tun_type = ICE_SW_IPV4_UDP;
1897 case ICE_PROFID_IPV6_TCP:
1898 tun_type = ICE_SW_IPV6_TCP;
1900 case ICE_PROFID_IPV6_UDP:
1901 tun_type = ICE_SW_IPV6_UDP;
1903 case ICE_PROFID_PPPOE_PAY:
1904 tun_type = ICE_SW_TUN_PPPOE_PAY;
1906 case ICE_PROFID_PPPOE_IPV4_TCP:
1907 tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1909 case ICE_PROFID_PPPOE_IPV4_UDP:
1910 tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1912 case ICE_PROFID_PPPOE_IPV4_OTHER:
1913 tun_type = ICE_SW_TUN_PPPOE_IPV4;
1915 case ICE_PROFID_PPPOE_IPV6_TCP:
1916 tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1918 case ICE_PROFID_PPPOE_IPV6_UDP:
1919 tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1921 case ICE_PROFID_PPPOE_IPV6_OTHER:
1922 tun_type = ICE_SW_TUN_PPPOE_IPV6;
1924 case ICE_PROFID_IPV4_ESP:
1925 tun_type = ICE_SW_TUN_IPV4_ESP;
1927 case ICE_PROFID_IPV6_ESP:
1928 tun_type = ICE_SW_TUN_IPV6_ESP;
1930 case ICE_PROFID_IPV4_AH:
1931 tun_type = ICE_SW_TUN_IPV4_AH;
1933 case ICE_PROFID_IPV6_AH:
1934 tun_type = ICE_SW_TUN_IPV6_AH;
1936 case ICE_PROFID_IPV4_NAT_T:
1937 tun_type = ICE_SW_TUN_IPV4_NAT_T;
1939 case ICE_PROFID_IPV6_NAT_T:
1940 tun_type = ICE_SW_TUN_IPV6_NAT_T;
1942 case ICE_PROFID_IPV4_PFCP_NODE:
1944 ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1946 case ICE_PROFID_IPV6_PFCP_NODE:
1948 ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1950 case ICE_PROFID_IPV4_PFCP_SESSION:
1952 ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1954 case ICE_PROFID_IPV6_PFCP_SESSION:
1956 ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1958 case ICE_PROFID_MAC_IPV4_L2TPV3:
1959 tun_type = ICE_SW_TUN_IPV4_L2TPV3;
1961 case ICE_PROFID_MAC_IPV6_L2TPV3:
1962 tun_type = ICE_SW_TUN_IPV6_L2TPV3;
1964 case ICE_PROFID_IPV4_GTPU_TEID:
1965 tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1967 case ICE_PROFID_IPV6_GTPU_TEID:
1968 tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
/* Finally, the vlan flag promotes the type to its QINQ variant. */
1979 if (vlan && tun_type == ICE_SW_TUN_PPPOE)
1980 tun_type = ICE_SW_TUN_PPPOE_QINQ;
1981 else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV6)
1982 tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
1983 else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV4)
1984 tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
1985 else if (vlan && tun_type == ICE_SW_TUN_PPPOE_PAY)
1986 tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
1987 else if (vlan && tun_type == ICE_SW_TUN_AND_NON_TUN)
1988 tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1989 else if (vlan && tun_type == ICE_NON_TUN)
1990 tun_type = ICE_NON_TUN_QINQ;
/* Read recipe @rid (and its chained sub-recipes) from firmware via
 * ice_aq_get_recipe() and populate the SW bookkeeping in @recps:
 * lookup words, result-index bitmaps, priorities, root/chain info
 * and the derived tunnel type.
 * NOTE(review): several guard lines (`if (!tmp)`, `if (status)`,
 * braces, `continue;`/`goto err_unroll;`) were dropped by the
 * extraction; verify against the pristine source.
 */
1996 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
1997 * @hw: pointer to hardware structure
1998 * @recps: struct that we need to populate
1999 * @rid: recipe ID that we are populating
2000 * @refresh_required: true if we should get recipe to profile mapping from FW
2002 * This function is used to populate all the necessary entries into our
2003 * bookkeeping so that we have a current list of all the recipes that are
2004 * programmed in the firmware.
2006 static enum ice_status
2007 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
2008 bool *refresh_required)
2010 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
2011 struct ice_aqc_recipe_data_elem *tmp;
2012 u16 num_recps = ICE_MAX_NUM_RECIPES;
2013 struct ice_prot_lkup_ext *lkup_exts;
2014 enum ice_status status;
2019 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
2021 /* we need a buffer big enough to accommodate all the recipes */
2022 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
2023 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
2025 return ICE_ERR_NO_MEMORY;
2027 tmp[0].recipe_indx = rid;
2028 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
2029 /* non-zero status meaning recipe doesn't exist */
2033 /* Get recipe to profile map so that we can get the fv from lkups that
2034 * we read for a recipe from FW. Since we want to minimize the number of
2035 * times we make this FW call, just make one call and cache the copy
2036 * until a new recipe is added. This operation is only required the
2037 * first time to get the changes from FW. Then to search existing
2038 * entries we don't need to update the cache again until another recipe
2041 if (*refresh_required) {
2042 ice_get_recp_to_prof_map(hw);
2043 *refresh_required = false;
2046 /* Start populating all the entries for recps[rid] based on lkups from
2047 * firmware. Note that we are only creating the root recipe in our
2050 lkup_exts = &recps[rid].lkup_exts;
/* One iteration per recipe element returned by firmware; each element
 * describes one (sub-)recipe in the chain.
 */
2052 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
2053 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
2054 struct ice_recp_grp_entry *rg_entry;
2055 u8 i, prof, idx, prot = 0;
2059 rg_entry = (struct ice_recp_grp_entry *)
2060 ice_malloc(hw, sizeof(*rg_entry));
2062 status = ICE_ERR_NO_MEMORY;
2066 idx = root_bufs.recipe_indx;
2067 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
2069 /* Mark all result indices in this chain */
2070 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
2071 ice_set_bit(root_bufs.content.result_indx &
2072 ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
2074 /* get the first profile that is associated with rid */
2075 prof = ice_find_first_bit(recipe_to_profile[idx],
2076 ICE_MAX_NUM_PROFILES);
/* lkup_indx[0] is reserved; copy indices/masks starting at word 1. */
2077 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
2078 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
2080 rg_entry->fv_idx[i] = lkup_indx;
2081 rg_entry->fv_mask[i] =
2082 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
2084 /* If the recipe is a chained recipe then all its
2085 * child recipe's result will have a result index.
2086 * To fill fv_words we should not use those result
2087 * index, we only need the protocol ids and offsets.
2088 * We will skip all the fv_idx which stores result
2089 * index in them. We also need to skip any fv_idx which
2090 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
2091 * valid offset value.
2093 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
2094 rg_entry->fv_idx[i]) ||
2095 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
2096 rg_entry->fv_idx[i] == 0)
/* Translate the field-vector index into (protocol ID, offset). */
2099 ice_find_prot_off(hw, ICE_BLK_SW, prof,
2100 rg_entry->fv_idx[i], &prot, &off);
2101 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
2102 lkup_exts->fv_words[fv_word_idx].off = off;
2103 lkup_exts->field_mask[fv_word_idx] =
2104 rg_entry->fv_mask[i];
/* The tunnel-flag metadata word implies a VLAN/tunnel match. */
2105 if (prot == ICE_META_DATA_ID_HW &&
2106 off == ICE_TUN_FLAG_MDID_OFF)
2110 /* populate rg_list with the data from the child entry of this
2113 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
2115 /* Propagate some data to the recipe database */
2116 recps[idx].is_root = !!is_root;
2117 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2118 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
2119 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
2120 recps[idx].chain_idx = root_bufs.content.result_indx &
2121 ~ICE_AQ_RECIPE_RESULT_EN;
2122 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
2124 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
2130 /* Only do the following for root recipes entries */
2131 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
2132 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
2133 recps[idx].root_rid = root_bufs.content.rid &
2134 ~ICE_AQ_RECIPE_ID_IS_ROOT;
2135 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2138 /* Complete initialization of the root recipe entry */
2139 lkup_exts->n_val_words = fv_word_idx;
2140 recps[rid].big_recp = (num_recps > 1);
2141 recps[rid].n_grp_count = (u8)num_recps;
2142 recps[rid].tun_type = ice_get_tun_type_for_recipe(rid, vlan);
2143 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
2144 ice_memdup(hw, tmp, recps[rid].n_grp_count *
2145 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
2146 if (!recps[rid].root_buf)
2149 /* Copy result indexes */
2150 ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
2151 recps[rid].recp_created = true;
/* Refresh both module-level association bitmaps from firmware: for
 * every used profile, query its recipe bitmap and mirror it into
 * profile_to_recipe[] and (bit by bit) recipe_to_profile[].
 */
2159 * ice_get_recp_to_prof_map - updates recipe to profile mapping
2160 * @hw: pointer to hardware structure
2162 * This function is used to populate recipe_to_profile matrix where index to
2163 * this array is the recipe ID and the element is the mapping of which profiles
2164 * is this recipe mapped to.
2166 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
2168 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
2171 for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
2174 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
2175 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Skip a profile whose mapping cannot be read from FW. */
2176 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
2178 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
2179 ICE_MAX_NUM_RECIPES);
/* Maintain the inverse map: profile i belongs to each recipe j set. */
2180 ice_for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
2181 ice_set_bit(i, recipe_to_profile[j]);
/* Allocate the ICE_MAX_NUM_RECIPES-entry recipe table and initialize
 * each entry's root_rid, filter/replay/group lists and rule lock.
 * Returns ICE_ERR_NO_MEMORY on allocation failure.
 */
2186 * ice_init_def_sw_recp - initialize the recipe book keeping tables
2187 * @hw: pointer to the HW struct
2188 * @recp_list: pointer to sw recipe list
2190 * Allocate memory for the entire recipe table and initialize the structures/
2191 * entries corresponding to basic recipes.
2194 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
2196 struct ice_sw_recipe *recps;
2199 recps = (struct ice_sw_recipe *)
2200 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
2202 return ICE_ERR_NO_MEMORY;
2204 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
2205 recps[i].root_rid = i;
2206 INIT_LIST_HEAD(&recps[i].filt_rules);
2207 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
2208 INIT_LIST_HEAD(&recps[i].rg_list);
2209 ice_init_lock(&recps[i].filt_rule_lock);
/* Thin wrapper around AQ opcode 0x0200: fills a direct-command
 * descriptor, sends it, and echoes back the continuation token
 * (req_desc) and element count from the response.
 */
2218 * ice_aq_get_sw_cfg - get switch configuration
2219 * @hw: pointer to the hardware structure
2220 * @buf: pointer to the result buffer
2221 * @buf_size: length of the buffer available for response
2222 * @req_desc: pointer to requested descriptor
2223 * @num_elems: pointer to number of elements
2224 * @cd: pointer to command details structure or NULL
2226 * Get switch configuration (0x0200) to be placed in buf.
2227 * This admin command returns information such as initial VSI/port number
2228 * and switch ID it belongs to.
2230 * NOTE: *req_desc is both an input/output parameter.
2231 * The caller of this function first calls this function with *request_desc set
2232 * to 0. If the response from f/w has *req_desc set to 0, all the switch
2233 * configuration information has been returned; if non-zero (meaning not all
2234 * the information was returned), the caller should call this function again
2235 * with *req_desc set to the previous value returned by f/w to get the
2236 * next block of switch configuration information.
2238 * *num_elems is output only parameter. This reflects the number of elements
2239 * in response buffer. The caller of this function to use *num_elems while
2240 * parsing the response buffer.
2242 static enum ice_status
2243 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
2244 u16 buf_size, u16 *req_desc, u16 *num_elems,
2245 struct ice_sq_cd *cd)
2247 struct ice_aqc_get_sw_cfg *cmd;
2248 struct ice_aq_desc desc;
2249 enum ice_status status;
2251 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
2252 cmd = &desc.params.get_sw_conf;
2253 cmd->element = CPU_TO_LE16(*req_desc);
2255 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* NOTE(review): an `if (!status)` guard around the two read-backs
 * below appears dropped by the extraction -- the outputs should only
 * be updated on success.
 */
2257 *req_desc = LE16_TO_CPU(cmd->element);
2258 *num_elems = LE16_TO_CPU(cmd->num_elems);
/* Allocate one RSS global LUT via the alloc-resource AQ command and
 * return its ID through @global_lut_id; the temporary request buffer
 * is always freed before returning.
 */
2265 * ice_alloc_rss_global_lut - allocate a RSS global LUT
2266 * @hw: pointer to the HW struct
2267 * @shared_res: true to allocate as a shared resource and false to allocate as a dedicated resource
2268 * @global_lut_id: output parameter for the RSS global LUT's ID
2270 enum ice_status ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id)
2272 struct ice_aqc_alloc_free_res_elem *sw_buf;
2273 enum ice_status status;
2276 buf_len = ice_struct_size(sw_buf, elem, 1);
2277 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2279 return ICE_ERR_NO_MEMORY;
2281 sw_buf->num_elems = CPU_TO_LE16(1);
2282 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH |
2283 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
2284 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
2286 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, ice_aqc_opc_alloc_res, NULL);
2288 ice_debug(hw, ICE_DBG_RES, "Failed to allocate %s RSS global LUT, status %d\n",
2289 shared_res ? "shared" : "dedicated", status);
2290 goto ice_alloc_global_lut_exit;
/* Success: report the firmware-assigned LUT ID to the caller. */
2293 *global_lut_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2295 ice_alloc_global_lut_exit:
2296 ice_free(hw, sw_buf);
/* Release a previously allocated RSS global LUT via the free-resource
 * AQ command; failures are only logged, and the request buffer is
 * always freed.
 */
2301 * ice_free_rss_global_lut - free a RSS global LUT
2302 * @hw: pointer to the HW struct
2303 * @global_lut_id: ID of the RSS global LUT to free
2305 enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id)
2307 struct ice_aqc_alloc_free_res_elem *sw_buf;
2308 u16 buf_len, num_elems = 1;
2309 enum ice_status status;
2311 buf_len = ice_struct_size(sw_buf, elem, num_elems);
2312 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2314 return ICE_ERR_NO_MEMORY;
2316 sw_buf->num_elems = CPU_TO_LE16(num_elems);
2317 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH);
2318 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(global_lut_id);
2320 status = ice_aq_alloc_free_res(hw, num_elems, sw_buf, buf_len, ice_aqc_opc_free_res, NULL);
2322 ice_debug(hw, ICE_DBG_RES, "Failed to free RSS global LUT %d, status %d\n",
2323 global_lut_id, status);
2325 ice_free(hw, sw_buf);
/* Allocate a switch ID (and, on request, a dedicated VEB counter) via
 * two alloc-resource AQ commands, returning both IDs to the caller.
 */
2330 * ice_alloc_sw - allocate resources specific to switch
2331 * @hw: pointer to the HW struct
2332 * @ena_stats: true to turn on VEB stats
2333 * @shared_res: true for shared resource, false for dedicated resource
2334 * @sw_id: switch ID returned
2335 * @counter_id: VEB counter ID returned
2337 * allocates switch resources (SWID and VEB counter) (0x0208)
2340 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
2343 struct ice_aqc_alloc_free_res_elem *sw_buf;
2344 struct ice_aqc_res_elem *sw_ele;
2345 enum ice_status status;
2348 buf_len = ice_struct_size(sw_buf, elem, 1);
2349 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2351 return ICE_ERR_NO_MEMORY;
2353 /* Prepare buffer for switch ID.
2354 * The number of resource entries in buffer is passed as 1 since only a
2355 * single switch/VEB instance is allocated, and hence a single sw_id
2358 sw_buf->num_elems = CPU_TO_LE16(1);
2360 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
2361 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
2362 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
2364 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2365 ice_aqc_opc_alloc_res, NULL);
2368 goto ice_alloc_sw_exit;
2370 sw_ele = &sw_buf->elem[0];
2371 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
/* Second stage: allocate the VEB counter with its own scratch buffer.
 * NOTE(review): the enclosing `if (ena_stats)`/`else` lines appear
 * dropped by the extraction -- presumably the counter is only
 * allocated when VEB stats are enabled; confirm against pristine
 * source.
 */
2374 /* Prepare buffer for VEB Counter */
2375 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
2376 struct ice_aqc_alloc_free_res_elem *counter_buf;
2377 struct ice_aqc_res_elem *counter_ele;
2379 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
2380 ice_malloc(hw, buf_len);
2382 status = ICE_ERR_NO_MEMORY;
2383 goto ice_alloc_sw_exit;
2386 /* The number of resource entries in buffer is passed as 1 since
2387 * only a single switch/VEB instance is allocated, and hence a
2388 * single VEB counter is requested.
2390 counter_buf->num_elems = CPU_TO_LE16(1);
2391 counter_buf->res_type =
2392 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
2393 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
2394 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
2398 ice_free(hw, counter_buf);
2399 goto ice_alloc_sw_exit;
2401 counter_ele = &counter_buf->elem[0];
2402 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
2403 ice_free(hw, counter_buf);
2407 ice_free(hw, sw_buf);
/* Free the switch ID and the VEB counter with two free-resource AQ
 * commands; best-effort semantics -- both frees are attempted and the
 * last error encountered is what gets returned.
 */
2412 * ice_free_sw - free resources specific to switch
2413 * @hw: pointer to the HW struct
2414 * @sw_id: switch ID returned
2415 * @counter_id: VEB counter ID returned
2417 * free switch resources (SWID and VEB counter) (0x0209)
2419 * NOTE: This function frees multiple resources. It continues
2420 * releasing other resources even after it encounters error.
2421 * The error code returned is the last error it encountered.
2423 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
2425 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
2426 enum ice_status status, ret_status;
2429 buf_len = ice_struct_size(sw_buf, elem, 1);
2430 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2432 return ICE_ERR_NO_MEMORY;
2434 /* Prepare buffer to free for switch ID res.
2435 * The number of resource entries in buffer is passed as 1 since only a
2436 * single switch/VEB instance is freed, and hence a single sw_id
2439 sw_buf->num_elems = CPU_TO_LE16(1);
2440 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
2441 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
2443 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2444 ice_aqc_opc_free_res, NULL);
2447 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
2449 /* Prepare buffer to free for VEB Counter resource */
2450 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
2451 ice_malloc(hw, buf_len);
/* Allocation failure here still releases the first buffer. */
2453 ice_free(hw, sw_buf);
2454 return ICE_ERR_NO_MEMORY;
2457 /* The number of resource entries in buffer is passed as 1 since only a
2458 * single switch/VEB instance is freed, and hence a single VEB counter
2461 counter_buf->num_elems = CPU_TO_LE16(1);
2462 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
2463 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
2465 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
2466 ice_aqc_opc_free_res, NULL);
/* Keep going on error; remember it as the value to return. */
2468 ice_debug(hw, ICE_DBG_SW, "VEB counter resource could not be freed\n");
2469 ret_status = status;
2472 ice_free(hw, counter_buf);
2473 ice_free(hw, sw_buf);
2479 * @hw: pointer to the HW struct
2480 * @vsi_ctx: pointer to a VSI context struct
2481 * @cd: pointer to command details structure or NULL
2483 * Add a VSI context to the hardware (0x0210)
2486 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2487 struct ice_sq_cd *cd)
2489 struct ice_aqc_add_update_free_vsi_resp *res;
2490 struct ice_aqc_add_get_update_free_vsi *cmd;
2491 struct ice_aq_desc desc;
2492 enum ice_status status;
2494 cmd = &desc.params.vsi_cmd;
2495 res = &desc.params.add_update_free_vsi_res;
2497 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
/* Caller supplies an explicit VSI number only when not allocating from pool. */
2499 if (!vsi_ctx->alloc_from_pool)
2500 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
2501 ICE_AQ_VSI_IS_VALID);
2503 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
/* RD flag: the command carries a write buffer (the VSI info section). */
2505 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2507 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2508 sizeof(vsi_ctx->info), cd);
/* Copy HW-assigned VSI number and pool usage counters back to the caller
 * (the success check guarding these lines is elided in this view).
 */
2511 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
2512 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
2513 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
2521 * @hw: pointer to the HW struct
2522 * @vsi_ctx: pointer to a VSI context struct
2523 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2524 * @cd: pointer to command details structure or NULL
2526 * Free VSI context info from hardware (0x0213)
2529 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2530 bool keep_vsi_alloc, struct ice_sq_cd *cd)
2532 struct ice_aqc_add_update_free_vsi_resp *resp;
2533 struct ice_aqc_add_get_update_free_vsi *cmd;
2534 struct ice_aq_desc desc;
2535 enum ice_status status;
2537 cmd = &desc.params.vsi_cmd;
2538 resp = &desc.params.add_update_free_vsi_res;
2540 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
2542 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* Guarded by keep_vsi_alloc in the full source (condition elided here). */
2544 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
2546 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/* Refresh the caller's view of the global VSI pool counters. */
2548 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2549 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2557 * @hw: pointer to the HW struct
2558 * @vsi_ctx: pointer to a VSI context struct
2559 * @cd: pointer to command details structure or NULL
2561 * Update VSI context in the hardware (0x0211)
2564 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2565 struct ice_sq_cd *cd)
2567 struct ice_aqc_add_update_free_vsi_resp *resp;
2568 struct ice_aqc_add_get_update_free_vsi *cmd;
2569 struct ice_aq_desc desc;
2570 enum ice_status status;
2572 cmd = &desc.params.vsi_cmd;
2573 resp = &desc.params.add_update_free_vsi_res;
2575 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
2577 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* RD flag: this command sends the updated VSI info section to firmware. */
2579 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2581 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2582 sizeof(vsi_ctx->info), cd);
2585 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2586 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2593 * ice_is_vsi_valid - check whether the VSI is valid or not
2594 * @hw: pointer to the HW struct
2595 * @vsi_handle: VSI handle
2597 * check whether the VSI is valid or not
2599 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
/* Valid means: handle is in range AND a context was saved for it. */
2601 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
2605 * ice_get_hw_vsi_num - return the HW VSI number
2606 * @hw: pointer to the HW struct
2607 * @vsi_handle: VSI handle
2609 * return the HW VSI number
2610 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
2612 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
/* No bounds/NULL check here by design — see the Caution above. */
2614 return hw->vsi_ctx[vsi_handle]->vsi_num;
2618 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
2619 * @hw: pointer to the HW struct
2620 * @vsi_handle: VSI handle
2622 * return the VSI context entry for a given VSI handle
2624 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
/* Returns NULL for out-of-range handles (and for unsaved slots, which hold NULL). */
2626 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
2630 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
2631 * @hw: pointer to the HW struct
2632 * @vsi_handle: VSI handle
2633 * @vsi: VSI context pointer
2635 * save the VSI context entry for a given VSI handle
2638 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
/* Stores the pointer only; ownership of @vsi passes to the hw table
 * (freed later by ice_clear_vsi_ctx).
 */
2640 hw->vsi_ctx[vsi_handle] = vsi;
2644 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
2645 * @hw: pointer to the HW struct
2646 * @vsi_handle: VSI handle
2648 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
2650 struct ice_vsi_ctx *vsi;
2653 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* Free the per-TC LAN queue context arrays and NULL them to avoid
 * double-free on a repeated clear.
 */
2656 ice_for_each_traffic_class(i) {
2657 if (vsi->lan_q_ctx[i]) {
2658 ice_free(hw, vsi->lan_q_ctx[i]);
2659 vsi->lan_q_ctx[i] = NULL;
2665 * ice_clear_vsi_ctx - clear the VSI context entry
2666 * @hw: pointer to the HW struct
2667 * @vsi_handle: VSI handle
2669 * clear the VSI context entry
2671 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2673 struct ice_vsi_ctx *vsi;
2675 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* Release queue contexts first, then drop the table slot
 * (the free of @vsi itself is elided in this view).
 */
2677 ice_clear_vsi_q_ctx(hw, vsi_handle);
2679 hw->vsi_ctx[vsi_handle] = NULL;
2684 * ice_clear_all_vsi_ctx - clear all the VSI context entries
2685 * @hw: pointer to the HW struct
2687 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
/* Walk every possible handle; ice_clear_vsi_ctx is safe on empty slots. */
2691 for (i = 0; i < ICE_MAX_VSI; i++)
2692 ice_clear_vsi_ctx(hw, i);
2696 * ice_add_vsi - add VSI context to the hardware and VSI handle list
2697 * @hw: pointer to the HW struct
2698 * @vsi_handle: unique VSI handle provided by drivers
2699 * @vsi_ctx: pointer to a VSI context struct
2700 * @cd: pointer to command details structure or NULL
2702 * Add a VSI context to the hardware also add it into the VSI handle list.
2703 * If this function gets called after reset for existing VSIs then update
2704 * with the new HW VSI number in the corresponding VSI handle list entry.
2707 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2708 struct ice_sq_cd *cd)
2710 struct ice_vsi_ctx *tmp_vsi_ctx;
2711 enum ice_status status;
2713 if (vsi_handle >= ICE_MAX_VSI)
2714 return ICE_ERR_PARAM;
2715 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
2718 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
2720 /* Create a new VSI context */
2721 tmp_vsi_ctx = (struct ice_vsi_ctx *)
2722 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
/* Allocation failed: undo the HW add so firmware and our table stay in sync. */
2724 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
2725 return ICE_ERR_NO_MEMORY;
2727 *tmp_vsi_ctx = *vsi_ctx;
2729 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
/* Context already existed (post-reset path): just refresh the HW VSI number. */
2731 /* update with new HW VSI num */
2732 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
2739 * ice_free_vsi- free VSI context from hardware and VSI handle list
2740 * @hw: pointer to the HW struct
2741 * @vsi_handle: unique VSI handle
2742 * @vsi_ctx: pointer to a VSI context struct
2743 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2744 * @cd: pointer to command details structure or NULL
2746 * Free VSI context info from hardware as well as from VSI handle list
2749 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2750 bool keep_vsi_alloc, struct ice_sq_cd *cd)
2752 enum ice_status status;
2754 if (!ice_is_vsi_valid(hw, vsi_handle))
2755 return ICE_ERR_PARAM;
/* Translate driver handle to the HW VSI number the AQ command needs. */
2756 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2757 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
/* Drop the local context entry (the success check is elided in this view). */
2759 ice_clear_vsi_ctx(hw, vsi_handle);
2765 * @hw: pointer to the HW struct
2766 * @vsi_handle: unique VSI handle
2767 * @vsi_ctx: pointer to a VSI context struct
2768 * @cd: pointer to command details structure or NULL
2770 * Update VSI context in the hardware
2773 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2774 struct ice_sq_cd *cd)
2776 if (!ice_is_vsi_valid(hw, vsi_handle))
2777 return ICE_ERR_PARAM;
/* Handle-to-HW-number translation, then delegate to the AQ wrapper. */
2778 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2779 return ice_aq_update_vsi(hw, vsi_ctx, cd);
2783 * ice_aq_get_vsi_params
2784 * @hw: pointer to the HW struct
2785 * @vsi_ctx: pointer to a VSI context struct
2786 * @cd: pointer to command details structure or NULL
2788 * Get VSI context info from hardware (0x0212)
2791 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2792 struct ice_sq_cd *cd)
2794 struct ice_aqc_add_get_update_free_vsi *cmd;
2795 struct ice_aqc_get_vsi_resp *resp;
2796 struct ice_aq_desc desc;
2797 enum ice_status status;
2799 cmd = &desc.params.vsi_cmd;
2800 resp = &desc.params.get_vsi_resp;
2802 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
2804 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* Firmware fills vsi_ctx->info with the current VSI context. */
2806 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2807 sizeof(vsi_ctx->info), cd);
2809 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
2811 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2812 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2819 * ice_aq_add_update_mir_rule - add/update a mirror rule
2820 * @hw: pointer to the HW struct
2821 * @rule_type: Rule Type
2822 * @dest_vsi: VSI number to which packets will be mirrored
2823 * @count: length of the list
2824 * @mr_buf: buffer for list of mirrored VSI numbers
2825 * @cd: pointer to command details structure or NULL
2828 * Add/Update Mirror Rule (0x260).
2831 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
2832 u16 count, struct ice_mir_rule_buf *mr_buf,
2833 struct ice_sq_cd *cd, u16 *rule_id)
2835 struct ice_aqc_add_update_mir_rule *cmd;
2836 struct ice_aq_desc desc;
2837 enum ice_status status;
2838 __le16 *mr_list = NULL;
/* Validate argument combinations per rule type before touching hardware. */
2841 switch (rule_type) {
2842 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
2843 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
2844 /* Make sure count and mr_buf are set for these rule_types */
2845 if (!(count && mr_buf))
2846 return ICE_ERR_PARAM;
2848 buf_size = count * sizeof(__le16);
2849 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
2851 return ICE_ERR_NO_MEMORY;
2853 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
2854 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
2855 /* Make sure count and mr_buf are not set for these
2858 if (count || mr_buf)
2859 return ICE_ERR_PARAM;
2862 ice_debug(hw, ICE_DBG_SW, "Error due to unsupported rule_type %u\n", rule_type);
2863 return ICE_ERR_OUT_OF_RANGE;
2866 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
2868 /* Pre-process 'mr_buf' items for add/update of virtual port
2869 * ingress/egress mirroring (but not physical port ingress/egress
2875 for (i = 0; i < count; i++) {
2878 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
2880 /* Validate specified VSI number, make sure it is less
2881 * than ICE_MAX_VSI, if not return with error.
2883 if (id >= ICE_MAX_VSI) {
2884 ice_debug(hw, ICE_DBG_SW, "Error VSI index (%u) out-of-range\n",
/* mr_list must be released on this early-exit path — no leak. */
2886 ice_free(hw, mr_list);
2887 return ICE_ERR_OUT_OF_RANGE;
2890 /* add VSI to mirror rule */
2893 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
2894 else /* remove VSI from mirror rule */
2895 mr_list[i] = CPU_TO_LE16(id);
/* A caller-supplied valid rule_id means "update"; otherwise firmware
 * allocates one and returns it via cmd->rule_id.
 */
2899 cmd = &desc.params.add_update_rule;
2900 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
2901 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
2902 ICE_AQC_RULE_ID_VALID_M);
2903 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
2904 cmd->num_entries = CPU_TO_LE16(count);
2905 cmd->dest = CPU_TO_LE16(dest_vsi);
2907 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
2909 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
2911 ice_free(hw, mr_list);
2917 * ice_aq_delete_mir_rule - delete a mirror rule
2918 * @hw: pointer to the HW struct
2919 * @rule_id: Mirror rule ID (to be deleted)
2920 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
2921 * otherwise it is returned to the shared pool
2922 * @cd: pointer to command details structure or NULL
2924 * Delete Mirror Rule (0x261).
2927 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
2928 struct ice_sq_cd *cd)
2930 struct ice_aqc_delete_mir_rule *cmd;
2931 struct ice_aq_desc desc;
2933 /* rule_id should be in the range 0...63 */
2934 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
2935 return ICE_ERR_OUT_OF_RANGE;
2937 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
2939 cmd = &desc.params.del_rule;
/* Firmware requires the VALID bit set alongside the rule ID. */
2940 rule_id |= ICE_AQC_RULE_ID_VALID_M;
2941 cmd->rule_id = CPU_TO_LE16(rule_id);
/* Guarded by keep_allocd in the full source (condition elided here). */
2944 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
2946 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2950 * ice_aq_alloc_free_vsi_list
2951 * @hw: pointer to the HW struct
2952 * @vsi_list_id: VSI list ID returned or used for lookup
2953 * @lkup_type: switch rule filter lookup type
2954 * @opc: switch rules population command type - pass in the command opcode
2956 * allocates or free a VSI list resource
2958 static enum ice_status
2959 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
2960 enum ice_sw_lkup_type lkup_type,
2961 enum ice_adminq_opc opc)
2963 struct ice_aqc_alloc_free_res_elem *sw_buf;
2964 struct ice_aqc_res_elem *vsi_ele;
2965 enum ice_status status;
2968 buf_len = ice_struct_size(sw_buf, elem, 1);
2969 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2971 return ICE_ERR_NO_MEMORY;
2972 sw_buf->num_elems = CPU_TO_LE16(1);
/* Lookup type selects the resource flavor: replication list for most
 * filter types, prune list for VLAN; anything else is a caller error.
 */
2974 if (lkup_type == ICE_SW_LKUP_MAC ||
2975 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2976 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2977 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2978 lkup_type == ICE_SW_LKUP_PROMISC ||
2979 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2980 lkup_type == ICE_SW_LKUP_LAST) {
2981 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
2982 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
2984 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
2986 status = ICE_ERR_PARAM;
2987 goto ice_aq_alloc_free_vsi_list_exit;
/* On free, tell firmware which list ID to release. */
2990 if (opc == ice_aqc_opc_free_res)
2991 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
2993 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
2995 goto ice_aq_alloc_free_vsi_list_exit;
/* On alloc, return the firmware-assigned list ID to the caller. */
2997 if (opc == ice_aqc_opc_alloc_res) {
2998 vsi_ele = &sw_buf->elem[0];
2999 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
3002 ice_aq_alloc_free_vsi_list_exit:
3003 ice_free(hw, sw_buf);
3008 * ice_aq_set_storm_ctrl - Sets storm control configuration
3009 * @hw: pointer to the HW struct
3010 * @bcast_thresh: represents the upper threshold for broadcast storm control
3011 * @mcast_thresh: represents the upper threshold for multicast storm control
3012 * @ctl_bitmask: storm control knobs
3014 * Sets the storm control configuration (0x0280)
3017 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
3020 struct ice_aqc_storm_cfg *cmd;
3021 struct ice_aq_desc desc;
3023 cmd = &desc.params.storm_conf;
3025 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
/* Thresholds are masked to the valid field width before sending. */
3027 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
3028 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
3029 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
3031 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3035 * ice_aq_get_storm_ctrl - gets storm control configuration
3036 * @hw: pointer to the HW struct
3037 * @bcast_thresh: represents the upper threshold for broadcast storm control
3038 * @mcast_thresh: represents the upper threshold for multicast storm control
3039 * @ctl_bitmask: storm control knobs
3041 * Gets the storm control configuration (0x0281)
3044 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
3047 enum ice_status status;
3048 struct ice_aq_desc desc;
3050 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
3052 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
/* Output pointers are optional; each copy-out below is guarded by a
 * NULL check in the full source (checks elided in this view).
 */
3054 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
3057 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
3060 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
3063 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
3070 * ice_aq_sw_rules - add/update/remove switch rules
3071 * @hw: pointer to the HW struct
3072 * @rule_list: pointer to switch rule population list
3073 * @rule_list_sz: total size of the rule list in bytes
3074 * @num_rules: number of switch rules in the rule_list
3075 * @opc: switch rules population command type - pass in the command opcode
3076 * @cd: pointer to command details structure or NULL
3078 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
3080 static enum ice_status
3081 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
3082 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
3084 struct ice_aq_desc desc;
3085 enum ice_status status;
3087 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
/* Only the three switch-rule opcodes are accepted here. */
3089 if (opc != ice_aqc_opc_add_sw_rules &&
3090 opc != ice_aqc_opc_update_sw_rules &&
3091 opc != ice_aqc_opc_remove_sw_rules)
3092 return ICE_ERR_PARAM;
3094 ice_fill_dflt_direct_cmd_desc(&desc, opc);
3096 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3097 desc.params.sw_rules.num_rules_fltr_entry_index =
3098 CPU_TO_LE16(num_rules);
3099 status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
/* Map firmware ENOENT on update/remove to a distinct driver error so
 * callers can tell "rule does not exist" from other failures.
 */
3100 if (opc != ice_aqc_opc_add_sw_rules &&
3101 hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
3102 status = ICE_ERR_DOES_NOT_EXIST;
3108 * ice_aq_add_recipe - add switch recipe
3109 * @hw: pointer to the HW struct
3110 * @s_recipe_list: pointer to switch rule population list
3111 * @num_recipes: number of switch recipes in the list
3112 * @cd: pointer to command details structure or NULL
3117 ice_aq_add_recipe(struct ice_hw *hw,
3118 struct ice_aqc_recipe_data_elem *s_recipe_list,
3119 u16 num_recipes, struct ice_sq_cd *cd)
3121 struct ice_aqc_add_get_recipe *cmd;
3122 struct ice_aq_desc desc;
3125 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3126 cmd = &desc.params.add_get_recipe;
3127 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
3129 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
3130 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
/* Buffer carries the whole recipe array in one command. */
3132 buf_size = num_recipes * sizeof(*s_recipe_list);
3134 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
3138 * ice_aq_get_recipe - get switch recipe
3139 * @hw: pointer to the HW struct
3140 * @s_recipe_list: pointer to switch rule population list
3141 * @num_recipes: pointer to the number of recipes (input and output)
3142 * @recipe_root: root recipe number of recipe(s) to retrieve
3143 * @cd: pointer to command details structure or NULL
3147 * On input, *num_recipes should equal the number of entries in s_recipe_list.
3148 * On output, *num_recipes will equal the number of entries returned in
3151 * The caller must supply enough space in s_recipe_list to hold all possible
3152 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
3155 ice_aq_get_recipe(struct ice_hw *hw,
3156 struct ice_aqc_recipe_data_elem *s_recipe_list,
3157 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
3159 struct ice_aqc_add_get_recipe *cmd;
3160 struct ice_aq_desc desc;
3161 enum ice_status status;
/* Enforce the contract documented above: caller must size for the max. */
3164 if (*num_recipes != ICE_MAX_NUM_RECIPES)
3165 return ICE_ERR_PARAM;
3167 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3168 cmd = &desc.params.add_get_recipe;
3169 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
3171 cmd->return_index = CPU_TO_LE16(recipe_root);
3172 cmd->num_sub_recipes = 0;
3174 buf_size = *num_recipes * sizeof(*s_recipe_list);
3176 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
/* Firmware reports how many entries it actually returned. */
3177 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
3183 * ice_update_recipe_lkup_idx - update a default recipe based on the lkup_idx
3184 * @hw: pointer to the HW struct
3185 * @params: parameters used to update the default recipe
3187 * This function only supports updating default recipes and it only supports
3188 * updating a single recipe based on the lkup_idx at a time.
3190 * This is done as a read-modify-write operation. First, get the current recipe
3191 * contents based on the recipe's ID. Then modify the field vector index and
3192 * mask if it's valid at the lkup_idx. Finally, use the add recipe AQ to update
3193 * the pre-existing recipe with the modifications.
3196 ice_update_recipe_lkup_idx(struct ice_hw *hw,
3197 struct ice_update_recipe_lkup_idx_params *params)
3199 struct ice_aqc_recipe_data_elem *rcp_list;
3200 u16 num_recps = ICE_MAX_NUM_RECIPES;
3201 enum ice_status status;
/* ice_aq_get_recipe requires a buffer sized for ICE_MAX_NUM_RECIPES. */
3203 rcp_list = (struct ice_aqc_recipe_data_elem *)ice_malloc(hw, num_recps * sizeof(*rcp_list));
3205 return ICE_ERR_NO_MEMORY;
3207 /* read current recipe list from firmware */
3208 rcp_list->recipe_indx = params->rid;
3209 status = ice_aq_get_recipe(hw, rcp_list, &num_recps, params->rid, NULL);
3211 ice_debug(hw, ICE_DBG_SW, "Failed to get recipe %d, status %d\n",
3212 params->rid, status);
3216 /* only modify existing recipe's lkup_idx and mask if valid, while
3217 * leaving all other fields the same, then update the recipe firmware
3219 rcp_list->content.lkup_indx[params->lkup_idx] = params->fv_idx;
3220 if (params->mask_valid)
3221 rcp_list->content.mask[params->lkup_idx] =
3222 CPU_TO_LE16(params->mask);
3224 if (params->ignore_valid)
3225 rcp_list->content.lkup_indx[params->lkup_idx] |=
3226 ICE_AQ_RECIPE_LKUP_IGNORE;
/* Write back only the single modified recipe (count of 1). */
3228 status = ice_aq_add_recipe(hw, &rcp_list[0], 1, NULL);
3230 ice_debug(hw, ICE_DBG_SW, "Failed to update recipe %d lkup_idx %d fv_idx %d mask %d mask_valid %s, status %d\n",
3231 params->rid, params->lkup_idx, params->fv_idx,
3232 params->mask, params->mask_valid ? "true" : "false",
3236 ice_free(hw, rcp_list);
3241 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
3242 * @hw: pointer to the HW struct
3243 * @profile_id: package profile ID to associate the recipe with
3244 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
3245 * @cd: pointer to command details structure or NULL
3246 * Recipe to profile association (0x0291)
3249 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
3250 struct ice_sq_cd *cd)
3252 struct ice_aqc_recipe_to_profile *cmd;
3253 struct ice_aq_desc desc;
3255 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3256 cmd = &desc.params.recipe_to_profile;
3257 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
3258 cmd->profile_id = CPU_TO_LE16(profile_id);
3259 /* Set the recipe ID bit in the bitmask to let the device know which
3260 * profile we are associating the recipe to
3262 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
3263 ICE_NONDMA_TO_NONDMA);
3265 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3269 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
3270 * @hw: pointer to the HW struct
3271 * @profile_id: package profile ID to associate the recipe with
3272 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
3273 * @cd: pointer to command details structure or NULL
3274 * Associate profile ID with given recipe (0x0293)
3277 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
3278 struct ice_sq_cd *cd)
3280 struct ice_aqc_recipe_to_profile *cmd;
3281 struct ice_aq_desc desc;
3282 enum ice_status status;
3284 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3285 cmd = &desc.params.recipe_to_profile;
3286 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
3287 cmd->profile_id = CPU_TO_LE16(profile_id);
3289 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/* Copy the recipe association bitmap out of the descriptor response. */
3291 ice_memcpy(r_bitmap, cmd->recipe_assoc,
3292 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
3298 * ice_alloc_recipe - add recipe resource
3299 * @hw: pointer to the hardware structure
3300 * @rid: recipe ID returned as response to AQ call
3302 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
3304 struct ice_aqc_alloc_free_res_elem *sw_buf;
3305 enum ice_status status;
3308 buf_len = ice_struct_size(sw_buf, elem, 1);
3309 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
3311 return ICE_ERR_NO_MEMORY;
/* Recipes are allocated as a shared resource type. */
3313 sw_buf->num_elems = CPU_TO_LE16(1);
3314 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
3315 ICE_AQC_RES_TYPE_S) |
3316 ICE_AQC_RES_TYPE_FLAG_SHARED);
3317 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
3318 ice_aqc_opc_alloc_res, NULL);
/* On success, firmware returns the new recipe ID in the element. */
3320 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
3321 ice_free(hw, sw_buf);
3326 /* ice_init_port_info - Initialize port_info with switch configuration data
3327 * @pi: pointer to port_info
3328 * @vsi_port_num: VSI number or port number
3329 * @type: Type of switch element (port or VSI)
3330 * @swid: switch ID of the switch the element is attached to
3331 * @pf_vf_num: PF or VF number
3332 * @is_vf: true if the element is a VF, false otherwise
3335 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
3336 u16 swid, u16 pf_vf_num, bool is_vf)
/* NOTE(review): switch body partially elided in this view — only the
 * physical-port case and the default (error) case are visible.
 */
3339 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
3340 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
3342 pi->pf_vf_num = pf_vf_num;
/* No default TX/RX VSI configured yet — mark both invalid. */
3344 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3345 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3348 ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
3353 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
3354 * @hw: pointer to the hardware structure
3356 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
3358 struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
3359 enum ice_status status;
/* This driver build supports a single physical port. */
3366 num_total_ports = 1;
3368 rbuf = (struct ice_aqc_get_sw_cfg_resp_elem *)
3369 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
3372 return ICE_ERR_NO_MEMORY;
3374 /* Multiple calls to ice_aq_get_sw_cfg may be required
3375 * to get all the switch configuration information. The need
3376 * for additional calls is indicated by ice_aq_get_sw_cfg
3377 * writing a non-zero value in req_desc
3380 struct ice_aqc_get_sw_cfg_resp_elem *ele;
3382 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
3383 &req_desc, &num_elems, NULL);
3388 for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
3389 u16 pf_vf_num, swid, vsi_port_num;
/* Decode the packed response element: port/VSI number, function
 * number, SWID, VF flag, and element type.
 */
3393 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
3394 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
3396 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
3397 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
3399 swid = LE16_TO_CPU(ele->swid);
3401 if (LE16_TO_CPU(ele->pf_vf_num) &
3402 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
3405 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
3406 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
3409 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
3410 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
/* More port elements than expected is a configuration error. */
3411 if (j == num_total_ports) {
3412 ice_debug(hw, ICE_DBG_SW, "more ports than expected\n");
3413 status = ICE_ERR_CFG;
3416 ice_init_port_info(hw->port_info,
3417 vsi_port_num, res_type, swid,
/* Keep polling until firmware clears req_desc or an error occurs. */
3425 } while (req_desc && !status);
3433 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
3434 * @hw: pointer to the hardware structure
3435 * @fi: filter info structure to fill/update
3437 * This helper function populates the lb_en and lan_en elements of the provided
3438 * ice_fltr_info struct using the switch's type and characteristics of the
3439 * switch rule being configured.
3441 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
/* RX default-port forwarding rules (body of this branch elided in view). */
3443 if ((fi->flag & ICE_FLTR_RX) &&
3444 (fi->fltr_act == ICE_FWD_TO_VSI ||
3445 fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
3446 fi->lkup_type == ICE_SW_LKUP_LAST)
3450 if ((fi->flag & ICE_FLTR_TX) &&
3451 (fi->fltr_act == ICE_FWD_TO_VSI ||
3452 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
3453 fi->fltr_act == ICE_FWD_TO_Q ||
3454 fi->fltr_act == ICE_FWD_TO_QGRP)) {
3455 /* Setting LB for prune actions will result in replicated
3456 * packets to the internal switch that will be dropped.
3458 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
3461 /* Set lan_en to TRUE if
3462 * 1. The switch is a VEB AND
3464 * 2.1 The lookup is a directional lookup like ethertype,
3465 * promiscuous, ethertype-MAC, promiscuous-VLAN
3466 * and default-port OR
3467 * 2.2 The lookup is VLAN, OR
3468 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
3469 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
3473 * The switch is a VEPA.
3475 * In all other cases, the LAN enable has to be set to false.
3478 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3479 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
3480 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3481 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3482 fi->lkup_type == ICE_SW_LKUP_DFLT ||
3483 fi->lkup_type == ICE_SW_LKUP_VLAN ||
3484 (fi->lkup_type == ICE_SW_LKUP_MAC &&
3485 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
3486 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
3487 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
3496 * ice_fill_sw_rule - Helper function to fill switch rule structure
3497 * @hw: pointer to the hardware structure
3498 * @f_info: entry containing packet forwarding information
3499 * @s_rule: switch rule structure to be filled in based on mac_entry
3500 * @opc: switch rules population command type - pass in the command opcode
3503 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
3504 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
/* vlan_id starts out of range (> ICE_MAX_VLAN_ID) so the VLAN-programming
 * step below is skipped unless a lookup type assigns a real VLAN ID.
 */
3506 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
3507 u16 vlan_tpid = ICE_ETH_P_8021Q;
/* Remove rules only need the rule index; no action or header required. */
3515 if (opc == ice_aqc_opc_remove_sw_rules) {
3516 s_rule->pdata.lkup_tx_rx.act = 0;
3517 s_rule->pdata.lkup_tx_rx.index =
3518 CPU_TO_LE16(f_info->fltr_rule_id);
3519 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
3523 eth_hdr_sz = sizeof(dummy_eth_header);
3524 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
3526 /* initialize the ether header with a dummy header */
3527 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
3528 ice_fill_sw_info(hw, f_info);
/* Build the action word from the filter's forwarding action. */
3530 switch (f_info->fltr_act) {
3531 case ICE_FWD_TO_VSI:
3532 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
3533 ICE_SINGLE_ACT_VSI_ID_M;
3534 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
3535 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3536 ICE_SINGLE_ACT_VALID_BIT;
3538 case ICE_FWD_TO_VSI_LIST:
3539 act |= ICE_SINGLE_ACT_VSI_LIST;
3540 act |= (f_info->fwd_id.vsi_list_id <<
3541 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
3542 ICE_SINGLE_ACT_VSI_LIST_ID_M;
3543 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
3544 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3545 ICE_SINGLE_ACT_VALID_BIT;
3548 act |= ICE_SINGLE_ACT_TO_Q;
3549 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3550 ICE_SINGLE_ACT_Q_INDEX_M;
3552 case ICE_DROP_PACKET:
3553 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
3554 ICE_SINGLE_ACT_VALID_BIT;
3556 case ICE_FWD_TO_QGRP:
/* Queue-group size is encoded as log2 in the Q_REGION field. */
3557 q_rgn = f_info->qgrp_size > 0 ?
3558 (u8)ice_ilog2(f_info->qgrp_size) : 0;
3559 act |= ICE_SINGLE_ACT_TO_Q;
3560 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3561 ICE_SINGLE_ACT_Q_INDEX_M;
3562 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
3563 ICE_SINGLE_ACT_Q_REGION_M;
/* lb_en/lan_en conditions (elided in this view) gate these two bits. */
3570 act |= ICE_SINGLE_ACT_LB_ENABLE;
3572 act |= ICE_SINGLE_ACT_LAN_ENABLE;
/* Pick the DA / VLAN values to program into the dummy header based on
 * the lookup type.
 */
3574 switch (f_info->lkup_type) {
3575 case ICE_SW_LKUP_MAC:
3576 daddr = f_info->l_data.mac.mac_addr;
3578 case ICE_SW_LKUP_VLAN:
3579 vlan_id = f_info->l_data.vlan.vlan_id;
3580 if (f_info->l_data.vlan.tpid_valid)
3581 vlan_tpid = f_info->l_data.vlan.tpid;
3582 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
3583 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
3584 act |= ICE_SINGLE_ACT_PRUNE;
3585 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
3588 case ICE_SW_LKUP_ETHERTYPE_MAC:
3589 daddr = f_info->l_data.ethertype_mac.mac_addr;
3591 case ICE_SW_LKUP_ETHERTYPE:
3592 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
3593 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
3595 case ICE_SW_LKUP_MAC_VLAN:
3596 daddr = f_info->l_data.mac_vlan.mac_addr;
3597 vlan_id = f_info->l_data.mac_vlan.vlan_id;
3599 case ICE_SW_LKUP_PROMISC_VLAN:
3600 vlan_id = f_info->l_data.mac_vlan.vlan_id;
3602 case ICE_SW_LKUP_PROMISC:
3603 daddr = f_info->l_data.mac_vlan.mac_addr;
3609 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
3610 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
3611 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
3613 /* Recipe set depending on lookup type */
3614 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
3615 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
3616 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* Program DA into the header (the `if (daddr)` guard is elided here). */
3619 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
3620 ICE_NONDMA_TO_NONDMA);
/* Only program VLAN TCI/TPID when a valid VLAN ID was selected above. */
3622 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
3623 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
3624 *off = CPU_TO_BE16(vlan_id);
3625 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
3626 *off = CPU_TO_BE16(vlan_tpid);
3629 /* Create the switch rule with the final dummy Ethernet header */
3630 if (opc != ice_aqc_opc_update_sw_rules)
3631 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
3635 * ice_add_marker_act
3636 * @hw: pointer to the hardware structure
3637 * @m_ent: the management entry for which sw marker needs to be added
3638 * @sw_marker: sw marker to tag the Rx descriptor with
3639 * @l_id: large action resource ID
3641 * Create a large action to hold software marker and update the switch rule
3642 * entry pointed by m_ent with newly created large action
3644 static enum ice_status
3645 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3646 u16 sw_marker, u16 l_id)
3648 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
3649 /* For software marker we need 3 large actions
3650 * 1. FWD action: FWD TO VSI or VSI LIST
3651 * 2. GENERIC VALUE action to hold the profile ID
3652 * 3. GENERIC VALUE action to hold the software marker ID
3654 const u16 num_lg_acts = 3;
3655 enum ice_status status;
/* Markers are only supported on MAC lookup rules. */
3661 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3662 return ICE_ERR_PARAM;
3664 /* Create two back-to-back switch rules and submit them to the HW using
3665 * one memory buffer:
3669 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
3670 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3671 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3673 return ICE_ERR_NO_MEMORY;
/* Second rule lives right after the large action in the same buffer. */
3675 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3677 /* Fill in the first switch rule i.e. large action */
3678 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3679 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3680 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
3682 /* First action VSI forwarding or VSI list forwarding depending on how
/* With more than one subscribed VSI the entry forwards to a VSI list,
 * otherwise to the single HW VSI.
 */
3685 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
3686 m_ent->fltr_info.fwd_id.hw_vsi_id;
3688 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3689 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
3690 if (m_ent->vsi_count > 1)
3691 act |= ICE_LG_ACT_VSI_LIST;
3692 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3694 /* Second action descriptor type */
3695 act = ICE_LG_ACT_GENERIC;
3697 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
3698 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3700 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
3701 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
3703 /* Third action Marker value */
3704 act |= ICE_LG_ACT_GENERIC;
3705 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
3706 ICE_LG_ACT_GENERIC_VALUE_M;
3708 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
3710 /* call the fill switch rule to fill the lookup Tx Rx structure */
3711 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3712 ice_aqc_opc_update_sw_rules);
3714 /* Update the action to point to the large action ID */
3715 rx_tx->pdata.lkup_tx_rx.act =
3716 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
3717 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
3718 ICE_SINGLE_ACT_PTR_VAL_M));
3720 /* Use the filter rule ID of the previously created rule with single
3721 * act. Once the update happens, hardware will treat this as large
3724 rx_tx->pdata.lkup_tx_rx.index =
3725 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
/* Submit both rules (large action + updated lookup) in one AQ call. */
3727 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3728 ice_aqc_opc_update_sw_rules, NULL);
/* Book-keeping on success (success guard is in a gap of this listing). */
3730 m_ent->lg_act_idx = l_id;
3731 m_ent->sw_marker_id = sw_marker;
3734 ice_free(hw, lg_act);
3739 * ice_add_counter_act - add/update filter rule with counter action
3740 * @hw: pointer to the hardware structure
3741 * @m_ent: the management entry for which counter needs to be added
3742 * @counter_id: VLAN counter ID returned as part of allocate resource
3743 * @l_id: large action resource ID
3745 static enum ice_status
3746 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3747 u16 counter_id, u16 l_id)
3749 struct ice_aqc_sw_rules_elem *lg_act;
3750 struct ice_aqc_sw_rules_elem *rx_tx;
3751 enum ice_status status;
3752 /* 2 actions will be added while adding a large action counter */
3753 const int num_acts = 2;
/* Counter actions are only supported on MAC lookup rules. */
3760 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3761 return ICE_ERR_PARAM;
3763 /* Create two back-to-back switch rules and submit them to the HW using
3764 * one memory buffer:
3768 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
3769 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3770 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3772 return ICE_ERR_NO_MEMORY;
/* Second rule sits immediately after the large action in the buffer. */
3774 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3776 /* Fill in the first switch rule i.e. large action */
3777 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3778 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3779 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
3781 /* First action VSI forwarding or VSI list forwarding depending on how
/* VSI list when multiple VSIs subscribe, single HW VSI otherwise. */
3784 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
3785 m_ent->fltr_info.fwd_id.hw_vsi_id;
3787 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3788 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
3789 ICE_LG_ACT_VSI_LIST_ID_M;
3790 if (m_ent->vsi_count > 1)
3791 act |= ICE_LG_ACT_VSI_LIST;
3792 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3794 /* Second action counter ID */
3795 act = ICE_LG_ACT_STAT_COUNT;
3796 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
3797 ICE_LG_ACT_STAT_COUNT_M;
3798 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3800 /* call the fill switch rule to fill the lookup Tx Rx structure */
3801 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3802 ice_aqc_opc_update_sw_rules);
/* Redirect the lookup rule's action to point at the large action index. */
3804 act = ICE_SINGLE_ACT_PTR;
3805 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
3806 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
3808 /* Use the filter rule ID of the previously created rule with single
3809 * act. Once the update happens, hardware will treat this as large
3812 f_rule_id = m_ent->fltr_info.fltr_rule_id;
3813 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
/* Submit both rules in one AQ call. */
3815 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3816 ice_aqc_opc_update_sw_rules, NULL);
/* Book-keeping on success (success guard is in a gap of this listing). */
3818 m_ent->lg_act_idx = l_id;
3819 m_ent->counter_index = counter_id;
3822 ice_free(hw, lg_act);
3827 * ice_create_vsi_list_map
3828 * @hw: pointer to the hardware structure
3829 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
3830 * @num_vsi: number of VSI handles in the array
3831 * @vsi_list_id: VSI list ID generated as part of allocate resource
3833 * Helper function to create a new entry of VSI list ID to VSI mapping
3834 * using the given VSI list ID
3836 static struct ice_vsi_list_map_info *
3837 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3840 struct ice_switch_info *sw = hw->switch_info;
3841 struct ice_vsi_list_map_info *v_map;
3844 v_map = (struct ice_vsi_list_map_info *)ice_malloc(hw, sizeof(*v_map));
3848 v_map->vsi_list_id = vsi_list_id;
/* Record each subscribed VSI handle in the map's bitmap. */
3850 for (i = 0; i < num_vsi; i++)
3851 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
/* Track the new map on the switch-wide list; the map is returned to the
 * caller (return statement is in a gap of this listing).
 */
3853 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
3858 * ice_update_vsi_list_rule
3859 * @hw: pointer to the hardware structure
3860 * @vsi_handle_arr: array of VSI handles to form a VSI list
3861 * @num_vsi: number of VSI handles in the array
3862 * @vsi_list_id: VSI list ID generated as part of allocate resource
3863 * @remove: Boolean value to indicate if this is a remove action
3864 * @opc: switch rules population command type - pass in the command opcode
3865 * @lkup_type: lookup type of the filter
3867 * Call AQ command to add a new switch rule or update existing switch rule
3868 * using the given VSI list ID
3870 static enum ice_status
3871 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3872 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
3873 enum ice_sw_lkup_type lkup_type)
3875 struct ice_aqc_sw_rules_elem *s_rule;
3876 enum ice_status status;
3882 return ICE_ERR_PARAM;
/* Non-VLAN lookups use VSI-list set/clear rule types; VLAN lookups use
 * prune-list set/clear.  Any other lookup type is rejected below.
 */
3884 if (lkup_type == ICE_SW_LKUP_MAC ||
3885 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
3886 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3887 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3888 lkup_type == ICE_SW_LKUP_PROMISC ||
3889 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3890 lkup_type == ICE_SW_LKUP_LAST)
3891 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
3892 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
3893 else if (lkup_type == ICE_SW_LKUP_VLAN)
3894 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
3895 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
3897 return ICE_ERR_PARAM;
3899 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
3900 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3902 return ICE_ERR_NO_MEMORY;
/* Translate every software VSI handle into its HW VSI number, failing
 * fast on an invalid handle (goto to cleanup is in a gap).
 */
3903 for (i = 0; i < num_vsi; i++) {
3904 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
3905 status = ICE_ERR_PARAM;
3908 /* AQ call requires hw_vsi_id(s) */
3909 s_rule->pdata.vsi_list.vsi[i] =
3910 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
3913 s_rule->type = CPU_TO_LE16(rule_type);
3914 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
3915 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
3917 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
3920 ice_free(hw, s_rule);
3925 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
3926 * @hw: pointer to the HW struct
3927 * @vsi_handle_arr: array of VSI handles to form a VSI list
3928 * @num_vsi: number of VSI handles in the array
3929 * @vsi_list_id: stores the ID of the VSI list to be created
3930 * @lkup_type: switch rule filter's lookup type
3932 static enum ice_status
3933 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3934 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
3936 enum ice_status status;
/* First allocate a VSI list resource; *vsi_list_id receives the new ID. */
3938 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
3939 ice_aqc_opc_alloc_res);
3943 /* Update the newly created VSI list to include the specified VSIs */
3944 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
3945 *vsi_list_id, false,
3946 ice_aqc_opc_add_sw_rules, lkup_type);
3950 * ice_create_pkt_fwd_rule
3951 * @hw: pointer to the hardware structure
3952 * @recp_list: corresponding filter management list
3953 * @f_entry: entry containing packet forwarding information
3955 * Create switch rule with given filter information and add an entry
3956 * to the corresponding filter management list to track this switch rule
3959 static enum ice_status
3960 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3961 struct ice_fltr_list_entry *f_entry)
3963 struct ice_fltr_mgmt_list_entry *fm_entry;
3964 struct ice_aqc_sw_rules_elem *s_rule;
3965 enum ice_status status;
3967 s_rule = (struct ice_aqc_sw_rules_elem *)
3968 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
3970 return ICE_ERR_NO_MEMORY;
3971 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3972 ice_malloc(hw, sizeof(*fm_entry));
3974 status = ICE_ERR_NO_MEMORY;
3975 goto ice_create_pkt_fwd_rule_exit;
3978 fm_entry->fltr_info = f_entry->fltr_info;
3980 /* Initialize all the fields for the management entry */
/* Fresh rule: one subscribed VSI, no large action / marker / counter. */
3981 fm_entry->vsi_count = 1;
3982 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
3983 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
3984 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
3986 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
3987 ice_aqc_opc_add_sw_rules);
3989 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3990 ice_aqc_opc_add_sw_rules, NULL);
/* On AQ failure only the management entry is freed; s_rule is freed at
 * the common exit label below.
 */
3992 ice_free(hw, fm_entry);
3993 goto ice_create_pkt_fwd_rule_exit;
/* Propagate the FW-assigned rule ID into both the caller's entry and the
 * tracked management entry.
 */
3996 f_entry->fltr_info.fltr_rule_id =
3997 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3998 fm_entry->fltr_info.fltr_rule_id =
3999 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4001 /* The book keeping entries will get removed when base driver
4002 * calls remove filter AQ command
4004 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
4006 ice_create_pkt_fwd_rule_exit:
4007 ice_free(hw, s_rule);
4012 * ice_update_pkt_fwd_rule
4013 * @hw: pointer to the hardware structure
4014 * @f_info: filter information for switch rule
4016 * Call AQ command to update a previously created switch rule with a
4019 static enum ice_status
4020 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
4022 struct ice_aqc_sw_rules_elem *s_rule;
4023 enum ice_status status;
4025 s_rule = (struct ice_aqc_sw_rules_elem *)
4026 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
4028 return ICE_ERR_NO_MEMORY;
/* Rebuild the rule element from f_info, then target the existing rule
 * by its previously returned rule ID.
 */
4030 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
4032 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
4034 /* Update switch rule with new rule set to forward VSI list */
4035 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
4036 ice_aqc_opc_update_sw_rules, NULL);
4038 ice_free(hw, s_rule);
4043 * ice_update_sw_rule_bridge_mode
4044 * @hw: pointer to the HW struct
4046 * Updates unicast switch filter rules based on VEB/VEPA mode
4048 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
4050 struct ice_switch_info *sw = hw->switch_info;
4051 struct ice_fltr_mgmt_list_entry *fm_entry;
4052 enum ice_status status = ICE_SUCCESS;
4053 struct LIST_HEAD_TYPE *rule_head;
4054 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
/* Only MAC lookup rules are affected by a bridge-mode change. */
4056 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4057 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
4059 ice_acquire_lock(rule_lock);
4060 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
4062 struct ice_fltr_info *fi = &fm_entry->fltr_info;
4063 u8 *addr = fi->l_data.mac.mac_addr;
4065 /* Update unicast Tx rules to reflect the selected
/* Re-issue only unicast Tx forwarding rules; multicast/Rx rules and
 * non-forwarding actions are left untouched.
 */
4068 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
4069 (fi->fltr_act == ICE_FWD_TO_VSI ||
4070 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
4071 fi->fltr_act == ICE_FWD_TO_Q ||
4072 fi->fltr_act == ICE_FWD_TO_QGRP)) {
4073 status = ice_update_pkt_fwd_rule(hw, fi);
4079 ice_release_lock(rule_lock);
4085 * ice_add_update_vsi_list
4086 * @hw: pointer to the hardware structure
4087 * @m_entry: pointer to current filter management list entry
4088 * @cur_fltr: filter information from the book keeping entry
4089 * @new_fltr: filter information with the new VSI to be added
4091 * Call AQ command to add or update previously created VSI list with new VSI.
4093 * Helper function to do book keeping associated with adding filter information
4094 * The algorithm to do the book keeping is described below :
4095 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
4096 * if only one VSI has been added till now
4097 * Allocate a new VSI list and add two VSIs
4098 * to this list using switch rule command
4099 * Update the previously created switch rule with the
4100 * newly created VSI list ID
4101 * if a VSI list was previously created
4102 * Add the new VSI to the previously created VSI list set
4103 * using the update switch rule command
4105 static enum ice_status
4106 ice_add_update_vsi_list(struct ice_hw *hw,
4107 struct ice_fltr_mgmt_list_entry *m_entry,
4108 struct ice_fltr_info *cur_fltr,
4109 struct ice_fltr_info *new_fltr)
4111 enum ice_status status = ICE_SUCCESS;
4112 u16 vsi_list_id = 0;
/* Queue / queue-group destinations cannot share a rule via a VSI list,
 * and mixing queue destinations with VSI destinations is unsupported.
 */
4114 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
4115 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
4116 return ICE_ERR_NOT_IMPL;
4118 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
4119 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
4120 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
4121 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
4122 return ICE_ERR_NOT_IMPL;
4124 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
4125 /* Only one entry existed in the mapping and it was not already
4126 * a part of a VSI list. So, create a VSI list with the old and
4129 struct ice_fltr_info tmp_fltr;
4130 u16 vsi_handle_arr[2];
4132 /* A rule already exists with the new VSI being added */
4133 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
4134 return ICE_ERR_ALREADY_EXISTS;
4136 vsi_handle_arr[0] = cur_fltr->vsi_handle;
4137 vsi_handle_arr[1] = new_fltr->vsi_handle;
4138 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
4140 new_fltr->lkup_type);
4144 tmp_fltr = *new_fltr;
4145 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
4146 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4147 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
4148 /* Update the previous switch rule of "MAC forward to VSI" to
4149 * "MAC fwd to VSI list"
4151 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Switch the book-keeping entry over to the new VSI list too. */
4155 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
4156 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
4157 m_entry->vsi_list_info =
4158 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4161 if (!m_entry->vsi_list_info)
4162 return ICE_ERR_NO_MEMORY;
4164 /* If this entry was large action then the large action needs
4165 * to be updated to point to FWD to VSI list
4167 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
4169 ice_add_marker_act(hw, m_entry,
4170 m_entry->sw_marker_id,
4171 m_entry->lg_act_idx);
/* Else branch: a VSI list already exists — just add the new VSI. */
4173 u16 vsi_handle = new_fltr->vsi_handle;
4174 enum ice_adminq_opc opcode;
4176 if (!m_entry->vsi_list_info)
4179 /* A rule already exists with the new VSI being added */
4180 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
4183 /* Update the previously created VSI list set with
4184 * the new VSI ID passed in
4186 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
4187 opcode = ice_aqc_opc_update_sw_rules;
4189 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
4190 vsi_list_id, false, opcode,
4191 new_fltr->lkup_type);
4192 /* update VSI list mapping info with new VSI ID */
4194 ice_set_bit(vsi_handle,
4195 m_entry->vsi_list_info->vsi_map);
/* vsi_count update on success (success guard is in a gap). */
4198 m_entry->vsi_count++;
4203 * ice_find_rule_entry - Search a rule entry
4204 * @list_head: head of rule list
4205 * @f_info: rule information
4207 * Helper function to search for a given rule entry
4208 * Returns pointer to entry storing the rule if found
4210 static struct ice_fltr_mgmt_list_entry *
4211 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
4212 struct ice_fltr_info *f_info)
4214 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
/* Match on the raw lookup data and the Rx/Tx flag; other fields of
 * fltr_info (action, destination) are intentionally ignored.
 */
4216 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4218 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4219 sizeof(f_info->l_data)) &&
4220 f_info->flag == list_itr->fltr_info.flag) {
4229 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
4230 * @recp_list: VSI lists needs to be searched
4231 * @vsi_handle: VSI handle to be found in VSI list
4232 * @vsi_list_id: VSI list ID found containing vsi_handle
4234 * Helper function to search a VSI list with single entry containing given VSI
4235 * handle element. This can be extended further to search VSI list with more
4236 * than 1 vsi_count. Returns pointer to VSI list entry if found.
4238 static struct ice_vsi_list_map_info *
4239 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
4242 struct ice_vsi_list_map_info *map_info = NULL;
4243 struct LIST_HEAD_TYPE *list_head;
4245 list_head = &recp_list->filt_rules;
/* Advanced recipes keep their rules in a different entry type, so the
 * same search is done over ice_adv_fltr_mgmt_list_entry instead.
 */
4246 if (recp_list->adv_rule) {
4247 struct ice_adv_fltr_mgmt_list_entry *list_itr;
4249 LIST_FOR_EACH_ENTRY(list_itr, list_head,
4250 ice_adv_fltr_mgmt_list_entry,
4252 if (list_itr->vsi_list_info) {
4253 map_info = list_itr->vsi_list_info;
4254 if (ice_is_bit_set(map_info->vsi_map,
4256 *vsi_list_id = map_info->vsi_list_id;
4262 struct ice_fltr_mgmt_list_entry *list_itr;
/* Legacy path: only consider lists with exactly one subscribed VSI,
 * per the function description above.
 */
4264 LIST_FOR_EACH_ENTRY(list_itr, list_head,
4265 ice_fltr_mgmt_list_entry,
4267 if (list_itr->vsi_count == 1 &&
4268 list_itr->vsi_list_info) {
4269 map_info = list_itr->vsi_list_info;
4270 if (ice_is_bit_set(map_info->vsi_map,
4272 *vsi_list_id = map_info->vsi_list_id;
4282 * ice_add_rule_internal - add rule for a given lookup type
4283 * @hw: pointer to the hardware structure
4284 * @recp_list: recipe list for which rule has to be added
4285 * @lport: logic port number on which function add rule
4286 * @f_entry: structure containing MAC forwarding information
4288 * Adds or updates the rule lists for a given recipe
4290 static enum ice_status
4291 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4292 u8 lport, struct ice_fltr_list_entry *f_entry)
4294 struct ice_fltr_info *new_fltr, *cur_fltr;
4295 struct ice_fltr_mgmt_list_entry *m_entry;
4296 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4297 enum ice_status status = ICE_SUCCESS;
4299 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4300 return ICE_ERR_PARAM;
4302 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
4303 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
4304 f_entry->fltr_info.fwd_id.hw_vsi_id =
4305 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4307 rule_lock = &recp_list->filt_rule_lock;
4309 ice_acquire_lock(rule_lock);
4310 new_fltr = &f_entry->fltr_info;
/* Rule source: Rx rules match on the logical port, Tx rules on the
 * originating HW VSI number.
 */
4311 if (new_fltr->flag & ICE_FLTR_RX)
4312 new_fltr->src = lport;
4313 else if (new_fltr->flag & ICE_FLTR_TX)
4315 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
/* No existing rule with this lookup data: create a fresh forwarding
 * rule; otherwise fold the new VSI into the existing entry's VSI list.
 */
4317 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
4319 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
4320 goto exit_add_rule_internal;
4323 cur_fltr = &m_entry->fltr_info;
4324 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
4326 exit_add_rule_internal:
4327 ice_release_lock(rule_lock);
4332 * ice_remove_vsi_list_rule
4333 * @hw: pointer to the hardware structure
4334 * @vsi_list_id: VSI list ID generated as part of allocate resource
4335 * @lkup_type: switch rule filter lookup type
4337 * The VSI list should be emptied before this function is called to remove the
4340 static enum ice_status
4341 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
4342 enum ice_sw_lkup_type lkup_type)
4344 /* Free the vsi_list resource that we allocated. It is assumed that the
4345 * list is empty at this point.
4347 return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
4348 ice_aqc_opc_free_res);
4352 * ice_rem_update_vsi_list
4353 * @hw: pointer to the hardware structure
4354 * @vsi_handle: VSI handle of the VSI to remove
4355 * @fm_list: filter management entry for which the VSI list management needs to
4358 static enum ice_status
4359 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
4360 struct ice_fltr_mgmt_list_entry *fm_list)
4362 enum ice_sw_lkup_type lkup_type;
4363 enum ice_status status = ICE_SUCCESS;
/* Only meaningful for entries that currently forward to a VSI list. */
4366 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
4367 fm_list->vsi_count == 0)
4368 return ICE_ERR_PARAM;
4370 /* A rule with the VSI being removed does not exist */
4371 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
4372 return ICE_ERR_DOES_NOT_EXIST;
4374 lkup_type = fm_list->fltr_info.lkup_type;
4375 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
/* Clear this VSI from the HW VSI list (remove = true). */
4376 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
4377 ice_aqc_opc_update_sw_rules,
4382 fm_list->vsi_count--;
4383 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
/* Non-VLAN rules with one VSI left are converted back from
 * "forward to VSI list" to a plain "forward to VSI" rule.
 */
4385 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
4386 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
4387 struct ice_vsi_list_map_info *vsi_list_info =
4388 fm_list->vsi_list_info;
4391 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
4393 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
4394 return ICE_ERR_OUT_OF_RANGE;
4396 /* Make sure VSI list is empty before removing it below */
4397 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
4399 ice_aqc_opc_update_sw_rules,
4404 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
4405 tmp_fltr_info.fwd_id.hw_vsi_id =
4406 ice_get_hw_vsi_num(hw, rem_vsi_handle);
4407 tmp_fltr_info.vsi_handle = rem_vsi_handle;
4408 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
4410 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
4411 tmp_fltr_info.fwd_id.hw_vsi_id, status);
4415 fm_list->fltr_info = tmp_fltr_info;
/* The list resource itself is freed once it is no longer referenced:
 * at one remaining VSI for non-VLAN rules (just converted above), or at
 * zero remaining VSIs for VLAN prune lists.
 */
4418 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
4419 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
4420 struct ice_vsi_list_map_info *vsi_list_info =
4421 fm_list->vsi_list_info;
4423 /* Remove the VSI list since it is no longer used */
4424 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
4426 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
4427 vsi_list_id, status);
4431 LIST_DEL(&vsi_list_info->list_entry);
4432 ice_free(hw, vsi_list_info);
4433 fm_list->vsi_list_info = NULL;
4440 * ice_remove_rule_internal - Remove a filter rule of a given type
4442 * @hw: pointer to the hardware structure
4443 * @recp_list: recipe list for which the rule needs to removed
4444 * @f_entry: rule entry containing filter information
4446 static enum ice_status
4447 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4448 struct ice_fltr_list_entry *f_entry)
4450 struct ice_fltr_mgmt_list_entry *list_elem;
4451 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4452 enum ice_status status = ICE_SUCCESS;
4453 bool remove_rule = false;
4456 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4457 return ICE_ERR_PARAM;
4458 f_entry->fltr_info.fwd_id.hw_vsi_id =
4459 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4461 rule_lock = &recp_list->filt_rule_lock;
4462 ice_acquire_lock(rule_lock);
4463 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
4464 &f_entry->fltr_info);
4466 status = ICE_ERR_DOES_NOT_EXIST;
/* Decide whether the HW rule itself must be removed, or only the VSI
 * list membership updated.
 */
4470 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
4472 } else if (!list_elem->vsi_list_info) {
4473 status = ICE_ERR_DOES_NOT_EXIST;
4475 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
4476 /* a ref_cnt > 1 indicates that the vsi_list is being
4477 * shared by multiple rules. Decrement the ref_cnt and
4478 * remove this rule, but do not modify the list, as it
4479 * is in-use by other rules.
4481 list_elem->vsi_list_info->ref_cnt--;
4484 /* a ref_cnt of 1 indicates the vsi_list is only used
4485 * by one rule. However, the original removal request is only
4486 * for a single VSI. Update the vsi_list first, and only
4487 * remove the rule if there are no further VSIs in this list.
4489 vsi_handle = f_entry->fltr_info.vsi_handle;
4490 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
4493 /* if VSI count goes to zero after updating the VSI list */
4494 if (list_elem->vsi_count == 0)
4499 /* Remove the lookup rule */
4500 struct ice_aqc_sw_rules_elem *s_rule;
/* A remove rule carries no packet header, hence the NO_HDR size. */
4502 s_rule = (struct ice_aqc_sw_rules_elem *)
4503 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
4505 status = ICE_ERR_NO_MEMORY;
4509 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
4510 ice_aqc_opc_remove_sw_rules);
4512 status = ice_aq_sw_rules(hw, s_rule,
4513 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
4514 ice_aqc_opc_remove_sw_rules, NULL);
4516 /* Remove a book keeping from the list */
4517 ice_free(hw, s_rule);
4522 LIST_DEL(&list_elem->list_entry);
4523 ice_free(hw, list_elem);
4526 ice_release_lock(rule_lock);
4531 * ice_aq_get_res_alloc - get allocated resources
4532 * @hw: pointer to the HW struct
4533 * @num_entries: pointer to u16 to store the number of resource entries returned
4534 * @buf: pointer to buffer
4535 * @buf_size: size of buf
4536 * @cd: pointer to command details structure or NULL
4538 * The caller-supplied buffer must be large enough to store the resource
4539 * information for all resource types. Each resource type is an
4540 * ice_aqc_get_res_resp_elem structure.
4543 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
4544 struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
4545 struct ice_sq_cd *cd)
4547 struct ice_aqc_get_res_alloc *resp;
4548 enum ice_status status;
4549 struct ice_aq_desc desc;
4552 return ICE_ERR_BAD_PTR;
4554 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
4555 return ICE_ERR_INVAL_SIZE;
4557 resp = &desc.params.get_res;
4559 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
4560 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* num_entries is optional; report the element count only if requested. */
4562 if (!status && num_entries)
4563 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
4569 * ice_aq_get_res_descs - get allocated resource descriptors
4570 * @hw: pointer to the hardware structure
4571 * @num_entries: number of resource entries in buffer
4572 * @buf: structure to hold response data buffer
4573 * @buf_size: size of buffer
4574 * @res_type: resource type
4575 * @res_shared: is resource shared
4576 * @desc_id: input - first desc ID to start; output - next desc ID
4577 * @cd: pointer to command details structure or NULL
4580 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
4581 struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
4582 bool res_shared, u16 *desc_id, struct ice_sq_cd *cd)
4584 struct ice_aqc_get_allocd_res_desc *cmd;
4585 struct ice_aq_desc desc;
4586 enum ice_status status;
4588 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4590 cmd = &desc.params.get_res_desc;
4593 return ICE_ERR_PARAM;
/* Buffer must hold exactly num_entries descriptors. */
4595 if (buf_size != (num_entries * sizeof(*buf)))
4596 return ICE_ERR_PARAM;
4598 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
/* Encode resource type plus the shared flag into the command word. */
4600 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
4601 ICE_AQC_RES_TYPE_M) | (res_shared ?
4602 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
4603 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
4605 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* desc_id is updated in place so callers can page through descriptors
 * (success guard around this store is in a gap of this listing).
 */
4607 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
4613 * ice_add_mac_rule - Add a MAC address based filter rule
4614 * @hw: pointer to the hardware structure
4615 * @m_list: list of MAC addresses and forwarding information
4616 * @sw: pointer to switch info struct for which function add rule
4617 * @lport: logic port number on which function add rule
4619 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
4620 * multiple unicast addresses, the function assumes that all the
4621 * addresses are unique in a given add_mac call. It doesn't
4622 * check for duplicates in this case, removing duplicates from a given
4623 * list should be taken care of in the caller of this function.
4625 static enum ice_status
4626 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4627 struct ice_switch_info *sw, u8 lport)
4629 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
4630 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
4631 struct ice_fltr_list_entry *m_list_itr;
4632 struct LIST_HEAD_TYPE *rule_head;
4633 u16 total_elem_left, s_rule_size;
4634 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4635 enum ice_status status = ICE_SUCCESS;
4636 u16 num_unicast = 0;
4640 rule_lock = &recp_list->filt_rule_lock;
4641 rule_head = &recp_list->filt_rules;
/* Pass 1: validate every entry; add multicast/shared-unicast rules one at
 * a time, and count non-shared unicast entries for a later bulk AQ update.
 * NOTE(review): the num_unicast increment is in lines elided from this
 * excerpt.
 */
4643 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4645 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
4649 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
4650 vsi_handle = m_list_itr->fltr_info.vsi_handle;
4651 if (!ice_is_vsi_valid(hw, vsi_handle))
4652 return ICE_ERR_PARAM;
4653 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4654 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
4655 /* update the src in case it is VSI num */
4656 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
4657 return ICE_ERR_PARAM;
4658 m_list_itr->fltr_info.src = hw_vsi_id;
4659 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
4660 IS_ZERO_ETHER_ADDR(add))
4661 return ICE_ERR_PARAM;
4662 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4663 /* Don't overwrite the unicast address */
4664 ice_acquire_lock(rule_lock);
4665 if (ice_find_rule_entry(rule_head,
4666 &m_list_itr->fltr_info)) {
4667 ice_release_lock(rule_lock);
4668 return ICE_ERR_ALREADY_EXISTS;
4670 ice_release_lock(rule_lock);
4672 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
4673 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
4674 m_list_itr->status =
4675 ice_add_rule_internal(hw, recp_list, lport,
4677 if (m_list_itr->status)
4678 return m_list_itr->status;
/* Lock is held across buffer fill, AQ submission and bookkeeping below */
4682 ice_acquire_lock(rule_lock);
4683 /* Exit if no suitable entries were found for adding bulk switch rule */
4685 status = ICE_SUCCESS;
4686 goto ice_add_mac_exit;
4689 /* Allocate switch rule buffer for the bulk update for unicast */
4690 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
4691 s_rule = (struct ice_aqc_sw_rules_elem *)
4692 ice_calloc(hw, num_unicast, s_rule_size);
/* NOTE(review): the NULL check on s_rule guarding this error path is in
 * lines elided from this excerpt.
 */
4694 status = ICE_ERR_NO_MEMORY;
4695 goto ice_add_mac_exit;
/* Pass 2: serialize one add-switch-rule element per unicast filter */
4699 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4701 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4702 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4704 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4705 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
4706 ice_aqc_opc_add_sw_rules);
4707 r_iter = (struct ice_aqc_sw_rules_elem *)
4708 ((u8 *)r_iter + s_rule_size);
4712 /* Call AQ bulk switch rule update for all unicast addresses */
4714 /* Call AQ switch rule in AQ_MAX chunk */
4715 for (total_elem_left = num_unicast; total_elem_left > 0;
4716 total_elem_left -= elem_sent) {
4717 struct ice_aqc_sw_rules_elem *entry = r_iter;
/* Cap each AQ call at ICE_AQ_MAX_BUF_LEN worth of rule elements */
4719 elem_sent = MIN_T(u8, total_elem_left,
4720 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
4721 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
4722 elem_sent, ice_aqc_opc_add_sw_rules,
4725 goto ice_add_mac_exit;
4726 r_iter = (struct ice_aqc_sw_rules_elem *)
4727 ((u8 *)r_iter + (elem_sent * s_rule_size));
4730 /* Fill up rule ID based on the value returned from FW */
4732 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4734 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4735 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4736 struct ice_fltr_mgmt_list_entry *fm_entry;
4738 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4739 f_info->fltr_rule_id =
4740 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
4741 f_info->fltr_act = ICE_FWD_TO_VSI;
4742 /* Create an entry to track this MAC address */
4743 fm_entry = (struct ice_fltr_mgmt_list_entry *)
4744 ice_malloc(hw, sizeof(*fm_entry))
4746 status = ICE_ERR_NO_MEMORY;
4747 goto ice_add_mac_exit;
4749 fm_entry->fltr_info = *f_info;
4750 fm_entry->vsi_count = 1;
4751 /* The book keeping entries will get removed when
4752 * base driver calls remove filter AQ command
4755 LIST_ADD(&fm_entry->list_entry, rule_head);
4756 r_iter = (struct ice_aqc_sw_rules_elem *)
4757 ((u8 *)r_iter + s_rule_size);
/* Common exit: drop the rule lock and free the bulk rule buffer */
4762 ice_release_lock(rule_lock);
4764 ice_free(hw, s_rule);
4769 * ice_add_mac - Add a MAC address based filter rule
4770 * @hw: pointer to the hardware structure
4771 * @m_list: list of MAC addresses and forwarding information
4773 * Function adds a MAC rule for the logical port from the HW struct
4775 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
/* NOTE(review): the argument NULL check guarding this early return is in
 * lines elided from this excerpt.
 */
4778 return ICE_ERR_PARAM;
/* Delegate to the rule-level helper using this HW's switch info and port */
4780 return ice_add_mac_rule(hw, m_list, hw->switch_info,
4781 hw->port_info->lport);
4785 * ice_add_vlan_internal - Add one VLAN based filter rule
4786 * @hw: pointer to the hardware structure
4787 * @recp_list: recipe list for which rule has to be added
4788 * @f_entry: filter entry containing one VLAN information
4790 static enum ice_status
4791 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4792 struct ice_fltr_list_entry *f_entry)
4794 struct ice_fltr_mgmt_list_entry *v_list_itr;
4795 struct ice_fltr_info *new_fltr, *cur_fltr;
4796 enum ice_sw_lkup_type lkup_type;
4797 u16 vsi_list_id = 0, vsi_handle;
4798 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4799 enum ice_status status = ICE_SUCCESS;
4801 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4802 return ICE_ERR_PARAM;
4804 f_entry->fltr_info.fwd_id.hw_vsi_id =
4805 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4806 new_fltr = &f_entry->fltr_info;
4808 /* VLAN ID should only be 12 bits */
4809 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
4810 return ICE_ERR_PARAM;
4812 if (new_fltr->src_id != ICE_SRC_ID_VSI)
4813 return ICE_ERR_PARAM;
4815 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
4816 lkup_type = new_fltr->lkup_type;
4817 vsi_handle = new_fltr->vsi_handle;
4818 rule_lock = &recp_list->filt_rule_lock;
4819 ice_acquire_lock(rule_lock);
/* Look for an existing rule for this VLAN; the three cases below handle
 * no-rule, single-VSI reuse, and shared-VSI-list split respectively.
 * NOTE(review): several branch conditions are in lines elided here.
 */
4820 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
4822 struct ice_vsi_list_map_info *map_info = NULL;
4824 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
4825 /* All VLAN pruning rules use a VSI list. Check if
4826 * there is already a VSI list containing VSI that we
4827 * want to add. If found, use the same vsi_list_id for
4828 * this new VLAN rule or else create a new list.
4830 map_info = ice_find_vsi_list_entry(recp_list,
4834 status = ice_create_vsi_list_rule(hw,
4842 /* Convert the action to forwarding to a VSI list. */
4843 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
4844 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
4847 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
4849 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
4852 status = ICE_ERR_DOES_NOT_EXIST;
4855 /* reuse VSI list for new rule and increment ref_cnt */
4857 v_list_itr->vsi_list_info = map_info;
4858 map_info->ref_cnt++;
4860 v_list_itr->vsi_list_info =
4861 ice_create_vsi_list_map(hw, &vsi_handle,
4865 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
4866 /* Update existing VSI list to add new VSI ID only if it used
4869 cur_fltr = &v_list_itr->fltr_info;
4870 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
4873 /* If VLAN rule exists and VSI list being used by this rule is
4874 * referenced by more than 1 VLAN rule. Then create a new VSI
4875 * list appending previous VSI with new VSI and update existing
4876 * VLAN rule to point to new VSI list ID
4878 struct ice_fltr_info tmp_fltr;
4879 u16 vsi_handle_arr[2];
4882 /* Current implementation only supports reusing VSI list with
4883 * one VSI count. We should never hit below condition
4885 if (v_list_itr->vsi_count > 1 &&
4886 v_list_itr->vsi_list_info->ref_cnt > 1) {
4887 ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
4888 status = ICE_ERR_CFG;
/* Recover the single VSI currently on the shared list */
4893 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
4896 /* A rule already exists with the new VSI being added */
4897 if (cur_handle == vsi_handle) {
4898 status = ICE_ERR_ALREADY_EXISTS;
4902 vsi_handle_arr[0] = cur_handle;
4903 vsi_handle_arr[1] = vsi_handle;
4904 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
4905 &vsi_list_id, lkup_type);
4909 tmp_fltr = v_list_itr->fltr_info;
4910 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
4911 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
4912 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4913 /* Update the previous switch rule to a new VSI list which
4914 * includes current VSI that is requested
4916 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
4920 /* before overriding VSI list map info. decrement ref_cnt of
4923 v_list_itr->vsi_list_info->ref_cnt--;
4925 /* now update to newly created list */
4926 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
4927 v_list_itr->vsi_list_info =
4928 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4930 v_list_itr->vsi_count++;
4934 ice_release_lock(rule_lock);
4939 * ice_add_vlan_rule - Add VLAN based filter rule
4940 * @hw: pointer to the hardware structure
4941 * @v_list: list of VLAN entries and forwarding information
4942 * @sw: pointer to switch info struct for which function add rule
4944 static enum ice_status
4945 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4946 struct ice_switch_info *sw)
4948 struct ice_fltr_list_entry *v_list_itr;
4949 struct ice_sw_recipe *recp_list;
/* Validate each entry's lookup type, force the Tx direction flag, and add
 * entries one at a time; stop on the first failure.
 */
4951 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
4952 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
4954 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
4955 return ICE_ERR_PARAM;
4956 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
4957 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
4959 if (v_list_itr->status)
4960 return v_list_itr->status;
4966 * ice_add_vlan - Add a VLAN based filter rule
4967 * @hw: pointer to the hardware structure
4968 * @v_list: list of VLAN and forwarding information
4970 * Function adds a VLAN rule for the logical port from the HW struct
4972 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
/* NOTE(review): the argument NULL check guarding this early return is in
 * lines elided from this excerpt.
 */
4975 return ICE_ERR_PARAM;
4977 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
4981 * ice_add_mac_vlan_rule - Add MAC and VLAN pair based filter rule
4982 * @hw: pointer to the hardware structure
4983 * @mv_list: list of MAC and VLAN filters
4984 * @sw: pointer to switch info struct for which function add rule
4985 * @lport: logic port number on which function add rule
4987 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
4988 * pruning bits enabled, then it is the responsibility of the caller to make
4989 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
4990 * VLAN won't be received on that VSI otherwise.
4992 static enum ice_status
4993 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
4994 struct ice_switch_info *sw, u8 lport)
4996 struct ice_fltr_list_entry *mv_list_itr;
4997 struct ice_sw_recipe *recp_list;
4999 if (!mv_list || !hw)
5000 return ICE_ERR_PARAM;
/* Add each MAC+VLAN pair via the common rule helper; fail fast on error */
5002 recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
5003 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
5005 enum ice_sw_lkup_type l_type =
5006 mv_list_itr->fltr_info.lkup_type;
5008 if (l_type != ICE_SW_LKUP_MAC_VLAN)
5009 return ICE_ERR_PARAM;
5010 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
5011 mv_list_itr->status =
5012 ice_add_rule_internal(hw, recp_list, lport,
5014 if (mv_list_itr->status)
5015 return mv_list_itr->status;
5021 * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
5022 * @hw: pointer to the hardware structure
5023 * @mv_list: list of MAC VLAN addresses and forwarding information
5025 * Function adds a MAC VLAN rule for the logical port from the HW struct
5028 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
5030 if (!mv_list || !hw)
5031 return ICE_ERR_PARAM;
/* Delegate to the rule-level helper using this HW's switch info and port */
5033 return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
5034 hw->port_info->lport);
5038 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
5039 * @hw: pointer to the hardware structure
5040 * @em_list: list of ether type MAC filter, MAC is optional
5041 * @sw: pointer to switch info struct for which function add rule
5042 * @lport: logic port number on which function add rule
5044 * This function requires the caller to populate the entries in
5045 * the filter list with the necessary fields (including flags to
5046 * indicate Tx or Rx rules).
5048 static enum ice_status
5049 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
5050 struct ice_switch_info *sw, u8 lport)
5052 struct ice_fltr_list_entry *em_list_itr;
5054 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
5056 struct ice_sw_recipe *recp_list;
5057 enum ice_sw_lkup_type l_type;
5059 l_type = em_list_itr->fltr_info.lkup_type;
/* NOTE(review): recp_list is indexed by l_type before the validity check
 * below; only an address is computed (no dereference), but the ordering
 * is worth confirming against the full source.
 */
5060 recp_list = &sw->recp_list[l_type];
5062 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
5063 l_type != ICE_SW_LKUP_ETHERTYPE)
5064 return ICE_ERR_PARAM;
5066 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
5069 if (em_list_itr->status)
5070 return em_list_itr->status;
5076 * ice_add_eth_mac - Add an ethertype based filter rule
5077 * @hw: pointer to the hardware structure
5078 * @em_list: list of ethertype and forwarding information
5080 * Function adds an ethertype rule for the logical port from the HW struct
5083 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
5085 if (!em_list || !hw)
5086 return ICE_ERR_PARAM;
/* Delegate to the rule-level helper using this HW's switch info and port */
5088 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
5089 hw->port_info->lport);
5093 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
5094 * @hw: pointer to the hardware structure
5095 * @em_list: list of ethertype or ethertype MAC entries
5096 * @sw: pointer to switch info struct for which function add rule
5098 static enum ice_status
5099 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
5100 struct ice_switch_info *sw)
5102 struct ice_fltr_list_entry *em_list_itr, *tmp;
/* SAFE iteration: ice_remove_rule_internal may unlink the current entry */
5104 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
5106 struct ice_sw_recipe *recp_list;
5107 enum ice_sw_lkup_type l_type;
5109 l_type = em_list_itr->fltr_info.lkup_type;
5111 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
5112 l_type != ICE_SW_LKUP_ETHERTYPE)
5113 return ICE_ERR_PARAM;
5115 recp_list = &sw->recp_list[l_type];
5116 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
5118 if (em_list_itr->status)
5119 return em_list_itr->status;
5125 * ice_remove_eth_mac - remove an ethertype based filter rule
5126 * @hw: pointer to the hardware structure
5127 * @em_list: list of ethertype and forwarding information
5131 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
5133 if (!em_list || !hw)
5134 return ICE_ERR_PARAM;
/* Delegate to the rule-level helper using this HW's switch info */
5136 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
5140 * ice_rem_sw_rule_info
5141 * @hw: pointer to the hardware structure
5142 * @rule_head: pointer to the switch list structure that we want to delete
5145 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
5147 if (!LIST_EMPTY(rule_head)) {
5148 struct ice_fltr_mgmt_list_entry *entry;
5149 struct ice_fltr_mgmt_list_entry *tmp;
/* Unlink and free every bookkeeping entry on the list */
5151 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
5152 ice_fltr_mgmt_list_entry, list_entry) {
5153 LIST_DEL(&entry->list_entry);
5154 ice_free(hw, entry);
5160 * ice_rem_adv_rule_info
5161 * @hw: pointer to the hardware structure
5162 * @rule_head: pointer to the switch list structure that we want to delete
5165 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
5167 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
5168 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
5170 if (LIST_EMPTY(rule_head))
/* Unlink each advanced rule entry; free its lookups array before the
 * entry itself to avoid leaking the nested allocation.
 */
5173 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
5174 ice_adv_fltr_mgmt_list_entry, list_entry) {
5175 LIST_DEL(&lst_itr->list_entry);
5176 ice_free(hw, lst_itr->lkups);
5177 ice_free(hw, lst_itr);
5182 * ice_rem_all_sw_rules_info
5183 * @hw: pointer to the hardware structure
5185 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
5187 struct ice_switch_info *sw = hw->switch_info;
/* Walk every recipe; regular and advanced rules use different bookkeeping
 * entry types and therefore different teardown helpers.
 */
5190 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5191 struct LIST_HEAD_TYPE *rule_head;
5193 rule_head = &sw->recp_list[i].filt_rules;
5194 if (!sw->recp_list[i].adv_rule)
5195 ice_rem_sw_rule_info(hw, rule_head);
5197 ice_rem_adv_rule_info(hw, rule_head);
/* Once an advanced recipe's rule list is empty, clear its flag */
5198 if (sw->recp_list[i].adv_rule &&
5199 LIST_EMPTY(&sw->recp_list[i].filt_rules))
5200 sw->recp_list[i].adv_rule = false;
5205 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
5206 * @pi: pointer to the port_info structure
5207 * @vsi_handle: VSI handle to set as default
5208 * @set: true to add the above mentioned switch rule, false to remove it
5209 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
5211 * add filter rule to set/unset given VSI as default VSI for the switch
5212 * (represented by swid)
5215 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
5218 struct ice_aqc_sw_rules_elem *s_rule;
5219 struct ice_fltr_info f_info;
5220 struct ice_hw *hw = pi->hw;
5221 enum ice_adminq_opc opcode;
5222 enum ice_status status;
5226 if (!ice_is_vsi_valid(hw, vsi_handle))
5227 return ICE_ERR_PARAM;
5228 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
/* An add carries the dummy Ethernet header; a remove needs no header */
5230 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
5231 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
5233 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
/* NOTE(review): the NULL check on s_rule guarding this return is in lines
 * elided from this excerpt.
 */
5235 return ICE_ERR_NO_MEMORY;
5237 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
5239 f_info.lkup_type = ICE_SW_LKUP_DFLT;
5240 f_info.flag = direction;
5241 f_info.fltr_act = ICE_FWD_TO_VSI;
5242 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
/* NOTE(review): the fltr_rule_id assignments below appear to be guarded
 * (likely by !set) in lines elided from this excerpt -- confirm.
 */
5244 if (f_info.flag & ICE_FLTR_RX) {
5245 f_info.src = pi->lport;
5246 f_info.src_id = ICE_SRC_ID_LPORT;
5248 f_info.fltr_rule_id =
5249 pi->dflt_rx_vsi_rule_id;
5250 } else if (f_info.flag & ICE_FLTR_TX) {
5251 f_info.src_id = ICE_SRC_ID_VSI;
5252 f_info.src = hw_vsi_id;
5254 f_info.fltr_rule_id =
5255 pi->dflt_tx_vsi_rule_id;
5259 opcode = ice_aqc_opc_add_sw_rules;
5261 opcode = ice_aqc_opc_remove_sw_rules;
5263 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
5265 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
5266 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
/* On success, record (set) or invalidate (clear) the cached default-VSI
 * number and rule ID in the port_info for the affected direction.
 */
5269 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
5271 if (f_info.flag & ICE_FLTR_TX) {
5272 pi->dflt_tx_vsi_num = hw_vsi_id;
5273 pi->dflt_tx_vsi_rule_id = index;
5274 } else if (f_info.flag & ICE_FLTR_RX) {
5275 pi->dflt_rx_vsi_num = hw_vsi_id;
5276 pi->dflt_rx_vsi_rule_id = index;
5279 if (f_info.flag & ICE_FLTR_TX) {
5280 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
5281 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
5282 } else if (f_info.flag & ICE_FLTR_RX) {
5283 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
5284 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
5289 ice_free(hw, s_rule);
5294 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
5295 * @list_head: head of rule list
5296 * @f_info: rule information
5298 * Helper function to search for a unicast rule entry - this is to be used
5299 * to remove unicast MAC filter that is not shared with other VSIs on the
5302 * Returns pointer to entry storing the rule if found
5304 static struct ice_fltr_mgmt_list_entry *
5305 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
5306 struct ice_fltr_info *f_info)
5308 struct ice_fltr_mgmt_list_entry *list_itr;
/* Match on lookup data, forwarding HW VSI ID, and direction flag */
5310 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
5312 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
5313 sizeof(f_info->l_data)) &&
5314 f_info->fwd_id.hw_vsi_id ==
5315 list_itr->fltr_info.fwd_id.hw_vsi_id &&
5316 f_info->flag == list_itr->fltr_info.flag)
5323 * ice_remove_mac_rule - remove a MAC based filter rule
5324 * @hw: pointer to the hardware structure
5325 * @m_list: list of MAC addresses and forwarding information
5326 * @recp_list: list from which function remove MAC address
5328 * This function removes either a MAC filter rule or a specific VSI from a
5329 * VSI list for a multicast MAC address.
5331 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
5332 * ice_add_mac. Caller should be aware that this call will only work if all
5333 * the entries passed into m_list were added previously. It will not attempt to
5334 * do a partial remove of entries that were found.
5336 static enum ice_status
5337 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
5338 struct ice_sw_recipe *recp_list)
5340 struct ice_fltr_list_entry *list_itr, *tmp;
5341 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
/* NOTE(review): the m_list NULL check guarding this return is in lines
 * elided from this excerpt.
 */
5344 return ICE_ERR_PARAM;
5346 rule_lock = &recp_list->filt_rule_lock;
5347 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
5349 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
5350 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
5353 if (l_type != ICE_SW_LKUP_MAC)
5354 return ICE_ERR_PARAM;
5356 vsi_handle = list_itr->fltr_info.vsi_handle;
5357 if (!ice_is_vsi_valid(hw, vsi_handle))
5358 return ICE_ERR_PARAM;
5360 list_itr->fltr_info.fwd_id.hw_vsi_id =
5361 ice_get_hw_vsi_num(hw, vsi_handle);
5362 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
5363 /* Don't remove the unicast address that belongs to
5364 * another VSI on the switch, since it is not being
/* Verify under the rule lock that this exact unicast rule exists */
5367 ice_acquire_lock(rule_lock);
5368 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
5369 &list_itr->fltr_info)) {
5370 ice_release_lock(rule_lock);
5371 return ICE_ERR_DOES_NOT_EXIST;
5373 ice_release_lock(rule_lock);
5375 list_itr->status = ice_remove_rule_internal(hw, recp_list,
5377 if (list_itr->status)
5378 return list_itr->status;
5384 * ice_remove_mac - remove a MAC address based filter rule
5385 * @hw: pointer to the hardware structure
5386 * @m_list: list of MAC addresses and forwarding information
5389 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
5391 struct ice_sw_recipe *recp_list;
/* Delegate to the rule-level helper using the MAC lookup recipe */
5393 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5394 return ice_remove_mac_rule(hw, m_list, recp_list);
5398 * ice_remove_vlan_rule - Remove VLAN based filter rule
5399 * @hw: pointer to the hardware structure
5400 * @v_list: list of VLAN entries and forwarding information
5401 * @recp_list: list from which function remove VLAN
5403 static enum ice_status
5404 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5405 struct ice_sw_recipe *recp_list)
5407 struct ice_fltr_list_entry *v_list_itr, *tmp;
/* SAFE iteration: ice_remove_rule_internal may unlink the current entry */
5409 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5411 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
5413 if (l_type != ICE_SW_LKUP_VLAN)
5414 return ICE_ERR_PARAM;
5415 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
5417 if (v_list_itr->status)
5418 return v_list_itr->status;
5424 * ice_remove_vlan - remove a VLAN address based filter rule
5425 * @hw: pointer to the hardware structure
5426 * @v_list: list of VLAN and forwarding information
5430 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
5432 struct ice_sw_recipe *recp_list;
/* NOTE(review): the argument NULL check guarding this early return is in
 * lines elided from this excerpt.
 */
5435 return ICE_ERR_PARAM;
5437 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
5438 return ice_remove_vlan_rule(hw, v_list, recp_list);
5442 * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
5443 * @hw: pointer to the hardware structure
5444 * @v_list: list of MAC VLAN entries and forwarding information
5445 * @recp_list: list from which function remove MAC VLAN
5447 static enum ice_status
5448 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5449 struct ice_sw_recipe *recp_list)
5451 struct ice_fltr_list_entry *v_list_itr, *tmp;
/* NOTE(review): the recp_list parameter is overwritten here before use,
 * so the caller-supplied value is effectively ignored -- worth confirming
 * whether this reassignment is intentional.
 */
5453 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5454 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5456 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
5458 if (l_type != ICE_SW_LKUP_MAC_VLAN)
5459 return ICE_ERR_PARAM;
5460 v_list_itr->status =
5461 ice_remove_rule_internal(hw, recp_list,
5463 if (v_list_itr->status)
5464 return v_list_itr->status;
5470 * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
5471 * @hw: pointer to the hardware structure
5472 * @mv_list: list of MAC VLAN and forwarding information
5475 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
5477 struct ice_sw_recipe *recp_list;
5479 if (!mv_list || !hw)
5480 return ICE_ERR_PARAM;
/* Delegate to the rule-level helper using the MAC+VLAN lookup recipe */
5482 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5483 return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
5487 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
5488 * @fm_entry: filter entry to inspect
5489 * @vsi_handle: VSI handle to compare with filter info
5492 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
/* True when the filter forwards directly to this VSI, or forwards to a
 * VSI list whose bitmap contains this VSI handle.
 */
5494 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
5495 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
5496 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
5497 fm_entry->vsi_list_info &&
5498 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
5503 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
5504 * @hw: pointer to the hardware structure
5505 * @vsi_handle: VSI handle to remove filters from
5506 * @vsi_list_head: pointer to the list to add entry to
5507 * @fi: pointer to fltr_info of filter entry to copy & add
5509 * Helper function, used when creating a list of filters to remove from
5510 * a specific VSI. The entry added to vsi_list_head is a COPY of the
5511 * original filter entry, with the exception of fltr_info.fltr_act and
5512 * fltr_info.fwd_id fields. These are set such that later logic can
5513 * extract which VSI to remove the fltr from, and pass on that information.
5515 static enum ice_status
5516 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5517 struct LIST_HEAD_TYPE *vsi_list_head,
5518 struct ice_fltr_info *fi)
5520 struct ice_fltr_list_entry *tmp;
5522 /* this memory is freed up in the caller function
5523 * once filters for this VSI are removed
5525 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
/* NOTE(review): the NULL check on tmp guarding this return is in lines
 * elided from this excerpt.
 */
5527 return ICE_ERR_NO_MEMORY;
5529 tmp->fltr_info = *fi;
5531 /* Overwrite these fields to indicate which VSI to remove filter from,
5532 * so find and remove logic can extract the information from the
5533 * list entries. Note that original entries will still have proper
5536 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
5537 tmp->fltr_info.vsi_handle = vsi_handle;
5538 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5540 LIST_ADD(&tmp->list_entry, vsi_list_head);
5546 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
5547 * @hw: pointer to the hardware structure
5548 * @vsi_handle: VSI handle to remove filters from
5549 * @lkup_list_head: pointer to the list that has certain lookup type filters
5550 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
5552 * Locates all filters in lkup_list_head that are used by the given VSI,
5553 * and adds COPIES of those entries to vsi_list_head (intended to be used
5554 * to remove the listed filters).
5555 * Note that this means all entries in vsi_list_head must be explicitly
5556 * deallocated by the caller when done with list.
5558 static enum ice_status
5559 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5560 struct LIST_HEAD_TYPE *lkup_list_head,
5561 struct LIST_HEAD_TYPE *vsi_list_head)
5563 struct ice_fltr_mgmt_list_entry *fm_entry;
5564 enum ice_status status = ICE_SUCCESS;
5566 /* check to make sure VSI ID is valid and within boundary */
5567 if (!ice_is_vsi_valid(hw, vsi_handle))
5568 return ICE_ERR_PARAM;
/* Copy every filter used by this VSI onto the caller's removal list */
5570 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
5571 ice_fltr_mgmt_list_entry, list_entry) {
5572 if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
5575 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5577 &fm_entry->fltr_info);
5585 * ice_determine_promisc_mask
5586 * @fi: filter info to parse
5588 * Helper function to determine which ICE_PROMISC_ mask corresponds
5589 * to a given filter info.
5591 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
5593 u16 vid = fi->l_data.mac_vlan.vlan_id;
5594 u8 *macaddr = fi->l_data.mac.mac_addr;
5595 bool is_tx_fltr = false;
5596 u8 promisc_mask = 0;
/* NOTE(review): the line setting is_tx_fltr for the Tx case is elided
 * from this excerpt.
 */
5598 if (fi->flag == ICE_FLTR_TX)
/* Classify by address type; direction picks the TX or RX variant bit */
5601 if (IS_BROADCAST_ETHER_ADDR(macaddr))
5602 promisc_mask |= is_tx_fltr ?
5603 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
5604 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
5605 promisc_mask |= is_tx_fltr ?
5606 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
5607 else if (IS_UNICAST_ETHER_ADDR(macaddr))
5608 promisc_mask |= is_tx_fltr ?
5609 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
/* NOTE(review): the condition (presumably on vid) guarding the VLAN bits
 * below is in lines elided from this excerpt.
 */
5611 promisc_mask |= is_tx_fltr ?
5612 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
5614 return promisc_mask;
5618 * _ice_get_vsi_promisc - get promiscuous mode of given VSI
5619 * @hw: pointer to the hardware structure
5620 * @vsi_handle: VSI handle to retrieve info from
5621 * @promisc_mask: pointer to mask to be filled in
5622 * @vid: VLAN ID of promisc VLAN VSI
5623 * @sw: pointer to switch info struct for which function add rule
5625 static enum ice_status
5626 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5627 u16 *vid, struct ice_switch_info *sw)
5629 struct ice_fltr_mgmt_list_entry *itr;
5630 struct LIST_HEAD_TYPE *rule_head;
5631 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5633 if (!ice_is_vsi_valid(hw, vsi_handle))
5634 return ICE_ERR_PARAM;
/* Accumulate promisc bits from every PROMISC rule applied to this VSI */
5638 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
5639 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
5641 ice_acquire_lock(rule_lock);
5642 LIST_FOR_EACH_ENTRY(itr, rule_head,
5643 ice_fltr_mgmt_list_entry, list_entry) {
5644 /* Continue if this filter doesn't apply to this VSI or the
5645 * VSI ID is not in the VSI map for this filter
5647 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5650 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5652 ice_release_lock(rule_lock);
5658 * ice_get_vsi_promisc - get promiscuous mode of given VSI
5659 * @hw: pointer to the hardware structure
5660 * @vsi_handle: VSI handle to retrieve info from
5661 * @promisc_mask: pointer to mask to be filled in
5662 * @vid: VLAN ID of promisc VLAN VSI
5665 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
/* Thin wrapper binding the internal helper to this HW's switch info */
5668 return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
5669 vid, hw->switch_info);
5673 * _ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5674 * @hw: pointer to the hardware structure
5675 * @vsi_handle: VSI handle to retrieve info from
5676 * @promisc_mask: pointer to mask to be filled in
5677 * @vid: VLAN ID of promisc VLAN VSI
5678 * @sw: pointer to switch info struct for which function add rule
5680 static enum ice_status
5681 _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5682 u16 *vid, struct ice_switch_info *sw)
5684 struct ice_fltr_mgmt_list_entry *itr;
5685 struct LIST_HEAD_TYPE *rule_head;
5686 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5688 if (!ice_is_vsi_valid(hw, vsi_handle))
5689 return ICE_ERR_PARAM;
/* Same walk as _ice_get_vsi_promisc but over the PROMISC_VLAN recipe */
5693 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
5694 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
5696 ice_acquire_lock(rule_lock);
5697 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
5699 /* Continue if this filter doesn't apply to this VSI or the
5700 * VSI ID is not in the VSI map for this filter
5702 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5705 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5707 ice_release_lock(rule_lock);
5713 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5714 * @hw: pointer to the hardware structure
5715 * @vsi_handle: VSI handle to retrieve info from
5716 * @promisc_mask: pointer to mask to be filled in
5717 * @vid: VLAN ID of promisc VLAN VSI
5720 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5723 return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
5724 vid, hw->switch_info);
5728 * ice_remove_promisc - Remove promisc based filter rules
5729 * @hw: pointer to the hardware structure
5730 * @recp_id: recipe ID for which the rule needs to removed
5731 * @v_list: list of promisc entries
5733 static enum ice_status
5734 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
5735 struct LIST_HEAD_TYPE *v_list)
5737 struct ice_fltr_list_entry *v_list_itr, *tmp;
5738 struct ice_sw_recipe *recp_list;
5740 recp_list = &hw->switch_info->recp_list[recp_id];
5741 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5743 v_list_itr->status =
5744 ice_remove_rule_internal(hw, recp_list, v_list_itr);
5745 if (v_list_itr->status)
5746 return v_list_itr->status;
5752 * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
5753 * @hw: pointer to the hardware structure
5754 * @vsi_handle: VSI handle to clear mode
5755 * @promisc_mask: mask of promiscuous config bits to clear
5756 * @vid: VLAN ID to clear VLAN promiscuous
5757 * @sw: pointer to switch info struct for which function add rule
5759 static enum ice_status
5760 _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5761 u16 vid, struct ice_switch_info *sw)
5763 struct ice_fltr_list_entry *fm_entry, *tmp;
5764 struct LIST_HEAD_TYPE remove_list_head;
5765 struct ice_fltr_mgmt_list_entry *itr;
5766 struct LIST_HEAD_TYPE *rule_head;
5767 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5768 enum ice_status status = ICE_SUCCESS;
5771 if (!ice_is_vsi_valid(hw, vsi_handle))
5772 return ICE_ERR_PARAM;
5774 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
5775 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
5777 recipe_id = ICE_SW_LKUP_PROMISC;
5779 rule_head = &sw->recp_list[recipe_id].filt_rules;
5780 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
5782 INIT_LIST_HEAD(&remove_list_head);
5784 ice_acquire_lock(rule_lock);
5785 LIST_FOR_EACH_ENTRY(itr, rule_head,
5786 ice_fltr_mgmt_list_entry, list_entry) {
5787 struct ice_fltr_info *fltr_info;
5788 u8 fltr_promisc_mask = 0;
5790 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5792 fltr_info = &itr->fltr_info;
5794 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
5795 vid != fltr_info->l_data.mac_vlan.vlan_id)
5798 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
5800 /* Skip if filter is not completely specified by given mask */
5801 if (fltr_promisc_mask & ~promisc_mask)
5804 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5808 ice_release_lock(rule_lock);
5809 goto free_fltr_list;
5812 ice_release_lock(rule_lock);
5814 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
5817 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
5818 ice_fltr_list_entry, list_entry) {
5819 LIST_DEL(&fm_entry->list_entry);
5820 ice_free(hw, fm_entry);
5827 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
5828 * @hw: pointer to the hardware structure
5829 * @vsi_handle: VSI handle to clear mode
5830 * @promisc_mask: mask of promiscuous config bits to clear
5831 * @vid: VLAN ID to clear VLAN promiscuous
5834 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
5835 u8 promisc_mask, u16 vid)
5837 return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
5838 vid, hw->switch_info);
5842 * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5843 * @hw: pointer to the hardware structure
5844 * @vsi_handle: VSI handle to configure
5845 * @promisc_mask: mask of promiscuous config bits
5846 * @vid: VLAN ID to set VLAN promiscuous
5847 * @lport: logical port number to configure promisc mode
5848 * @sw: pointer to switch info struct for which function add rule
5850 static enum ice_status
5851 _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5852 u16 vid, u8 lport, struct ice_switch_info *sw)
5854 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
5855 struct ice_fltr_list_entry f_list_entry;
5856 struct ice_fltr_info new_fltr;
5857 enum ice_status status = ICE_SUCCESS;
5863 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
5865 if (!ice_is_vsi_valid(hw, vsi_handle))
5866 return ICE_ERR_PARAM;
5867 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5869 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
5871 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
5872 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
5873 new_fltr.l_data.mac_vlan.vlan_id = vid;
5874 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
5876 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
5877 recipe_id = ICE_SW_LKUP_PROMISC;
5880 /* Separate filters must be set for each direction/packet type
5881 * combination, so we will loop over the mask value, store the
5882 * individual type, and clear it out in the input mask as it
5885 while (promisc_mask) {
5886 struct ice_sw_recipe *recp_list;
5892 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
5893 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
5894 pkt_type = UCAST_FLTR;
5895 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
5896 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
5897 pkt_type = UCAST_FLTR;
5899 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
5900 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
5901 pkt_type = MCAST_FLTR;
5902 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
5903 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
5904 pkt_type = MCAST_FLTR;
5906 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
5907 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
5908 pkt_type = BCAST_FLTR;
5909 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
5910 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
5911 pkt_type = BCAST_FLTR;
5915 /* Check for VLAN promiscuous flag */
5916 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
5917 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
5918 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
5919 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
5923 /* Set filter DA based on packet type */
5924 mac_addr = new_fltr.l_data.mac.mac_addr;
5925 if (pkt_type == BCAST_FLTR) {
5926 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
5927 } else if (pkt_type == MCAST_FLTR ||
5928 pkt_type == UCAST_FLTR) {
5929 /* Use the dummy ether header DA */
5930 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
5931 ICE_NONDMA_TO_NONDMA);
5932 if (pkt_type == MCAST_FLTR)
5933 mac_addr[0] |= 0x1; /* Set multicast bit */
5936 /* Need to reset this to zero for all iterations */
5939 new_fltr.flag |= ICE_FLTR_TX;
5940 new_fltr.src = hw_vsi_id;
5942 new_fltr.flag |= ICE_FLTR_RX;
5943 new_fltr.src = lport;
5946 new_fltr.fltr_act = ICE_FWD_TO_VSI;
5947 new_fltr.vsi_handle = vsi_handle;
5948 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
5949 f_list_entry.fltr_info = new_fltr;
5950 recp_list = &sw->recp_list[recipe_id];
5952 status = ice_add_rule_internal(hw, recp_list, lport,
5954 if (status != ICE_SUCCESS)
5955 goto set_promisc_exit;
5963 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5964 * @hw: pointer to the hardware structure
5965 * @vsi_handle: VSI handle to configure
5966 * @promisc_mask: mask of promiscuous config bits
5967 * @vid: VLAN ID to set VLAN promiscuous
5970 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5973 return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
5974 hw->port_info->lport,
5979 * _ice_set_vlan_vsi_promisc
5980 * @hw: pointer to the hardware structure
5981 * @vsi_handle: VSI handle to configure
5982 * @promisc_mask: mask of promiscuous config bits
5983 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5984 * @lport: logical port number to configure promisc mode
5985 * @sw: pointer to switch info struct for which function add rule
5987 * Configure VSI with all associated VLANs to given promiscuous mode(s)
5989 static enum ice_status
5990 _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5991 bool rm_vlan_promisc, u8 lport,
5992 struct ice_switch_info *sw)
5994 struct ice_fltr_list_entry *list_itr, *tmp;
5995 struct LIST_HEAD_TYPE vsi_list_head;
5996 struct LIST_HEAD_TYPE *vlan_head;
5997 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
5998 enum ice_status status;
6001 INIT_LIST_HEAD(&vsi_list_head);
6002 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
6003 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
6004 ice_acquire_lock(vlan_lock);
6005 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
6007 ice_release_lock(vlan_lock);
6009 goto free_fltr_list;
6011 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
6013 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
6014 if (rm_vlan_promisc)
6015 status = _ice_clear_vsi_promisc(hw, vsi_handle,
6019 status = _ice_set_vsi_promisc(hw, vsi_handle,
6020 promisc_mask, vlan_id,
6027 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
6028 ice_fltr_list_entry, list_entry) {
6029 LIST_DEL(&list_itr->list_entry);
6030 ice_free(hw, list_itr);
6036 * ice_set_vlan_vsi_promisc
6037 * @hw: pointer to the hardware structure
6038 * @vsi_handle: VSI handle to configure
6039 * @promisc_mask: mask of promiscuous config bits
6040 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
6042 * Configure VSI with all associated VLANs to given promiscuous mode(s)
6045 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
6046 bool rm_vlan_promisc)
6048 return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
6049 rm_vlan_promisc, hw->port_info->lport,
6054 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
6055 * @hw: pointer to the hardware structure
6056 * @vsi_handle: VSI handle to remove filters from
6057 * @recp_list: recipe list from which function remove fltr
6058 * @lkup: switch rule filter lookup type
6061 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
6062 struct ice_sw_recipe *recp_list,
6063 enum ice_sw_lkup_type lkup)
6065 struct ice_fltr_list_entry *fm_entry;
6066 struct LIST_HEAD_TYPE remove_list_head;
6067 struct LIST_HEAD_TYPE *rule_head;
6068 struct ice_fltr_list_entry *tmp;
6069 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6070 enum ice_status status;
6072 INIT_LIST_HEAD(&remove_list_head);
6073 rule_lock = &recp_list[lkup].filt_rule_lock;
6074 rule_head = &recp_list[lkup].filt_rules;
6075 ice_acquire_lock(rule_lock);
6076 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
6078 ice_release_lock(rule_lock);
6080 goto free_fltr_list;
6083 case ICE_SW_LKUP_MAC:
6084 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
6086 case ICE_SW_LKUP_VLAN:
6087 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
6089 case ICE_SW_LKUP_PROMISC:
6090 case ICE_SW_LKUP_PROMISC_VLAN:
6091 ice_remove_promisc(hw, lkup, &remove_list_head);
6093 case ICE_SW_LKUP_MAC_VLAN:
6094 ice_remove_mac_vlan(hw, &remove_list_head);
6096 case ICE_SW_LKUP_ETHERTYPE:
6097 case ICE_SW_LKUP_ETHERTYPE_MAC:
6098 ice_remove_eth_mac(hw, &remove_list_head);
6100 case ICE_SW_LKUP_DFLT:
6101 ice_debug(hw, ICE_DBG_SW, "Remove filters for this lookup type hasn't been implemented yet\n");
6103 case ICE_SW_LKUP_LAST:
6104 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
6109 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
6110 ice_fltr_list_entry, list_entry) {
6111 LIST_DEL(&fm_entry->list_entry);
6112 ice_free(hw, fm_entry);
6117 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
6118 * @hw: pointer to the hardware structure
6119 * @vsi_handle: VSI handle to remove filters from
6120 * @sw: pointer to switch info struct
6123 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
6124 struct ice_switch_info *sw)
6126 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
6128 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6129 sw->recp_list, ICE_SW_LKUP_MAC);
6130 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6131 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
6132 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6133 sw->recp_list, ICE_SW_LKUP_PROMISC);
6134 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6135 sw->recp_list, ICE_SW_LKUP_VLAN);
6136 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6137 sw->recp_list, ICE_SW_LKUP_DFLT);
6138 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6139 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
6140 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6141 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
6142 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6143 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
6147 * ice_remove_vsi_fltr - Remove all filters for a VSI
6148 * @hw: pointer to the hardware structure
6149 * @vsi_handle: VSI handle to remove filters from
6151 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
6153 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
6157 * ice_alloc_res_cntr - allocating resource counter
6158 * @hw: pointer to the hardware structure
6159 * @type: type of resource
6160 * @alloc_shared: if set it is shared else dedicated
6161 * @num_items: number of entries requested for FD resource type
6162 * @counter_id: counter index returned by AQ call
6165 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
6168 struct ice_aqc_alloc_free_res_elem *buf;
6169 enum ice_status status;
6172 /* Allocate resource */
6173 buf_len = ice_struct_size(buf, elem, 1);
6174 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6176 return ICE_ERR_NO_MEMORY;
6178 buf->num_elems = CPU_TO_LE16(num_items);
6179 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
6180 ICE_AQC_RES_TYPE_M) | alloc_shared);
6182 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
6183 ice_aqc_opc_alloc_res, NULL);
6187 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
6195 * ice_free_res_cntr - free resource counter
6196 * @hw: pointer to the hardware structure
6197 * @type: type of resource
6198 * @alloc_shared: if set it is shared else dedicated
6199 * @num_items: number of entries to be freed for FD resource type
6200 * @counter_id: counter ID resource which needs to be freed
6203 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
6206 struct ice_aqc_alloc_free_res_elem *buf;
6207 enum ice_status status;
6211 buf_len = ice_struct_size(buf, elem, 1);
6212 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6214 return ICE_ERR_NO_MEMORY;
6216 buf->num_elems = CPU_TO_LE16(num_items);
6217 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
6218 ICE_AQC_RES_TYPE_M) | alloc_shared);
6219 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
6221 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
6222 ice_aqc_opc_free_res, NULL);
6224 ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
6231 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
6232 * @hw: pointer to the hardware structure
6233 * @counter_id: returns counter index
6235 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
6237 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
6238 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
6243 * ice_free_vlan_res_counter - Free counter resource for VLAN type
6244 * @hw: pointer to the hardware structure
6245 * @counter_id: counter index to be freed
6247 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
6249 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
6250 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
6255 * ice_alloc_res_lg_act - add large action resource
6256 * @hw: pointer to the hardware structure
6257 * @l_id: large action ID to fill it in
6258 * @num_acts: number of actions to hold with a large action entry
6260 static enum ice_status
6261 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
6263 struct ice_aqc_alloc_free_res_elem *sw_buf;
6264 enum ice_status status;
6267 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
6268 return ICE_ERR_PARAM;
6270 /* Allocate resource for large action */
6271 buf_len = ice_struct_size(sw_buf, elem, 1);
6272 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6274 return ICE_ERR_NO_MEMORY;
6276 sw_buf->num_elems = CPU_TO_LE16(1);
6278 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
6279 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
6280 * If num_acts is greater than 2, then use
6281 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
6282 * The num_acts cannot exceed 4. This was ensured at the
6283 * beginning of the function.
6286 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
6287 else if (num_acts == 2)
6288 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
6290 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
6292 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
6293 ice_aqc_opc_alloc_res, NULL);
6295 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
6297 ice_free(hw, sw_buf);
6302 * ice_add_mac_with_sw_marker - add filter with sw marker
6303 * @hw: pointer to the hardware structure
6304 * @f_info: filter info structure containing the MAC filter information
6305 * @sw_marker: sw marker to tag the Rx descriptor with
6308 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
6311 struct ice_fltr_mgmt_list_entry *m_entry;
6312 struct ice_fltr_list_entry fl_info;
6313 struct ice_sw_recipe *recp_list;
6314 struct LIST_HEAD_TYPE l_head;
6315 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6316 enum ice_status ret;
6320 if (f_info->fltr_act != ICE_FWD_TO_VSI)
6321 return ICE_ERR_PARAM;
6323 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
6324 return ICE_ERR_PARAM;
6326 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
6327 return ICE_ERR_PARAM;
6329 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
6330 return ICE_ERR_PARAM;
6331 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
6333 /* Add filter if it doesn't exist so then the adding of large
6334 * action always results in update
6337 INIT_LIST_HEAD(&l_head);
6338 fl_info.fltr_info = *f_info;
6339 LIST_ADD(&fl_info.list_entry, &l_head);
6341 entry_exists = false;
6342 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
6343 hw->port_info->lport);
6344 if (ret == ICE_ERR_ALREADY_EXISTS)
6345 entry_exists = true;
6349 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
6350 rule_lock = &recp_list->filt_rule_lock;
6351 ice_acquire_lock(rule_lock);
6352 /* Get the book keeping entry for the filter */
6353 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
6357 /* If counter action was enabled for this rule then don't enable
6358 * sw marker large action
6360 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
6361 ret = ICE_ERR_PARAM;
6365 /* if same marker was added before */
6366 if (m_entry->sw_marker_id == sw_marker) {
6367 ret = ICE_ERR_ALREADY_EXISTS;
6371 /* Allocate a hardware table entry to hold large act. Three actions
6372 * for marker based large action
6374 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
6378 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
6381 /* Update the switch rule to add the marker action */
6382 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
6384 ice_release_lock(rule_lock);
6389 ice_release_lock(rule_lock);
6390 /* only remove entry if it did not exist previously */
6392 ret = ice_remove_mac(hw, &l_head);
6398 * ice_add_mac_with_counter - add filter with counter enabled
6399 * @hw: pointer to the hardware structure
6400 * @f_info: pointer to filter info structure containing the MAC filter
6404 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
6406 struct ice_fltr_mgmt_list_entry *m_entry;
6407 struct ice_fltr_list_entry fl_info;
6408 struct ice_sw_recipe *recp_list;
6409 struct LIST_HEAD_TYPE l_head;
6410 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6411 enum ice_status ret;
6416 if (f_info->fltr_act != ICE_FWD_TO_VSI)
6417 return ICE_ERR_PARAM;
6419 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
6420 return ICE_ERR_PARAM;
6422 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
6423 return ICE_ERR_PARAM;
6424 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
6425 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
6427 entry_exist = false;
6429 rule_lock = &recp_list->filt_rule_lock;
6431 /* Add filter if it doesn't exist so then the adding of large
6432 * action always results in update
6434 INIT_LIST_HEAD(&l_head);
6436 fl_info.fltr_info = *f_info;
6437 LIST_ADD(&fl_info.list_entry, &l_head);
6439 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
6440 hw->port_info->lport);
6441 if (ret == ICE_ERR_ALREADY_EXISTS)
6446 ice_acquire_lock(rule_lock);
6447 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
6449 ret = ICE_ERR_BAD_PTR;
6453 /* Don't enable counter for a filter for which sw marker was enabled */
6454 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
6455 ret = ICE_ERR_PARAM;
6459 /* If a counter was already enabled then don't need to add again */
6460 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
6461 ret = ICE_ERR_ALREADY_EXISTS;
6465 /* Allocate a hardware table entry to VLAN counter */
6466 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
6470 /* Allocate a hardware table entry to hold large act. Two actions for
6471 * counter based large action
6473 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
6477 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
6480 /* Update the switch rule to add the counter action */
6481 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
6483 ice_release_lock(rule_lock);
6488 ice_release_lock(rule_lock);
6489 /* only remove entry if it did not exist previously */
6491 ret = ice_remove_mac(hw, &l_head);
6496 /* This is mapping table entry that maps every word within a given protocol
6497 * structure to the real byte offset as per the specification of that
6499 * for example dst address is 3 words in ethertype header and corresponding
6500 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
6501 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
6502 * matching entry describing its field. This needs to be updated if new
6503 * structure is added to that union.
6505 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
6506 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
6507 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
6508 { ICE_ETYPE_OL, { 0 } },
6509 { ICE_VLAN_OFOS, { 0, 2 } },
6510 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
6511 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
6512 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
6513 26, 28, 30, 32, 34, 36, 38 } },
6514 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
6515 26, 28, 30, 32, 34, 36, 38 } },
6516 { ICE_TCP_IL, { 0, 2 } },
6517 { ICE_UDP_OF, { 0, 2 } },
6518 { ICE_UDP_ILOS, { 0, 2 } },
6519 { ICE_SCTP_IL, { 0, 2 } },
6520 { ICE_VXLAN, { 8, 10, 12, 14 } },
6521 { ICE_GENEVE, { 8, 10, 12, 14 } },
6522 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
6523 { ICE_NVGRE, { 0, 2, 4, 6 } },
6524 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
6525 { ICE_PPPOE, { 0, 2, 4, 6 } },
6526 { ICE_PFCP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
6527 { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
6528 { ICE_ESP, { 0, 2, 4, 6 } },
6529 { ICE_AH, { 0, 2, 4, 6, 8, 10 } },
6530 { ICE_NAT_T, { 8, 10, 12, 14 } },
6531 { ICE_GTP_NO_PAY, { 8, 10, 12, 14 } },
6532 { ICE_VLAN_EX, { 0, 2 } },
6535 /* The following table describes preferred grouping of recipes.
6536 * If a recipe that needs to be programmed is a superset or matches one of the
6537 * following combinations, then the recipe needs to be chained as per the
6541 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
6542 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
6543 { ICE_MAC_IL, ICE_MAC_IL_HW },
6544 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
6545 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
6546 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
6547 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
6548 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
6549 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
6550 { ICE_TCP_IL, ICE_TCP_IL_HW },
6551 { ICE_UDP_OF, ICE_UDP_OF_HW },
6552 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
6553 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
6554 { ICE_VXLAN, ICE_UDP_OF_HW },
6555 { ICE_GENEVE, ICE_UDP_OF_HW },
6556 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
6557 { ICE_NVGRE, ICE_GRE_OF_HW },
6558 { ICE_GTP, ICE_UDP_OF_HW },
6559 { ICE_PPPOE, ICE_PPPOE_HW },
6560 { ICE_PFCP, ICE_UDP_ILOS_HW },
6561 { ICE_L2TPV3, ICE_L2TPV3_HW },
6562 { ICE_ESP, ICE_ESP_HW },
6563 { ICE_AH, ICE_AH_HW },
6564 { ICE_NAT_T, ICE_UDP_ILOS_HW },
6565 { ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
6566 { ICE_VLAN_EX, ICE_VLAN_OF_HW },
6570 * ice_find_recp - find a recipe
6571 * @hw: pointer to the hardware structure
6572 * @lkup_exts: extension sequence to match
6574 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
6576 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
6577 enum ice_sw_tunnel_type tun_type)
6579 bool refresh_required = true;
6580 struct ice_sw_recipe *recp;
6583 /* Walk through existing recipes to find a match */
6584 recp = hw->switch_info->recp_list;
6585 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6586 /* If recipe was not created for this ID, in SW bookkeeping,
6587 * check if FW has an entry for this recipe. If the FW has an
6588 * entry update it in our SW bookkeeping and continue with the
6591 if (!recp[i].recp_created)
6592 if (ice_get_recp_frm_fw(hw,
6593 hw->switch_info->recp_list, i,
6597 /* Skip inverse action recipes */
6598 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
6599 ICE_AQ_RECIPE_ACT_INV_ACT)
6602 /* if number of words we are looking for match */
6603 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
6604 struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
6605 struct ice_fv_word *be = lkup_exts->fv_words;
6606 u16 *cr = recp[i].lkup_exts.field_mask;
6607 u16 *de = lkup_exts->field_mask;
6611 /* ar, cr, and qr are related to the recipe words, while
6612 * be, de, and pe are related to the lookup words
6614 for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
6615 for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
6617 if (ar[qr].off == be[pe].off &&
6618 ar[qr].prot_id == be[pe].prot_id &&
6620 /* Found the "pe"th word in the
6625 /* After walking through all the words in the
6626 * "i"th recipe if "p"th word was not found then
6627 * this recipe is not what we are looking for.
6628 * So break out from this loop and try the next
6631 if (qr >= recp[i].lkup_exts.n_val_words) {
6636 /* If for "i"th recipe the found was never set to false
6637 * then it means we found our match
6639 if (tun_type == recp[i].tun_type && found)
6640 return i; /* Return the recipe ID */
6643 return ICE_MAX_NUM_RECIPES;
6647 * ice_prot_type_to_id - get protocol ID from protocol type
6648 * @type: protocol type
6649 * @id: pointer to variable that will receive the ID
6651 * Returns true if found, false otherwise
6653 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
6657 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
6658 if (ice_prot_id_tbl[i].type == type) {
6659 *id = ice_prot_id_tbl[i].protocol_id;
6666 * ice_fill_valid_words - count valid words
6667 * @rule: advanced rule with lookup information
6668 * @lkup_exts: byte offset extractions of the words that are valid
6670 * calculate valid words in a lookup rule using mask value
6673 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
6674 struct ice_prot_lkup_ext *lkup_exts)
6676 u8 j, word, prot_id, ret_val;
6678 if (!ice_prot_type_to_id(rule->type, &prot_id))
6681 word = lkup_exts->n_val_words;
6683 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
6684 if (((u16 *)&rule->m_u)[j] &&
6685 (size_t)rule->type < ARRAY_SIZE(ice_prot_ext)) {
6686 /* No more space to accommodate */
6687 if (word >= ICE_MAX_CHAIN_WORDS)
6689 lkup_exts->fv_words[word].off =
6690 ice_prot_ext[rule->type].offs[j];
6691 lkup_exts->fv_words[word].prot_id =
6692 ice_prot_id_tbl[rule->type].protocol_id;
6693 lkup_exts->field_mask[word] =
6694 BE16_TO_CPU(((_FORCE_ __be16 *)&rule->m_u)[j]);
6698 ret_val = word - lkup_exts->n_val_words;
6699 lkup_exts->n_val_words = word;
6705 * ice_create_first_fit_recp_def - Create a recipe grouping
6706 * @hw: pointer to the hardware structure
6707 * @lkup_exts: an array of protocol header extractions
6708 * @rg_list: pointer to a list that stores new recipe groups
6709 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
6711 * Using first fit algorithm, take all the words that are still not done
6712 * and start grouping them in 4-word groups. Each group makes up one
6715 static enum ice_status
6716 ice_create_first_fit_recp_def(struct ice_hw *hw,
6717 struct ice_prot_lkup_ext *lkup_exts,
6718 struct LIST_HEAD_TYPE *rg_list,
6721 struct ice_pref_recipe_group *grp = NULL;
6726 if (!lkup_exts->n_val_words) {
6727 struct ice_recp_grp_entry *entry;
6729 entry = (struct ice_recp_grp_entry *)
6730 ice_malloc(hw, sizeof(*entry));
6732 return ICE_ERR_NO_MEMORY;
6733 LIST_ADD(&entry->l_entry, rg_list);
6734 grp = &entry->r_group;
6736 grp->n_val_pairs = 0;
6739 /* Walk through every word in the rule to check if it is not done. If so
6740 * then this word needs to be part of a new recipe.
6742 for (j = 0; j < lkup_exts->n_val_words; j++)
6743 if (!ice_is_bit_set(lkup_exts->done, j)) {
6745 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
6746 struct ice_recp_grp_entry *entry;
6748 entry = (struct ice_recp_grp_entry *)
6749 ice_malloc(hw, sizeof(*entry));
6751 return ICE_ERR_NO_MEMORY;
6752 LIST_ADD(&entry->l_entry, rg_list);
6753 grp = &entry->r_group;
6757 grp->pairs[grp->n_val_pairs].prot_id =
6758 lkup_exts->fv_words[j].prot_id;
6759 grp->pairs[grp->n_val_pairs].off =
6760 lkup_exts->fv_words[j].off;
6761 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
6769 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
6770 * @hw: pointer to the hardware structure
6771 * @fv_list: field vector with the extraction sequence information
6772 * @rg_list: recipe groupings with protocol-offset pairs
6774 * Helper function to fill in the field vector indices for protocol-offset
6775 * pairs. These indexes are then ultimately programmed into a recipe.
6777 static enum ice_status
6778 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
6779 struct LIST_HEAD_TYPE *rg_list)
6781 struct ice_sw_fv_list_entry *fv;
6782 struct ice_recp_grp_entry *rg;
6783 struct ice_fv_word *fv_ext;
6785 if (LIST_EMPTY(fv_list))
6788 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
6789 fv_ext = fv->fv_ptr->ew;
6791 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
6794 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
6795 struct ice_fv_word *pr;
6800 pr = &rg->r_group.pairs[i];
6801 mask = rg->r_group.mask[i];
6803 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
6804 if (fv_ext[j].prot_id == pr->prot_id &&
6805 fv_ext[j].off == pr->off) {
6808 /* Store index of field vector */
6810 rg->fv_mask[i] = mask;
6814 /* Protocol/offset could not be found, caller gave an
6818 return ICE_ERR_PARAM;
6826 * ice_find_free_recp_res_idx - find free result indexes for recipe
6827 * @hw: pointer to hardware structure
6828 * @profiles: bitmap of profiles that will be associated with the new recipe
6829 * @free_idx: pointer to variable to receive the free index bitmap
6831 * The algorithm used here is:
6832 * 1. When creating a new recipe, create a set P which contains all
6833 * Profiles that will be associated with our new recipe
6835 * 2. For each Profile p in set P:
6836 * a. Add all recipes associated with Profile p into set R
6837 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
6838 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
6839 * i. Or just assume they all have the same possible indexes:
6841 * i.e., PossibleIndexes = 0x0000F00000000000
6843 * 3. For each Recipe r in set R:
6844 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
6845 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
6847 * FreeIndexes will contain the bits indicating the indexes free for use,
6848 * then the code needs to update the recipe[r].used_result_idx_bits to
6849 * indicate which indexes were selected for use by this recipe.
6852 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
6853 ice_bitmap_t *free_idx)
6855 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6856 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6857 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
6860 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6861 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6862 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
6863 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
/* Start from "all result indexes possible"; the per-profile AND in the
 * loop below narrows this to indexes usable by every profile in the set.
 */
6865 ice_bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
6867 /* For each profile we are going to associate the recipe with, add the
6868 * recipes that are associated with that profile. This will give us
6869 * the set of recipes that our recipe may collide with. Also, determine
6870 * what possible result indexes are usable given this set of profiles.
6872 ice_for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
6873 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
6874 ICE_MAX_NUM_RECIPES);
6875 ice_and_bitmap(possible_idx, possible_idx,
6876 hw->switch_info->prof_res_bm[bit],
6880 /* For each recipe that our new recipe may collide with, determine
6881 * which indexes have been used.
6883 ice_for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
6884 ice_or_bitmap(used_idx, used_idx,
6885 hw->switch_info->recp_list[bit].res_idxs,
/* free = used XOR possible; assuming used is a subset of possible this
 * equals possible & ~used, i.e. the indexes still available for use.
 */
6888 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
6890 /* return number of free indexes */
6891 return (u16)ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS);
6895 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
6896 * @hw: pointer to hardware structure
6897 * @rm: recipe management list entry
6898 * @profiles: bitmap of profiles that will be associated.
6900 static enum ice_status
6901 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
6902 ice_bitmap_t *profiles)
6904 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6905 struct ice_aqc_recipe_data_elem *tmp;
6906 struct ice_aqc_recipe_data_elem *buf;
6907 struct ice_recp_grp_entry *entry;
6908 enum ice_status status;
6914 /* When more than one recipe are required, another recipe is needed to
6915 * chain them together. Matching a tunnel metadata ID takes up one of
6916 * the match fields in the chaining recipe reducing the number of
6917 * chained recipes by one.
6919 /* check number of free result indices */
6920 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6921 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
6923 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
6924 free_res_idx, rm->n_grp_count);
6926 if (rm->n_grp_count > 1) {
6927 if (rm->n_grp_count > free_res_idx)
6928 return ICE_ERR_MAX_LIMIT;
6933 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
6934 return ICE_ERR_MAX_LIMIT;
/* tmp holds recipes read back from firmware (used as a template for new
 * entries); buf is the add-recipe command buffer, one element per group.
 */
6936 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
6937 ICE_MAX_NUM_RECIPES,
6940 return ICE_ERR_NO_MEMORY;
6942 buf = (struct ice_aqc_recipe_data_elem *)
6943 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
6945 status = ICE_ERR_NO_MEMORY;
6949 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
6950 recipe_count = ICE_MAX_NUM_RECIPES;
6951 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
6953 if (status || recipe_count == 0)
6956 /* Allocate the recipe resources, and configure them according to the
6957 * match fields from protocol headers and extracted field vectors.
6959 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
6960 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
6963 status = ice_alloc_recipe(hw, &entry->rid);
6967 /* Clear the result index of the located recipe, as this will be
6968 * updated, if needed, later in the recipe creation process.
6970 tmp[0].content.result_indx = 0;
6972 buf[recps] = tmp[0];
6973 buf[recps].recipe_indx = (u8)entry->rid;
6974 /* if the recipe is a non-root recipe RID should be programmed
6975 * as 0 for the rules to be applied correctly.
6977 buf[recps].content.rid = 0;
6978 ice_memset(&buf[recps].content.lkup_indx, 0,
6979 sizeof(buf[recps].content.lkup_indx),
6982 /* All recipes use look-up index 0 to match switch ID. */
6983 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
6984 buf[recps].content.mask[0] =
6985 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
6986 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
6989 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
6990 buf[recps].content.lkup_indx[i] = 0x80;
6991 buf[recps].content.mask[i] = 0;
/* Program this group's field-vector indexes/masks into lookup
 * words 1..n (word 0 is reserved for the switch ID above).
 */
6994 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
6995 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
6996 buf[recps].content.mask[i + 1] =
6997 CPU_TO_LE16(entry->fv_mask[i]);
7000 if (rm->n_grp_count > 1) {
7001 /* Checks to see if there really is a valid result index
7004 if (chain_idx >= ICE_MAX_FV_WORDS) {
7005 ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
7006 status = ICE_ERR_MAX_LIMIT;
/* Reserve a result index so the chaining (root) recipe can
 * match on this sub-recipe's outcome, then advance to the
 * next free index for the following group.
 */
7010 entry->chain_idx = chain_idx;
7011 buf[recps].content.result_indx =
7012 ICE_AQ_RECIPE_RESULT_EN |
7013 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
7014 ICE_AQ_RECIPE_RESULT_DATA_M);
7015 ice_clear_bit(chain_idx, result_idx_bm);
7016 chain_idx = ice_find_first_bit(result_idx_bm,
7020 /* fill recipe dependencies */
7021 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
7022 ICE_MAX_NUM_RECIPES);
7023 ice_set_bit(buf[recps].recipe_indx,
7024 (ice_bitmap_t *)buf[recps].recipe_bitmap);
7025 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
/* Single-group recipe: the one recipe is the root. Multi-group: build an
 * extra chaining recipe (below) that combines the sub-recipes' results.
 */
7029 if (rm->n_grp_count == 1) {
7030 rm->root_rid = buf[0].recipe_indx;
7031 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
7032 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
7033 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
7034 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
7035 sizeof(buf[0].recipe_bitmap),
7036 ICE_NONDMA_TO_NONDMA);
7038 status = ICE_ERR_BAD_PTR;
7041 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
7042 * the recipe which is getting created if specified
7043 * by user. Usually any advanced switch filter, which results
7044 * into new extraction sequence, ended up creating a new recipe
7045 * of type ROOT and usually recipes are associated with profiles
7046 * Switch rule referring newly created recipe, needs to have
7047 * either/or 'fwd' or 'join' priority, otherwise switch rule
7048 * evaluation will not happen correctly. In other words, if
7049 * switch rule to be evaluated on priority basis, then recipe
7050 * needs to have priority, otherwise it will be evaluated last.
7052 buf[0].content.act_ctrl_fwd_priority = rm->priority;
7054 struct ice_recp_grp_entry *last_chain_entry;
7057 /* Allocate the last recipe that will chain the outcomes of the
7058 * other recipes together
7060 status = ice_alloc_recipe(hw, &rid);
7064 buf[recps].recipe_indx = (u8)rid;
7065 buf[recps].content.rid = (u8)rid;
7066 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
7067 /* the new entry created should also be part of rg_list to
7068 * make sure we have complete recipe
7070 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
7071 sizeof(*last_chain_entry));
7072 if (!last_chain_entry) {
7073 status = ICE_ERR_NO_MEMORY;
7076 last_chain_entry->rid = rid;
7077 ice_memset(&buf[recps].content.lkup_indx, 0,
7078 sizeof(buf[recps].content.lkup_indx),
7080 /* All recipes use look-up index 0 to match switch ID. */
7081 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
7082 buf[recps].content.mask[0] =
7083 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
7084 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
7085 buf[recps].content.lkup_indx[i] =
7086 ICE_AQ_RECIPE_LKUP_IGNORE;
7087 buf[recps].content.mask[i] = 0;
7091 /* update r_bitmap with the recp that is used for chaining */
7092 ice_set_bit(rid, rm->r_bitmap);
7093 /* this is the recipe that chains all the other recipes so it
7094 * should not have a chaining ID to indicate the same
7096 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
/* Each sub-recipe's reserved chain_idx becomes a lookup word of
 * the chaining root recipe, matched with a full 0xFFFF mask.
 */
7097 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
7099 last_chain_entry->fv_idx[i] = entry->chain_idx;
7100 buf[recps].content.lkup_indx[i] = entry->chain_idx;
7101 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
7102 ice_set_bit(entry->rid, rm->r_bitmap);
7104 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
7105 if (sizeof(buf[recps].recipe_bitmap) >=
7106 sizeof(rm->r_bitmap)) {
7107 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
7108 sizeof(buf[recps].recipe_bitmap),
7109 ICE_NONDMA_TO_NONDMA);
7111 status = ICE_ERR_BAD_PTR;
7114 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
7117 rm->root_rid = (u8)rid;
/* Commit all recipe elements to firmware under the change lock. */
7119 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7123 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
7124 ice_release_change_lock(hw);
7128 /* Every recipe that just got created add it to the recipe
7131 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
7132 struct ice_switch_info *sw = hw->switch_info;
7133 bool is_root, idx_found = false;
7134 struct ice_sw_recipe *recp;
7135 u16 idx, buf_idx = 0;
7137 /* find buffer index for copying some data */
7138 for (idx = 0; idx < rm->n_grp_count; idx++)
7139 if (buf[idx].recipe_indx == entry->rid) {
7145 status = ICE_ERR_OUT_OF_RANGE;
/* Mirror the programmed recipe into the software recp_list cache
 * so later lookups (ice_find_recp) can match it without another
 * firmware read.
 */
7149 recp = &sw->recp_list[entry->rid];
7150 is_root = (rm->root_rid == entry->rid);
7151 recp->is_root = is_root;
7153 recp->root_rid = entry->rid;
7154 recp->big_recp = (is_root && rm->n_grp_count > 1);
7156 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
7157 entry->r_group.n_val_pairs *
7158 sizeof(struct ice_fv_word),
7159 ICE_NONDMA_TO_NONDMA);
7161 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
7162 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
7164 /* Copy non-result fv index values and masks to recipe. This
7165 * call will also update the result recipe bitmask.
7167 ice_collect_result_idx(&buf[buf_idx], recp);
7169 /* for non-root recipes, also copy to the root, this allows
7170 * easier matching of a complete chained recipe
7173 ice_collect_result_idx(&buf[buf_idx],
7174 &sw->recp_list[rm->root_rid]);
7176 recp->n_ext_words = entry->r_group.n_val_pairs;
7177 recp->chain_idx = entry->chain_idx;
7178 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
7179 recp->n_grp_count = rm->n_grp_count;
7180 recp->tun_type = rm->tun_type;
7181 recp->recp_created = true;
7195 * ice_create_recipe_group - creates recipe group
7196 * @hw: pointer to hardware structure
7197 * @rm: recipe management list entry
7198 * @lkup_exts: lookup elements
7200 static enum ice_status
7201 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
7202 struct ice_prot_lkup_ext *lkup_exts)
7204 enum ice_status status;
7207 rm->n_grp_count = 0;
7209 /* Create recipes for words that are marked not done by packing them
7212 status = ice_create_first_fit_recp_def(hw, lkup_exts,
7213 &rm->rg_list, &recp_count);
/* Record the group count and copy the extraction words and their masks
 * from the lookup structure into the recipe-management entry, so the
 * recipe can later be programmed and matched against.
 */
7215 rm->n_grp_count += recp_count;
7216 rm->n_ext_words = lkup_exts->n_val_words;
7217 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
7218 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
7219 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
7220 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
7227 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
7228 * @hw: pointer to hardware structure
7229 * @lkups: lookup elements or match criteria for the advanced recipe, one
7230 * structure per protocol header
7231 * @lkups_cnt: number of protocols
7232 * @bm: bitmap of field vectors to consider
7233 * @fv_list: pointer to a list that holds the returned field vectors
7235 static enum ice_status
7236 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7237 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
7239 enum ice_status status;
/* Temporary array of hardware protocol IDs, one per lookup element;
 * freed before returning on all paths.
 */
7246 prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
7248 return ICE_ERR_NO_MEMORY;
/* Translate each software protocol type to its HW protocol ID; an
 * untranslatable type is a configuration error.
 */
7250 for (i = 0; i < lkups_cnt; i++)
7251 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
7252 status = ICE_ERR_CFG;
7256 /* Find field vectors that include all specified protocol types */
7257 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
7260 ice_free(hw, prot_ids);
7265 * ice_tun_type_match_word - determine if tun type needs a match mask
7266 * @tun_type: tunnel type
7267 * @mask: mask to be used for the tunnel
7269 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
/* Tunnel and QinQ types below match on the full metadata tunnel-flag
 * mask.
 */
7272 case ICE_SW_TUN_VXLAN_GPE:
7273 case ICE_SW_TUN_GENEVE:
7274 case ICE_SW_TUN_VXLAN:
7275 case ICE_SW_TUN_NVGRE:
7276 case ICE_SW_TUN_UDP:
7277 case ICE_ALL_TUNNELS:
7278 case ICE_SW_TUN_AND_NON_TUN_QINQ:
7279 case ICE_NON_TUN_QINQ:
7280 case ICE_SW_TUN_PPPOE_QINQ:
7281 case ICE_SW_TUN_PPPOE_PAY_QINQ:
7282 case ICE_SW_TUN_PPPOE_IPV4_QINQ:
7283 case ICE_SW_TUN_PPPOE_IPV6_QINQ:
7284 *mask = ICE_TUN_FLAG_MASK;
/* VLAN-tagged tunnel variants exclude the VLAN bit from the flag mask. */
7287 case ICE_SW_TUN_GENEVE_VLAN:
7288 case ICE_SW_TUN_VXLAN_VLAN:
7289 *mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
7299 * ice_add_special_words - Add words that are not protocols, such as metadata
7300 * @rinfo: other information regarding the rule e.g. priority and action info
7301 * @lkup_exts: lookup word structure
7303 static enum ice_status
7304 ice_add_special_words(struct ice_adv_rule_info *rinfo,
7305 struct ice_prot_lkup_ext *lkup_exts)
7309 /* If this is a tunneled packet, then add recipe index to match the
7310 * tunnel bit in the packet metadata flags.
7312 if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
7313 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
/* Append one metadata word (tunnel-flag MDID) to the lookup
 * extraction list, using the mask chosen per tunnel type.
 */
7314 u8 word = lkup_exts->n_val_words++;
7316 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
7317 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
7318 lkup_exts->field_mask[word] = mask;
7320 return ICE_ERR_MAX_LIMIT;
7327 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
7328 * @hw: pointer to hardware structure
7329 * @rinfo: other information regarding the rule e.g. priority and action info
7330 * @bm: pointer to memory for returning the bitmap of field vectors
7333 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
7336 enum ice_prof_type prof_type;
7338 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
/* Two strategies below: broad tunnel classes select a prof_type and let
 * ice_get_sw_fv_bitmap fill the bitmap; specific rule types set explicit
 * profile-ID bits directly in bm instead.
 */
7340 switch (rinfo->tun_type) {
7342 case ICE_NON_TUN_QINQ:
7343 prof_type = ICE_PROF_NON_TUN;
7345 case ICE_ALL_TUNNELS:
7346 prof_type = ICE_PROF_TUN_ALL;
7348 case ICE_SW_TUN_VXLAN_GPE:
7349 case ICE_SW_TUN_GENEVE:
7350 case ICE_SW_TUN_GENEVE_VLAN:
7351 case ICE_SW_TUN_VXLAN:
7352 case ICE_SW_TUN_VXLAN_VLAN:
7353 case ICE_SW_TUN_UDP:
7354 case ICE_SW_TUN_GTP:
7355 prof_type = ICE_PROF_TUN_UDP;
7357 case ICE_SW_TUN_NVGRE:
7358 prof_type = ICE_PROF_TUN_GRE;
7360 case ICE_SW_TUN_PPPOE:
7361 case ICE_SW_TUN_PPPOE_QINQ:
7362 prof_type = ICE_PROF_TUN_PPPOE;
7364 case ICE_SW_TUN_PPPOE_PAY:
7365 case ICE_SW_TUN_PPPOE_PAY_QINQ:
7366 ice_set_bit(ICE_PROFID_PPPOE_PAY, bm);
7368 case ICE_SW_TUN_PPPOE_IPV4:
7369 case ICE_SW_TUN_PPPOE_IPV4_QINQ:
7370 ice_set_bit(ICE_PROFID_PPPOE_IPV4_OTHER, bm);
7371 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
7372 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
7374 case ICE_SW_TUN_PPPOE_IPV4_TCP:
7375 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
7377 case ICE_SW_TUN_PPPOE_IPV4_UDP:
7378 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
7380 case ICE_SW_TUN_PPPOE_IPV6:
7381 case ICE_SW_TUN_PPPOE_IPV6_QINQ:
7382 ice_set_bit(ICE_PROFID_PPPOE_IPV6_OTHER, bm);
7383 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
7384 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
7386 case ICE_SW_TUN_PPPOE_IPV6_TCP:
7387 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
7389 case ICE_SW_TUN_PPPOE_IPV6_UDP:
7390 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
7392 case ICE_SW_TUN_PROFID_IPV6_ESP:
7393 case ICE_SW_TUN_IPV6_ESP:
7394 ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
7396 case ICE_SW_TUN_PROFID_IPV6_AH:
7397 case ICE_SW_TUN_IPV6_AH:
7398 ice_set_bit(ICE_PROFID_IPV6_AH, bm);
7400 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
7401 case ICE_SW_TUN_IPV6_L2TPV3:
7402 ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
7404 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
7405 case ICE_SW_TUN_IPV6_NAT_T:
7406 ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
7408 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
7409 ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
7411 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
7412 ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
7414 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
7415 ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
7417 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
7418 ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
7420 case ICE_SW_TUN_IPV4_NAT_T:
7421 ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
7423 case ICE_SW_TUN_IPV4_L2TPV3:
7424 ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
7426 case ICE_SW_TUN_IPV4_ESP:
7427 ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
7429 case ICE_SW_TUN_IPV4_AH:
7430 ice_set_bit(ICE_PROFID_IPV4_AH, bm);
7432 case ICE_SW_IPV4_TCP:
7433 ice_set_bit(ICE_PROFID_IPV4_TCP, bm);
7435 case ICE_SW_IPV4_UDP:
7436 ice_set_bit(ICE_PROFID_IPV4_UDP, bm);
7438 case ICE_SW_IPV6_TCP:
7439 ice_set_bit(ICE_PROFID_IPV6_TCP, bm);
7441 case ICE_SW_IPV6_UDP:
7442 ice_set_bit(ICE_PROFID_IPV6_UDP, bm);
/* GTP-U with inner L3: enable both the extension-header (EH) and
 * non-EH profile variants for every inner L4 (OTHER/UDP/TCP).
 */
7444 case ICE_SW_TUN_IPV4_GTPU_IPV4:
7445 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, bm);
7446 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_OTHER, bm);
7447 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
7448 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
7449 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
7450 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
7452 case ICE_SW_TUN_IPV6_GTPU_IPV4:
7453 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, bm);
7454 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_OTHER, bm);
7455 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
7456 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
7457 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
7458 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
7460 case ICE_SW_TUN_IPV4_GTPU_IPV6:
7461 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, bm);
7462 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_OTHER, bm);
7463 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
7464 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
7465 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
7466 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
7468 case ICE_SW_TUN_IPV6_GTPU_IPV6:
7469 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, bm);
7470 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_OTHER, bm);
7471 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
7472 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
7473 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
7474 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
7476 case ICE_SW_TUN_AND_NON_TUN:
7477 case ICE_SW_TUN_AND_NON_TUN_QINQ:
7479 prof_type = ICE_PROF_ALL;
/* NOTE(review): lines are elided from this view between the switch and
 * here; presumably the explicit profile-ID cases return before reaching
 * this call — confirm against the upstream source.
 */
7483 ice_get_sw_fv_bitmap(hw, prof_type, bm);
7487 * ice_is_prof_rule - determine if rule type is a profile rule
7488 * @type: the rule type
7490 * if the rule type is a profile rule, that means that there no field value
7491 * match required, in this case just a profile hit is required.
7493 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
/* Only the ICE_SW_TUN_PROFID_* rule types qualify as profile rules. */
7496 case ICE_SW_TUN_PROFID_IPV6_ESP:
7497 case ICE_SW_TUN_PROFID_IPV6_AH:
7498 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
7499 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
7500 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
7501 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
7502 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
7503 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
7513 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
7514 * @hw: pointer to hardware structure
7515 * @lkups: lookup elements or match criteria for the advanced recipe, one
7516 * structure per protocol header
7517 * @lkups_cnt: number of protocols
7518 * @rinfo: other information regarding the rule e.g. priority and action info
7519 * @rid: return the recipe ID of the recipe created
7521 static enum ice_status
7522 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7523 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
7525 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
7526 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7527 struct ice_prot_lkup_ext *lkup_exts;
7528 struct ice_recp_grp_entry *r_entry;
7529 struct ice_sw_fv_list_entry *fvit;
7530 struct ice_recp_grp_entry *r_tmp;
7531 struct ice_sw_fv_list_entry *tmp;
7532 enum ice_status status = ICE_SUCCESS;
7533 struct ice_sw_recipe *rm;
/* Profile rules need no field-value lookups; everything else must
 * supply at least one lookup element.
 */
7536 if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
7537 return ICE_ERR_PARAM;
7539 lkup_exts = (struct ice_prot_lkup_ext *)
7540 ice_malloc(hw, sizeof(*lkup_exts))
7542 return ICE_ERR_NO_MEMORY;
7544 /* Determine the number of words to be matched and if it exceeds a
7545 * recipe's restrictions
7547 for (i = 0; i < lkups_cnt; i++) {
7550 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
7551 status = ICE_ERR_CFG;
7552 goto err_free_lkup_exts;
7555 count = ice_fill_valid_words(&lkups[i], lkup_exts);
7557 status = ICE_ERR_CFG;
7558 goto err_free_lkup_exts;
7562 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
7564 status = ICE_ERR_NO_MEMORY;
7565 goto err_free_lkup_exts;
7568 /* Get field vectors that contain fields extracted from all the protocol
7569 * headers being programmed.
7571 INIT_LIST_HEAD(&rm->fv_list);
7572 INIT_LIST_HEAD(&rm->rg_list);
7574 /* Get bitmap of field vectors (profiles) that are compatible with the
7575 * rule request; only these will be searched in the subsequent call to
7578 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
7580 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
7584 /* Create any special protocol/offset pairs, such as looking at tunnel
7585 * bits by extracting metadata
7587 status = ice_add_special_words(rinfo, lkup_exts);
7589 goto err_free_lkup_exts;
7591 /* Group match words into recipes using preferred recipe grouping
7594 status = ice_create_recipe_group(hw, rm, lkup_exts);
7598 /* set the recipe priority if specified */
7599 rm->priority = (u8)rinfo->priority;
7601 /* Find offsets from the field vector. Pick the first one for all the
7604 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
7608 /* An empty FV list means to use all the profiles returned in the
7611 if (LIST_EMPTY(&rm->fv_list)) {
7614 ice_for_each_set_bit(j, fv_bitmap, ICE_MAX_NUM_PROFILES) {
7615 struct ice_sw_fv_list_entry *fvl;
7617 fvl = (struct ice_sw_fv_list_entry *)
7618 ice_malloc(hw, sizeof(*fvl));
7622 fvl->profile_id = j;
7623 LIST_ADD(&fvl->list_entry, &rm->fv_list);
7627 /* get bitmap of all profiles the recipe will be associated with */
7628 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7629 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7631 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
7632 ice_set_bit((u16)fvit->profile_id, profiles);
7635 /* Look for a recipe which matches our requested fv / mask list */
7636 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
7637 if (*rid < ICE_MAX_NUM_RECIPES)
7638 /* Success if found a recipe that match the existing criteria */
7641 rm->tun_type = rinfo->tun_type;
7642 /* Recipe we need does not exist, add a recipe */
7643 status = ice_add_sw_recipe(hw, rm, profiles);
7647 /* Associate all the recipes created with all the profiles in the
7648 * common field vector.
7650 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7652 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
7655 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
7656 (u8 *)r_bitmap, NULL);
/* Merge the newly-created recipe bits into the profile's
 * existing association bitmap, then write it back to firmware
 * under the change lock.
 */
7660 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
7661 ICE_MAX_NUM_RECIPES);
7662 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7666 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
7669 ice_release_change_lock(hw);
7674 /* Update profile to recipe bitmap array */
7675 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
7676 ICE_MAX_NUM_RECIPES);
7678 /* Update recipe to profile bitmap array */
7679 ice_for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
7680 ice_set_bit((u16)fvit->profile_id,
7681 recipe_to_profile[j]);
7684 *rid = rm->root_rid;
7685 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
7686 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
/* Cleanup: free the temporary recipe-group entries and FV list built up
 * during recipe creation.
 */
7688 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
7689 ice_recp_grp_entry, l_entry) {
7690 LIST_DEL(&r_entry->l_entry);
7691 ice_free(hw, r_entry);
7694 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
7696 LIST_DEL(&fvit->list_entry);
7701 ice_free(hw, rm->root_buf);
7706 ice_free(hw, lkup_exts);
7712 * ice_find_dummy_packet - find dummy packet by tunnel type
7714 * @lkups: lookup elements or match criteria for the advanced recipe, one
7715 * structure per protocol header
7716 * @lkups_cnt: number of protocols
7717 * @tun_type: tunnel type from the match criteria
7718 * @pkt: dummy packet to fill according to filter match criteria
7719 * @pkt_len: packet length of dummy packet
7720 * @offsets: pointer to receive the pointer to the offsets for the packet
7723 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7724 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
7726 const struct ice_dummy_pkt_offsets **offsets)
7728 bool tcp = false, udp = false, ipv6 = false, vlan = false;
7732 for (i = 0; i < lkups_cnt; i++) {
7733 if (lkups[i].type == ICE_UDP_ILOS)
7735 else if (lkups[i].type == ICE_TCP_IL)
7737 else if (lkups[i].type == ICE_IPV6_OFOS)
7739 else if (lkups[i].type == ICE_VLAN_OFOS)
7741 else if (lkups[i].type == ICE_IPV4_OFOS &&
7742 lkups[i].h_u.ipv4_hdr.protocol ==
7743 ICE_IPV4_NVGRE_PROTO_ID &&
7744 lkups[i].m_u.ipv4_hdr.protocol ==
7747 else if (lkups[i].type == ICE_PPPOE &&
7748 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
7749 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
7750 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
7753 else if (lkups[i].type == ICE_ETYPE_OL &&
7754 lkups[i].h_u.ethertype.ethtype_id ==
7755 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
7756 lkups[i].m_u.ethertype.ethtype_id ==
7759 else if (lkups[i].type == ICE_IPV4_IL &&
7760 lkups[i].h_u.ipv4_hdr.protocol ==
7762 lkups[i].m_u.ipv4_hdr.protocol ==
7767 if ((tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7768 tun_type == ICE_NON_TUN_QINQ) && ipv6) {
7769 *pkt = dummy_qinq_ipv6_pkt;
7770 *pkt_len = sizeof(dummy_qinq_ipv6_pkt);
7771 *offsets = dummy_qinq_ipv6_packet_offsets;
7773 } else if (tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7774 tun_type == ICE_NON_TUN_QINQ) {
7775 *pkt = dummy_qinq_ipv4_pkt;
7776 *pkt_len = sizeof(dummy_qinq_ipv4_pkt);
7777 *offsets = dummy_qinq_ipv4_packet_offsets;
7781 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_QINQ) {
7782 *pkt = dummy_qinq_pppoe_ipv6_packet;
7783 *pkt_len = sizeof(dummy_qinq_pppoe_ipv6_packet);
7784 *offsets = dummy_qinq_pppoe_packet_ipv6_offsets;
7786 } else if (tun_type == ICE_SW_TUN_PPPOE_IPV4_QINQ) {
7787 *pkt = dummy_qinq_pppoe_ipv4_pkt;
7788 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7789 *offsets = dummy_qinq_pppoe_ipv4_packet_offsets;
7791 } else if (tun_type == ICE_SW_TUN_PPPOE_QINQ ||
7792 tun_type == ICE_SW_TUN_PPPOE_PAY_QINQ) {
7793 *pkt = dummy_qinq_pppoe_ipv4_pkt;
7794 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7795 *offsets = dummy_qinq_pppoe_packet_offsets;
7799 if (tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY) {
7800 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7801 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7802 *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
7804 } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
7805 *pkt = dummy_ipv6_gtpu_ipv6_packet;
7806 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
7807 *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
7811 if (tun_type == ICE_SW_TUN_IPV4_ESP) {
7812 *pkt = dummy_ipv4_esp_pkt;
7813 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
7814 *offsets = dummy_ipv4_esp_packet_offsets;
7818 if (tun_type == ICE_SW_TUN_IPV6_ESP) {
7819 *pkt = dummy_ipv6_esp_pkt;
7820 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
7821 *offsets = dummy_ipv6_esp_packet_offsets;
7825 if (tun_type == ICE_SW_TUN_IPV4_AH) {
7826 *pkt = dummy_ipv4_ah_pkt;
7827 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
7828 *offsets = dummy_ipv4_ah_packet_offsets;
7832 if (tun_type == ICE_SW_TUN_IPV6_AH) {
7833 *pkt = dummy_ipv6_ah_pkt;
7834 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
7835 *offsets = dummy_ipv6_ah_packet_offsets;
7839 if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
7840 *pkt = dummy_ipv4_nat_pkt;
7841 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
7842 *offsets = dummy_ipv4_nat_packet_offsets;
7846 if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
7847 *pkt = dummy_ipv6_nat_pkt;
7848 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
7849 *offsets = dummy_ipv6_nat_packet_offsets;
7853 if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
7854 *pkt = dummy_ipv4_l2tpv3_pkt;
7855 *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
7856 *offsets = dummy_ipv4_l2tpv3_packet_offsets;
7860 if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
7861 *pkt = dummy_ipv6_l2tpv3_pkt;
7862 *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
7863 *offsets = dummy_ipv6_l2tpv3_packet_offsets;
7867 if (tun_type == ICE_SW_TUN_GTP) {
7868 *pkt = dummy_udp_gtp_packet;
7869 *pkt_len = sizeof(dummy_udp_gtp_packet);
7870 *offsets = dummy_udp_gtp_packet_offsets;
7874 if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
7875 *pkt = dummy_pppoe_ipv6_packet;
7876 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
7877 *offsets = dummy_pppoe_packet_offsets;
7879 } else if (tun_type == ICE_SW_TUN_PPPOE ||
7880 tun_type == ICE_SW_TUN_PPPOE_PAY) {
7881 *pkt = dummy_pppoe_ipv4_packet;
7882 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
7883 *offsets = dummy_pppoe_packet_offsets;
7887 if (tun_type == ICE_SW_TUN_PPPOE_IPV4) {
7888 *pkt = dummy_pppoe_ipv4_packet;
7889 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
7890 *offsets = dummy_pppoe_packet_ipv4_offsets;
7894 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_TCP) {
7895 *pkt = dummy_pppoe_ipv4_tcp_packet;
7896 *pkt_len = sizeof(dummy_pppoe_ipv4_tcp_packet);
7897 *offsets = dummy_pppoe_ipv4_tcp_packet_offsets;
7901 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_UDP) {
7902 *pkt = dummy_pppoe_ipv4_udp_packet;
7903 *pkt_len = sizeof(dummy_pppoe_ipv4_udp_packet);
7904 *offsets = dummy_pppoe_ipv4_udp_packet_offsets;
7908 if (tun_type == ICE_SW_TUN_PPPOE_IPV6) {
7909 *pkt = dummy_pppoe_ipv6_packet;
7910 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
7911 *offsets = dummy_pppoe_packet_ipv6_offsets;
7915 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_TCP) {
7916 *pkt = dummy_pppoe_ipv6_tcp_packet;
7917 *pkt_len = sizeof(dummy_pppoe_ipv6_tcp_packet);
7918 *offsets = dummy_pppoe_packet_ipv6_tcp_offsets;
7922 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_UDP) {
7923 *pkt = dummy_pppoe_ipv6_udp_packet;
7924 *pkt_len = sizeof(dummy_pppoe_ipv6_udp_packet);
7925 *offsets = dummy_pppoe_packet_ipv6_udp_offsets;
7929 if (tun_type == ICE_SW_IPV4_TCP) {
7930 *pkt = dummy_tcp_packet;
7931 *pkt_len = sizeof(dummy_tcp_packet);
7932 *offsets = dummy_tcp_packet_offsets;
7936 if (tun_type == ICE_SW_IPV4_UDP) {
7937 *pkt = dummy_udp_packet;
7938 *pkt_len = sizeof(dummy_udp_packet);
7939 *offsets = dummy_udp_packet_offsets;
7943 if (tun_type == ICE_SW_IPV6_TCP) {
7944 *pkt = dummy_tcp_ipv6_packet;
7945 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
7946 *offsets = dummy_tcp_ipv6_packet_offsets;
7950 if (tun_type == ICE_SW_IPV6_UDP) {
7951 *pkt = dummy_udp_ipv6_packet;
7952 *pkt_len = sizeof(dummy_udp_ipv6_packet);
7953 *offsets = dummy_udp_ipv6_packet_offsets;
7957 /* Support GTP tunnel + L3 */
7958 if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4) {
7959 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7960 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7961 *offsets = dummy_ipv4_gtpu_ipv4_packet_offsets;
7964 if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6) {
7965 *pkt = dummy_ipv4_gtpu_ipv6_packet;
7966 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_packet);
7967 *offsets = dummy_ipv4_gtpu_ipv6_packet_offsets;
7970 if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4) {
7971 *pkt = dummy_ipv6_gtpu_ipv4_packet;
7972 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_packet);
7973 *offsets = dummy_ipv6_gtpu_ipv4_packet_offsets;
7976 if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6) {
7977 *pkt = dummy_ipv6_gtpu_ipv6_packet;
7978 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
7979 *offsets = dummy_ipv6_gtpu_ipv6_packet_offsets;
7983 if (tun_type == ICE_ALL_TUNNELS) {
7984 *pkt = dummy_gre_udp_packet;
7985 *pkt_len = sizeof(dummy_gre_udp_packet);
7986 *offsets = dummy_gre_udp_packet_offsets;
7990 if (tun_type == ICE_SW_TUN_NVGRE || gre) {
7992 *pkt = dummy_gre_tcp_packet;
7993 *pkt_len = sizeof(dummy_gre_tcp_packet);
7994 *offsets = dummy_gre_tcp_packet_offsets;
7998 *pkt = dummy_gre_udp_packet;
7999 *pkt_len = sizeof(dummy_gre_udp_packet);
8000 *offsets = dummy_gre_udp_packet_offsets;
8004 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
8005 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
8006 tun_type == ICE_SW_TUN_GENEVE_VLAN ||
8007 tun_type == ICE_SW_TUN_VXLAN_VLAN) {
8009 *pkt = dummy_udp_tun_tcp_packet;
8010 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
8011 *offsets = dummy_udp_tun_tcp_packet_offsets;
8015 *pkt = dummy_udp_tun_udp_packet;
8016 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
8017 *offsets = dummy_udp_tun_udp_packet_offsets;
8023 *pkt = dummy_vlan_udp_packet;
8024 *pkt_len = sizeof(dummy_vlan_udp_packet);
8025 *offsets = dummy_vlan_udp_packet_offsets;
8028 *pkt = dummy_udp_packet;
8029 *pkt_len = sizeof(dummy_udp_packet);
8030 *offsets = dummy_udp_packet_offsets;
8032 } else if (udp && ipv6) {
8034 *pkt = dummy_vlan_udp_ipv6_packet;
8035 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
8036 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
8039 *pkt = dummy_udp_ipv6_packet;
8040 *pkt_len = sizeof(dummy_udp_ipv6_packet);
8041 *offsets = dummy_udp_ipv6_packet_offsets;
8043 } else if ((tcp && ipv6) || ipv6) {
8045 *pkt = dummy_vlan_tcp_ipv6_packet;
8046 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
8047 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
8050 *pkt = dummy_tcp_ipv6_packet;
8051 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
8052 *offsets = dummy_tcp_ipv6_packet_offsets;
8057 *pkt = dummy_vlan_tcp_packet;
8058 *pkt_len = sizeof(dummy_vlan_tcp_packet);
8059 *offsets = dummy_vlan_tcp_packet_offsets;
8061 *pkt = dummy_tcp_packet;
8062 *pkt_len = sizeof(dummy_tcp_packet);
8063 *offsets = dummy_tcp_packet_offsets;
8068 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
8070 * @lkups: lookup elements or match criteria for the advanced recipe, one
8071 * structure per protocol header
8072 * @lkups_cnt: number of protocols
8073 * @s_rule: stores rule information from the match criteria
8074 * @dummy_pkt: dummy packet to fill according to filter match criteria
8075 * @pkt_len: packet length of dummy packet
8076 * @offsets: offset info for the dummy packet
/* NOTE(review): this listing elides some original source lines (local
 * declarations, `break`s, closing braces); comments annotate only what is
 * visible here.
 */
8078 static enum ice_status
8079 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
8080 struct ice_aqc_sw_rules_elem *s_rule,
8081 const u8 *dummy_pkt, u16 pkt_len,
8082 const struct ice_dummy_pkt_offsets *offsets)
8087 /* Start with a packet with a pre-defined/dummy content. Then, fill
8088 * in the header values to be looked up or matched.
/* The rule's Tx/Rx header buffer is the packet image programmed to HW. */
8090 pkt = s_rule->pdata.lkup_tx_rx.hdr;
8092 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
/* Overlay each caller lookup onto the dummy packet, masked word-by-word. */
8094 for (i = 0; i < lkups_cnt; i++) {
8095 enum ice_protocol_type type;
8096 u16 offset = 0, len = 0, j;
8099 /* find the start of this layer; it should be found since this
8100 * was already checked when searching for the dummy packet
8102 type = lkups[i].type;
8103 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
8104 if (type == offsets[j].type) {
8105 offset = offsets[j].offset;
8110 /* this should never happen in a correct calling sequence */
8112 return ICE_ERR_PARAM;
/* Pick the header length to copy based on the protocol of this lookup. */
8114 switch (lkups[i].type) {
8117 len = sizeof(struct ice_ether_hdr);
8120 len = sizeof(struct ice_ethtype_hdr);
8124 len = sizeof(struct ice_vlan_hdr);
8128 len = sizeof(struct ice_ipv4_hdr);
8132 len = sizeof(struct ice_ipv6_hdr);
8137 len = sizeof(struct ice_l4_hdr);
8140 len = sizeof(struct ice_sctp_hdr);
8143 len = sizeof(struct ice_nvgre);
8148 len = sizeof(struct ice_udp_tnl_hdr);
8152 case ICE_GTP_NO_PAY:
8153 len = sizeof(struct ice_udp_gtp_hdr);
8156 len = sizeof(struct ice_pppoe_hdr);
8159 len = sizeof(struct ice_esp_hdr);
8162 len = sizeof(struct ice_nat_t_hdr);
8165 len = sizeof(struct ice_ah_hdr);
8168 len = sizeof(struct ice_l2tpv3_sess_hdr);
8171 return ICE_ERR_PARAM;
8174 /* the length should be a word multiple */
8175 if (len % ICE_BYTES_PER_WORD)
8178 /* We have the offset to the header start, the length, the
8179 * caller's header values and mask. Use this information to
8180 * copy the data into the dummy packet appropriately based on
8181 * the mask. Note that we need to only write the bits as
8182 * indicated by the mask to make sure we don't improperly write
8183 * over any significant packet data.
8185 for (j = 0; j < len / sizeof(u16); j++)
8186 if (((u16 *)&lkups[i].m_u)[j])
8187 ((u16 *)(pkt + offset))[j] =
8188 (((u16 *)(pkt + offset))[j] &
8189 ~((u16 *)&lkups[i].m_u)[j]) |
8190 (((u16 *)&lkups[i].h_u)[j] &
8191 ((u16 *)&lkups[i].m_u)[j]);
/* Record the full packet image length in the rule element. */
8194 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
8200 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
8201 * @hw: pointer to the hardware structure
8202 * @tun_type: tunnel type
8203 * @pkt: dummy packet to fill in
8204 * @offsets: offset info for the dummy packet
8206 static enum ice_status
8207 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
8208 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
/* Resolve the currently-open UDP tunnel port for the given tunnel type;
 * VXLAN-family types query TNL_VXLAN, GENEVE types query TNL_GENEVE.
 */
8213 case ICE_SW_TUN_AND_NON_TUN:
8214 case ICE_SW_TUN_VXLAN_GPE:
8215 case ICE_SW_TUN_VXLAN:
8216 case ICE_SW_TUN_VXLAN_VLAN:
8217 case ICE_SW_TUN_UDP:
8218 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
8222 case ICE_SW_TUN_GENEVE:
8223 case ICE_SW_TUN_GENEVE_VLAN:
8224 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
8229 /* Nothing needs to be done for this tunnel type */
8233 /* Find the outer UDP protocol header and insert the port number */
8234 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
8235 if (offsets[i].type == ICE_UDP_OF) {
8236 struct ice_l4_hdr *hdr;
8239 offset = offsets[i].offset;
8240 hdr = (struct ice_l4_hdr *)&pkt[offset];
/* Destination port in the dummy packet becomes the open tunnel port. */
8241 hdr->dst_port = CPU_TO_BE16(open_port);
8251 * ice_find_adv_rule_entry - Search a rule entry
8252 * @hw: pointer to the hardware structure
8253 * @lkups: lookup elements or match criteria for the advanced recipe, one
8254 * structure per protocol header
8255 * @lkups_cnt: number of protocols
8256 * @recp_id: recipe ID for which we are finding the rule
8257 * @rinfo: other information regarding the rule e.g. priority and action info
8259 * Helper function to search for a given advance rule entry
8260 * Returns pointer to entry storing the rule if found
8262 static struct ice_adv_fltr_mgmt_list_entry *
8263 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8264 u16 lkups_cnt, u16 recp_id,
8265 struct ice_adv_rule_info *rinfo)
8267 struct ice_adv_fltr_mgmt_list_entry *list_itr;
8268 struct ice_switch_info *sw = hw->switch_info;
/* Walk all rules stored for this recipe and compare against the caller's
 * lookup criteria.
 */
8271 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
8272 ice_adv_fltr_mgmt_list_entry, list_entry) {
8273 bool lkups_matched = true;
/* A candidate must have the same number of lookups ... */
8275 if (lkups_cnt != list_itr->lkups_cnt)
/* ... and each lookup element must match byte-for-byte. */
8277 for (i = 0; i < list_itr->lkups_cnt; i++)
8278 if (memcmp(&list_itr->lkups[i], &lkups[i],
8280 lkups_matched = false;
/* Rule info (flag, tunnel type, ...) must also agree for a match. */
8283 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
8284 rinfo->tun_type == list_itr->rule_info.tun_type &&
8292 * ice_adv_add_update_vsi_list
8293 * @hw: pointer to the hardware structure
8294 * @m_entry: pointer to current adv filter management list entry
8295 * @cur_fltr: filter information from the book keeping entry
8296 * @new_fltr: filter information with the new VSI to be added
8298 * Call AQ command to add or update previously created VSI list with new VSI.
8300 * Helper function to do book keeping associated with adding filter information
8301 * The algorithm to do the bookkeeping is described below:
8302 * When a VSI needs to subscribe to a given advanced filter
8303 * if only one VSI has been added till now
8304 * Allocate a new VSI list and add two VSIs
8305 * to this list using switch rule command
8306 * Update the previously created switch rule with the
8307 * newly created VSI list ID
8308 * if a VSI list was previously created
8309 * Add the new VSI to the previously created VSI list set
8310 * using the update switch rule command
8312 static enum ice_status
8313 ice_adv_add_update_vsi_list(struct ice_hw *hw,
8314 struct ice_adv_fltr_mgmt_list_entry *m_entry,
8315 struct ice_adv_rule_info *cur_fltr,
8316 struct ice_adv_rule_info *new_fltr)
8318 enum ice_status status;
8319 u16 vsi_list_id = 0;
/* Only VSI / VSI-list forwarding actions can be merged into a VSI list;
 * queue, queue-group and drop actions are not supported here.
 */
8321 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
8322 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
8323 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
8324 return ICE_ERR_NOT_IMPL;
8326 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
8327 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
8328 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
8329 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
8330 return ICE_ERR_NOT_IMPL;
8332 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
8333 /* Only one entry existed in the mapping and it was not already
8334 * a part of a VSI list. So, create a VSI list with the old and
8337 struct ice_fltr_info tmp_fltr;
8338 u16 vsi_handle_arr[2];
8340 /* A rule already exists with the new VSI being added */
8341 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
8342 new_fltr->sw_act.fwd_id.hw_vsi_id)
8343 return ICE_ERR_ALREADY_EXISTS;
8345 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
8346 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
8347 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
8353 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
8354 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
8355 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
8356 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
8357 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
8358 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
8360 /* Update the previous switch rule of "forward to VSI" to
 * "forward to VSI list", pointing at the newly created list. */
8363 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Book-keeping: the current filter now forwards to the VSI list. */
8367 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
8368 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
8369 m_entry->vsi_list_info =
8370 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
8373 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
8375 if (!m_entry->vsi_list_info)
8378 /* A rule already exists with the new VSI being added */
8379 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
8382 /* Update the previously created VSI list set with
8383 * the new VSI ID passed in
8385 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
8387 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
8389 ice_aqc_opc_update_sw_rules,
8391 /* update VSI list mapping info with new VSI ID */
8393 ice_set_bit(vsi_handle,
8394 m_entry->vsi_list_info->vsi_map);
8397 m_entry->vsi_count++;
8402 * ice_add_adv_rule - helper function to create an advanced switch rule
8403 * @hw: pointer to the hardware structure
8404 * @lkups: information on the words that needs to be looked up. All words
8405 * together makes one recipe
8406 * @lkups_cnt: num of entries in the lkups array
8407 * @rinfo: other information related to the rule that needs to be programmed
8408 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
8409 * ignored in case of error.
8411 * This function can program only 1 rule at a time. The lkups is used to
8412 * describe all the words that form the "lookup" portion of the recipe.
8413 * These words can span multiple protocols. Callers to this function need to
8414 * pass in a list of protocol headers with lookup information along and mask
8415 * that determines which words are valid from the given protocol header.
8416 * rinfo describes other information related to this rule such as forwarding
8417 * IDs, priority of this rule, etc.
8420 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8421 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
8422 struct ice_rule_query_data *added_entry)
8424 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
8425 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
8426 const struct ice_dummy_pkt_offsets *pkt_offsets;
8427 struct ice_aqc_sw_rules_elem *s_rule = NULL;
8428 struct LIST_HEAD_TYPE *rule_head;
8429 struct ice_switch_info *sw;
8430 enum ice_status status;
8431 const u8 *pkt = NULL;
8437 /* Initialize profile to result index bitmap */
8438 if (!hw->switch_info->prof_res_bm_init) {
8439 hw->switch_info->prof_res_bm_init = 1;
8440 ice_init_prof_result_bm(hw);
/* Profile rules may legitimately carry zero lookups; others may not. */
8443 prof_rule = ice_is_prof_rule(rinfo->tun_type);
8444 if (!prof_rule && !lkups_cnt)
8445 return ICE_ERR_PARAM;
8447 /* get # of words we need to match */
8449 for (i = 0; i < lkups_cnt; i++) {
8452 ptr = (u16 *)&lkups[i].m_u;
8453 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
/* Total masked words must fit within a recipe's chaining capacity. */
8459 if (word_cnt > ICE_MAX_CHAIN_WORDS)
8460 return ICE_ERR_PARAM;
8462 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
8463 return ICE_ERR_PARAM;
8466 /* make sure that we can locate a dummy packet */
8467 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
8470 status = ICE_ERR_PARAM;
8471 goto err_ice_add_adv_rule;
/* Only these four forwarding actions are supported for advanced rules. */
8474 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
8475 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
8476 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
8477 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
8480 vsi_handle = rinfo->sw_act.vsi_handle;
8481 if (!ice_is_vsi_valid(hw, vsi_handle))
8482 return ICE_ERR_PARAM;
8484 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
8485 rinfo->sw_act.fwd_id.hw_vsi_id =
8486 ice_get_hw_vsi_num(hw, vsi_handle);
8487 if (rinfo->sw_act.flag & ICE_FLTR_TX)
8488 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
8490 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
8493 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
8495 /* we have to add VSI to VSI_LIST and increment vsi_count.
8496 * Also Update VSI list so that we can change forwarding rule
8497 * if the rule already exists, we will check if it exists with
8498 * same vsi_id, if not then add it to the VSI list if it already
8499 * exists if not then create a VSI list and add the existing VSI
8500 * ID and the new VSI ID to the list
8501 * We will add that VSI to the list
8503 status = ice_adv_add_update_vsi_list(hw, m_entry,
8504 &m_entry->rule_info,
8507 added_entry->rid = rid;
8508 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
8509 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* Rule buffer: fixed header plus the dummy packet image. */
8513 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
8514 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
8516 return ICE_ERR_NO_MEMORY;
8517 act |= ICE_SINGLE_ACT_LAN_ENABLE;
/* Encode the caller's forwarding action into the rule's act word. */
8518 switch (rinfo->sw_act.fltr_act) {
8519 case ICE_FWD_TO_VSI:
8520 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
8521 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
8522 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
8525 act |= ICE_SINGLE_ACT_TO_Q;
8526 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
8527 ICE_SINGLE_ACT_Q_INDEX_M;
8529 case ICE_FWD_TO_QGRP:
8530 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
8531 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
8532 act |= ICE_SINGLE_ACT_TO_Q;
8533 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
8534 ICE_SINGLE_ACT_Q_INDEX_M;
8535 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
8536 ICE_SINGLE_ACT_Q_REGION_M;
8538 case ICE_DROP_PACKET:
8539 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
8540 ICE_SINGLE_ACT_VALID_BIT;
8543 status = ICE_ERR_CFG;
8544 goto err_ice_add_adv_rule;
8547 /* set the rule LOOKUP type based on caller specified 'RX'
8548 * instead of hardcoding it to be either LOOKUP_TX/RX
8550 * for 'RX' set the source to be the port number
8551 * for 'TX' set the source to be the source HW VSI number (determined
8555 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
8556 s_rule->pdata.lkup_tx_rx.src =
8557 CPU_TO_LE16(hw->port_info->lport);
8559 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
8560 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
8563 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
8564 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* Build the packet image from the dummy packet and the caller's lookups. */
8566 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
8567 pkt_len, pkt_offsets);
8569 goto err_ice_add_adv_rule;
8571 if (rinfo->tun_type != ICE_NON_TUN &&
8572 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
8573 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
8574 s_rule->pdata.lkup_tx_rx.hdr,
8577 goto err_ice_add_adv_rule;
/* Program the rule via the admin queue. */
8580 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
8581 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
8584 goto err_ice_add_adv_rule;
8585 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
8586 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
8588 status = ICE_ERR_NO_MEMORY;
8589 goto err_ice_add_adv_rule;
8592 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
8593 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
8594 ICE_NONDMA_TO_NONDMA);
/* A NULL copy is tolerated only for profile rules (which may have
 * lkups_cnt == 0). */
8595 if (!adv_fltr->lkups && !prof_rule) {
8596 status = ICE_ERR_NO_MEMORY;
8597 goto err_ice_add_adv_rule;
8600 adv_fltr->lkups_cnt = lkups_cnt;
8601 adv_fltr->rule_info = *rinfo;
8602 adv_fltr->rule_info.fltr_rule_id =
8603 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
8604 sw = hw->switch_info;
8605 sw->recp_list[rid].adv_rule = true;
8606 rule_head = &sw->recp_list[rid].filt_rules;
8608 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
8609 adv_fltr->vsi_count = 1;
8611 /* Add rule entry to book keeping list */
8612 LIST_ADD(&adv_fltr->list_entry, rule_head);
8614 added_entry->rid = rid;
8615 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
8616 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
8618 err_ice_add_adv_rule:
/* goto-based cleanup: free partially built book-keeping on any failure. */
8619 if (status && adv_fltr) {
8620 ice_free(hw, adv_fltr->lkups);
8621 ice_free(hw, adv_fltr);
8624 ice_free(hw, s_rule);
8630 * ice_adv_rem_update_vsi_list
8631 * @hw: pointer to the hardware structure
8632 * @vsi_handle: VSI handle of the VSI to remove
8633 * @fm_list: filter management entry for which the VSI list management needs to
8636 static enum ice_status
8637 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
8638 struct ice_adv_fltr_mgmt_list_entry *fm_list)
8640 struct ice_vsi_list_map_info *vsi_list_info;
8641 enum ice_sw_lkup_type lkup_type;
8642 enum ice_status status;
/* Only VSI-list-backed rules with at least one subscribed VSI qualify. */
8645 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
8646 fm_list->vsi_count == 0)
8647 return ICE_ERR_PARAM;
8649 /* A rule with the VSI being removed does not exist */
8650 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
8651 return ICE_ERR_DOES_NOT_EXIST;
8653 lkup_type = ICE_SW_LKUP_LAST;
8654 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
/* Remove this VSI from the HW VSI list (the 'true' flag requests
 * removal rather than addition). */
8655 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
8656 ice_aqc_opc_update_sw_rules,
8661 fm_list->vsi_count--;
8662 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
8663 vsi_list_info = fm_list->vsi_list_info;
/* With one subscriber left, collapse the VSI list back to a plain
 * forward-to-VSI rule for the remaining VSI. */
8664 if (fm_list->vsi_count == 1) {
8665 struct ice_fltr_info tmp_fltr;
8668 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
8670 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
8671 return ICE_ERR_OUT_OF_RANGE;
8673 /* Make sure VSI list is empty before removing it below */
8674 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
8676 ice_aqc_opc_update_sw_rules,
8681 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
8682 tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
8683 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
8684 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
8685 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
8686 tmp_fltr.fwd_id.hw_vsi_id =
8687 ice_get_hw_vsi_num(hw, rem_vsi_handle);
8688 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
8689 ice_get_hw_vsi_num(hw, rem_vsi_handle);
8690 fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
8692 /* Update the previous switch rule of "MAC forward to VSI list" to
8693 * "MAC fwd to VSI"
8695 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
8697 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
8698 tmp_fltr.fwd_id.hw_vsi_id, status);
8701 fm_list->vsi_list_info->ref_cnt--;
8703 /* Remove the VSI list since it is no longer used */
8704 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
8706 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
8707 vsi_list_id, status);
8711 LIST_DEL(&vsi_list_info->list_entry);
8712 ice_free(hw, vsi_list_info);
8713 fm_list->vsi_list_info = NULL;
8720 * ice_rem_adv_rule - removes existing advanced switch rule
8721 * @hw: pointer to the hardware structure
8722 * @lkups: information on the words that needs to be looked up. All words
8723 * together makes one recipe
8724 * @lkups_cnt: num of entries in the lkups array
8725 * @rinfo: pointer to the rule information for the rule
8727 * This function can be used to remove 1 rule at a time. The lkups is
8728 * used to describe all the words that forms the "lookup" portion of the
8729 * rule. These words can span multiple protocols. Callers to this function
8730 * need to pass in a list of protocol headers with lookup information along
8731 * and mask that determines which words are valid from the given protocol
8732 * header. rinfo describes other information related to this rule such as
8733 * forwarding IDs, priority of this rule, etc.
8736 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8737 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
8739 struct ice_adv_fltr_mgmt_list_entry *list_elem;
8740 struct ice_prot_lkup_ext lkup_exts;
8741 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
8742 enum ice_status status = ICE_SUCCESS;
8743 bool remove_rule = false;
8744 u16 i, rid, vsi_handle;
/* Rebuild the protocol/offset word list so the owning recipe can be
 * located the same way it was at rule-add time. */
8746 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
8747 for (i = 0; i < lkups_cnt; i++) {
8750 if (lkups[i].type >= ICE_PROTOCOL_LAST)
8753 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
8758 /* Create any special protocol/offset pairs, such as looking at tunnel
8759 * bits by extracting metadata
8761 status = ice_add_special_words(rinfo, &lkup_exts);
8765 rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
8766 /* If we did not find a recipe that matches the existing criteria */
8767 if (rid == ICE_MAX_NUM_RECIPES)
8768 return ICE_ERR_PARAM;
8770 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
8771 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
8772 /* the rule is already removed */
8775 ice_acquire_lock(rule_lock);
/* Decide between deleting the rule outright and only unsubscribing
 * this VSI from the rule's VSI list. */
8776 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
8778 } else if (list_elem->vsi_count > 1) {
8779 remove_rule = false;
8780 vsi_handle = rinfo->sw_act.vsi_handle;
8781 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
8783 vsi_handle = rinfo->sw_act.vsi_handle;
8784 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
8786 ice_release_lock(rule_lock);
8789 if (list_elem->vsi_count == 0)
8792 ice_release_lock(rule_lock);
8794 struct ice_aqc_sw_rules_elem *s_rule;
8797 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
8798 s_rule = (struct ice_aqc_sw_rules_elem *)
8799 ice_malloc(hw, rule_buf_sz);
8801 return ICE_ERR_NO_MEMORY;
8802 s_rule->pdata.lkup_tx_rx.act = 0;
8803 s_rule->pdata.lkup_tx_rx.index =
8804 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
8805 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
8806 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
8808 ice_aqc_opc_remove_sw_rules, NULL);
/* DOES_NOT_EXIST is tolerated: the rule may already be gone in HW but
 * still present in the book-keeping list. */
8809 if (status == ICE_SUCCESS || status == ICE_ERR_DOES_NOT_EXIST) {
8810 struct ice_switch_info *sw = hw->switch_info;
8812 ice_acquire_lock(rule_lock);
8813 LIST_DEL(&list_elem->list_entry);
8814 ice_free(hw, list_elem->lkups);
8815 ice_free(hw, list_elem);
8816 ice_release_lock(rule_lock);
8817 if (LIST_EMPTY(&sw->recp_list[rid].filt_rules))
8818 sw->recp_list[rid].adv_rule = false;
8820 ice_free(hw, s_rule);
8826 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
8827 * @hw: pointer to the hardware structure
8828 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
8830 * This function is used to remove 1 rule at a time. The removal is based on
8831 * the remove_entry parameter. This function will remove rule for a given
8832 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
8835 ice_rem_adv_rule_by_id(struct ice_hw *hw,
8836 struct ice_rule_query_data *remove_entry)
8838 struct ice_adv_fltr_mgmt_list_entry *list_itr;
8839 struct LIST_HEAD_TYPE *list_head;
8840 struct ice_adv_rule_info rinfo;
8841 struct ice_switch_info *sw;
8843 sw = hw->switch_info;
/* The recipe referenced by the caller must actually exist. */
8844 if (!sw->recp_list[remove_entry->rid].recp_created)
8845 return ICE_ERR_PARAM;
8846 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
/* Linear search for the matching rule ID, then delegate the actual
 * removal to ice_rem_adv_rule() with the stored lookups. */
8847 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
8849 if (list_itr->rule_info.fltr_rule_id ==
8850 remove_entry->rule_id) {
8851 rinfo = list_itr->rule_info;
8852 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
8853 return ice_rem_adv_rule(hw, list_itr->lkups,
8854 list_itr->lkups_cnt, &rinfo);
8857 /* either list is empty or unable to find rule */
8858 return ICE_ERR_DOES_NOT_EXIST;
8862 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
8864 * @hw: pointer to the hardware structure
8865 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
8867 * This function is used to remove all the rules for a given VSI and as soon
8868 * as removing a rule fails, it will return immediately with the error code,
8869 * else it will return ICE_SUCCESS
8871 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
8873 struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
8874 struct ice_vsi_list_map_info *map_info;
8875 struct LIST_HEAD_TYPE *list_head;
8876 struct ice_adv_rule_info rinfo;
8877 struct ice_switch_info *sw;
8878 enum ice_status status;
8881 sw = hw->switch_info;
/* Scan every created recipe that carries advanced rules. */
8882 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
8883 if (!sw->recp_list[rid].recp_created)
8885 if (!sw->recp_list[rid].adv_rule)
8888 list_head = &sw->recp_list[rid].filt_rules;
/* _SAFE iteration: ice_rem_adv_rule() may delete the current entry. */
8889 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp_entry, list_head,
8890 ice_adv_fltr_mgmt_list_entry,
8892 rinfo = list_itr->rule_info;
/* VSI-list rules match when the VSI is in the list's bitmap; plain
 * rules match on the stored VSI handle. */
8894 if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
8895 map_info = list_itr->vsi_list_info;
8899 if (!ice_is_bit_set(map_info->vsi_map,
8902 } else if (rinfo.sw_act.vsi_handle != vsi_handle) {
8906 rinfo.sw_act.vsi_handle = vsi_handle;
8907 status = ice_rem_adv_rule(hw, list_itr->lkups,
8908 list_itr->lkups_cnt, &rinfo);
8918 * ice_replay_fltr - Replay all the filters stored by a specific list head
8919 * @hw: pointer to the hardware structure
8920 * @list_head: list for which filters needs to be replayed
8921 * @recp_id: Recipe ID for which rules need to be replayed
8923 static enum ice_status
8924 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
8926 struct ice_fltr_mgmt_list_entry *itr;
8927 enum ice_status status = ICE_SUCCESS;
8928 struct ice_sw_recipe *recp_list;
8929 u8 lport = hw->port_info->lport;
8930 struct LIST_HEAD_TYPE l_head;
8932 if (LIST_EMPTY(list_head))
8935 recp_list = &hw->switch_info->recp_list[recp_id];
8936 /* Move entries from the given list_head to a temporary l_head so that
8937 * they can be replayed. Otherwise when trying to re-add the same
8938 * filter, the function will return already exists
8940 LIST_REPLACE_INIT(list_head, &l_head);
8942 /* Mark the given list_head empty by reinitializing it so filters
8943 * could be added again by *handler
8945 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
8947 struct ice_fltr_list_entry f_entry;
8950 f_entry.fltr_info = itr->fltr_info;
/* Single-VSI (non-VLAN) filters are re-added directly ... */
8951 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
8952 status = ice_add_rule_internal(hw, recp_list, lport,
8954 if (status != ICE_SUCCESS)
8959 /* Add a filter per VSI separately */
8960 ice_for_each_set_bit(vsi_handle, itr->vsi_list_info->vsi_map,
8962 if (!ice_is_vsi_valid(hw, vsi_handle))
/* Clear the bit first so the re-add path can rebuild the map. */
8965 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
8966 f_entry.fltr_info.vsi_handle = vsi_handle;
8967 f_entry.fltr_info.fwd_id.hw_vsi_id =
8968 ice_get_hw_vsi_num(hw, vsi_handle);
8969 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
8970 if (recp_id == ICE_SW_LKUP_VLAN)
8971 status = ice_add_vlan_internal(hw, recp_list,
8974 status = ice_add_rule_internal(hw, recp_list,
8977 if (status != ICE_SUCCESS)
8982 /* Clear the filter management list */
8983 ice_rem_sw_rule_info(hw, &l_head);
8988 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
8989 * @hw: pointer to the hardware structure
8991 * NOTE: This function does not clean up partially added filters on error.
8992 * It is up to caller of the function to issue a reset or fail early.
8994 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
8996 struct ice_switch_info *sw = hw->switch_info;
8997 enum ice_status status = ICE_SUCCESS;
/* Replay every recipe's filter list; stop at the first failure (no
 * rollback of already-replayed filters — see header NOTE). */
9000 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9001 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
9003 status = ice_replay_fltr(hw, i, head);
9004 if (status != ICE_SUCCESS)
9011 * ice_replay_vsi_fltr - Replay filters for requested VSI
9012 * @hw: pointer to the hardware structure
9013 * @pi: pointer to port information structure
9014 * @sw: pointer to switch info struct for which function replays filters
9015 * @vsi_handle: driver VSI handle
9016 * @recp_id: Recipe ID for which rules need to be replayed
9017 * @list_head: list for which filters need to be replayed
9019 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
9020 * It is required to pass valid VSI handle.
9022 static enum ice_status
9023 ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
9024 struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
9025 struct LIST_HEAD_TYPE *list_head)
9027 struct ice_fltr_mgmt_list_entry *itr;
9028 enum ice_status status = ICE_SUCCESS;
9029 struct ice_sw_recipe *recp_list;
9032 if (LIST_EMPTY(list_head))
9034 recp_list = &sw->recp_list[recp_id];
9035 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
9037 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
9039 struct ice_fltr_list_entry f_entry;
9041 f_entry.fltr_info = itr->fltr_info;
/* Direct re-add for single-VSI (non-VLAN) filters owned by this VSI. */
9042 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
9043 itr->fltr_info.vsi_handle == vsi_handle) {
9044 /* update the src in case it is VSI num */
9045 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
9046 f_entry.fltr_info.src = hw_vsi_id;
9047 status = ice_add_rule_internal(hw, recp_list,
9050 if (status != ICE_SUCCESS)
/* VSI-list filters: replay only if this VSI is in the list's bitmap. */
9054 if (!itr->vsi_list_info ||
9055 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
9057 /* Clearing it so that the logic can add it back */
9058 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
9059 f_entry.fltr_info.vsi_handle = vsi_handle;
9060 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
9061 /* update the src in case it is VSI num */
9062 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
9063 f_entry.fltr_info.src = hw_vsi_id;
9064 if (recp_id == ICE_SW_LKUP_VLAN)
9065 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
9067 status = ice_add_rule_internal(hw, recp_list,
9070 if (status != ICE_SUCCESS)
9078 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
9079 * @hw: pointer to the hardware structure
9080 * @vsi_handle: driver VSI handle
9081 * @list_head: list for which filters need to be replayed
9083 * Replay the advanced rule for the given VSI.
9085 static enum ice_status
9086 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
9087 struct LIST_HEAD_TYPE *list_head)
9089 struct ice_rule_query_data added_entry = { 0 };
9090 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
9091 enum ice_status status = ICE_SUCCESS;
/* Nothing to replay for an empty advanced-rule list */
9093 if (LIST_EMPTY(list_head))
9095 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
9097 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
9098 u16 lk_cnt = adv_fltr->lkups_cnt;
/* Only replay rules whose switch action targets this VSI */
9100 if (vsi_handle != rinfo->sw_act.vsi_handle)
9102 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
9111 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
9112 * @hw: pointer to the hardware structure
9113 * @pi: pointer to port information structure
9114 * @vsi_handle: driver VSI handle
9116 * Replays filters for requested VSI via vsi_handle.
9119 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
9122 struct ice_switch_info *sw = hw->switch_info;
9123 enum ice_status status;
9126 /* Update the recipes that were created */
9127 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9128 struct LIST_HEAD_TYPE *head;
9130 head = &sw->recp_list[i].filt_replay_rules;
/* Legacy (non-advanced) recipes use the per-recipe replay path;
 * advanced recipes use the advanced-rule replay path.
 */
9131 if (!sw->recp_list[i].adv_rule)
9132 status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
9135 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
/* Stop at the first recipe that fails to replay */
9136 if (status != ICE_SUCCESS)
9144 * ice_rm_sw_replay_rule_info - helper function to delete filter replay rules
9145 * @hw: pointer to the HW struct
9146 * @sw: pointer to switch info struct for which function removes filters
9148 * Deletes the filter replay rules for given switch
9150 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
9157 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
/* Only recipes with a non-empty replay list need cleanup */
9158 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
9159 struct LIST_HEAD_TYPE *l_head;
9161 l_head = &sw->recp_list[i].filt_replay_rules;
/* Legacy rules and advanced rules have distinct teardown paths */
9162 if (!sw->recp_list[i].adv_rule)
9163 ice_rem_sw_rule_info(hw, l_head);
9165 ice_rem_adv_rule_info(hw, l_head);
9171 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
9172 * @hw: pointer to the HW struct
9174 * Deletes the filter replay rules.
9176 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
/* Thin wrapper: clean up replay rules for this HW's switch info */
9178 ice_rm_sw_replay_rule_info(hw, hw->switch_info);