1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2021 Intel Corporation
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
/* Byte offsets into a plain (untagged) Ethernet header */
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
/* Largest valid 12-bit VLAN ID */
12 #define ICE_MAX_VLAN_ID 0xFFF
/* IPv4 protocol number 0x2F (GRE), used by the NVGRE dummy packets below */
13 #define ICE_IPV4_NVGRE_PROTO_ID 0x002F
/* PPP protocol field value for IPv6 (see the PPPoE dummy packets' PPP link layer) */
14 #define ICE_PPP_IPV6_PROTO_ID 0x0057
/* Ethertype for IPv6 */
15 #define ICE_IPV6_ETHER_ID 0x86DD
/* IP protocol number for TCP */
16 #define ICE_TCP_PROTO_ID 0x06
/* Ethertype for an 802.1Q VLAN tag */
17 #define ICE_ETH_P_8021Q 0x8100
19 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
20 * struct to configure any switch filter rules.
21 * {DA (6 bytes), SA(6 bytes),
22 * Ether type (2 bytes for header without VLAN tag) OR
23 * VLAN tag (4 bytes for header with VLAN tag) }
25 * A word on the hardcoded values below:
26 * byte 0 = 0x2: to identify it as locally administered DA MAC
27 * byte 6 = 0x2: to identify it as locally administered SA MAC
28 * byte 12 = 0x81 & byte 13 = 0x00:
29 * In case of VLAN filter first two bytes defines ether type (0x8100)
30 * and remaining two bytes are placeholder for programming a given VLAN ID
31 * In case of Ether type filter it is treated as header without VLAN tag
32 * and byte 12 and 13 is used to program a given Ether type instead
34 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* Describes one protocol header inside a dummy packet: which protocol it is
 * and the byte offset at which that header starts within the packet buffer.
 * Each dummy packet below is paired with an array of these entries.
 */
38 struct ice_dummy_pkt_offsets {
39 enum ice_protocol_type type;
40 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
43 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
46 { ICE_IPV4_OFOS, 14 },
51 { ICE_PROTOCOL_LAST, 0 },
54 static const u8 dummy_gre_tcp_packet[] = {
55 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
56 0x00, 0x00, 0x00, 0x00,
57 0x00, 0x00, 0x00, 0x00,
59 0x08, 0x00, /* ICE_ETYPE_OL 12 */
61 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
62 0x00, 0x00, 0x00, 0x00,
63 0x00, 0x2F, 0x00, 0x00,
64 0x00, 0x00, 0x00, 0x00,
65 0x00, 0x00, 0x00, 0x00,
67 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
68 0x00, 0x00, 0x00, 0x00,
70 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
71 0x00, 0x00, 0x00, 0x00,
72 0x00, 0x00, 0x00, 0x00,
75 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
76 0x00, 0x00, 0x00, 0x00,
77 0x00, 0x06, 0x00, 0x00,
78 0x00, 0x00, 0x00, 0x00,
79 0x00, 0x00, 0x00, 0x00,
81 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
82 0x00, 0x00, 0x00, 0x00,
83 0x00, 0x00, 0x00, 0x00,
84 0x50, 0x02, 0x20, 0x00,
85 0x00, 0x00, 0x00, 0x00
88 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
91 { ICE_IPV4_OFOS, 14 },
96 { ICE_PROTOCOL_LAST, 0 },
99 static const u8 dummy_gre_udp_packet[] = {
100 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
101 0x00, 0x00, 0x00, 0x00,
102 0x00, 0x00, 0x00, 0x00,
104 0x08, 0x00, /* ICE_ETYPE_OL 12 */
106 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
107 0x00, 0x00, 0x00, 0x00,
108 0x00, 0x2F, 0x00, 0x00,
109 0x00, 0x00, 0x00, 0x00,
110 0x00, 0x00, 0x00, 0x00,
112 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
113 0x00, 0x00, 0x00, 0x00,
115 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
116 0x00, 0x00, 0x00, 0x00,
117 0x00, 0x00, 0x00, 0x00,
120 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
121 0x00, 0x00, 0x00, 0x00,
122 0x00, 0x11, 0x00, 0x00,
123 0x00, 0x00, 0x00, 0x00,
124 0x00, 0x00, 0x00, 0x00,
126 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
127 0x00, 0x08, 0x00, 0x00,
130 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
132 { ICE_ETYPE_OL, 12 },
133 { ICE_IPV4_OFOS, 14 },
137 { ICE_VXLAN_GPE, 42 },
141 { ICE_PROTOCOL_LAST, 0 },
144 static const u8 dummy_udp_tun_tcp_packet[] = {
145 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
146 0x00, 0x00, 0x00, 0x00,
147 0x00, 0x00, 0x00, 0x00,
149 0x08, 0x00, /* ICE_ETYPE_OL 12 */
151 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
152 0x00, 0x01, 0x00, 0x00,
153 0x40, 0x11, 0x00, 0x00,
154 0x00, 0x00, 0x00, 0x00,
155 0x00, 0x00, 0x00, 0x00,
157 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
158 0x00, 0x46, 0x00, 0x00,
160 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
161 0x00, 0x00, 0x00, 0x00,
163 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
164 0x00, 0x00, 0x00, 0x00,
165 0x00, 0x00, 0x00, 0x00,
168 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
169 0x00, 0x01, 0x00, 0x00,
170 0x40, 0x06, 0x00, 0x00,
171 0x00, 0x00, 0x00, 0x00,
172 0x00, 0x00, 0x00, 0x00,
174 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
175 0x00, 0x00, 0x00, 0x00,
176 0x00, 0x00, 0x00, 0x00,
177 0x50, 0x02, 0x20, 0x00,
178 0x00, 0x00, 0x00, 0x00
181 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
183 { ICE_ETYPE_OL, 12 },
184 { ICE_IPV4_OFOS, 14 },
188 { ICE_VXLAN_GPE, 42 },
191 { ICE_UDP_ILOS, 84 },
192 { ICE_PROTOCOL_LAST, 0 },
195 static const u8 dummy_udp_tun_udp_packet[] = {
196 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
197 0x00, 0x00, 0x00, 0x00,
198 0x00, 0x00, 0x00, 0x00,
200 0x08, 0x00, /* ICE_ETYPE_OL 12 */
202 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
203 0x00, 0x01, 0x00, 0x00,
204 0x00, 0x11, 0x00, 0x00,
205 0x00, 0x00, 0x00, 0x00,
206 0x00, 0x00, 0x00, 0x00,
208 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
209 0x00, 0x3a, 0x00, 0x00,
211 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
212 0x00, 0x00, 0x00, 0x00,
214 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
215 0x00, 0x00, 0x00, 0x00,
216 0x00, 0x00, 0x00, 0x00,
219 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
220 0x00, 0x01, 0x00, 0x00,
221 0x00, 0x11, 0x00, 0x00,
222 0x00, 0x00, 0x00, 0x00,
223 0x00, 0x00, 0x00, 0x00,
225 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
226 0x00, 0x08, 0x00, 0x00,
229 /* offset info for MAC + IPv4 + UDP dummy packet */
230 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
232 { ICE_ETYPE_OL, 12 },
233 { ICE_IPV4_OFOS, 14 },
234 { ICE_UDP_ILOS, 34 },
/* ICE_PROTOCOL_LAST terminates the offset list */
235 { ICE_PROTOCOL_LAST, 0 },
238 /* Dummy packet for MAC + IPv4 + UDP */
239 static const u8 dummy_udp_packet[] = {
240 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
241 0x00, 0x00, 0x00, 0x00,
242 0x00, 0x00, 0x00, 0x00,
244 0x08, 0x00, /* ICE_ETYPE_OL 12 */
/* IPv4 header: total length 0x001c = 20 (IP) + 8 (UDP); protocol 0x11 = UDP */
246 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
247 0x00, 0x01, 0x00, 0x00,
248 0x00, 0x11, 0x00, 0x00,
249 0x00, 0x00, 0x00, 0x00,
250 0x00, 0x00, 0x00, 0x00,
/* UDP header: length field 0x0008 = bare header, no payload */
252 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
253 0x00, 0x08, 0x00, 0x00,
255 0x00, 0x00, /* 2 bytes for 4 byte alignment */
258 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
259 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
261 { ICE_ETYPE_OL, 12 },
262 { ICE_VLAN_OFOS, 14 },
263 { ICE_IPV4_OFOS, 18 },
264 { ICE_UDP_ILOS, 38 },
/* ICE_PROTOCOL_LAST terminates the offset list */
265 { ICE_PROTOCOL_LAST, 0 },
268 /* C-tag (802.1Q), IPv4:UDP dummy packet */
269 static const u8 dummy_vlan_udp_packet[] = {
270 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
271 0x00, 0x00, 0x00, 0x00,
272 0x00, 0x00, 0x00, 0x00,
274 0x81, 0x00, /* ICE_ETYPE_OL 12 */
276 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
278 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
279 0x00, 0x01, 0x00, 0x00,
280 0x00, 0x11, 0x00, 0x00,
281 0x00, 0x00, 0x00, 0x00,
282 0x00, 0x00, 0x00, 0x00,
284 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
285 0x00, 0x08, 0x00, 0x00,
287 0x00, 0x00, /* 2 bytes for 4 byte alignment */
290 /* offset info for MAC + IPv4 + TCP dummy packet */
291 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
293 { ICE_ETYPE_OL, 12 },
294 { ICE_IPV4_OFOS, 14 },
/* ICE_PROTOCOL_LAST terminates the offset list */
296 { ICE_PROTOCOL_LAST, 0 },
299 /* Dummy packet for MAC + IPv4 + TCP */
300 static const u8 dummy_tcp_packet[] = {
301 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
302 0x00, 0x00, 0x00, 0x00,
303 0x00, 0x00, 0x00, 0x00,
305 0x08, 0x00, /* ICE_ETYPE_OL 12 */
/* IPv4 header: total length 0x0028 = 20 (IP) + 20 (TCP); protocol 0x06 = TCP */
307 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
308 0x00, 0x01, 0x00, 0x00,
309 0x00, 0x06, 0x00, 0x00,
310 0x00, 0x00, 0x00, 0x00,
311 0x00, 0x00, 0x00, 0x00,
313 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
314 0x00, 0x00, 0x00, 0x00,
315 0x00, 0x00, 0x00, 0x00,
/* 0x50 = TCP data offset of 5 words (20-byte header), no flags set */
316 0x50, 0x00, 0x00, 0x00,
317 0x00, 0x00, 0x00, 0x00,
319 0x00, 0x00, /* 2 bytes for 4 byte alignment */
322 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
323 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
325 { ICE_ETYPE_OL, 12 },
326 { ICE_VLAN_OFOS, 14 },
327 { ICE_IPV4_OFOS, 18 },
329 { ICE_PROTOCOL_LAST, 0 },
332 /* C-tag (802.1Q), IPv4:TCP dummy packet */
333 static const u8 dummy_vlan_tcp_packet[] = {
334 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
335 0x00, 0x00, 0x00, 0x00,
336 0x00, 0x00, 0x00, 0x00,
338 0x81, 0x00, /* ICE_ETYPE_OL 12 */
340 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
342 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
343 0x00, 0x01, 0x00, 0x00,
344 0x00, 0x06, 0x00, 0x00,
345 0x00, 0x00, 0x00, 0x00,
346 0x00, 0x00, 0x00, 0x00,
348 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
349 0x00, 0x00, 0x00, 0x00,
350 0x00, 0x00, 0x00, 0x00,
351 0x50, 0x00, 0x00, 0x00,
352 0x00, 0x00, 0x00, 0x00,
354 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv6 + TCP dummy packet */
357 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
359 { ICE_ETYPE_OL, 12 },
360 { ICE_IPV6_OFOS, 14 },
/* ICE_PROTOCOL_LAST terminates the offset list */
362 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv6 + TCP */
365 static const u8 dummy_tcp_ipv6_packet[] = {
366 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
367 0x00, 0x00, 0x00, 0x00,
368 0x00, 0x00, 0x00, 0x00,
370 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
/* IPv6 header: payload length 0x0014 = 20-byte TCP header only.
 * Offset is 14 (MAC 12 + ethertype 2), matching dummy_tcp_ipv6_packet_offsets.
 */
372 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
373 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00,
379 0x00, 0x00, 0x00, 0x00,
380 0x00, 0x00, 0x00, 0x00,
381 0x00, 0x00, 0x00, 0x00,
383 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
384 0x00, 0x00, 0x00, 0x00,
385 0x00, 0x00, 0x00, 0x00,
/* 0x50 = TCP data offset of 5 words (20-byte header), no flags set */
386 0x50, 0x00, 0x00, 0x00,
387 0x00, 0x00, 0x00, 0x00,
389 0x00, 0x00, /* 2 bytes for 4 byte alignment */
392 /* C-tag (802.1Q): IPv6 + TCP */
393 static const struct ice_dummy_pkt_offsets
394 dummy_vlan_tcp_ipv6_packet_offsets[] = {
396 { ICE_ETYPE_OL, 12 },
397 { ICE_VLAN_OFOS, 14 },
398 { ICE_IPV6_OFOS, 18 },
400 { ICE_PROTOCOL_LAST, 0 },
403 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
404 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
405 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
406 0x00, 0x00, 0x00, 0x00,
407 0x00, 0x00, 0x00, 0x00,
409 0x81, 0x00, /* ICE_ETYPE_OL 12 */
411 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
413 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
414 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
415 0x00, 0x00, 0x00, 0x00,
416 0x00, 0x00, 0x00, 0x00,
417 0x00, 0x00, 0x00, 0x00,
418 0x00, 0x00, 0x00, 0x00,
419 0x00, 0x00, 0x00, 0x00,
420 0x00, 0x00, 0x00, 0x00,
421 0x00, 0x00, 0x00, 0x00,
422 0x00, 0x00, 0x00, 0x00,
424 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
425 0x00, 0x00, 0x00, 0x00,
426 0x00, 0x00, 0x00, 0x00,
427 0x50, 0x00, 0x00, 0x00,
428 0x00, 0x00, 0x00, 0x00,
430 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv6 + UDP dummy packet */
434 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
436 { ICE_ETYPE_OL, 12 },
437 { ICE_IPV6_OFOS, 14 },
438 { ICE_UDP_ILOS, 54 },
/* ICE_PROTOCOL_LAST terminates the offset list */
439 { ICE_PROTOCOL_LAST, 0 },
442 /* IPv6 + UDP dummy packet */
443 static const u8 dummy_udp_ipv6_packet[] = {
444 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
445 0x00, 0x00, 0x00, 0x00,
446 0x00, 0x00, 0x00, 0x00,
448 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
/* IPv6 header: payload length 0x0010 = UDP header (8) + 8 trailer bytes.
 * Offset is 14 (MAC 12 + ethertype 2), matching dummy_udp_ipv6_packet_offsets.
 */
450 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
451 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
452 0x00, 0x00, 0x00, 0x00,
453 0x00, 0x00, 0x00, 0x00,
454 0x00, 0x00, 0x00, 0x00,
455 0x00, 0x00, 0x00, 0x00,
456 0x00, 0x00, 0x00, 0x00,
457 0x00, 0x00, 0x00, 0x00,
458 0x00, 0x00, 0x00, 0x00,
459 0x00, 0x00, 0x00, 0x00,
/* UDP length field 0x0010 = 8-byte header + the 8 bytes that follow */
461 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
462 0x00, 0x10, 0x00, 0x00,
464 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
465 0x00, 0x00, 0x00, 0x00,
467 0x00, 0x00, /* 2 bytes for 4 byte alignment */
470 /* C-tag (802.1Q): IPv6 + UDP */
471 static const struct ice_dummy_pkt_offsets
472 dummy_vlan_udp_ipv6_packet_offsets[] = {
474 { ICE_ETYPE_OL, 12 },
475 { ICE_VLAN_OFOS, 14 },
476 { ICE_IPV6_OFOS, 18 },
477 { ICE_UDP_ILOS, 58 },
478 { ICE_PROTOCOL_LAST, 0 },
481 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
482 static const u8 dummy_vlan_udp_ipv6_packet[] = {
483 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
484 0x00, 0x00, 0x00, 0x00,
485 0x00, 0x00, 0x00, 0x00,
487 0x81, 0x00, /* ICE_ETYPE_OL 12 */
489 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
491 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
492 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
493 0x00, 0x00, 0x00, 0x00,
494 0x00, 0x00, 0x00, 0x00,
495 0x00, 0x00, 0x00, 0x00,
496 0x00, 0x00, 0x00, 0x00,
497 0x00, 0x00, 0x00, 0x00,
498 0x00, 0x00, 0x00, 0x00,
499 0x00, 0x00, 0x00, 0x00,
500 0x00, 0x00, 0x00, 0x00,
502 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
503 0x00, 0x08, 0x00, 0x00,
505 0x00, 0x00, /* 2 bytes for 4 byte alignment */
508 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
509 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_tcp_packet_offsets[] = {
511 { ICE_IPV4_OFOS, 14 },
516 { ICE_PROTOCOL_LAST, 0 },
519 static const u8 dummy_ipv4_gtpu_ipv4_tcp_packet[] = {
520 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
521 0x00, 0x00, 0x00, 0x00,
522 0x00, 0x00, 0x00, 0x00,
525 0x45, 0x00, 0x00, 0x58, /* IP 14 */
526 0x00, 0x00, 0x00, 0x00,
527 0x00, 0x11, 0x00, 0x00,
528 0x00, 0x00, 0x00, 0x00,
529 0x00, 0x00, 0x00, 0x00,
531 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
532 0x00, 0x44, 0x00, 0x00,
534 0x34, 0xff, 0x00, 0x34, /* GTP-U Header 42 */
535 0x00, 0x00, 0x00, 0x00,
536 0x00, 0x00, 0x00, 0x85,
538 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
539 0x00, 0x00, 0x00, 0x00,
541 0x45, 0x00, 0x00, 0x28, /* IP 62 */
542 0x00, 0x00, 0x00, 0x00,
543 0x00, 0x06, 0x00, 0x00,
544 0x00, 0x00, 0x00, 0x00,
545 0x00, 0x00, 0x00, 0x00,
547 0x00, 0x00, 0x00, 0x00, /* TCP 82 */
548 0x00, 0x00, 0x00, 0x00,
549 0x00, 0x00, 0x00, 0x00,
550 0x50, 0x00, 0x00, 0x00,
551 0x00, 0x00, 0x00, 0x00,
553 0x00, 0x00, /* 2 bytes for 4 byte alignment */
556 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
557 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_udp_packet_offsets[] = {
559 { ICE_IPV4_OFOS, 14 },
563 { ICE_UDP_ILOS, 82 },
564 { ICE_PROTOCOL_LAST, 0 },
567 static const u8 dummy_ipv4_gtpu_ipv4_udp_packet[] = {
568 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
569 0x00, 0x00, 0x00, 0x00,
570 0x00, 0x00, 0x00, 0x00,
573 0x45, 0x00, 0x00, 0x4c, /* IP 14 */
574 0x00, 0x00, 0x00, 0x00,
575 0x00, 0x11, 0x00, 0x00,
576 0x00, 0x00, 0x00, 0x00,
577 0x00, 0x00, 0x00, 0x00,
579 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
580 0x00, 0x38, 0x00, 0x00,
582 0x34, 0xff, 0x00, 0x28, /* GTP-U Header 42 */
583 0x00, 0x00, 0x00, 0x00,
584 0x00, 0x00, 0x00, 0x85,
586 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
587 0x00, 0x00, 0x00, 0x00,
589 0x45, 0x00, 0x00, 0x1c, /* IP 62 */
590 0x00, 0x00, 0x00, 0x00,
591 0x00, 0x11, 0x00, 0x00,
592 0x00, 0x00, 0x00, 0x00,
593 0x00, 0x00, 0x00, 0x00,
595 0x00, 0x00, 0x00, 0x00, /* UDP 82 */
596 0x00, 0x08, 0x00, 0x00,
598 0x00, 0x00, /* 2 bytes for 4 byte alignment */
601 /* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
602 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_tcp_packet_offsets[] = {
604 { ICE_IPV4_OFOS, 14 },
609 { ICE_PROTOCOL_LAST, 0 },
612 static const u8 dummy_ipv4_gtpu_ipv6_tcp_packet[] = {
613 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
614 0x00, 0x00, 0x00, 0x00,
615 0x00, 0x00, 0x00, 0x00,
618 0x45, 0x00, 0x00, 0x6c, /* IP 14 */
619 0x00, 0x00, 0x00, 0x00,
620 0x00, 0x11, 0x00, 0x00,
621 0x00, 0x00, 0x00, 0x00,
622 0x00, 0x00, 0x00, 0x00,
624 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
625 0x00, 0x58, 0x00, 0x00,
627 0x34, 0xff, 0x00, 0x48, /* GTP-U Header 42 */
628 0x00, 0x00, 0x00, 0x00,
629 0x00, 0x00, 0x00, 0x85,
631 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
632 0x00, 0x00, 0x00, 0x00,
634 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
635 0x00, 0x14, 0x06, 0x00,
636 0x00, 0x00, 0x00, 0x00,
637 0x00, 0x00, 0x00, 0x00,
638 0x00, 0x00, 0x00, 0x00,
639 0x00, 0x00, 0x00, 0x00,
640 0x00, 0x00, 0x00, 0x00,
641 0x00, 0x00, 0x00, 0x00,
642 0x00, 0x00, 0x00, 0x00,
643 0x00, 0x00, 0x00, 0x00,
645 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
646 0x00, 0x00, 0x00, 0x00,
647 0x00, 0x00, 0x00, 0x00,
648 0x50, 0x00, 0x00, 0x00,
649 0x00, 0x00, 0x00, 0x00,
651 0x00, 0x00, /* 2 bytes for 4 byte alignment */
654 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_udp_packet_offsets[] = {
656 { ICE_IPV4_OFOS, 14 },
660 { ICE_UDP_ILOS, 102 },
661 { ICE_PROTOCOL_LAST, 0 },
664 static const u8 dummy_ipv4_gtpu_ipv6_udp_packet[] = {
665 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
666 0x00, 0x00, 0x00, 0x00,
667 0x00, 0x00, 0x00, 0x00,
670 0x45, 0x00, 0x00, 0x60, /* IP 14 */
671 0x00, 0x00, 0x00, 0x00,
672 0x00, 0x11, 0x00, 0x00,
673 0x00, 0x00, 0x00, 0x00,
674 0x00, 0x00, 0x00, 0x00,
676 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
677 0x00, 0x4c, 0x00, 0x00,
679 0x34, 0xff, 0x00, 0x3c, /* GTP-U Header 42 */
680 0x00, 0x00, 0x00, 0x00,
681 0x00, 0x00, 0x00, 0x85,
683 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
684 0x00, 0x00, 0x00, 0x00,
686 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
687 0x00, 0x08, 0x11, 0x00,
688 0x00, 0x00, 0x00, 0x00,
689 0x00, 0x00, 0x00, 0x00,
690 0x00, 0x00, 0x00, 0x00,
691 0x00, 0x00, 0x00, 0x00,
692 0x00, 0x00, 0x00, 0x00,
693 0x00, 0x00, 0x00, 0x00,
694 0x00, 0x00, 0x00, 0x00,
695 0x00, 0x00, 0x00, 0x00,
697 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
698 0x00, 0x08, 0x00, 0x00,
700 0x00, 0x00, /* 2 bytes for 4 byte alignment */
703 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_tcp_packet_offsets[] = {
705 { ICE_IPV6_OFOS, 14 },
710 { ICE_PROTOCOL_LAST, 0 },
713 static const u8 dummy_ipv6_gtpu_ipv4_tcp_packet[] = {
714 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
715 0x00, 0x00, 0x00, 0x00,
716 0x00, 0x00, 0x00, 0x00,
719 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
720 0x00, 0x44, 0x11, 0x00,
721 0x00, 0x00, 0x00, 0x00,
722 0x00, 0x00, 0x00, 0x00,
723 0x00, 0x00, 0x00, 0x00,
724 0x00, 0x00, 0x00, 0x00,
725 0x00, 0x00, 0x00, 0x00,
726 0x00, 0x00, 0x00, 0x00,
727 0x00, 0x00, 0x00, 0x00,
728 0x00, 0x00, 0x00, 0x00,
730 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
731 0x00, 0x44, 0x00, 0x00,
733 0x34, 0xff, 0x00, 0x34, /* GTP-U Header 62 */
734 0x00, 0x00, 0x00, 0x00,
735 0x00, 0x00, 0x00, 0x85,
737 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
738 0x00, 0x00, 0x00, 0x00,
740 0x45, 0x00, 0x00, 0x28, /* IP 82 */
741 0x00, 0x00, 0x00, 0x00,
742 0x00, 0x06, 0x00, 0x00,
743 0x00, 0x00, 0x00, 0x00,
744 0x00, 0x00, 0x00, 0x00,
746 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
747 0x00, 0x00, 0x00, 0x00,
748 0x00, 0x00, 0x00, 0x00,
749 0x50, 0x00, 0x00, 0x00,
750 0x00, 0x00, 0x00, 0x00,
752 0x00, 0x00, /* 2 bytes for 4 byte alignment */
755 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_udp_packet_offsets[] = {
757 { ICE_IPV6_OFOS, 14 },
761 { ICE_UDP_ILOS, 102 },
762 { ICE_PROTOCOL_LAST, 0 },
765 static const u8 dummy_ipv6_gtpu_ipv4_udp_packet[] = {
766 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
767 0x00, 0x00, 0x00, 0x00,
768 0x00, 0x00, 0x00, 0x00,
771 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
772 0x00, 0x38, 0x11, 0x00,
773 0x00, 0x00, 0x00, 0x00,
774 0x00, 0x00, 0x00, 0x00,
775 0x00, 0x00, 0x00, 0x00,
776 0x00, 0x00, 0x00, 0x00,
777 0x00, 0x00, 0x00, 0x00,
778 0x00, 0x00, 0x00, 0x00,
779 0x00, 0x00, 0x00, 0x00,
780 0x00, 0x00, 0x00, 0x00,
782 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
783 0x00, 0x38, 0x00, 0x00,
785 0x34, 0xff, 0x00, 0x28, /* GTP-U Header 62 */
786 0x00, 0x00, 0x00, 0x00,
787 0x00, 0x00, 0x00, 0x85,
789 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
790 0x00, 0x00, 0x00, 0x00,
792 0x45, 0x00, 0x00, 0x1c, /* IP 82 */
793 0x00, 0x00, 0x00, 0x00,
794 0x00, 0x11, 0x00, 0x00,
795 0x00, 0x00, 0x00, 0x00,
796 0x00, 0x00, 0x00, 0x00,
798 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
799 0x00, 0x08, 0x00, 0x00,
801 0x00, 0x00, /* 2 bytes for 4 byte alignment */
804 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_tcp_packet_offsets[] = {
806 { ICE_IPV6_OFOS, 14 },
811 { ICE_PROTOCOL_LAST, 0 },
814 static const u8 dummy_ipv6_gtpu_ipv6_tcp_packet[] = {
815 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
816 0x00, 0x00, 0x00, 0x00,
817 0x00, 0x00, 0x00, 0x00,
820 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
821 0x00, 0x58, 0x11, 0x00,
822 0x00, 0x00, 0x00, 0x00,
823 0x00, 0x00, 0x00, 0x00,
824 0x00, 0x00, 0x00, 0x00,
825 0x00, 0x00, 0x00, 0x00,
826 0x00, 0x00, 0x00, 0x00,
827 0x00, 0x00, 0x00, 0x00,
828 0x00, 0x00, 0x00, 0x00,
829 0x00, 0x00, 0x00, 0x00,
831 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
832 0x00, 0x58, 0x00, 0x00,
834 0x34, 0xff, 0x00, 0x48, /* GTP-U Header 62 */
835 0x00, 0x00, 0x00, 0x00,
836 0x00, 0x00, 0x00, 0x85,
838 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
839 0x00, 0x00, 0x00, 0x00,
841 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
842 0x00, 0x14, 0x06, 0x00,
843 0x00, 0x00, 0x00, 0x00,
844 0x00, 0x00, 0x00, 0x00,
845 0x00, 0x00, 0x00, 0x00,
846 0x00, 0x00, 0x00, 0x00,
847 0x00, 0x00, 0x00, 0x00,
848 0x00, 0x00, 0x00, 0x00,
849 0x00, 0x00, 0x00, 0x00,
850 0x00, 0x00, 0x00, 0x00,
852 0x00, 0x00, 0x00, 0x00, /* TCP 122 */
853 0x00, 0x00, 0x00, 0x00,
854 0x00, 0x00, 0x00, 0x00,
855 0x50, 0x00, 0x00, 0x00,
856 0x00, 0x00, 0x00, 0x00,
858 0x00, 0x00, /* 2 bytes for 4 byte alignment */
861 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_udp_packet_offsets[] = {
863 { ICE_IPV6_OFOS, 14 },
867 { ICE_UDP_ILOS, 102 },
868 { ICE_PROTOCOL_LAST, 0 },
871 static const u8 dummy_ipv6_gtpu_ipv6_udp_packet[] = {
872 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
873 0x00, 0x00, 0x00, 0x00,
874 0x00, 0x00, 0x00, 0x00,
877 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
878 0x00, 0x4c, 0x11, 0x00,
879 0x00, 0x00, 0x00, 0x00,
880 0x00, 0x00, 0x00, 0x00,
881 0x00, 0x00, 0x00, 0x00,
882 0x00, 0x00, 0x00, 0x00,
883 0x00, 0x00, 0x00, 0x00,
884 0x00, 0x00, 0x00, 0x00,
885 0x00, 0x00, 0x00, 0x00,
886 0x00, 0x00, 0x00, 0x00,
888 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
889 0x00, 0x4c, 0x00, 0x00,
891 0x34, 0xff, 0x00, 0x3c, /* GTP-U Header 62 */
892 0x00, 0x00, 0x00, 0x00,
893 0x00, 0x00, 0x00, 0x85,
895 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
896 0x00, 0x00, 0x00, 0x00,
898 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
899 0x00, 0x08, 0x11, 0x00,
900 0x00, 0x00, 0x00, 0x00,
901 0x00, 0x00, 0x00, 0x00,
902 0x00, 0x00, 0x00, 0x00,
903 0x00, 0x00, 0x00, 0x00,
904 0x00, 0x00, 0x00, 0x00,
905 0x00, 0x00, 0x00, 0x00,
906 0x00, 0x00, 0x00, 0x00,
907 0x00, 0x00, 0x00, 0x00,
909 0x00, 0x00, 0x00, 0x00, /* UDP 122 */
910 0x00, 0x08, 0x00, 0x00,
912 0x00, 0x00, /* 2 bytes for 4 byte alignment */
915 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_packet_offsets[] = {
917 { ICE_IPV4_OFOS, 14 },
921 { ICE_PROTOCOL_LAST, 0 },
924 static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
925 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
926 0x00, 0x00, 0x00, 0x00,
927 0x00, 0x00, 0x00, 0x00,
930 0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
931 0x00, 0x00, 0x40, 0x00,
932 0x40, 0x11, 0x00, 0x00,
933 0x00, 0x00, 0x00, 0x00,
934 0x00, 0x00, 0x00, 0x00,
936 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
937 0x00, 0x00, 0x00, 0x00,
939 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
940 0x00, 0x00, 0x00, 0x00,
941 0x00, 0x00, 0x00, 0x85,
943 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
944 0x00, 0x00, 0x00, 0x00,
946 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
947 0x00, 0x00, 0x40, 0x00,
948 0x40, 0x00, 0x00, 0x00,
949 0x00, 0x00, 0x00, 0x00,
950 0x00, 0x00, 0x00, 0x00,
955 struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_packet_offsets[] = {
957 { ICE_IPV4_OFOS, 14 },
961 { ICE_PROTOCOL_LAST, 0 },
964 static const u8 dummy_ipv4_gtpu_ipv6_packet[] = {
965 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
966 0x00, 0x00, 0x00, 0x00,
967 0x00, 0x00, 0x00, 0x00,
970 0x45, 0x00, 0x00, 0x58, /* ICE_IPV4_OFOS 14 */
971 0x00, 0x00, 0x40, 0x00,
972 0x40, 0x11, 0x00, 0x00,
973 0x00, 0x00, 0x00, 0x00,
974 0x00, 0x00, 0x00, 0x00,
976 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
977 0x00, 0x00, 0x00, 0x00,
979 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
980 0x00, 0x00, 0x00, 0x00,
981 0x00, 0x00, 0x00, 0x85,
983 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
984 0x00, 0x00, 0x00, 0x00,
986 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 62 */
987 0x00, 0x00, 0x3b, 0x00,
988 0x00, 0x00, 0x00, 0x00,
989 0x00, 0x00, 0x00, 0x00,
990 0x00, 0x00, 0x00, 0x00,
991 0x00, 0x00, 0x00, 0x00,
992 0x00, 0x00, 0x00, 0x00,
993 0x00, 0x00, 0x00, 0x00,
994 0x00, 0x00, 0x00, 0x00,
995 0x00, 0x00, 0x00, 0x00,
1001 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_packet_offsets[] = {
1002 { ICE_MAC_OFOS, 0 },
1003 { ICE_IPV6_OFOS, 14 },
1006 { ICE_IPV4_IL, 82 },
1007 { ICE_PROTOCOL_LAST, 0 },
1010 static const u8 dummy_ipv6_gtpu_ipv4_packet[] = {
1011 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1012 0x00, 0x00, 0x00, 0x00,
1013 0x00, 0x00, 0x00, 0x00,
1016 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1017 0x00, 0x58, 0x11, 0x00, /* Next header UDP*/
1018 0x00, 0x00, 0x00, 0x00,
1019 0x00, 0x00, 0x00, 0x00,
1020 0x00, 0x00, 0x00, 0x00,
1021 0x00, 0x00, 0x00, 0x00,
1022 0x00, 0x00, 0x00, 0x00,
1023 0x00, 0x00, 0x00, 0x00,
1024 0x00, 0x00, 0x00, 0x00,
1025 0x00, 0x00, 0x00, 0x00,
1027 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1028 0x00, 0x00, 0x00, 0x00,
1030 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 62 */
1031 0x00, 0x00, 0x00, 0x00,
1032 0x00, 0x00, 0x00, 0x85,
1034 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1035 0x00, 0x00, 0x00, 0x00,
1037 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 82 */
1038 0x00, 0x00, 0x40, 0x00,
1039 0x40, 0x00, 0x00, 0x00,
1040 0x00, 0x00, 0x00, 0x00,
1041 0x00, 0x00, 0x00, 0x00,
1047 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_packet_offsets[] = {
1048 { ICE_MAC_OFOS, 0 },
1049 { ICE_IPV6_OFOS, 14 },
1052 { ICE_IPV6_IL, 82 },
1053 { ICE_PROTOCOL_LAST, 0 },
1056 static const u8 dummy_ipv6_gtpu_ipv6_packet[] = {
1057 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1058 0x00, 0x00, 0x00, 0x00,
1059 0x00, 0x00, 0x00, 0x00,
1062 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1063 0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
1064 0x00, 0x00, 0x00, 0x00,
1065 0x00, 0x00, 0x00, 0x00,
1066 0x00, 0x00, 0x00, 0x00,
1067 0x00, 0x00, 0x00, 0x00,
1068 0x00, 0x00, 0x00, 0x00,
1069 0x00, 0x00, 0x00, 0x00,
1070 0x00, 0x00, 0x00, 0x00,
1071 0x00, 0x00, 0x00, 0x00,
1073 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1074 0x00, 0x00, 0x00, 0x00,
1076 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 62 */
1077 0x00, 0x00, 0x00, 0x00,
1078 0x00, 0x00, 0x00, 0x85,
1080 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1081 0x00, 0x00, 0x00, 0x00,
1083 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFIL 82 */
1084 0x00, 0x00, 0x3b, 0x00,
1085 0x00, 0x00, 0x00, 0x00,
1086 0x00, 0x00, 0x00, 0x00,
1087 0x00, 0x00, 0x00, 0x00,
1088 0x00, 0x00, 0x00, 0x00,
1089 0x00, 0x00, 0x00, 0x00,
1090 0x00, 0x00, 0x00, 0x00,
1091 0x00, 0x00, 0x00, 0x00,
1092 0x00, 0x00, 0x00, 0x00,
1097 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
1098 { ICE_MAC_OFOS, 0 },
1099 { ICE_IPV4_OFOS, 14 },
1102 { ICE_PROTOCOL_LAST, 0 },
1105 static const u8 dummy_udp_gtp_packet[] = {
1106 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1107 0x00, 0x00, 0x00, 0x00,
1108 0x00, 0x00, 0x00, 0x00,
1111 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
1112 0x00, 0x00, 0x00, 0x00,
1113 0x00, 0x11, 0x00, 0x00,
1114 0x00, 0x00, 0x00, 0x00,
1115 0x00, 0x00, 0x00, 0x00,
1117 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
1118 0x00, 0x1c, 0x00, 0x00,
1120 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
1121 0x00, 0x00, 0x00, 0x00,
1122 0x00, 0x00, 0x00, 0x85,
1124 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1125 0x00, 0x00, 0x00, 0x00,
1128 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
1129 { ICE_MAC_OFOS, 0 },
1130 { ICE_IPV4_OFOS, 14 },
1132 { ICE_GTP_NO_PAY, 42 },
1133 { ICE_PROTOCOL_LAST, 0 },
1137 struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
1138 { ICE_MAC_OFOS, 0 },
1139 { ICE_IPV6_OFOS, 14 },
1141 { ICE_GTP_NO_PAY, 62 },
1142 { ICE_PROTOCOL_LAST, 0 },
1145 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
1146 { ICE_MAC_OFOS, 0 },
1147 { ICE_ETYPE_OL, 12 },
1148 { ICE_VLAN_OFOS, 14},
1150 { ICE_PROTOCOL_LAST, 0 },
1153 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv4_offsets[] = {
1154 { ICE_MAC_OFOS, 0 },
1155 { ICE_ETYPE_OL, 12 },
1156 { ICE_VLAN_OFOS, 14},
1158 { ICE_IPV4_OFOS, 26 },
1159 { ICE_PROTOCOL_LAST, 0 },
1162 static const u8 dummy_pppoe_ipv4_packet[] = {
1163 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1164 0x00, 0x00, 0x00, 0x00,
1165 0x00, 0x00, 0x00, 0x00,
1167 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1169 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1171 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1174 0x00, 0x21, /* PPP Link Layer 24 */
1176 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
1177 0x00, 0x00, 0x00, 0x00,
1178 0x00, 0x00, 0x00, 0x00,
1179 0x00, 0x00, 0x00, 0x00,
1180 0x00, 0x00, 0x00, 0x00,
1182 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1186 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_tcp_packet_offsets[] = {
1187 { ICE_MAC_OFOS, 0 },
1188 { ICE_ETYPE_OL, 12 },
1189 { ICE_VLAN_OFOS, 14},
1191 { ICE_IPV4_OFOS, 26 },
1193 { ICE_PROTOCOL_LAST, 0 },
1196 static const u8 dummy_pppoe_ipv4_tcp_packet[] = {
1197 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1198 0x00, 0x00, 0x00, 0x00,
1199 0x00, 0x00, 0x00, 0x00,
1201 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1203 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1205 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1208 0x00, 0x21, /* PPP Link Layer 24 */
1210 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 26 */
1211 0x00, 0x01, 0x00, 0x00,
1212 0x00, 0x06, 0x00, 0x00,
1213 0x00, 0x00, 0x00, 0x00,
1214 0x00, 0x00, 0x00, 0x00,
1216 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 46 */
1217 0x00, 0x00, 0x00, 0x00,
1218 0x00, 0x00, 0x00, 0x00,
1219 0x50, 0x00, 0x00, 0x00,
1220 0x00, 0x00, 0x00, 0x00,
1222 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1226 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_udp_packet_offsets[] = {
1227 { ICE_MAC_OFOS, 0 },
1228 { ICE_ETYPE_OL, 12 },
1229 { ICE_VLAN_OFOS, 14},
1231 { ICE_IPV4_OFOS, 26 },
1232 { ICE_UDP_ILOS, 46 },
1233 { ICE_PROTOCOL_LAST, 0 },
1236 static const u8 dummy_pppoe_ipv4_udp_packet[] = {
1237 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1238 0x00, 0x00, 0x00, 0x00,
1239 0x00, 0x00, 0x00, 0x00,
1241 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1243 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1245 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1248 0x00, 0x21, /* PPP Link Layer 24 */
1250 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 26 */
1251 0x00, 0x01, 0x00, 0x00,
1252 0x00, 0x11, 0x00, 0x00,
1253 0x00, 0x00, 0x00, 0x00,
1254 0x00, 0x00, 0x00, 0x00,
1256 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 46 */
1257 0x00, 0x08, 0x00, 0x00,
1259 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1262 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_offsets[] = {
1263 { ICE_MAC_OFOS, 0 },
1264 { ICE_ETYPE_OL, 12 },
1265 { ICE_VLAN_OFOS, 14},
1267 { ICE_IPV6_OFOS, 26 },
1268 { ICE_PROTOCOL_LAST, 0 },
1271 static const u8 dummy_pppoe_ipv6_packet[] = {
1272 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1273 0x00, 0x00, 0x00, 0x00,
1274 0x00, 0x00, 0x00, 0x00,
1276 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1278 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1280 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1283 0x00, 0x57, /* PPP Link Layer 24 */
1285 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
1286 0x00, 0x00, 0x3b, 0x00,
1287 0x00, 0x00, 0x00, 0x00,
1288 0x00, 0x00, 0x00, 0x00,
1289 0x00, 0x00, 0x00, 0x00,
1290 0x00, 0x00, 0x00, 0x00,
1291 0x00, 0x00, 0x00, 0x00,
1292 0x00, 0x00, 0x00, 0x00,
1293 0x00, 0x00, 0x00, 0x00,
1294 0x00, 0x00, 0x00, 0x00,
1296 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* PPPoE + IPv6 + TCP dummy packet. Offset table lacks an ICE_TCP_IL entry
 * in this listing — presumably elided (the packet bytes carry a TCP header
 * at offset 66); TODO confirm against the unelided source.
 */
1300 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_tcp_offsets[] = {
1301 { ICE_MAC_OFOS, 0 },
1302 { ICE_ETYPE_OL, 12 },
1303 { ICE_VLAN_OFOS, 14},
1305 { ICE_IPV6_OFOS, 26 },
1307 { ICE_PROTOCOL_LAST, 0 },
1310 static const u8 dummy_pppoe_ipv6_tcp_packet[] = {
1311 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1312 0x00, 0x00, 0x00, 0x00,
1313 0x00, 0x00, 0x00, 0x00,
1315 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1317 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1319 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1322 0x00, 0x57, /* PPP Link Layer 24 */
1324 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
1325 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
1326 0x00, 0x00, 0x00, 0x00,
1327 0x00, 0x00, 0x00, 0x00,
1328 0x00, 0x00, 0x00, 0x00,
1329 0x00, 0x00, 0x00, 0x00,
1330 0x00, 0x00, 0x00, 0x00,
1331 0x00, 0x00, 0x00, 0x00,
1332 0x00, 0x00, 0x00, 0x00,
1333 0x00, 0x00, 0x00, 0x00,
1335 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 66 */
1336 0x00, 0x00, 0x00, 0x00,
1337 0x00, 0x00, 0x00, 0x00,
1338 0x50, 0x00, 0x00, 0x00,
1339 0x00, 0x00, 0x00, 0x00,
1341 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* PPPoE + IPv6 + UDP dummy packet; IPv6 next-header 0x11 (UDP),
 * UDP header at byte offset 66.
 */
1345 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_udp_offsets[] = {
1346 { ICE_MAC_OFOS, 0 },
1347 { ICE_ETYPE_OL, 12 },
1348 { ICE_VLAN_OFOS, 14},
1350 { ICE_IPV6_OFOS, 26 },
1351 { ICE_UDP_ILOS, 66 },
1352 { ICE_PROTOCOL_LAST, 0 },
1355 static const u8 dummy_pppoe_ipv6_udp_packet[] = {
1356 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1357 0x00, 0x00, 0x00, 0x00,
1358 0x00, 0x00, 0x00, 0x00,
1360 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1362 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1364 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1367 0x00, 0x57, /* PPP Link Layer 24 */
1369 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
1370 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
1371 0x00, 0x00, 0x00, 0x00,
1372 0x00, 0x00, 0x00, 0x00,
1373 0x00, 0x00, 0x00, 0x00,
1374 0x00, 0x00, 0x00, 0x00,
1375 0x00, 0x00, 0x00, 0x00,
1376 0x00, 0x00, 0x00, 0x00,
1377 0x00, 0x00, 0x00, 0x00,
1378 0x00, 0x00, 0x00, 0x00,
1380 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 66 */
1381 0x00, 0x08, 0x00, 0x00,
1383 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* IPv4 + ESP dummy packet (IP protocol 0x32 = ESP, header at offset 34). */
1386 static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
1387 { ICE_MAC_OFOS, 0 },
1388 { ICE_IPV4_OFOS, 14 },
1390 { ICE_PROTOCOL_LAST, 0 },
1393 static const u8 dummy_ipv4_esp_pkt[] = {
1394 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1395 0x00, 0x00, 0x00, 0x00,
1396 0x00, 0x00, 0x00, 0x00,
1399 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 14 */
1400 0x00, 0x00, 0x40, 0x00,
1401 0x40, 0x32, 0x00, 0x00,
1402 0x00, 0x00, 0x00, 0x00,
1403 0x00, 0x00, 0x00, 0x00,
1405 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
1406 0x00, 0x00, 0x00, 0x00,
1407 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* IPv6 + ESP dummy packet (next-header 0x32 = ESP, header at offset 54). */
1410 static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
1411 { ICE_MAC_OFOS, 0 },
1412 { ICE_IPV6_OFOS, 14 },
1414 { ICE_PROTOCOL_LAST, 0 },
1417 static const u8 dummy_ipv6_esp_pkt[] = {
1418 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1419 0x00, 0x00, 0x00, 0x00,
1420 0x00, 0x00, 0x00, 0x00,
1423 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1424 0x00, 0x08, 0x32, 0x00, /* Next header ESP */
1425 0x00, 0x00, 0x00, 0x00,
1426 0x00, 0x00, 0x00, 0x00,
1427 0x00, 0x00, 0x00, 0x00,
1428 0x00, 0x00, 0x00, 0x00,
1429 0x00, 0x00, 0x00, 0x00,
1430 0x00, 0x00, 0x00, 0x00,
1431 0x00, 0x00, 0x00, 0x00,
1432 0x00, 0x00, 0x00, 0x00,
1434 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
1435 0x00, 0x00, 0x00, 0x00,
1436 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* IPv4 + AH dummy packet (IP protocol 0x33 = AH, header at offset 34). */
1439 static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
1440 { ICE_MAC_OFOS, 0 },
1441 { ICE_IPV4_OFOS, 14 },
1443 { ICE_PROTOCOL_LAST, 0 },
1446 static const u8 dummy_ipv4_ah_pkt[] = {
1447 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1448 0x00, 0x00, 0x00, 0x00,
1449 0x00, 0x00, 0x00, 0x00,
1452 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1453 0x00, 0x00, 0x40, 0x00,
1454 0x40, 0x33, 0x00, 0x00,
1455 0x00, 0x00, 0x00, 0x00,
1456 0x00, 0x00, 0x00, 0x00,
1458 0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
1459 0x00, 0x00, 0x00, 0x00,
1460 0x00, 0x00, 0x00, 0x00,
1461 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* IPv6 + AH dummy packet (next-header 0x33 = AH, header at offset 54). */
1464 static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
1465 { ICE_MAC_OFOS, 0 },
1466 { ICE_IPV6_OFOS, 14 },
1468 { ICE_PROTOCOL_LAST, 0 },
1471 static const u8 dummy_ipv6_ah_pkt[] = {
1472 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1473 0x00, 0x00, 0x00, 0x00,
1474 0x00, 0x00, 0x00, 0x00,
1477 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1478 0x00, 0x0c, 0x33, 0x00, /* Next header AH */
1479 0x00, 0x00, 0x00, 0x00,
1480 0x00, 0x00, 0x00, 0x00,
1481 0x00, 0x00, 0x00, 0x00,
1482 0x00, 0x00, 0x00, 0x00,
1483 0x00, 0x00, 0x00, 0x00,
1484 0x00, 0x00, 0x00, 0x00,
1485 0x00, 0x00, 0x00, 0x00,
1486 0x00, 0x00, 0x00, 0x00,
1488 0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
1489 0x00, 0x00, 0x00, 0x00,
1490 0x00, 0x00, 0x00, 0x00,
1491 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* IPv4 NAT-traversal (ESP-in-UDP) dummy packet: UDP dest port 0x1194 = 4500. */
1494 static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
1495 { ICE_MAC_OFOS, 0 },
1496 { ICE_IPV4_OFOS, 14 },
1497 { ICE_UDP_ILOS, 34 },
1499 { ICE_PROTOCOL_LAST, 0 },
1502 static const u8 dummy_ipv4_nat_pkt[] = {
1503 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1504 0x00, 0x00, 0x00, 0x00,
1505 0x00, 0x00, 0x00, 0x00,
1508 0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_IL 14 */
1509 0x00, 0x00, 0x40, 0x00,
1510 0x40, 0x11, 0x00, 0x00,
1511 0x00, 0x00, 0x00, 0x00,
1512 0x00, 0x00, 0x00, 0x00,
1514 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34 */
1515 0x00, 0x00, 0x00, 0x00,
1517 0x00, 0x00, 0x00, 0x00,
1518 0x00, 0x00, 0x00, 0x00,
1519 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* IPv6 NAT-traversal (ESP-in-UDP) dummy packet: UDP dest port 0x1194 = 4500. */
1522 static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
1523 { ICE_MAC_OFOS, 0 },
1524 { ICE_IPV6_OFOS, 14 },
1525 { ICE_UDP_ILOS, 54 },
1527 { ICE_PROTOCOL_LAST, 0 },
1530 static const u8 dummy_ipv6_nat_pkt[] = {
1531 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1532 0x00, 0x00, 0x00, 0x00,
1533 0x00, 0x00, 0x00, 0x00,
1536 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1537 0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */
1538 0x00, 0x00, 0x00, 0x00,
1539 0x00, 0x00, 0x00, 0x00,
1540 0x00, 0x00, 0x00, 0x00,
1541 0x00, 0x00, 0x00, 0x00,
1542 0x00, 0x00, 0x00, 0x00,
1543 0x00, 0x00, 0x00, 0x00,
1544 0x00, 0x00, 0x00, 0x00,
1545 0x00, 0x00, 0x00, 0x00,
1547 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54 */
1548 0x00, 0x00, 0x00, 0x00,
1550 0x00, 0x00, 0x00, 0x00,
1551 0x00, 0x00, 0x00, 0x00,
1552 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* IPv4 + L2TPv3-over-IP dummy packet (IP protocol 0x73 = L2TPv3). */
1556 static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
1557 { ICE_MAC_OFOS, 0 },
1558 { ICE_IPV4_OFOS, 14 },
1560 { ICE_PROTOCOL_LAST, 0 },
1563 static const u8 dummy_ipv4_l2tpv3_pkt[] = {
1564 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1565 0x00, 0x00, 0x00, 0x00,
1566 0x00, 0x00, 0x00, 0x00,
1569 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1570 0x00, 0x00, 0x40, 0x00,
1571 0x40, 0x73, 0x00, 0x00,
1572 0x00, 0x00, 0x00, 0x00,
1573 0x00, 0x00, 0x00, 0x00,
1575 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
1576 0x00, 0x00, 0x00, 0x00,
1577 0x00, 0x00, 0x00, 0x00,
1578 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* IPv6 + L2TPv3-over-IP dummy packet (next-header 0x73 = L2TPv3). */
1581 static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
1582 { ICE_MAC_OFOS, 0 },
1583 { ICE_IPV6_OFOS, 14 },
1585 { ICE_PROTOCOL_LAST, 0 },
1588 static const u8 dummy_ipv6_l2tpv3_pkt[] = {
1589 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1590 0x00, 0x00, 0x00, 0x00,
1591 0x00, 0x00, 0x00, 0x00,
1594 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
1595 0x00, 0x0c, 0x73, 0x40,
1596 0x00, 0x00, 0x00, 0x00,
1597 0x00, 0x00, 0x00, 0x00,
1598 0x00, 0x00, 0x00, 0x00,
1599 0x00, 0x00, 0x00, 0x00,
1600 0x00, 0x00, 0x00, 0x00,
1601 0x00, 0x00, 0x00, 0x00,
1602 0x00, 0x00, 0x00, 0x00,
1603 0x00, 0x00, 0x00, 0x00,
1605 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
1606 0x00, 0x00, 0x00, 0x00,
1607 0x00, 0x00, 0x00, 0x00,
1608 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Double-VLAN (QinQ, outer TPID 0x9100) + IPv4 + UDP dummy packet. */
1611 static const struct ice_dummy_pkt_offsets dummy_qinq_ipv4_packet_offsets[] = {
1612 { ICE_MAC_OFOS, 0 },
1613 { ICE_ETYPE_OL, 12 },
1614 { ICE_VLAN_EX, 14 },
1615 { ICE_VLAN_OFOS, 18 },
1616 { ICE_IPV4_OFOS, 22 },
1617 { ICE_PROTOCOL_LAST, 0 },
1620 static const u8 dummy_qinq_ipv4_pkt[] = {
1621 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1622 0x00, 0x00, 0x00, 0x00,
1623 0x00, 0x00, 0x00, 0x00,
1625 0x91, 0x00, /* ICE_ETYPE_OL 12 */
1627 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1628 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 18 */
1630 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
1631 0x00, 0x01, 0x00, 0x00,
1632 0x00, 0x11, 0x00, 0x00,
1633 0x00, 0x00, 0x00, 0x00,
1634 0x00, 0x00, 0x00, 0x00,
1636 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
1637 0x00, 0x08, 0x00, 0x00,
1639 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Double-VLAN (QinQ) + IPv6 + UDP dummy packet; trailing 8 zero bytes are
 * kept so the same template can back ESP-style lookups (per inline comment).
 */
1642 static const struct ice_dummy_pkt_offsets dummy_qinq_ipv6_packet_offsets[] = {
1643 { ICE_MAC_OFOS, 0 },
1644 { ICE_ETYPE_OL, 12 },
1645 { ICE_VLAN_EX, 14 },
1646 { ICE_VLAN_OFOS, 18 },
1647 { ICE_IPV6_OFOS, 22 },
1648 { ICE_PROTOCOL_LAST, 0 },
1651 static const u8 dummy_qinq_ipv6_pkt[] = {
1652 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1653 0x00, 0x00, 0x00, 0x00,
1654 0x00, 0x00, 0x00, 0x00,
1656 0x91, 0x00, /* ICE_ETYPE_OL 12 */
1658 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1659 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 18 */
1661 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
1662 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
1663 0x00, 0x00, 0x00, 0x00,
1664 0x00, 0x00, 0x00, 0x00,
1665 0x00, 0x00, 0x00, 0x00,
1666 0x00, 0x00, 0x00, 0x00,
1667 0x00, 0x00, 0x00, 0x00,
1668 0x00, 0x00, 0x00, 0x00,
1669 0x00, 0x00, 0x00, 0x00,
1670 0x00, 0x00, 0x00, 0x00,
1672 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
1673 0x00, 0x10, 0x00, 0x00,
1675 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
1676 0x00, 0x00, 0x00, 0x00,
1678 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Offset tables for QinQ + PPPoE dummy packets: one ending at the PPPoE
 * header, one continuing to an inner IPv4 header at offset 30.
 */
1681 static const struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_offsets[] = {
1682 { ICE_MAC_OFOS, 0 },
1683 { ICE_ETYPE_OL, 12 },
1684 { ICE_VLAN_EX, 14 },
1685 { ICE_VLAN_OFOS, 18 },
1687 { ICE_PROTOCOL_LAST, 0 },
1691 struct ice_dummy_pkt_offsets dummy_qinq_pppoe_ipv4_packet_offsets[] = {
1692 { ICE_MAC_OFOS, 0 },
1693 { ICE_ETYPE_OL, 12 },
1694 { ICE_VLAN_EX, 14 },
1695 { ICE_VLAN_OFOS, 18 },
1697 { ICE_IPV4_OFOS, 30 },
1698 { ICE_PROTOCOL_LAST, 0 },
/* QinQ + PPPoE + IPv4 dummy packet bytes (PPP protocol 0x0021 = IPv4). */
1701 static const u8 dummy_qinq_pppoe_ipv4_pkt[] = {
1702 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1703 0x00, 0x00, 0x00, 0x00,
1704 0x00, 0x00, 0x00, 0x00,
1706 0x91, 0x00, /* ICE_ETYPE_OL 12 */
1708 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1709 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 18 */
1711 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
1714 0x00, 0x21, /* PPP Link Layer 28 */
1716 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 30 */
1717 0x00, 0x00, 0x00, 0x00,
1718 0x00, 0x00, 0x00, 0x00,
1719 0x00, 0x00, 0x00, 0x00,
1720 0x00, 0x00, 0x00, 0x00,
1722 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* QinQ + PPPoE + IPv6 dummy packet: offset table and packet bytes
 * (PPP protocol 0x0057 = IPv6; next-header 0x3b = no-next).
 * NOTE(review): the ICE_VLAN_EX entry visible in the packet bytes is not in
 * the offset table in this listing — presumably elided; confirm upstream.
 */
1726 struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_ipv6_offsets[] = {
1727 { ICE_MAC_OFOS, 0 },
1728 { ICE_ETYPE_OL, 12 },
1730 { ICE_VLAN_OFOS, 18 },
1732 { ICE_IPV6_OFOS, 30 },
1733 { ICE_PROTOCOL_LAST, 0 },
1736 static const u8 dummy_qinq_pppoe_ipv6_packet[] = {
1737 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1738 0x00, 0x00, 0x00, 0x00,
1739 0x00, 0x00, 0x00, 0x00,
1741 0x91, 0x00, /* ICE_ETYPE_OL 12 */
1743 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1744 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 18 */
1746 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
1749 0x00, 0x57, /* PPP Link Layer 28*/
1751 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 30 */
1752 0x00, 0x00, 0x3b, 0x00,
1753 0x00, 0x00, 0x00, 0x00,
1754 0x00, 0x00, 0x00, 0x00,
1755 0x00, 0x00, 0x00, 0x00,
1756 0x00, 0x00, 0x00, 0x00,
1757 0x00, 0x00, 0x00, 0x00,
1758 0x00, 0x00, 0x00, 0x00,
1759 0x00, 0x00, 0x00, 0x00,
1760 0x00, 0x00, 0x00, 0x00,
1762 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* File-scope association bitmaps kept in both directions so lookups are O(1)
 * either way, plus a forward declaration for the refresh helper defined below.
 */
1765 /* this is a recipe to profile association bitmap */
1766 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
1767 ICE_MAX_NUM_PROFILES);
1769 /* this is a profile to recipe association bitmap */
1770 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
1771 ICE_MAX_NUM_RECIPES);
1773 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
/* If the element's RESULT_EN flag is set, record its result index (with the
 * enable bit masked off) in the recipe's result-index bitmap.
 */
1776 * ice_collect_result_idx - copy result index values
1777 * @buf: buffer that contains the result index
1778 * @recp: the recipe struct to copy data into
1780 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
1781 struct ice_sw_recipe *recp)
1783 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1784 ice_set_bit(buf->content.result_indx &
1785 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
/* Derive the switch tunnel type for a recipe from which profiles it is
 * associated with (via the recipe_to_profile bitmap), then refine the result
 * for PPPoE/GTP sub-cases and QinQ (vlan == true).
 * NOTE(review): this listing has elided lines (loop bodies, break statements,
 * closing braces); the logic described here is inferred from the visible
 * lines only — confirm details against the unelided source.
 */
1789 * ice_get_tun_type_for_recipe - get tunnel type for the recipe
1790 * @rid: recipe ID that we are populating
1792 static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid, bool vlan)
/* Hard-coded profile-ID groups used to classify the recipe. */
1794 u8 vxlan_profile[12] = {10, 11, 12, 16, 17, 18, 22, 23, 24, 25, 26, 27};
1795 u8 gre_profile[12] = {13, 14, 15, 19, 20, 21, 28, 29, 30, 31, 32, 33};
1796 u8 pppoe_profile[7] = {34, 35, 36, 37, 38, 39, 40};
1797 u8 non_tun_profile[6] = {4, 5, 6, 7, 8, 9};
1798 enum ice_sw_tunnel_type tun_type;
1799 u16 i, j, profile_num = 0;
1800 bool non_tun_valid = false;
1801 bool pppoe_valid = false;
1802 bool vxlan_valid = false;
1803 bool gre_valid = false;
1804 bool gtp_valid = false;
1805 bool flag_valid = false;
/* Scan every profile associated with this recipe and mark which
 * classification groups it falls into.
 */
1807 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1808 if (!ice_is_bit_set(recipe_to_profile[rid], j))
1813 for (i = 0; i < 12; i++) {
1814 if (gre_profile[i] == j)
1818 for (i = 0; i < 12; i++) {
1819 if (vxlan_profile[i] == j)
1823 for (i = 0; i < 7; i++) {
1824 if (pppoe_profile[i] == j)
1828 for (i = 0; i < 6; i++) {
1829 if (non_tun_profile[i] == j)
1830 non_tun_valid = true;
1833 if (j >= ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER &&
1834 j <= ICE_PROFID_IPV6_GTPU_IPV6_TCP)
1837 if ((j >= ICE_PROFID_IPV4_ESP &&
1838 j <= ICE_PROFID_IPV6_PFCP_SESSION) ||
1839 (j >= ICE_PROFID_IPV4_GTPC_TEID &&
1840 j <= ICE_PROFID_IPV6_GTPU_TEID))
/* Pick the coarse tunnel type from the combination of groups seen. */
1844 if (!non_tun_valid && vxlan_valid)
1845 tun_type = ICE_SW_TUN_VXLAN;
1846 else if (!non_tun_valid && gre_valid)
1847 tun_type = ICE_SW_TUN_NVGRE;
1848 else if (!non_tun_valid && pppoe_valid)
1849 tun_type = ICE_SW_TUN_PPPOE;
1850 else if (!non_tun_valid && gtp_valid)
1851 tun_type = ICE_SW_TUN_GTP;
1852 else if (non_tun_valid &&
1853 (vxlan_valid || gre_valid || gtp_valid || pppoe_valid))
1854 tun_type = ICE_SW_TUN_AND_NON_TUN;
1855 else if (non_tun_valid && !vxlan_valid && !gre_valid && !gtp_valid &&
1857 tun_type = ICE_NON_TUN;
1859 tun_type = ICE_NON_TUN;
/* Multi-profile PPPoE recipes: narrow to IPv4/IPv6 PPPoE if only one
 * address family's "other" profile is present.
 */
1861 if (profile_num > 1 && tun_type == ICE_SW_TUN_PPPOE) {
1862 i = ice_is_bit_set(recipe_to_profile[rid],
1863 ICE_PROFID_PPPOE_IPV4_OTHER);
1864 j = ice_is_bit_set(recipe_to_profile[rid],
1865 ICE_PROFID_PPPOE_IPV6_OTHER);
1867 tun_type = ICE_SW_TUN_PPPOE_IPV4;
1869 tun_type = ICE_SW_TUN_PPPOE_IPV6;
/* Narrow a generic GTP result to the outer/inner IP combination. */
1872 if (tun_type == ICE_SW_TUN_GTP) {
1873 if (ice_is_bit_set(recipe_to_profile[rid],
1874 ICE_PROFID_IPV4_GTPU_IPV4_OTHER))
1875 tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4;
1876 else if (ice_is_bit_set(recipe_to_profile[rid],
1877 ICE_PROFID_IPV4_GTPU_IPV6_OTHER))
1878 tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6;
1879 else if (ice_is_bit_set(recipe_to_profile[rid],
1880 ICE_PROFID_IPV6_GTPU_IPV4_OTHER))
1881 tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4;
1882 else if (ice_is_bit_set(recipe_to_profile[rid],
1883 ICE_PROFID_IPV6_GTPU_IPV6_OTHER))
1884 tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6;
/* Single-profile recipes map 1:1 to a specific tunnel type. */
1887 if (profile_num == 1 && (flag_valid || non_tun_valid || pppoe_valid)) {
1888 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1889 if (ice_is_bit_set(recipe_to_profile[rid], j)) {
1891 case ICE_PROFID_IPV4_TCP:
1892 tun_type = ICE_SW_IPV4_TCP;
1894 case ICE_PROFID_IPV4_UDP:
1895 tun_type = ICE_SW_IPV4_UDP;
1897 case ICE_PROFID_IPV6_TCP:
1898 tun_type = ICE_SW_IPV6_TCP;
1900 case ICE_PROFID_IPV6_UDP:
1901 tun_type = ICE_SW_IPV6_UDP;
1903 case ICE_PROFID_PPPOE_PAY:
1904 tun_type = ICE_SW_TUN_PPPOE_PAY;
1906 case ICE_PROFID_PPPOE_IPV4_TCP:
1907 tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1909 case ICE_PROFID_PPPOE_IPV4_UDP:
1910 tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1912 case ICE_PROFID_PPPOE_IPV4_OTHER:
1913 tun_type = ICE_SW_TUN_PPPOE_IPV4;
1915 case ICE_PROFID_PPPOE_IPV6_TCP:
1916 tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1918 case ICE_PROFID_PPPOE_IPV6_UDP:
1919 tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1921 case ICE_PROFID_PPPOE_IPV6_OTHER:
1922 tun_type = ICE_SW_TUN_PPPOE_IPV6;
1924 case ICE_PROFID_IPV4_ESP:
1925 tun_type = ICE_SW_TUN_IPV4_ESP;
1927 case ICE_PROFID_IPV6_ESP:
1928 tun_type = ICE_SW_TUN_IPV6_ESP;
1930 case ICE_PROFID_IPV4_AH:
1931 tun_type = ICE_SW_TUN_IPV4_AH;
1933 case ICE_PROFID_IPV6_AH:
1934 tun_type = ICE_SW_TUN_IPV6_AH;
1936 case ICE_PROFID_IPV4_NAT_T:
1937 tun_type = ICE_SW_TUN_IPV4_NAT_T;
1939 case ICE_PROFID_IPV6_NAT_T:
1940 tun_type = ICE_SW_TUN_IPV6_NAT_T;
1942 case ICE_PROFID_IPV4_PFCP_NODE:
1944 ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1946 case ICE_PROFID_IPV6_PFCP_NODE:
1948 ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1950 case ICE_PROFID_IPV4_PFCP_SESSION:
1952 ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1954 case ICE_PROFID_IPV6_PFCP_SESSION:
1956 ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1958 case ICE_PROFID_MAC_IPV4_L2TPV3:
1959 tun_type = ICE_SW_TUN_IPV4_L2TPV3;
1961 case ICE_PROFID_MAC_IPV6_L2TPV3:
1962 tun_type = ICE_SW_TUN_IPV6_L2TPV3;
1964 case ICE_PROFID_IPV4_GTPU_TEID:
1965 tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1967 case ICE_PROFID_IPV6_GTPU_TEID:
1968 tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
/* Finally, switch to the QinQ variant when the recipe matched a VLAN. */
1979 if (vlan && tun_type == ICE_SW_TUN_PPPOE)
1980 tun_type = ICE_SW_TUN_PPPOE_QINQ;
1981 else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV6)
1982 tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
1983 else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV4)
1984 tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
1985 else if (vlan && tun_type == ICE_SW_TUN_PPPOE_PAY)
1986 tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
1987 else if (vlan && tun_type == ICE_SW_TUN_AND_NON_TUN)
1988 tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1989 else if (vlan && tun_type == ICE_NON_TUN)
1990 tun_type = ICE_NON_TUN_QINQ;
/* Read the recipe (and its chained sub-recipes) back from firmware and
 * rebuild the driver's bookkeeping: lookup words, result-index bitmaps,
 * root/priority info and the cached root buffer.
 * NOTE(review): elided listing — error-path labels, some locals and closing
 * braces are missing from view; comments below describe only visible lines.
 */
1996 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
1997 * @hw: pointer to hardware structure
1998 * @recps: struct that we need to populate
1999 * @rid: recipe ID that we are populating
2000 * @refresh_required: true if we should get recipe to profile mapping from FW
2002 * This function is used to populate all the necessary entries into our
2003 * bookkeeping so that we have a current list of all the recipes that are
2004 * programmed in the firmware.
2006 static enum ice_status
2007 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
2008 bool *refresh_required)
2010 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
2011 struct ice_aqc_recipe_data_elem *tmp;
2012 u16 num_recps = ICE_MAX_NUM_RECIPES;
2013 struct ice_prot_lkup_ext *lkup_exts;
2014 enum ice_status status;
2019 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
2021 /* we need a buffer big enough to accommodate all the recipes */
2022 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
2023 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
2025 return ICE_ERR_NO_MEMORY;
2027 tmp[0].recipe_indx = rid;
2028 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
2029 /* non-zero status meaning recipe doesn't exist */
2033 /* Get recipe to profile map so that we can get the fv from lkups that
2034 * we read for a recipe from FW. Since we want to minimize the number of
2035 * times we make this FW call, just make one call and cache the copy
2036 * until a new recipe is added. This operation is only required the
2037 * first time to get the changes from FW. Then to search existing
2038 * entries we don't need to update the cache again until another recipe
2041 if (*refresh_required) {
2042 ice_get_recp_to_prof_map(hw);
2043 *refresh_required = false;
2046 /* Start populating all the entries for recps[rid] based on lkups from
2047 * firmware. Note that we are only creating the root recipe in our
2050 lkup_exts = &recps[rid].lkup_exts;
2052 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
2053 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
2054 struct ice_recp_grp_entry *rg_entry;
2055 u8 i, prof, idx, prot = 0;
2059 rg_entry = (struct ice_recp_grp_entry *)
2060 ice_malloc(hw, sizeof(*rg_entry));
2062 status = ICE_ERR_NO_MEMORY;
2066 idx = root_bufs.recipe_indx;
2067 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
2069 /* Mark all result indices in this chain */
2070 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
2071 ice_set_bit(root_bufs.content.result_indx &
2072 ~ICE_AQ_RECIPE_RESULT_EN, result_bm)
2074 /* get the first profile that is associated with rid */
2075 prof = ice_find_first_bit(recipe_to_profile[idx],
2076 ICE_MAX_NUM_PROFILES);
2077 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
2078 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
2080 rg_entry->fv_idx[i] = lkup_indx;
2081 rg_entry->fv_mask[i] =
2082 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
2084 /* If the recipe is a chained recipe then all its
2085 * child recipe's result will have a result index.
2086 * To fill fv_words we should not use those result
2087 * index, we only need the protocol ids and offsets.
2088 * We will skip all the fv_idx which stores result
2089 * index in them. We also need to skip any fv_idx which
2090 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
2091 * valid offset value.
2093 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
2094 rg_entry->fv_idx[i]) ||
2095 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
2096 rg_entry->fv_idx[i] == 0)
2099 ice_find_prot_off(hw, ICE_BLK_SW, prof,
2100 rg_entry->fv_idx[i], &prot, &off);
2101 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
2102 lkup_exts->fv_words[fv_word_idx].off = off;
2103 lkup_exts->field_mask[fv_word_idx] =
2104 rg_entry->fv_mask[i];
2105 if (prot == ICE_META_DATA_ID_HW &&
2106 off == ICE_TUN_FLAG_MDID_OFF)
2110 /* populate rg_list with the data from the child entry of this
2113 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
2115 /* Propagate some data to the recipe database */
2116 recps[idx].is_root = !!is_root;
2117 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2118 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
2119 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
2120 recps[idx].chain_idx = root_bufs.content.result_indx &
2121 ~ICE_AQ_RECIPE_RESULT_EN;
2122 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
2124 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
2130 /* Only do the following for root recipes entries */
2131 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
2132 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
2133 recps[idx].root_rid = root_bufs.content.rid &
2134 ~ICE_AQ_RECIPE_ID_IS_ROOT;
2135 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2138 /* Complete initialization of the root recipe entry */
2139 lkup_exts->n_val_words = fv_word_idx;
2140 recps[rid].big_recp = (num_recps > 1);
2141 recps[rid].n_grp_count = (u8)num_recps;
2142 recps[rid].tun_type = ice_get_tun_type_for_recipe(rid, vlan);
2143 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
2144 ice_memdup(hw, tmp, recps[rid].n_grp_count *
2145 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
2146 if (!recps[rid].root_buf)
2149 /* Copy result indexes */
2150 ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
2151 recps[rid].recp_created = true;
/* Refresh both direction bitmaps (profile_to_recipe and recipe_to_profile)
 * by querying firmware for each used profile index; FW query failures for a
 * profile leave that profile's row zeroed.
 */
2159 * ice_get_recp_to_prof_map - updates recipe to profile mapping
2160 * @hw: pointer to hardware structure
2162 * This function is used to populate recipe_to_profile matrix where index to
2163 * this array is the recipe ID and the element is the mapping of which profiles
2164 * is this recipe mapped to.
2166 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
2168 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
2171 for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
2174 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
2175 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
2176 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
2178 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
2179 ICE_MAX_NUM_RECIPES);
2180 ice_for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
2181 ice_set_bit(i, recipe_to_profile[j]);
/* Allocate the full ICE_MAX_NUM_RECIPES recipe table and initialize each
 * entry's lists and lock. NOTE(review): the tail of this function (storing
 * recps into *recp_list and the return) is elided from this listing.
 */
2186 * ice_init_def_sw_recp - initialize the recipe book keeping tables
2187 * @hw: pointer to the HW struct
2188 * @recp_list: pointer to sw recipe list
2190 * Allocate memory for the entire recipe table and initialize the structures/
2191 * entries corresponding to basic recipes.
2194 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
2196 struct ice_sw_recipe *recps;
2199 recps = (struct ice_sw_recipe *)
2200 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
2202 return ICE_ERR_NO_MEMORY;
2204 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
2205 recps[i].root_rid = i;
2206 INIT_LIST_HEAD(&recps[i].filt_rules);
2207 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
2208 INIT_LIST_HEAD(&recps[i].rg_list);
2209 ice_init_lock(&recps[i].filt_rule_lock);
/* Thin wrapper over admin-queue opcode 0x0200: fills the descriptor,
 * sends it, and echoes back the continuation token and element count.
 */
2218 * ice_aq_get_sw_cfg - get switch configuration
2219 * @hw: pointer to the hardware structure
2220 * @buf: pointer to the result buffer
2221 * @buf_size: length of the buffer available for response
2222 * @req_desc: pointer to requested descriptor
2223 * @num_elems: pointer to number of elements
2224 * @cd: pointer to command details structure or NULL
2226 * Get switch configuration (0x0200) to be placed in buf.
2227 * This admin command returns information such as initial VSI/port number
2228 * and switch ID it belongs to.
2230 * NOTE: *req_desc is both an input/output parameter.
2231 * The caller of this function first calls this function with *request_desc set
2232 * to 0. If the response from f/w has *req_desc set to 0, all the switch
2233 * configuration information has been returned; if non-zero (meaning not all
2234 * the information was returned), the caller should call this function again
2235 * with *req_desc set to the previous value returned by f/w to get the
2236 * next block of switch configuration information.
2238 * *num_elems is output only parameter. This reflects the number of elements
2239 * in response buffer. The caller of this function to use *num_elems while
2240 * parsing the response buffer.
2242 static enum ice_status
2243 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
2244 u16 buf_size, u16 *req_desc, u16 *num_elems,
2245 struct ice_sq_cd *cd)
2247 struct ice_aqc_get_sw_cfg *cmd;
2248 struct ice_aq_desc desc;
2249 enum ice_status status;
2251 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
2252 cmd = &desc.params.get_sw_conf;
2253 cmd->element = CPU_TO_LE16(*req_desc);
2255 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2257 *req_desc = LE16_TO_CPU(cmd->element);
2258 *num_elems = LE16_TO_CPU(cmd->num_elems);
/* Allocate one RSS global LUT resource via the alloc-resource AQ command;
 * on success the firmware-assigned ID is returned through *global_lut_id.
 * The goto label gives single-exit cleanup of the AQ buffer.
 */
2265 * ice_alloc_rss_global_lut - allocate a RSS global LUT
2266 * @hw: pointer to the HW struct
2267 * @shared_res: true to allocate as a shared resource and false to allocate as a dedicated resource
2268 * @global_lut_id: output parameter for the RSS global LUT's ID
2270 enum ice_status ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id)
2272 struct ice_aqc_alloc_free_res_elem *sw_buf;
2273 enum ice_status status;
2276 buf_len = ice_struct_size(sw_buf, elem, 1);
2277 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2279 return ICE_ERR_NO_MEMORY;
2281 sw_buf->num_elems = CPU_TO_LE16(1);
2282 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH |
2283 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
2284 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
2286 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, ice_aqc_opc_alloc_res, NULL);
2288 ice_debug(hw, ICE_DBG_RES, "Failed to allocate %s RSS global LUT, status %d\n",
2289 shared_res ? "shared" : "dedicated", status);
2290 goto ice_alloc_global_lut_exit;
2293 *global_lut_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2295 ice_alloc_global_lut_exit:
2296 ice_free(hw, sw_buf);
/* Free a previously allocated RSS global LUT by ID via the free-resource
 * AQ command; failures are only logged, and the buffer is always freed.
 */
2301 * ice_free_global_lut - free a RSS global LUT
2302 * @hw: pointer to the HW struct
2303 * @global_lut_id: ID of the RSS global LUT to free
2305 enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id)
2307 struct ice_aqc_alloc_free_res_elem *sw_buf;
2308 u16 buf_len, num_elems = 1;
2309 enum ice_status status;
2311 buf_len = ice_struct_size(sw_buf, elem, num_elems);
2312 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2314 return ICE_ERR_NO_MEMORY;
2316 sw_buf->num_elems = CPU_TO_LE16(num_elems);
2317 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH);
2318 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(global_lut_id);
2320 status = ice_aq_alloc_free_res(hw, num_elems, sw_buf, buf_len, ice_aqc_opc_free_res, NULL);
2322 ice_debug(hw, ICE_DBG_RES, "Failed to free RSS global LUT %d, status %d\n",
2323 global_lut_id, status);
2325 ice_free(hw, sw_buf);
/* Allocate a switch ID (and, when VEB stats are enabled, a dedicated VEB
 * counter) via two alloc-resource AQ calls; the counter buffer is freed on
 * both success and failure paths.
 */
2330 * ice_alloc_sw - allocate resources specific to switch
2331 * @hw: pointer to the HW struct
2332 * @ena_stats: true to turn on VEB stats
2333 * @shared_res: true for shared resource, false for dedicated resource
2334 * @sw_id: switch ID returned
2335 * @counter_id: VEB counter ID returned
2337 * allocates switch resources (SWID and VEB counter) (0x0208)
2340 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
2343 struct ice_aqc_alloc_free_res_elem *sw_buf;
2344 struct ice_aqc_res_elem *sw_ele;
2345 enum ice_status status;
2348 buf_len = ice_struct_size(sw_buf, elem, 1);
2349 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2351 return ICE_ERR_NO_MEMORY;
2353 /* Prepare buffer for switch ID.
2354 * The number of resource entries in buffer is passed as 1 since only a
2355 * single switch/VEB instance is allocated, and hence a single sw_id
2358 sw_buf->num_elems = CPU_TO_LE16(1);
2360 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
2361 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
2362 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
2364 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2365 ice_aqc_opc_alloc_res, NULL);
2368 goto ice_alloc_sw_exit;
2370 sw_ele = &sw_buf->elem[0];
2371 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
2374 /* Prepare buffer for VEB Counter */
2375 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
2376 struct ice_aqc_alloc_free_res_elem *counter_buf;
2377 struct ice_aqc_res_elem *counter_ele;
2379 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
2380 ice_malloc(hw, buf_len);
2382 status = ICE_ERR_NO_MEMORY;
2383 goto ice_alloc_sw_exit;
2386 /* The number of resource entries in buffer is passed as 1 since
2387 * only a single switch/VEB instance is allocated, and hence a
2388 * single VEB counter is requested.
2390 counter_buf->num_elems = CPU_TO_LE16(1);
2391 counter_buf->res_type =
2392 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
2393 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
2394 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
2398 ice_free(hw, counter_buf);
2399 goto ice_alloc_sw_exit;
2401 counter_ele = &counter_buf->elem[0];
2402 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
2403 ice_free(hw, counter_buf);
2407 ice_free(hw, sw_buf);
/* Free the SWID and the VEB counter in two free-resource AQ calls.
 * Deliberately best-effort: it keeps going after a failure and returns the
 * last error encountered (see NOTE in the kernel-doc below).
 */
2412 * ice_free_sw - free resources specific to switch
2413 * @hw: pointer to the HW struct
2414 * @sw_id: switch ID returned
2415 * @counter_id: VEB counter ID returned
2417 * free switch resources (SWID and VEB counter) (0x0209)
2419 * NOTE: This function frees multiple resources. It continues
2420 * releasing other resources even after it encounters error.
2421 * The error code returned is the last error it encountered.
2423 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
2425 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
2426 enum ice_status status, ret_status;
2429 buf_len = ice_struct_size(sw_buf, elem, 1);
2430 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2432 return ICE_ERR_NO_MEMORY;
2434 /* Prepare buffer to free for switch ID res.
2435 * The number of resource entries in buffer is passed as 1 since only a
2436 * single switch/VEB instance is freed, and hence a single sw_id
2439 sw_buf->num_elems = CPU_TO_LE16(1);
2440 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
2441 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
2443 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2444 ice_aqc_opc_free_res, NULL);
2447 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
2449 /* Prepare buffer to free for VEB Counter resource */
2450 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
2451 ice_malloc(hw, buf_len);
2453 ice_free(hw, sw_buf);
2454 return ICE_ERR_NO_MEMORY;
2457 /* The number of resource entries in buffer is passed as 1 since only a
2458 * single switch/VEB instance is freed, and hence a single VEB counter
2461 counter_buf->num_elems = CPU_TO_LE16(1);
2462 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
2463 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
2465 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
2466 ice_aqc_opc_free_res, NULL);
2468 ice_debug(hw, ICE_DBG_SW, "VEB counter resource could not be freed\n");
2469 ret_status = status;
2472 ice_free(hw, counter_buf);
2473 ice_free(hw, sw_buf);
2479 * @hw: pointer to the HW struct
2480 * @vsi_ctx: pointer to a VSI context struct
2481 * @cd: pointer to command details structure or NULL
2483 * Add a VSI context to the hardware (0x0210)
2486 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2487 struct ice_sq_cd *cd)
2489 struct ice_aqc_add_update_free_vsi_resp *res;
2490 struct ice_aqc_add_get_update_free_vsi *cmd;
2491 struct ice_aq_desc desc;
2492 enum ice_status status;
2494 cmd = &desc.params.vsi_cmd;
2495 res = &desc.params.add_update_free_vsi_res;
2497 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
2499 if (!vsi_ctx->alloc_from_pool)
2500 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
2501 ICE_AQ_VSI_IS_VALID);
2503 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
2505 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2507 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2508 sizeof(vsi_ctx->info), cd);
2511 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
2512 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
2513 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
2521 * @hw: pointer to the HW struct
2522 * @vsi_ctx: pointer to a VSI context struct
2523 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2524 * @cd: pointer to command details structure or NULL
2526 * Free VSI context info from hardware (0x0213)
2529 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2530 bool keep_vsi_alloc, struct ice_sq_cd *cd)
2532 struct ice_aqc_add_update_free_vsi_resp *resp;
2533 struct ice_aqc_add_get_update_free_vsi *cmd;
2534 struct ice_aq_desc desc;
2535 enum ice_status status;
2537 cmd = &desc.params.vsi_cmd;
2538 resp = &desc.params.add_update_free_vsi_res;
2540 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
2542 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2544 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
2546 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2548 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2549 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2557 * @hw: pointer to the HW struct
2558 * @vsi_ctx: pointer to a VSI context struct
2559 * @cd: pointer to command details structure or NULL
2561 * Update VSI context in the hardware (0x0211)
2564 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2565 struct ice_sq_cd *cd)
2567 struct ice_aqc_add_update_free_vsi_resp *resp;
2568 struct ice_aqc_add_get_update_free_vsi *cmd;
2569 struct ice_aq_desc desc;
2570 enum ice_status status;
2572 cmd = &desc.params.vsi_cmd;
2573 resp = &desc.params.add_update_free_vsi_res;
2575 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
2577 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2579 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2581 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2582 sizeof(vsi_ctx->info), cd);
2585 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2586 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2593 * ice_is_vsi_valid - check whether the VSI is valid or not
2594 * @hw: pointer to the HW struct
2595 * @vsi_handle: VSI handle
2597 * check whether the VSI is valid or not
2599 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
2601 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
2605 * ice_get_hw_vsi_num - return the HW VSI number
2606 * @hw: pointer to the HW struct
2607 * @vsi_handle: VSI handle
2609 * return the HW VSI number
2610 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
2612 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
2614 return hw->vsi_ctx[vsi_handle]->vsi_num;
2618 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
2619 * @hw: pointer to the HW struct
2620 * @vsi_handle: VSI handle
2622 * return the VSI context entry for a given VSI handle
2624 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2626 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
2630 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
2631 * @hw: pointer to the HW struct
2632 * @vsi_handle: VSI handle
2633 * @vsi: VSI context pointer
2635 * save the VSI context entry for a given VSI handle
2638 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
2640 hw->vsi_ctx[vsi_handle] = vsi;
2644 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
2645 * @hw: pointer to the HW struct
2646 * @vsi_handle: VSI handle
2648 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
2650 struct ice_vsi_ctx *vsi;
2653 vsi = ice_get_vsi_ctx(hw, vsi_handle);
2656 ice_for_each_traffic_class(i) {
2657 if (vsi->lan_q_ctx[i]) {
2658 ice_free(hw, vsi->lan_q_ctx[i]);
2659 vsi->lan_q_ctx[i] = NULL;
2665 * ice_clear_vsi_ctx - clear the VSI context entry
2666 * @hw: pointer to the HW struct
2667 * @vsi_handle: VSI handle
2669 * clear the VSI context entry
2671 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2673 struct ice_vsi_ctx *vsi;
2675 vsi = ice_get_vsi_ctx(hw, vsi_handle);
2677 ice_clear_vsi_q_ctx(hw, vsi_handle);
2679 hw->vsi_ctx[vsi_handle] = NULL;
2684 * ice_clear_all_vsi_ctx - clear all the VSI context entries
2685 * @hw: pointer to the HW struct
2687 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
2691 for (i = 0; i < ICE_MAX_VSI; i++)
2692 ice_clear_vsi_ctx(hw, i);
2696 * ice_add_vsi - add VSI context to the hardware and VSI handle list
2697 * @hw: pointer to the HW struct
2698 * @vsi_handle: unique VSI handle provided by drivers
2699 * @vsi_ctx: pointer to a VSI context struct
2700 * @cd: pointer to command details structure or NULL
2702 * Add a VSI context to the hardware also add it into the VSI handle list.
2703 * If this function gets called after reset for existing VSIs then update
2704 * with the new HW VSI number in the corresponding VSI handle list entry.
2707 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2708 struct ice_sq_cd *cd)
2710 struct ice_vsi_ctx *tmp_vsi_ctx;
2711 enum ice_status status;
2713 if (vsi_handle >= ICE_MAX_VSI)
2714 return ICE_ERR_PARAM;
2715 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
2718 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
2720 /* Create a new VSI context */
2721 tmp_vsi_ctx = (struct ice_vsi_ctx *)
2722 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
2724 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
2725 return ICE_ERR_NO_MEMORY;
2727 *tmp_vsi_ctx = *vsi_ctx;
2729 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
2731 /* update with new HW VSI num */
2732 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
2739 * ice_free_vsi- free VSI context from hardware and VSI handle list
2740 * @hw: pointer to the HW struct
2741 * @vsi_handle: unique VSI handle
2742 * @vsi_ctx: pointer to a VSI context struct
2743 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2744 * @cd: pointer to command details structure or NULL
2746 * Free VSI context info from hardware as well as from VSI handle list
2749 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2750 bool keep_vsi_alloc, struct ice_sq_cd *cd)
2752 enum ice_status status;
2754 if (!ice_is_vsi_valid(hw, vsi_handle))
2755 return ICE_ERR_PARAM;
2756 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2757 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
2759 ice_clear_vsi_ctx(hw, vsi_handle);
2765 * @hw: pointer to the HW struct
2766 * @vsi_handle: unique VSI handle
2767 * @vsi_ctx: pointer to a VSI context struct
2768 * @cd: pointer to command details structure or NULL
2770 * Update VSI context in the hardware
2773 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2774 struct ice_sq_cd *cd)
2776 if (!ice_is_vsi_valid(hw, vsi_handle))
2777 return ICE_ERR_PARAM;
2778 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2779 return ice_aq_update_vsi(hw, vsi_ctx, cd);
2783 * ice_aq_get_vsi_params
2784 * @hw: pointer to the HW struct
2785 * @vsi_ctx: pointer to a VSI context struct
2786 * @cd: pointer to command details structure or NULL
2788 * Get VSI context info from hardware (0x0212)
2791 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2792 struct ice_sq_cd *cd)
2794 struct ice_aqc_add_get_update_free_vsi *cmd;
2795 struct ice_aqc_get_vsi_resp *resp;
2796 struct ice_aq_desc desc;
2797 enum ice_status status;
2799 cmd = &desc.params.vsi_cmd;
2800 resp = &desc.params.get_vsi_resp;
2802 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
2804 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2806 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2807 sizeof(vsi_ctx->info), cd);
2809 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
2811 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2812 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2819 * ice_aq_add_update_mir_rule - add/update a mirror rule
2820 * @hw: pointer to the HW struct
2821 * @rule_type: Rule Type
2822 * @dest_vsi: VSI number to which packets will be mirrored
2823 * @count: length of the list
2824 * @mr_buf: buffer for list of mirrored VSI numbers
2825 * @cd: pointer to command details structure or NULL
2828 * Add/Update Mirror Rule (0x260).
2831 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
2832 u16 count, struct ice_mir_rule_buf *mr_buf,
2833 struct ice_sq_cd *cd, u16 *rule_id)
2835 struct ice_aqc_add_update_mir_rule *cmd;
2836 struct ice_aq_desc desc;
2837 enum ice_status status;
2838 __le16 *mr_list = NULL;
2841 switch (rule_type) {
2842 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
2843 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
2844 /* Make sure count and mr_buf are set for these rule_types */
2845 if (!(count && mr_buf))
2846 return ICE_ERR_PARAM;
2848 buf_size = count * sizeof(__le16);
2849 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
2851 return ICE_ERR_NO_MEMORY;
2853 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
2854 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
2855 /* Make sure count and mr_buf are not set for these
2858 if (count || mr_buf)
2859 return ICE_ERR_PARAM;
2862 ice_debug(hw, ICE_DBG_SW, "Error due to unsupported rule_type %u\n", rule_type);
2863 return ICE_ERR_OUT_OF_RANGE;
2866 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
2868 /* Pre-process 'mr_buf' items for add/update of virtual port
2869 * ingress/egress mirroring (but not physical port ingress/egress
2875 for (i = 0; i < count; i++) {
2878 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
2880 /* Validate specified VSI number, make sure it is less
2881 * than ICE_MAX_VSI, if not return with error.
2883 if (id >= ICE_MAX_VSI) {
2884 ice_debug(hw, ICE_DBG_SW, "Error VSI index (%u) out-of-range\n",
2886 ice_free(hw, mr_list);
2887 return ICE_ERR_OUT_OF_RANGE;
2890 /* add VSI to mirror rule */
2893 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
2894 else /* remove VSI from mirror rule */
2895 mr_list[i] = CPU_TO_LE16(id);
2899 cmd = &desc.params.add_update_rule;
2900 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
2901 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
2902 ICE_AQC_RULE_ID_VALID_M);
2903 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
2904 cmd->num_entries = CPU_TO_LE16(count);
2905 cmd->dest = CPU_TO_LE16(dest_vsi);
2907 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
2909 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
2911 ice_free(hw, mr_list);
2917 * ice_aq_delete_mir_rule - delete a mirror rule
2918 * @hw: pointer to the HW struct
2919 * @rule_id: Mirror rule ID (to be deleted)
2920 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
2921 * otherwise it is returned to the shared pool
2922 * @cd: pointer to command details structure or NULL
2924 * Delete Mirror Rule (0x261).
2927 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
2928 struct ice_sq_cd *cd)
2930 struct ice_aqc_delete_mir_rule *cmd;
2931 struct ice_aq_desc desc;
2933 /* rule_id should be in the range 0...63 */
2934 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
2935 return ICE_ERR_OUT_OF_RANGE;
2937 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
2939 cmd = &desc.params.del_rule;
2940 rule_id |= ICE_AQC_RULE_ID_VALID_M;
2941 cmd->rule_id = CPU_TO_LE16(rule_id);
2944 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
2946 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2950 * ice_aq_alloc_free_vsi_list
2951 * @hw: pointer to the HW struct
2952 * @vsi_list_id: VSI list ID returned or used for lookup
2953 * @lkup_type: switch rule filter lookup type
2954 * @opc: switch rules population command type - pass in the command opcode
2956 * allocates or free a VSI list resource
2958 static enum ice_status
2959 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
2960 enum ice_sw_lkup_type lkup_type,
2961 enum ice_adminq_opc opc)
2963 struct ice_aqc_alloc_free_res_elem *sw_buf;
2964 struct ice_aqc_res_elem *vsi_ele;
2965 enum ice_status status;
2968 buf_len = ice_struct_size(sw_buf, elem, 1);
2969 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2971 return ICE_ERR_NO_MEMORY;
2972 sw_buf->num_elems = CPU_TO_LE16(1);
2974 if (lkup_type == ICE_SW_LKUP_MAC ||
2975 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2976 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2977 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2978 lkup_type == ICE_SW_LKUP_PROMISC ||
2979 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2980 lkup_type == ICE_SW_LKUP_LAST) {
2981 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
2982 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
2984 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
2986 status = ICE_ERR_PARAM;
2987 goto ice_aq_alloc_free_vsi_list_exit;
2990 if (opc == ice_aqc_opc_free_res)
2991 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
2993 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
2995 goto ice_aq_alloc_free_vsi_list_exit;
2997 if (opc == ice_aqc_opc_alloc_res) {
2998 vsi_ele = &sw_buf->elem[0];
2999 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
3002 ice_aq_alloc_free_vsi_list_exit:
3003 ice_free(hw, sw_buf);
3008 * ice_aq_set_storm_ctrl - Sets storm control configuration
3009 * @hw: pointer to the HW struct
3010 * @bcast_thresh: represents the upper threshold for broadcast storm control
3011 * @mcast_thresh: represents the upper threshold for multicast storm control
3012 * @ctl_bitmask: storm control knobs
3014 * Sets the storm control configuration (0x0280)
3017 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
3020 struct ice_aqc_storm_cfg *cmd;
3021 struct ice_aq_desc desc;
3023 cmd = &desc.params.storm_conf;
3025 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
3027 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
3028 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
3029 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
3031 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3035 * ice_aq_get_storm_ctrl - gets storm control configuration
3036 * @hw: pointer to the HW struct
3037 * @bcast_thresh: represents the upper threshold for broadcast storm control
3038 * @mcast_thresh: represents the upper threshold for multicast storm control
3039 * @ctl_bitmask: storm control knobs
3041 * Gets the storm control configuration (0x0281)
3044 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
3047 enum ice_status status;
3048 struct ice_aq_desc desc;
3050 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
3052 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3054 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
3057 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
3060 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
3063 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
3070 * ice_aq_sw_rules - add/update/remove switch rules
3071 * @hw: pointer to the HW struct
3072 * @rule_list: pointer to switch rule population list
3073 * @rule_list_sz: total size of the rule list in bytes
3074 * @num_rules: number of switch rules in the rule_list
3075 * @opc: switch rules population command type - pass in the command opcode
3076 * @cd: pointer to command details structure or NULL
3078 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
3080 static enum ice_status
3081 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
3082 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
3084 struct ice_aq_desc desc;
3085 enum ice_status status;
3087 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3089 if (opc != ice_aqc_opc_add_sw_rules &&
3090 opc != ice_aqc_opc_update_sw_rules &&
3091 opc != ice_aqc_opc_remove_sw_rules)
3092 return ICE_ERR_PARAM;
3094 ice_fill_dflt_direct_cmd_desc(&desc, opc);
3096 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3097 desc.params.sw_rules.num_rules_fltr_entry_index =
3098 CPU_TO_LE16(num_rules);
3099 status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
3100 if (opc != ice_aqc_opc_add_sw_rules &&
3101 hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
3102 status = ICE_ERR_DOES_NOT_EXIST;
3108 * ice_aq_add_recipe - add switch recipe
3109 * @hw: pointer to the HW struct
3110 * @s_recipe_list: pointer to switch rule population list
3111 * @num_recipes: number of switch recipes in the list
3112 * @cd: pointer to command details structure or NULL
3117 ice_aq_add_recipe(struct ice_hw *hw,
3118 struct ice_aqc_recipe_data_elem *s_recipe_list,
3119 u16 num_recipes, struct ice_sq_cd *cd)
3121 struct ice_aqc_add_get_recipe *cmd;
3122 struct ice_aq_desc desc;
3125 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3126 cmd = &desc.params.add_get_recipe;
3127 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
3129 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
3130 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3132 buf_size = num_recipes * sizeof(*s_recipe_list);
3134 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
3138 * ice_aq_get_recipe - get switch recipe
3139 * @hw: pointer to the HW struct
3140 * @s_recipe_list: pointer to switch rule population list
3141 * @num_recipes: pointer to the number of recipes (input and output)
3142 * @recipe_root: root recipe number of recipe(s) to retrieve
3143 * @cd: pointer to command details structure or NULL
3147 * On input, *num_recipes should equal the number of entries in s_recipe_list.
3148 * On output, *num_recipes will equal the number of entries returned in
3151 * The caller must supply enough space in s_recipe_list to hold all possible
3152 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
3155 ice_aq_get_recipe(struct ice_hw *hw,
3156 struct ice_aqc_recipe_data_elem *s_recipe_list,
3157 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
3159 struct ice_aqc_add_get_recipe *cmd;
3160 struct ice_aq_desc desc;
3161 enum ice_status status;
3164 if (*num_recipes != ICE_MAX_NUM_RECIPES)
3165 return ICE_ERR_PARAM;
3167 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3168 cmd = &desc.params.add_get_recipe;
3169 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
3171 cmd->return_index = CPU_TO_LE16(recipe_root);
3172 cmd->num_sub_recipes = 0;
3174 buf_size = *num_recipes * sizeof(*s_recipe_list);
3176 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
3177 /* cppcheck-suppress constArgument */
3178 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
3184 * ice_update_recipe_lkup_idx - update a default recipe based on the lkup_idx
3185 * @hw: pointer to the HW struct
3186 * @params: parameters used to update the default recipe
3188 * This function only supports updating default recipes and it only supports
3189 * updating a single recipe based on the lkup_idx at a time.
3191 * This is done as a read-modify-write operation. First, get the current recipe
3192 * contents based on the recipe's ID. Then modify the field vector index and
3193 * mask if it's valid at the lkup_idx. Finally, use the add recipe AQ to update
3194 * the pre-existing recipe with the modifications.
3197 ice_update_recipe_lkup_idx(struct ice_hw *hw,
3198 struct ice_update_recipe_lkup_idx_params *params)
3200 struct ice_aqc_recipe_data_elem *rcp_list;
3201 u16 num_recps = ICE_MAX_NUM_RECIPES;
3202 enum ice_status status;
3204 rcp_list = (struct ice_aqc_recipe_data_elem *)ice_malloc(hw, num_recps * sizeof(*rcp_list));
3206 return ICE_ERR_NO_MEMORY;
3208 /* read current recipe list from firmware */
3209 rcp_list->recipe_indx = params->rid;
3210 status = ice_aq_get_recipe(hw, rcp_list, &num_recps, params->rid, NULL);
3212 ice_debug(hw, ICE_DBG_SW, "Failed to get recipe %d, status %d\n",
3213 params->rid, status);
3217 /* only modify existing recipe's lkup_idx and mask if valid, while
3218 * leaving all other fields the same, then update the recipe firmware
3220 rcp_list->content.lkup_indx[params->lkup_idx] = params->fv_idx;
3221 if (params->mask_valid)
3222 rcp_list->content.mask[params->lkup_idx] =
3223 CPU_TO_LE16(params->mask);
3225 if (params->ignore_valid)
3226 rcp_list->content.lkup_indx[params->lkup_idx] |=
3227 ICE_AQ_RECIPE_LKUP_IGNORE;
3229 status = ice_aq_add_recipe(hw, &rcp_list[0], 1, NULL);
3231 ice_debug(hw, ICE_DBG_SW, "Failed to update recipe %d lkup_idx %d fv_idx %d mask %d mask_valid %s, status %d\n",
3232 params->rid, params->lkup_idx, params->fv_idx,
3233 params->mask, params->mask_valid ? "true" : "false",
3237 ice_free(hw, rcp_list);
3242 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
3243 * @hw: pointer to the HW struct
3244 * @profile_id: package profile ID to associate the recipe with
3245 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
3246 * @cd: pointer to command details structure or NULL
3247 * Recipe to profile association (0x0291)
3250 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
3251 struct ice_sq_cd *cd)
3253 struct ice_aqc_recipe_to_profile *cmd;
3254 struct ice_aq_desc desc;
3256 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3257 cmd = &desc.params.recipe_to_profile;
3258 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
3259 cmd->profile_id = CPU_TO_LE16(profile_id);
3260 /* Set the recipe ID bit in the bitmask to let the device know which
3261 * profile we are associating the recipe to
3263 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
3264 ICE_NONDMA_TO_NONDMA);
3266 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3270 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
3271 * @hw: pointer to the HW struct
3272 * @profile_id: package profile ID to associate the recipe with
3273 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
3274 * @cd: pointer to command details structure or NULL
3275 * Associate profile ID with given recipe (0x0293)
3278 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
3279 struct ice_sq_cd *cd)
3281 struct ice_aqc_recipe_to_profile *cmd;
3282 struct ice_aq_desc desc;
3283 enum ice_status status;
3285 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3286 cmd = &desc.params.recipe_to_profile;
3287 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
3288 cmd->profile_id = CPU_TO_LE16(profile_id);
3290 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3292 ice_memcpy(r_bitmap, cmd->recipe_assoc,
3293 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
3299 * ice_alloc_recipe - add recipe resource
3300 * @hw: pointer to the hardware structure
3301 * @rid: recipe ID returned as response to AQ call
3303 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
3305 struct ice_aqc_alloc_free_res_elem *sw_buf;
3306 enum ice_status status;
3309 buf_len = ice_struct_size(sw_buf, elem, 1);
3310 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
3312 return ICE_ERR_NO_MEMORY;
3314 sw_buf->num_elems = CPU_TO_LE16(1);
3315 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
3316 ICE_AQC_RES_TYPE_S) |
3317 ICE_AQC_RES_TYPE_FLAG_SHARED);
3318 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
3319 ice_aqc_opc_alloc_res, NULL);
3321 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
3322 ice_free(hw, sw_buf);
3327 /* ice_init_port_info - Initialize port_info with switch configuration data
3328 * @pi: pointer to port_info
3329 * @vsi_port_num: VSI number or port number
3330 * @type: Type of switch element (port or VSI)
3331 * @swid: switch ID of the switch the element is attached to
3332 * @pf_vf_num: PF or VF number
3333 * @is_vf: true if the element is a VF, false otherwise
3336 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
3337 u16 swid, u16 pf_vf_num, bool is_vf)
3340 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
3341 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
3343 pi->pf_vf_num = pf_vf_num;
3345 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3346 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3349 ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
3354 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
3355 * @hw: pointer to the hardware structure
3357 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
3359 struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
3360 enum ice_status status;
3367 num_total_ports = 1;
3369 rbuf = (struct ice_aqc_get_sw_cfg_resp_elem *)
3370 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
3373 return ICE_ERR_NO_MEMORY;
3375 /* Multiple calls to ice_aq_get_sw_cfg may be required
3376 * to get all the switch configuration information. The need
3377 * for additional calls is indicated by ice_aq_get_sw_cfg
3378 * writing a non-zero value in req_desc
3381 struct ice_aqc_get_sw_cfg_resp_elem *ele;
3383 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
3384 &req_desc, &num_elems, NULL);
3389 for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
3390 u16 pf_vf_num, swid, vsi_port_num;
3394 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
3395 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
3397 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
3398 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
3400 swid = LE16_TO_CPU(ele->swid);
3402 if (LE16_TO_CPU(ele->pf_vf_num) &
3403 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
3406 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
3407 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
3410 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
3411 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
3412 if (j == num_total_ports) {
3413 ice_debug(hw, ICE_DBG_SW, "more ports than expected\n");
3414 status = ICE_ERR_CFG;
3417 ice_init_port_info(hw->port_info,
3418 vsi_port_num, res_type, swid,
3426 } while (req_desc && !status);
3434 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
3435 * @hw: pointer to the hardware structure
3436 * @fi: filter info structure to fill/update
3438 * This helper function populates the lb_en and lan_en elements of the provided
3439 * ice_fltr_info struct using the switch's type and characteristics of the
3440 * switch rule being configured.
3442 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
3444 if ((fi->flag & ICE_FLTR_RX) &&
3445 (fi->fltr_act == ICE_FWD_TO_VSI ||
3446 fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
3447 fi->lkup_type == ICE_SW_LKUP_LAST)
3451 if ((fi->flag & ICE_FLTR_TX) &&
3452 (fi->fltr_act == ICE_FWD_TO_VSI ||
3453 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
3454 fi->fltr_act == ICE_FWD_TO_Q ||
3455 fi->fltr_act == ICE_FWD_TO_QGRP)) {
3456 /* Setting LB for prune actions will result in replicated
3457 * packets to the internal switch that will be dropped.
3459 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
3462 /* Set lan_en to TRUE if
3463 * 1. The switch is a VEB AND
3465 * 2.1 The lookup is a directional lookup like ethertype,
3466 * promiscuous, ethertype-MAC, promiscuous-VLAN
3467 * and default-port OR
3468 * 2.2 The lookup is VLAN, OR
3469 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
3470 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
3474 * The switch is a VEPA.
3476 * In all other cases, the LAN enable has to be set to false.
3479 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3480 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
3481 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3482 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3483 fi->lkup_type == ICE_SW_LKUP_DFLT ||
3484 fi->lkup_type == ICE_SW_LKUP_VLAN ||
3485 (fi->lkup_type == ICE_SW_LKUP_MAC &&
3486 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
3487 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
3488 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
3497 * ice_fill_sw_rule - Helper function to fill switch rule structure
3498 * @hw: pointer to the hardware structure
3499 * @f_info: entry containing packet forwarding information
3500 * @s_rule: switch rule structure to be filled in based on mac_entry
3501 * @opc: switch rules population command type - pass in the command opcode
3504 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
3505 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
3507 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
3508 u16 vlan_tpid = ICE_ETH_P_8021Q;
3516 if (opc == ice_aqc_opc_remove_sw_rules) {
3517 s_rule->pdata.lkup_tx_rx.act = 0;
3518 s_rule->pdata.lkup_tx_rx.index =
3519 CPU_TO_LE16(f_info->fltr_rule_id);
3520 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
3524 eth_hdr_sz = sizeof(dummy_eth_header);
3525 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
3527 /* initialize the ether header with a dummy header */
3528 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
3529 ice_fill_sw_info(hw, f_info);
3531 switch (f_info->fltr_act) {
3532 case ICE_FWD_TO_VSI:
3533 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
3534 ICE_SINGLE_ACT_VSI_ID_M;
3535 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
3536 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3537 ICE_SINGLE_ACT_VALID_BIT;
3539 case ICE_FWD_TO_VSI_LIST:
3540 act |= ICE_SINGLE_ACT_VSI_LIST;
3541 act |= (f_info->fwd_id.vsi_list_id <<
3542 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
3543 ICE_SINGLE_ACT_VSI_LIST_ID_M;
3544 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
3545 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3546 ICE_SINGLE_ACT_VALID_BIT;
3549 act |= ICE_SINGLE_ACT_TO_Q;
3550 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3551 ICE_SINGLE_ACT_Q_INDEX_M;
3553 case ICE_DROP_PACKET:
3554 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
3555 ICE_SINGLE_ACT_VALID_BIT;
3557 case ICE_FWD_TO_QGRP:
3558 q_rgn = f_info->qgrp_size > 0 ?
3559 (u8)ice_ilog2(f_info->qgrp_size) : 0;
3560 act |= ICE_SINGLE_ACT_TO_Q;
3561 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3562 ICE_SINGLE_ACT_Q_INDEX_M;
3563 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
3564 ICE_SINGLE_ACT_Q_REGION_M;
3571 act |= ICE_SINGLE_ACT_LB_ENABLE;
3573 act |= ICE_SINGLE_ACT_LAN_ENABLE;
3575 switch (f_info->lkup_type) {
3576 case ICE_SW_LKUP_MAC:
3577 daddr = f_info->l_data.mac.mac_addr;
3579 case ICE_SW_LKUP_VLAN:
3580 vlan_id = f_info->l_data.vlan.vlan_id;
3581 if (f_info->l_data.vlan.tpid_valid)
3582 vlan_tpid = f_info->l_data.vlan.tpid;
3583 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
3584 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
3585 act |= ICE_SINGLE_ACT_PRUNE;
3586 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
3589 case ICE_SW_LKUP_ETHERTYPE_MAC:
3590 daddr = f_info->l_data.ethertype_mac.mac_addr;
3592 case ICE_SW_LKUP_ETHERTYPE:
3593 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
3594 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
3596 case ICE_SW_LKUP_MAC_VLAN:
3597 daddr = f_info->l_data.mac_vlan.mac_addr;
3598 vlan_id = f_info->l_data.mac_vlan.vlan_id;
3600 case ICE_SW_LKUP_PROMISC_VLAN:
3601 vlan_id = f_info->l_data.mac_vlan.vlan_id;
3603 case ICE_SW_LKUP_PROMISC:
3604 daddr = f_info->l_data.mac_vlan.mac_addr;
3610 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
3611 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
3612 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
3614 /* Recipe set depending on lookup type */
3615 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
3616 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
3617 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
3620 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
3621 ICE_NONDMA_TO_NONDMA);
3623 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
3624 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
3625 *off = CPU_TO_BE16(vlan_id);
3626 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
3627 *off = CPU_TO_BE16(vlan_tpid);
3630 /* Create the switch rule with the final dummy Ethernet header */
3631 if (opc != ice_aqc_opc_update_sw_rules)
3632 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
3636 * ice_add_marker_act
3637 * @hw: pointer to the hardware structure
3638 * @m_ent: the management entry for which sw marker needs to be added
3639 * @sw_marker: sw marker to tag the Rx descriptor with
3640 * @l_id: large action resource ID
3642 * Create a large action to hold software marker and update the switch rule
3643 * entry pointed by m_ent with newly created large action
3645 static enum ice_status
3646 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3647 u16 sw_marker, u16 l_id)
3649 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
3650 /* For software marker we need 3 large actions
3651 * 1. FWD action: FWD TO VSI or VSI LIST
3652 * 2. GENERIC VALUE action to hold the profile ID
3653 * 3. GENERIC VALUE action to hold the software marker ID
3655 const u16 num_lg_acts = 3;
3656 enum ice_status status;
3662 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3663 return ICE_ERR_PARAM;
3665 /* Create two back-to-back switch rules and submit them to the HW using
3666 * one memory buffer:
3670 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
3671 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3672 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3674 return ICE_ERR_NO_MEMORY;
3676 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3678 /* Fill in the first switch rule i.e. large action */
3679 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3680 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3681 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
3683 /* First action VSI forwarding or VSI list forwarding depending on how
3686 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
3687 m_ent->fltr_info.fwd_id.hw_vsi_id;
3689 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3690 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
3691 if (m_ent->vsi_count > 1)
3692 act |= ICE_LG_ACT_VSI_LIST;
3693 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3695 /* Second action descriptor type */
3696 act = ICE_LG_ACT_GENERIC;
3698 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
3699 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3701 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
3702 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
3704 /* Third action Marker value */
3705 act |= ICE_LG_ACT_GENERIC;
3706 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
3707 ICE_LG_ACT_GENERIC_VALUE_M;
3709 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
3711 /* call the fill switch rule to fill the lookup Tx Rx structure */
3712 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3713 ice_aqc_opc_update_sw_rules);
3715 /* Update the action to point to the large action ID */
3716 rx_tx->pdata.lkup_tx_rx.act =
3717 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
3718 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
3719 ICE_SINGLE_ACT_PTR_VAL_M));
3721 /* Use the filter rule ID of the previously created rule with single
3722 * act. Once the update happens, hardware will treat this as large
3725 rx_tx->pdata.lkup_tx_rx.index =
3726 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
3728 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3729 ice_aqc_opc_update_sw_rules, NULL);
3731 m_ent->lg_act_idx = l_id;
3732 m_ent->sw_marker_id = sw_marker;
3735 ice_free(hw, lg_act);
3740 * ice_add_counter_act - add/update filter rule with counter action
3741 * @hw: pointer to the hardware structure
3742 * @m_ent: the management entry for which counter needs to be added
3743 * @counter_id: VLAN counter ID returned as part of allocate resource
3744 * @l_id: large action resource ID
3746 static enum ice_status
3747 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3748 u16 counter_id, u16 l_id)
3750 struct ice_aqc_sw_rules_elem *lg_act;
3751 struct ice_aqc_sw_rules_elem *rx_tx;
3752 enum ice_status status;
3753 /* 2 actions will be added while adding a large action counter */
3754 const int num_acts = 2;
3761 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3762 return ICE_ERR_PARAM;
3764 /* Create two back-to-back switch rules and submit them to the HW using
3765 * one memory buffer:
3769 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
3770 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3771 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3773 return ICE_ERR_NO_MEMORY;
3775 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3777 /* Fill in the first switch rule i.e. large action */
3778 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3779 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3780 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
3782 /* First action VSI forwarding or VSI list forwarding depending on how
3785 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
3786 m_ent->fltr_info.fwd_id.hw_vsi_id;
3788 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3789 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
3790 ICE_LG_ACT_VSI_LIST_ID_M;
3791 if (m_ent->vsi_count > 1)
3792 act |= ICE_LG_ACT_VSI_LIST;
3793 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3795 /* Second action counter ID */
3796 act = ICE_LG_ACT_STAT_COUNT;
3797 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
3798 ICE_LG_ACT_STAT_COUNT_M;
3799 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3801 /* call the fill switch rule to fill the lookup Tx Rx structure */
3802 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3803 ice_aqc_opc_update_sw_rules);
3805 act = ICE_SINGLE_ACT_PTR;
3806 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
3807 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
3809 /* Use the filter rule ID of the previously created rule with single
3810 * act. Once the update happens, hardware will treat this as large
3813 f_rule_id = m_ent->fltr_info.fltr_rule_id;
3814 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
3816 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3817 ice_aqc_opc_update_sw_rules, NULL);
3819 m_ent->lg_act_idx = l_id;
3820 m_ent->counter_index = counter_id;
3823 ice_free(hw, lg_act);
3828 * ice_create_vsi_list_map
3829 * @hw: pointer to the hardware structure
3830 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
3831 * @num_vsi: number of VSI handles in the array
3832 * @vsi_list_id: VSI list ID generated as part of allocate resource
3834 * Helper function to create a new entry of VSI list ID to VSI mapping
3835 * using the given VSI list ID
3837 static struct ice_vsi_list_map_info *
3838 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3841 struct ice_switch_info *sw = hw->switch_info;
3842 struct ice_vsi_list_map_info *v_map;
3845 v_map = (struct ice_vsi_list_map_info *)ice_malloc(hw, sizeof(*v_map));
3849 v_map->vsi_list_id = vsi_list_id;
3851 for (i = 0; i < num_vsi; i++)
3852 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
3854 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
3859 * ice_update_vsi_list_rule
3860 * @hw: pointer to the hardware structure
3861 * @vsi_handle_arr: array of VSI handles to form a VSI list
3862 * @num_vsi: number of VSI handles in the array
3863 * @vsi_list_id: VSI list ID generated as part of allocate resource
3864 * @remove: Boolean value to indicate if this is a remove action
3865 * @opc: switch rules population command type - pass in the command opcode
3866 * @lkup_type: lookup type of the filter
3868 * Call AQ command to add a new switch rule or update existing switch rule
3869 * using the given VSI list ID
3871 static enum ice_status
3872 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3873 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
3874 enum ice_sw_lkup_type lkup_type)
3876 struct ice_aqc_sw_rules_elem *s_rule;
3877 enum ice_status status;
3883 return ICE_ERR_PARAM;
3885 if (lkup_type == ICE_SW_LKUP_MAC ||
3886 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
3887 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3888 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3889 lkup_type == ICE_SW_LKUP_PROMISC ||
3890 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3891 lkup_type == ICE_SW_LKUP_LAST)
3892 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
3893 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
3894 else if (lkup_type == ICE_SW_LKUP_VLAN)
3895 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
3896 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
3898 return ICE_ERR_PARAM;
3900 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
3901 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3903 return ICE_ERR_NO_MEMORY;
3904 for (i = 0; i < num_vsi; i++) {
3905 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
3906 status = ICE_ERR_PARAM;
3909 /* AQ call requires hw_vsi_id(s) */
3910 s_rule->pdata.vsi_list.vsi[i] =
3911 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
3914 s_rule->type = CPU_TO_LE16(rule_type);
3915 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
3916 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
3918 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
3921 ice_free(hw, s_rule);
3926 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
3927 * @hw: pointer to the HW struct
3928 * @vsi_handle_arr: array of VSI handles to form a VSI list
3929 * @num_vsi: number of VSI handles in the array
3930 * @vsi_list_id: stores the ID of the VSI list to be created
3931 * @lkup_type: switch rule filter's lookup type
3933 static enum ice_status
3934 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3935 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
3937 enum ice_status status;
3939 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
3940 ice_aqc_opc_alloc_res);
3944 /* Update the newly created VSI list to include the specified VSIs */
3945 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
3946 *vsi_list_id, false,
3947 ice_aqc_opc_add_sw_rules, lkup_type);
3951 * ice_create_pkt_fwd_rule
3952 * @hw: pointer to the hardware structure
3953 * @recp_list: corresponding filter management list
3954 * @f_entry: entry containing packet forwarding information
3956 * Create switch rule with given filter information and add an entry
3957 * to the corresponding filter management list to track this switch rule
3960 static enum ice_status
3961 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3962 struct ice_fltr_list_entry *f_entry)
3964 struct ice_fltr_mgmt_list_entry *fm_entry;
3965 struct ice_aqc_sw_rules_elem *s_rule;
3966 enum ice_status status;
3968 s_rule = (struct ice_aqc_sw_rules_elem *)
3969 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
3971 return ICE_ERR_NO_MEMORY;
3972 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3973 ice_malloc(hw, sizeof(*fm_entry));
3975 status = ICE_ERR_NO_MEMORY;
3976 goto ice_create_pkt_fwd_rule_exit;
3979 fm_entry->fltr_info = f_entry->fltr_info;
3981 /* Initialize all the fields for the management entry */
3982 fm_entry->vsi_count = 1;
3983 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
3984 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
3985 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
3987 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
3988 ice_aqc_opc_add_sw_rules);
3990 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3991 ice_aqc_opc_add_sw_rules, NULL);
3993 ice_free(hw, fm_entry);
3994 goto ice_create_pkt_fwd_rule_exit;
3997 f_entry->fltr_info.fltr_rule_id =
3998 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3999 fm_entry->fltr_info.fltr_rule_id =
4000 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4002 /* The book keeping entries will get removed when base driver
4003 * calls remove filter AQ command
4005 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
4007 ice_create_pkt_fwd_rule_exit:
4008 ice_free(hw, s_rule);
4013 * ice_update_pkt_fwd_rule
4014 * @hw: pointer to the hardware structure
4015 * @f_info: filter information for switch rule
4017 * Call AQ command to update a previously created switch rule with a
4020 static enum ice_status
4021 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
4023 struct ice_aqc_sw_rules_elem *s_rule;
4024 enum ice_status status;
4026 s_rule = (struct ice_aqc_sw_rules_elem *)
4027 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
4029 return ICE_ERR_NO_MEMORY;
4031 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
4033 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
4035 /* Update switch rule with new rule set to forward VSI list */
4036 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
4037 ice_aqc_opc_update_sw_rules, NULL);
4039 ice_free(hw, s_rule);
4044 * ice_update_sw_rule_bridge_mode
4045 * @hw: pointer to the HW struct
4047 * Updates unicast switch filter rules based on VEB/VEPA mode
4049 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
4051 struct ice_switch_info *sw = hw->switch_info;
4052 struct ice_fltr_mgmt_list_entry *fm_entry;
4053 enum ice_status status = ICE_SUCCESS;
4054 struct LIST_HEAD_TYPE *rule_head;
4055 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4057 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4058 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
4060 ice_acquire_lock(rule_lock);
4061 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
4063 struct ice_fltr_info *fi = &fm_entry->fltr_info;
4064 u8 *addr = fi->l_data.mac.mac_addr;
4066 /* Update unicast Tx rules to reflect the selected
4069 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
4070 (fi->fltr_act == ICE_FWD_TO_VSI ||
4071 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
4072 fi->fltr_act == ICE_FWD_TO_Q ||
4073 fi->fltr_act == ICE_FWD_TO_QGRP)) {
4074 status = ice_update_pkt_fwd_rule(hw, fi);
4080 ice_release_lock(rule_lock);
4086 * ice_add_update_vsi_list
4087 * @hw: pointer to the hardware structure
4088 * @m_entry: pointer to current filter management list entry
4089 * @cur_fltr: filter information from the book keeping entry
4090 * @new_fltr: filter information with the new VSI to be added
4092 * Call AQ command to add or update previously created VSI list with new VSI.
4094 * Helper function to do book keeping associated with adding filter information
4095 * The algorithm to do the book keeping is described below :
4096 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
4097 * if only one VSI has been added till now
4098 * Allocate a new VSI list and add two VSIs
4099 * to this list using switch rule command
4100 * Update the previously created switch rule with the
4101 * newly created VSI list ID
4102 * if a VSI list was previously created
4103 * Add the new VSI to the previously created VSI list set
4104 * using the update switch rule command
4106 static enum ice_status
4107 ice_add_update_vsi_list(struct ice_hw *hw,
4108 struct ice_fltr_mgmt_list_entry *m_entry,
4109 struct ice_fltr_info *cur_fltr,
4110 struct ice_fltr_info *new_fltr)
4112 enum ice_status status = ICE_SUCCESS;
4113 u16 vsi_list_id = 0;
4115 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
4116 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
4117 return ICE_ERR_NOT_IMPL;
4119 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
4120 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
4121 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
4122 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
4123 return ICE_ERR_NOT_IMPL;
4125 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
4126 /* Only one entry existed in the mapping and it was not already
4127 * a part of a VSI list. So, create a VSI list with the old and
4130 struct ice_fltr_info tmp_fltr;
4131 u16 vsi_handle_arr[2];
4133 /* A rule already exists with the new VSI being added */
4134 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
4135 return ICE_ERR_ALREADY_EXISTS;
4137 vsi_handle_arr[0] = cur_fltr->vsi_handle;
4138 vsi_handle_arr[1] = new_fltr->vsi_handle;
4139 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
4141 new_fltr->lkup_type);
4145 tmp_fltr = *new_fltr;
4146 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
4147 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4148 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
4149 /* Update the previous switch rule of "MAC forward to VSI" to
4150 * "MAC fwd to VSI list"
4152 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
4156 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
4157 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
4158 m_entry->vsi_list_info =
4159 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4162 if (!m_entry->vsi_list_info)
4163 return ICE_ERR_NO_MEMORY;
4165 /* If this entry was large action then the large action needs
4166 * to be updated to point to FWD to VSI list
4168 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
4170 ice_add_marker_act(hw, m_entry,
4171 m_entry->sw_marker_id,
4172 m_entry->lg_act_idx);
4174 u16 vsi_handle = new_fltr->vsi_handle;
4175 enum ice_adminq_opc opcode;
4177 if (!m_entry->vsi_list_info)
4180 /* A rule already exists with the new VSI being added */
4181 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
4184 /* Update the previously created VSI list set with
4185 * the new VSI ID passed in
4187 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
4188 opcode = ice_aqc_opc_update_sw_rules;
4190 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
4191 vsi_list_id, false, opcode,
4192 new_fltr->lkup_type);
4193 /* update VSI list mapping info with new VSI ID */
4195 ice_set_bit(vsi_handle,
4196 m_entry->vsi_list_info->vsi_map);
4199 m_entry->vsi_count++;
4204 * ice_find_rule_entry - Search a rule entry
4205 * @list_head: head of rule list
4206 * @f_info: rule information
4208 * Helper function to search for a given rule entry
4209 * Returns pointer to entry storing the rule if found
4211 static struct ice_fltr_mgmt_list_entry *
4212 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
4213 struct ice_fltr_info *f_info)
4215 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
4217 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4219 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4220 sizeof(f_info->l_data)) &&
4221 f_info->flag == list_itr->fltr_info.flag) {
4230 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
4231 * @recp_list: VSI lists needs to be searched
4232 * @vsi_handle: VSI handle to be found in VSI list
4233 * @vsi_list_id: VSI list ID found containing vsi_handle
4235 * Helper function to search a VSI list with single entry containing given VSI
4236 * handle element. This can be extended further to search VSI list with more
4237 * than 1 vsi_count. Returns pointer to VSI list entry if found.
4239 static struct ice_vsi_list_map_info *
4240 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
4243 struct ice_vsi_list_map_info *map_info = NULL;
4244 struct LIST_HEAD_TYPE *list_head;
4246 list_head = &recp_list->filt_rules;
4247 if (recp_list->adv_rule) {
4248 struct ice_adv_fltr_mgmt_list_entry *list_itr;
4250 LIST_FOR_EACH_ENTRY(list_itr, list_head,
4251 ice_adv_fltr_mgmt_list_entry,
4253 if (list_itr->vsi_list_info) {
4254 map_info = list_itr->vsi_list_info;
4255 if (ice_is_bit_set(map_info->vsi_map,
4257 *vsi_list_id = map_info->vsi_list_id;
4263 struct ice_fltr_mgmt_list_entry *list_itr;
4265 LIST_FOR_EACH_ENTRY(list_itr, list_head,
4266 ice_fltr_mgmt_list_entry,
4268 if (list_itr->vsi_count == 1 &&
4269 list_itr->vsi_list_info) {
4270 map_info = list_itr->vsi_list_info;
4271 if (ice_is_bit_set(map_info->vsi_map,
4273 *vsi_list_id = map_info->vsi_list_id;
4283 * ice_add_rule_internal - add rule for a given lookup type
4284 * @hw: pointer to the hardware structure
4285 * @recp_list: recipe list for which rule has to be added
4286 * @lport: logic port number on which function add rule
4287 * @f_entry: structure containing MAC forwarding information
4289 * Adds or updates the rule lists for a given recipe
4291 static enum ice_status
4292 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4293 u8 lport, struct ice_fltr_list_entry *f_entry)
4295 struct ice_fltr_info *new_fltr, *cur_fltr;
4296 struct ice_fltr_mgmt_list_entry *m_entry;
4297 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4298 enum ice_status status = ICE_SUCCESS;
4300 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4301 return ICE_ERR_PARAM;
4303 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
4304 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
4305 f_entry->fltr_info.fwd_id.hw_vsi_id =
4306 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4308 rule_lock = &recp_list->filt_rule_lock;
4310 ice_acquire_lock(rule_lock);
4311 new_fltr = &f_entry->fltr_info;
4312 if (new_fltr->flag & ICE_FLTR_RX)
4313 new_fltr->src = lport;
4314 else if (new_fltr->flag & ICE_FLTR_TX)
4316 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4318 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
4320 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
4321 goto exit_add_rule_internal;
4324 cur_fltr = &m_entry->fltr_info;
4325 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
4327 exit_add_rule_internal:
4328 ice_release_lock(rule_lock);
4333 * ice_remove_vsi_list_rule
4334 * @hw: pointer to the hardware structure
4335 * @vsi_list_id: VSI list ID generated as part of allocate resource
4336 * @lkup_type: switch rule filter lookup type
4338 * The VSI list should be emptied before this function is called to remove the
4341 static enum ice_status
4342 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
4343 enum ice_sw_lkup_type lkup_type)
4345 /* Free the vsi_list resource that we allocated. It is assumed that the
4346 * list is empty at this point.
4348 return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
4349 ice_aqc_opc_free_res);
4353 * ice_rem_update_vsi_list
4354 * @hw: pointer to the hardware structure
4355 * @vsi_handle: VSI handle of the VSI to remove
4356 * @fm_list: filter management entry for which the VSI list management needs to
4359 static enum ice_status
4360 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
4361 struct ice_fltr_mgmt_list_entry *fm_list)
4363 enum ice_sw_lkup_type lkup_type;
4364 enum ice_status status = ICE_SUCCESS;
4367 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
4368 fm_list->vsi_count == 0)
4369 return ICE_ERR_PARAM;
4371 /* A rule with the VSI being removed does not exist */
4372 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
4373 return ICE_ERR_DOES_NOT_EXIST;
4375 lkup_type = fm_list->fltr_info.lkup_type;
4376 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
4377 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
4378 ice_aqc_opc_update_sw_rules,
4383 fm_list->vsi_count--;
4384 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
4386 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
4387 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
4388 struct ice_vsi_list_map_info *vsi_list_info =
4389 fm_list->vsi_list_info;
4392 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
4394 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
4395 return ICE_ERR_OUT_OF_RANGE;
4397 /* Make sure VSI list is empty before removing it below */
4398 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
4400 ice_aqc_opc_update_sw_rules,
4405 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
4406 tmp_fltr_info.fwd_id.hw_vsi_id =
4407 ice_get_hw_vsi_num(hw, rem_vsi_handle);
4408 tmp_fltr_info.vsi_handle = rem_vsi_handle;
4409 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
4411 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
4412 tmp_fltr_info.fwd_id.hw_vsi_id, status);
4416 fm_list->fltr_info = tmp_fltr_info;
4419 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
4420 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
4421 struct ice_vsi_list_map_info *vsi_list_info =
4422 fm_list->vsi_list_info;
4424 /* Remove the VSI list since it is no longer used */
4425 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
4427 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
4428 vsi_list_id, status);
4432 LIST_DEL(&vsi_list_info->list_entry);
4433 ice_free(hw, vsi_list_info);
4434 fm_list->vsi_list_info = NULL;
4441 * ice_remove_rule_internal - Remove a filter rule of a given type
4443 * @hw: pointer to the hardware structure
4444 * @recp_list: recipe list for which the rule needs to removed
4445 * @f_entry: rule entry containing filter information
4447 static enum ice_status
4448 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4449 struct ice_fltr_list_entry *f_entry)
4451 struct ice_fltr_mgmt_list_entry *list_elem;
4452 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4453 enum ice_status status = ICE_SUCCESS;
4454 bool remove_rule = false;
4457 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4458 return ICE_ERR_PARAM;
4459 f_entry->fltr_info.fwd_id.hw_vsi_id =
4460 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4462 rule_lock = &recp_list->filt_rule_lock;
4463 ice_acquire_lock(rule_lock);
4464 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
4465 &f_entry->fltr_info);
4467 status = ICE_ERR_DOES_NOT_EXIST;
4471 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
4473 } else if (!list_elem->vsi_list_info) {
4474 status = ICE_ERR_DOES_NOT_EXIST;
4476 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
4477 /* a ref_cnt > 1 indicates that the vsi_list is being
4478 * shared by multiple rules. Decrement the ref_cnt and
4479 * remove this rule, but do not modify the list, as it
4480 * is in-use by other rules.
4482 list_elem->vsi_list_info->ref_cnt--;
4485 /* a ref_cnt of 1 indicates the vsi_list is only used
4486 * by one rule. However, the original removal request is only
4487 * for a single VSI. Update the vsi_list first, and only
4488 * remove the rule if there are no further VSIs in this list.
4490 vsi_handle = f_entry->fltr_info.vsi_handle;
4491 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
4494 /* if VSI count goes to zero after updating the VSI list */
4495 if (list_elem->vsi_count == 0)
4500 /* Remove the lookup rule */
4501 struct ice_aqc_sw_rules_elem *s_rule;
4503 s_rule = (struct ice_aqc_sw_rules_elem *)
4504 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
4506 status = ICE_ERR_NO_MEMORY;
4510 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
4511 ice_aqc_opc_remove_sw_rules);
4513 status = ice_aq_sw_rules(hw, s_rule,
4514 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
4515 ice_aqc_opc_remove_sw_rules, NULL);
4517 /* Remove a book keeping from the list */
4518 ice_free(hw, s_rule);
4523 LIST_DEL(&list_elem->list_entry);
4524 ice_free(hw, list_elem);
4527 ice_release_lock(rule_lock);
4532 * ice_aq_get_res_alloc - get allocated resources
4533 * @hw: pointer to the HW struct
4534 * @num_entries: pointer to u16 to store the number of resource entries returned
4535 * @buf: pointer to buffer
4536 * @buf_size: size of buf
4537 * @cd: pointer to command details structure or NULL
4539 * The caller-supplied buffer must be large enough to store the resource
4540 * information for all resource types. Each resource type is an
4541 * ice_aqc_get_res_resp_elem structure.
4544 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
4545 struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
4546 struct ice_sq_cd *cd)
4548 struct ice_aqc_get_res_alloc *resp;
4549 enum ice_status status;
4550 struct ice_aq_desc desc;
4553 return ICE_ERR_BAD_PTR;
4555 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
4556 return ICE_ERR_INVAL_SIZE;
4558 resp = &desc.params.get_res;
4560 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
4561 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4563 if (!status && num_entries)
4564 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
4570 * ice_aq_get_res_descs - get allocated resource descriptors
4571 * @hw: pointer to the hardware structure
4572 * @num_entries: number of resource entries in buffer
4573 * @buf: structure to hold response data buffer
4574 * @buf_size: size of buffer
4575 * @res_type: resource type
4576 * @res_shared: is resource shared
4577 * @desc_id: input - first desc ID to start; output - next desc ID
4578 * @cd: pointer to command details structure or NULL
4581 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
4582 struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
4583 bool res_shared, u16 *desc_id, struct ice_sq_cd *cd)
4585 struct ice_aqc_get_allocd_res_desc *cmd;
4586 struct ice_aq_desc desc;
4587 enum ice_status status;
4589 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4591 cmd = &desc.params.get_res_desc;
4594 return ICE_ERR_PARAM;
4596 if (buf_size != (num_entries * sizeof(*buf)))
4597 return ICE_ERR_PARAM;
4599 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
4601 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
4602 ICE_AQC_RES_TYPE_M) | (res_shared ?
4603 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
4604 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
4606 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4608 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
/* NOTE(review): numbered listing with dropped lines — missing here are
 * (at least) braces, the `elem_sent`/`vsi_handle`/`hw_vsi_id` declarations,
 * the `num_unicast` accounting, and several `if (...)` guards whose
 * orphaned bodies remain. Code lines are kept byte-identical.
 */
4614 * ice_add_mac_rule - Add a MAC address based filter rule
4615 * @hw: pointer to the hardware structure
4616 * @m_list: list of MAC addresses and forwarding information
4617 * @sw: pointer to switch info struct for which function add rule
4618 * @lport: logic port number on which function add rule
4620 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
4621 * multiple unicast addresses, the function assumes that all the
4622 * addresses are unique in a given add_mac call. It doesn't
4623 * check for duplicates in this case, removing duplicates from a given
4624 * list should be taken care of in the caller of this function.
4626 static enum ice_status
4627 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4628 struct ice_switch_info *sw, u8 lport)
4630 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
4631 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
4632 struct ice_fltr_list_entry *m_list_itr;
4633 struct LIST_HEAD_TYPE *rule_head;
4634 u16 total_elem_left, s_rule_size;
4635 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4636 enum ice_status status = ICE_SUCCESS;
4637 u16 num_unicast = 0;
4641 rule_lock = &recp_list->filt_rule_lock;
4642 rule_head = &recp_list->filt_rules;
/* Pass 1: validate every entry; add multicast (and shared-unicast) rules
 * one at a time, and reject duplicates of exclusive unicast addresses.
 */
4644 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4646 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
4650 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
4651 vsi_handle = m_list_itr->fltr_info.vsi_handle;
4652 if (!ice_is_vsi_valid(hw, vsi_handle))
4653 return ICE_ERR_PARAM;
4654 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4655 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
4656 /* update the src in case it is VSI num */
4657 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
4658 return ICE_ERR_PARAM;
4659 m_list_itr->fltr_info.src = hw_vsi_id;
4660 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
4661 IS_ZERO_ETHER_ADDR(add))
4662 return ICE_ERR_PARAM;
4663 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4664 /* Don't overwrite the unicast address */
4665 ice_acquire_lock(rule_lock);
4666 if (ice_find_rule_entry(rule_head,
4667 &m_list_itr->fltr_info)) {
4668 ice_release_lock(rule_lock);
4669 return ICE_ERR_ALREADY_EXISTS;
4671 ice_release_lock(rule_lock);
/* NOTE(review): the `num_unicast++` bump for the bulk path presumably sat
 * in the dropped line(s) here — TODO confirm.
 */
4673 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
4674 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared) {
4675 m_list_itr->status =
4676 ice_add_rule_internal(hw, recp_list, lport,
4678 if (m_list_itr->status)
4679 return m_list_itr->status;
4683 ice_acquire_lock(rule_lock);
4684 /* Exit if no suitable entries were found for adding bulk switch rule */
4686 status = ICE_SUCCESS;
4687 goto ice_add_mac_exit;
4690 /* Allocate switch rule buffer for the bulk update for unicast */
4691 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
4692 s_rule = (struct ice_aqc_sw_rules_elem *)
4693 ice_calloc(hw, num_unicast, s_rule_size);
4695 status = ICE_ERR_NO_MEMORY;
4696 goto ice_add_mac_exit;
/* Pass 2: serialize every exclusive unicast filter into the bulk buffer. */
4700 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4702 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4703 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4705 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4706 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
4707 ice_aqc_opc_add_sw_rules);
4708 r_iter = (struct ice_aqc_sw_rules_elem *)
4709 ((u8 *)r_iter + s_rule_size);
4713 /* Call AQ bulk switch rule update for all unicast addresses */
4715 /* Call AQ switch rule in AQ_MAX chunk */
/* Chunk the bulk buffer so no single AQ call exceeds ICE_AQ_MAX_BUF_LEN. */
4716 for (total_elem_left = num_unicast; total_elem_left > 0;
4717 total_elem_left -= elem_sent) {
4718 struct ice_aqc_sw_rules_elem *entry = r_iter;
4720 elem_sent = MIN_T(u8, total_elem_left,
4721 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
4722 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
4723 elem_sent, ice_aqc_opc_add_sw_rules,
4726 goto ice_add_mac_exit;
4727 r_iter = (struct ice_aqc_sw_rules_elem *)
4728 ((u8 *)r_iter + (elem_sent * s_rule_size));
4731 /* Fill up rule ID based on the value returned from FW */
/* Pass 3: record the FW-assigned rule IDs and create book-keeping
 * entries on the recipe's filter list (under rule_lock).
 */
4733 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4735 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4736 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4737 struct ice_fltr_mgmt_list_entry *fm_entry;
4739 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4740 f_info->fltr_rule_id =
4741 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
4742 f_info->fltr_act = ICE_FWD_TO_VSI;
4743 /* Create an entry to track this MAC address */
4744 fm_entry = (struct ice_fltr_mgmt_list_entry *)
4745 ice_malloc(hw, sizeof(*fm_entry));
4747 status = ICE_ERR_NO_MEMORY;
4748 goto ice_add_mac_exit;
4750 fm_entry->fltr_info = *f_info;
4751 fm_entry->vsi_count = 1;
4752 /* The book keeping entries will get removed when
4753 * base driver calls remove filter AQ command
4756 LIST_ADD(&fm_entry->list_entry, rule_head);
4757 r_iter = (struct ice_aqc_sw_rules_elem *)
4758 ((u8 *)r_iter + s_rule_size);
/* Exit path: drop the lock, free the bulk rule buffer. */
4763 ice_release_lock(rule_lock);
4765 ice_free(hw, s_rule);
4770 * ice_add_mac - Add a MAC address based filter rule
4771 * @hw: pointer to the hardware structure
4772 * @m_list: list of MAC addresses and forwarding information
4774 * Function add MAC rule for logical port from HW struct
/* Thin public wrapper: validates inputs, then delegates to
 * ice_add_mac_rule() with this HW's switch info and logical port.
 */
4776 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
/* NOTE(review): the guard condition for this early return (likely a
 * NULL check on m_list/hw) was dropped by extraction — TODO confirm.
 */
4779 return ICE_ERR_PARAM;
4781 return ice_add_mac_rule(hw, m_list, hw->switch_info,
4782 hw->port_info->lport);
4786 * ice_add_vlan_internal - Add one VLAN based filter rule
4787 * @hw: pointer to the hardware structure
4788 * @recp_list: recipe list for which rule has to be added
4789 * @f_entry: filter entry containing one VLAN information
/* NOTE(review): numbered listing with dropped lines (braces, some
 * status checks and `goto exit` lines). Code kept byte-identical.
 */
4791 static enum ice_status
4792 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4793 struct ice_fltr_list_entry *f_entry)
4795 struct ice_fltr_mgmt_list_entry *v_list_itr;
4796 struct ice_fltr_info *new_fltr, *cur_fltr;
4797 enum ice_sw_lkup_type lkup_type;
4798 u16 vsi_list_id = 0, vsi_handle;
4799 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4800 enum ice_status status = ICE_SUCCESS;
4802 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4803 return ICE_ERR_PARAM;
4805 f_entry->fltr_info.fwd_id.hw_vsi_id =
4806 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4807 new_fltr = &f_entry->fltr_info;
4809 /* VLAN ID should only be 12 bits */
4810 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
4811 return ICE_ERR_PARAM;
4813 if (new_fltr->src_id != ICE_SRC_ID_VSI)
4814 return ICE_ERR_PARAM;
4816 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
4817 lkup_type = new_fltr->lkup_type;
4818 vsi_handle = new_fltr->vsi_handle;
4819 rule_lock = &recp_list->filt_rule_lock;
4820 ice_acquire_lock(rule_lock);
4821 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
/* Case 1 (rule not found — guard line dropped above): create a new rule,
 * backed by a (possibly shared) VSI list.
 */
4823 struct ice_vsi_list_map_info *map_info = NULL;
4825 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
4826 /* All VLAN pruning rules use a VSI list. Check if
4827 * there is already a VSI list containing VSI that we
4828 * want to add. If found, use the same vsi_list_id for
4829 * this new VLAN rule or else create a new list.
4831 map_info = ice_find_vsi_list_entry(recp_list,
4835 status = ice_create_vsi_list_rule(hw,
4843 /* Convert the action to forwarding to a VSI list. */
4844 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
4845 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
4848 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
4850 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
4853 status = ICE_ERR_DOES_NOT_EXIST;
4856 /* reuse VSI list for new rule and increment ref_cnt */
4858 v_list_itr->vsi_list_info = map_info;
4859 map_info->ref_cnt++;
4861 v_list_itr->vsi_list_info =
4862 ice_create_vsi_list_map(hw, &vsi_handle,
4866 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
4867 /* Update existing VSI list to add new VSI ID only if it used
/* Case 2: rule exists and its VSI list is not shared — grow it in place. */
4870 cur_fltr = &v_list_itr->fltr_info;
4871 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
4874 /* If VLAN rule exists and VSI list being used by this rule is
4875 * referenced by more than 1 VLAN rule. Then create a new VSI
4876 * list appending previous VSI with new VSI and update existing
4877 * VLAN rule to point to new VSI list ID
4879 struct ice_fltr_info tmp_fltr;
4880 u16 vsi_handle_arr[2];
4883 /* Current implementation only supports reusing VSI list with
4884 * one VSI count. We should never hit below condition
4886 if (v_list_itr->vsi_count > 1 &&
4887 v_list_itr->vsi_list_info->ref_cnt > 1) {
4888 ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
4889 status = ICE_ERR_CFG;
/* The single current VSI in the shared list, found via the bitmap. */
4894 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
4897 /* A rule already exists with the new VSI being added */
4898 if (cur_handle == vsi_handle) {
4899 status = ICE_ERR_ALREADY_EXISTS;
4903 vsi_handle_arr[0] = cur_handle;
4904 vsi_handle_arr[1] = vsi_handle;
4905 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
4906 &vsi_list_id, lkup_type);
4910 tmp_fltr = v_list_itr->fltr_info;
4911 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
4912 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
4913 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4914 /* Update the previous switch rule to a new VSI list which
4915 * includes current VSI that is requested
4917 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
4921 /* before overriding VSI list map info. decrement ref_cnt of
4924 v_list_itr->vsi_list_info->ref_cnt--;
4926 /* now update to newly created list */
4927 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
4928 v_list_itr->vsi_list_info =
4929 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4931 v_list_itr->vsi_count++;
/* Single exit: the rule lock acquired at 4820 is always released here. */
4935 ice_release_lock(rule_lock);
4940 * ice_add_vlan_rule - Add VLAN based filter rule
4941 * @hw: pointer to the hardware structure
4942 * @v_list: list of VLAN entries and forwarding information
4943 * @sw: pointer to switch info struct for which function add rule
/* Iterates v_list, validating each entry is a VLAN lookup, forcing the
 * Tx flag, and adding it via ice_add_vlan_internal(). Stops at the first
 * failing entry (no rollback of entries already added).
 */
4945 static enum ice_status
4946 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4947 struct ice_switch_info *sw)
4949 struct ice_fltr_list_entry *v_list_itr;
4950 struct ice_sw_recipe *recp_list;
4952 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
4953 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
4955 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
4956 return ICE_ERR_PARAM;
4957 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
4958 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
4960 if (v_list_itr->status)
4961 return v_list_itr->status;
4967 * ice_add_vlan - Add a VLAN based filter rule
4968 * @hw: pointer to the hardware structure
4969 * @v_list: list of VLAN and forwarding information
4971 * Function add VLAN rule for logical port from HW struct
/* Thin public wrapper around ice_add_vlan_rule() for this HW's switch. */
4973 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
/* NOTE(review): guard condition for this return (likely !v_list || !hw)
 * dropped by extraction — TODO confirm.
 */
4976 return ICE_ERR_PARAM;
4978 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
4982 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
4983 * @hw: pointer to the hardware structure
4984 * @mv_list: list of MAC and VLAN filters
4985 * @sw: pointer to switch info struct for which function add rule
4986 * @lport: logic port number on which function add rule
4988 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
4989 * pruning bits enabled, then it is the responsibility of the caller to make
4990 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
4991 * VLAN won't be received on that VSI otherwise.
/* Per-entry add loop: each entry must be a MAC_VLAN lookup; forces the Tx
 * flag and delegates to ice_add_rule_internal(). Stops at first failure.
 */
4993 static enum ice_status
4994 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
4995 struct ice_switch_info *sw, u8 lport)
4997 struct ice_fltr_list_entry *mv_list_itr;
4998 struct ice_sw_recipe *recp_list;
5000 if (!mv_list || !hw)
5001 return ICE_ERR_PARAM;
5003 recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
5004 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
5006 enum ice_sw_lkup_type l_type =
5007 mv_list_itr->fltr_info.lkup_type;
5009 if (l_type != ICE_SW_LKUP_MAC_VLAN)
5010 return ICE_ERR_PARAM;
5011 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
5012 mv_list_itr->status =
5013 ice_add_rule_internal(hw, recp_list, lport,
5015 if (mv_list_itr->status)
5016 return mv_list_itr->status;
5022 * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
5023 * @hw: pointer to the hardware structure
5024 * @mv_list: list of MAC VLAN addresses and forwarding information
5026 * Function add MAC VLAN rule for logical port from HW struct
/* Thin public wrapper around ice_add_mac_vlan_rule(). */
5029 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
5031 if (!mv_list || !hw)
5032 return ICE_ERR_PARAM;
5034 return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
5035 hw->port_info->lport);
5039 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
5040 * @hw: pointer to the hardware structure
5041 * @em_list: list of ether type MAC filter, MAC is optional
5042 * @sw: pointer to switch info struct for which function add rule
5043 * @lport: logic port number on which function add rule
5045 * This function requires the caller to populate the entries in
5046 * the filter list with the necessary fields (including flags to
5047 * indicate Tx or Rx rules).
/* Per-entry add loop; accepts both ETHERTYPE and ETHERTYPE_MAC lookups
 * and routes each entry to the recipe list matching its own lookup type.
 */
5049 static enum ice_status
5050 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
5051 struct ice_switch_info *sw, u8 lport)
5053 struct ice_fltr_list_entry *em_list_itr;
5055 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
5057 struct ice_sw_recipe *recp_list;
5058 enum ice_sw_lkup_type l_type;
5060 l_type = em_list_itr->fltr_info.lkup_type;
/* recp_list is indexed by the entry's lookup type before that type is
 * validated on the next lines; validation still happens before use.
 */
5061 recp_list = &sw->recp_list[l_type];
5063 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
5064 l_type != ICE_SW_LKUP_ETHERTYPE)
5065 return ICE_ERR_PARAM;
5067 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
5070 if (em_list_itr->status)
5071 return em_list_itr->status;
5077 * ice_add_eth_mac - Add a ethertype based filter rule
5078 * @hw: pointer to the hardware structure
5079 * @em_list: list of ethertype and forwarding information
5081 * Function add ethertype rule for logical port from HW struct
/* Thin public wrapper around ice_add_eth_mac_rule(). */
5084 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
5086 if (!em_list || !hw)
5087 return ICE_ERR_PARAM;
5089 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
5090 hw->port_info->lport);
5094 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
5095 * @hw: pointer to the hardware structure
5096 * @em_list: list of ethertype or ethertype MAC entries
5097 * @sw: pointer to switch info struct for which function add rule
/* Mirror of ice_add_eth_mac_rule(): removes each entry via
 * ice_remove_rule_internal(). Uses the SAFE iterator because removal
 * may unlink entries while walking. Stops at first failure.
 */
5099 static enum ice_status
5100 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
5101 struct ice_switch_info *sw)
5103 struct ice_fltr_list_entry *em_list_itr, *tmp;
5105 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
5107 struct ice_sw_recipe *recp_list;
5108 enum ice_sw_lkup_type l_type;
5110 l_type = em_list_itr->fltr_info.lkup_type;
5112 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
5113 l_type != ICE_SW_LKUP_ETHERTYPE)
5114 return ICE_ERR_PARAM;
5116 recp_list = &sw->recp_list[l_type];
5117 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
5119 if (em_list_itr->status)
5120 return em_list_itr->status;
5126 * ice_remove_eth_mac - remove a ethertype based filter rule
5127 * @hw: pointer to the hardware structure
5128 * @em_list: list of ethertype and forwarding information
/* Thin public wrapper around ice_remove_eth_mac_rule(). */
5132 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
5134 if (!em_list || !hw)
5135 return ICE_ERR_PARAM;
5137 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info)
5141 * ice_rem_sw_rule_info
5142 * @hw: pointer to the hardware structure
5143 * @rule_head: pointer to the switch list structure that we want to delete
/* Frees every book-keeping entry on a (non-advanced) filter rule list.
 * Does not touch hardware — only the driver-side tracking entries.
 */
5146 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
5148 if (!LIST_EMPTY(rule_head)) {
5149 struct ice_fltr_mgmt_list_entry *entry;
5150 struct ice_fltr_mgmt_list_entry *tmp;
5152 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
5153 ice_fltr_mgmt_list_entry, list_entry) {
5154 LIST_DEL(&entry->list_entry);
5155 ice_free(hw, entry);
5161 * ice_rem_adv_rule_info
5162 * @hw: pointer to the hardware structure
5163 * @rule_head: pointer to the switch list structure that we want to delete
/* Advanced-rule variant of ice_rem_sw_rule_info(): each entry also owns
 * a separately allocated lkups array, freed before the entry itself.
 */
5166 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
5168 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
5169 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
5171 if (LIST_EMPTY(rule_head))
5174 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
5175 ice_adv_fltr_mgmt_list_entry, list_entry) {
5176 LIST_DEL(&lst_itr->list_entry);
5177 ice_free(hw, lst_itr->lkups);
5178 ice_free(hw, lst_itr);
5183 * ice_rem_all_sw_rules_info
5184 * @hw: pointer to the hardware structure
/* Walks every recipe's filter list and frees its book-keeping entries,
 * dispatching to the basic or advanced cleanup depending on adv_rule.
 */
5186 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
5188 struct ice_switch_info *sw = hw->switch_info;
5191 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5192 struct LIST_HEAD_TYPE *rule_head;
5194 rule_head = &sw->recp_list[i].filt_rules;
5195 if (!sw->recp_list[i].adv_rule)
5196 ice_rem_sw_rule_info(hw, rule_head);
5198 ice_rem_adv_rule_info(hw, rule_head);
/* Once an advanced recipe's list drains, clear its adv_rule marker. */
5199 if (sw->recp_list[i].adv_rule &&
5200 LIST_EMPTY(&sw->recp_list[i].filt_rules))
5201 sw->recp_list[i].adv_rule = false;
5206 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
5207 * @pi: pointer to the port_info structure
5208 * @vsi_handle: VSI handle to set as default
5209 * @set: true to add the above mentioned switch rule, false to remove it
5210 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
5212 * add filter rule to set/unset given VSI as default VSI for the switch
5213 * (represented by swid)
/* NOTE(review): numbered listing with dropped lines — missing here are
 * (at least) braces, the `if (set)`/`else` around the rule-ID bookkeeping,
 * and the local declarations for s_rule_size/hw_vsi_id/direction use.
 * Code lines are kept byte-identical.
 */
5216 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
5219 struct ice_aqc_sw_rules_elem *s_rule;
5220 struct ice_fltr_info f_info;
5221 struct ice_hw *hw = pi->hw;
5222 enum ice_adminq_opc opcode;
5223 enum ice_status status;
5227 if (!ice_is_vsi_valid(hw, vsi_handle))
5228 return ICE_ERR_PARAM;
5229 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
/* An "add" rule carries the dummy Ethernet header; a "remove" does not,
 * so the buffer size differs between the two operations.
 */
5231 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
5232 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
5234 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
5236 return ICE_ERR_NO_MEMORY;
5238 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
5240 f_info.lkup_type = ICE_SW_LKUP_DFLT;
5241 f_info.flag = direction;
5242 f_info.fltr_act = ICE_FWD_TO_VSI;
5243 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
/* Rx default rules match on the physical port; Tx default rules match on
 * the VSI itself.
 */
5245 if (f_info.flag & ICE_FLTR_RX) {
5246 f_info.src = pi->lport;
5247 f_info.src_id = ICE_SRC_ID_LPORT;
5249 f_info.fltr_rule_id =
5250 pi->dflt_rx_vsi_rule_id;
5251 } else if (f_info.flag & ICE_FLTR_TX) {
5252 f_info.src_id = ICE_SRC_ID_VSI;
5253 f_info.src = hw_vsi_id;
5255 f_info.fltr_rule_id =
5256 pi->dflt_tx_vsi_rule_id;
5260 opcode = ice_aqc_opc_add_sw_rules;
5262 opcode = ice_aqc_opc_remove_sw_rules;
5264 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
5266 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
5267 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
/* On a successful "set", remember the FW rule index so it can be removed
 * later; on "clear", reset the cached state to the invalid sentinels.
 */
5270 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
5272 if (f_info.flag & ICE_FLTR_TX) {
5273 pi->dflt_tx_vsi_num = hw_vsi_id;
5274 pi->dflt_tx_vsi_rule_id = index;
5275 } else if (f_info.flag & ICE_FLTR_RX) {
5276 pi->dflt_rx_vsi_num = hw_vsi_id;
5277 pi->dflt_rx_vsi_rule_id = index;
5280 if (f_info.flag & ICE_FLTR_TX) {
5281 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
5282 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
5283 } else if (f_info.flag & ICE_FLTR_RX) {
5284 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
5285 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
5290 ice_free(hw, s_rule);
5295 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
5296 * @list_head: head of rule list
5297 * @f_info: rule information
5299 * Helper function to search for a unicast rule entry - this is to be used
5300 * to remove unicast MAC filter that is not shared with other VSIs on the
5303 * Returns pointer to entry storing the rule if found
/* Linear search matching on lookup data, HW VSI ID, and flag — caller
 * must hold the recipe's filt_rule_lock while using the result.
 */
5305 static struct ice_fltr_mgmt_list_entry *
5306 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
5307 struct ice_fltr_info *f_info)
5309 struct ice_fltr_mgmt_list_entry *list_itr;
5311 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
5313 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
5314 sizeof(f_info->l_data)) &&
5315 f_info->fwd_id.hw_vsi_id ==
5316 list_itr->fltr_info.fwd_id.hw_vsi_id &&
5317 f_info->flag == list_itr->fltr_info.flag)
5324 * ice_remove_mac_rule - remove a MAC based filter rule
5325 * @hw: pointer to the hardware structure
5326 * @m_list: list of MAC addresses and forwarding information
5327 * @recp_list: list from which function remove MAC address
5329 * This function removes either a MAC filter rule or a specific VSI from a
5330 * VSI list for a multicast MAC address.
5332 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
5333 * ice_add_mac. Caller should be aware that this call will only work if all
5334 * the entries passed into m_list were added previously. It will not attempt to
5335 * do a partial remove of entries that were found.
5337 static enum ice_status
5338 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
5339 struct ice_sw_recipe *recp_list)
5341 struct ice_fltr_list_entry *list_itr, *tmp;
5342 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
/* NOTE(review): the `if (!m_list)` guard for this early return appears to
 * have been dropped by extraction — TODO confirm.
 */
5345 return ICE_ERR_PARAM;
5347 rule_lock = &recp_list->filt_rule_lock;
5348 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
5350 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
5351 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
5354 if (l_type != ICE_SW_LKUP_MAC)
5355 return ICE_ERR_PARAM;
5357 vsi_handle = list_itr->fltr_info.vsi_handle;
5358 if (!ice_is_vsi_valid(hw, vsi_handle))
5359 return ICE_ERR_PARAM;
5361 list_itr->fltr_info.fwd_id.hw_vsi_id =
5362 ice_get_hw_vsi_num(hw, vsi_handle);
5363 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
5364 /* Don't remove the unicast address that belongs to
5365 * another VSI on the switch, since it is not being
/* Exclusive-unicast case: verify the rule exists for this exact VSI
 * (under lock) before attempting removal.
 */
5368 ice_acquire_lock(rule_lock);
5369 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
5370 &list_itr->fltr_info)) {
5371 ice_release_lock(rule_lock);
5372 return ICE_ERR_DOES_NOT_EXIST;
5374 ice_release_lock(rule_lock);
5376 list_itr->status = ice_remove_rule_internal(hw, recp_list,
5378 if (list_itr->status)
5379 return list_itr->status;
5385 * ice_remove_mac - remove a MAC address based filter rule
5386 * @hw: pointer to the hardware structure
5387 * @m_list: list of MAC addresses and forwarding information
/* Thin public wrapper: resolves the MAC recipe list and delegates to
 * ice_remove_mac_rule().
 */
5390 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
5392 struct ice_sw_recipe *recp_list;
5394 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5395 return ice_remove_mac_rule(hw, m_list, recp_list);
5399 * ice_remove_vlan_rule - Remove VLAN based filter rule
5400 * @hw: pointer to the hardware structure
5401 * @v_list: list of VLAN entries and forwarding information
5402 * @recp_list: list from which function remove VLAN
/* Per-entry removal loop; SAFE iterator because entries may be unlinked
 * during the walk. Stops at first failure.
 */
5404 static enum ice_status
5405 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5406 struct ice_sw_recipe *recp_list)
5408 struct ice_fltr_list_entry *v_list_itr, *tmp;
5410 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5412 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
5414 if (l_type != ICE_SW_LKUP_VLAN)
5415 return ICE_ERR_PARAM;
5416 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
5418 if (v_list_itr->status)
5419 return v_list_itr->status;
5425 * ice_remove_vlan - remove a VLAN address based filter rule
5426 * @hw: pointer to the hardware structure
5427 * @v_list: list of VLAN and forwarding information
/* Thin public wrapper around ice_remove_vlan_rule(). */
5431 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
5433 struct ice_sw_recipe *recp_list;
/* NOTE(review): guard condition for this return (likely !v_list || !hw)
 * dropped by extraction — TODO confirm.
 */
5436 return ICE_ERR_PARAM;
5438 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
5439 return ice_remove_vlan_rule(hw, v_list, recp_list);
5443 * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
5444 * @hw: pointer to the hardware structure
5445 * @v_list: list of MAC VLAN entries and forwarding information
5446 * @recp_list: list from which function remove MAC VLAN
5448 static enum ice_status
5449 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5450 struct ice_sw_recipe *recp_list)
5452 struct ice_fltr_list_entry *v_list_itr, *tmp;
/* NOTE(review): the incoming recp_list parameter is unconditionally
 * overwritten here, so the caller's argument is effectively ignored —
 * flag for upstream cleanup.
 */
5454 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5455 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5457 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
5459 if (l_type != ICE_SW_LKUP_MAC_VLAN)
5460 return ICE_ERR_PARAM;
5461 v_list_itr->status =
5462 ice_remove_rule_internal(hw, recp_list,
5464 if (v_list_itr->status)
5465 return v_list_itr->status;
5471 * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
5472 * @hw: pointer to the hardware structure
5473 * @mv_list: list of MAC VLAN and forwarding information
/* Thin public wrapper around ice_remove_mac_vlan_rule(). */
5476 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
5478 struct ice_sw_recipe *recp_list;
5480 if (!mv_list || !hw)
5481 return ICE_ERR_PARAM;
5483 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5484 return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
5488 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
5489 * @fm_entry: filter entry to inspect
5490 * @vsi_handle: VSI handle to compare with filter info
/* True when the filter forwards directly to this VSI, or forwards to a
 * VSI list whose bitmap contains this VSI handle.
 */
5493 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
5495 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
5496 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
5497 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
5498 fm_entry->vsi_list_info &&
5499 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
5504 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
5505 * @hw: pointer to the hardware structure
5506 * @vsi_handle: VSI handle to remove filters from
5507 * @vsi_list_head: pointer to the list to add entry to
5508 * @fi: pointer to fltr_info of filter entry to copy & add
5510 * Helper function, used when creating a list of filters to remove from
5511 * a specific VSI. The entry added to vsi_list_head is a COPY of the
5512 * original filter entry, with the exception of fltr_info.fltr_act and
5513 * fltr_info.fwd_id fields. These are set such that later logic can
5514 * extract which VSI to remove the fltr from, and pass on that information.
5516 static enum ice_status
5517 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5518 struct LIST_HEAD_TYPE *vsi_list_head,
5519 struct ice_fltr_info *fi)
5521 struct ice_fltr_list_entry *tmp;
5523 /* this memory is freed up in the caller function
5524 * once filters for this VSI are removed
5526 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
5528 return ICE_ERR_NO_MEMORY;
/* Copy the whole filter info, then retarget it at the requested VSI. */
5530 tmp->fltr_info = *fi;
5532 /* Overwrite these fields to indicate which VSI to remove filter from,
5533 * so find and remove logic can extract the information from the
5534 * list entries. Note that original entries will still have proper
5537 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
5538 tmp->fltr_info.vsi_handle = vsi_handle;
5539 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5541 LIST_ADD(&tmp->list_entry, vsi_list_head);
5547 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
5548 * @hw: pointer to the hardware structure
5549 * @vsi_handle: VSI handle to remove filters from
5550 * @lkup_list_head: pointer to the list that has certain lookup type filters
5551 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
5553 * Locates all filters in lkup_list_head that are used by the given VSI,
5554 * and adds COPIES of those entries to vsi_list_head (intended to be used
5555 * to remove the listed filters).
5556 * Note that this means all entries in vsi_list_head must be explicitly
5557 * deallocated by the caller when done with list.
5559 static enum ice_status
5560 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5561 struct LIST_HEAD_TYPE *lkup_list_head,
5562 struct LIST_HEAD_TYPE *vsi_list_head)
5564 struct ice_fltr_mgmt_list_entry *fm_entry;
5565 enum ice_status status = ICE_SUCCESS;
5567 /* check to make sure VSI ID is valid and within boundary */
5568 if (!ice_is_vsi_valid(hw, vsi_handle))
5569 return ICE_ERR_PARAM;
5571 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
5572 ice_fltr_mgmt_list_entry, list_entry) {
/* Skip filters not referencing this VSI (continue line dropped here). */
5573 if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
5576 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5578 &fm_entry->fltr_info);
5586 * ice_determine_promisc_mask
5587 * @fi: filter info to parse
5589 * Helper function to determine which ICE_PROMISC_ mask corresponds
5590 * to given filter into.
/* Maps a filter's address class (broadcast/multicast/unicast/VLAN) and
 * direction (Tx vs Rx) onto the corresponding ICE_PROMISC_* bit.
 */
5592 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
5594 u16 vid = fi->l_data.mac_vlan.vlan_id;
5595 u8 *macaddr = fi->l_data.mac.mac_addr;
5596 bool is_tx_fltr = false;
5597 u8 promisc_mask = 0;
/* NOTE(review): the `is_tx_fltr = true;` body of this condition appears
 * to have been dropped by extraction — TODO confirm.
 */
5599 if (fi->flag == ICE_FLTR_TX)
5602 if (IS_BROADCAST_ETHER_ADDR(macaddr))
5603 promisc_mask |= is_tx_fltr ?
5604 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
5605 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
5606 promisc_mask |= is_tx_fltr ?
5607 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
5608 else if (IS_UNICAST_ETHER_ADDR(macaddr))
5609 promisc_mask |= is_tx_fltr ?
5610 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
/* VLAN-promisc case; `vid` gates this branch in the dropped condition. */
5612 promisc_mask |= is_tx_fltr ?
5613 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
5615 return promisc_mask;
5619 * _ice_get_vsi_promisc - get promiscuous mode of given VSI
5620 * @hw: pointer to the hardware structure
5621 * @vsi_handle: VSI handle to retrieve info from
5622 * @promisc_mask: pointer to mask to be filled in
5623 * @vid: VLAN ID of promisc VLAN VSI
5624 * @sw: pointer to switch info struct for which function add rule
/* Accumulates, under the recipe lock, the promisc bits of every PROMISC
 * filter that references the given VSI.
 */
5626 static enum ice_status
5627 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5628 u16 *vid, struct ice_switch_info *sw)
5630 struct ice_fltr_mgmt_list_entry *itr;
5631 struct LIST_HEAD_TYPE *rule_head;
5632 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5634 if (!ice_is_vsi_valid(hw, vsi_handle))
5635 return ICE_ERR_PARAM;
/* NOTE(review): the lines zeroing *vid and *promisc_mask were dropped by
 * extraction — TODO confirm against the full source.
 */
5639 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
5640 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
5642 ice_acquire_lock(rule_lock);
5643 LIST_FOR_EACH_ENTRY(itr, rule_head,
5644 ice_fltr_mgmt_list_entry, list_entry) {
5645 /* Continue if this filter doesn't apply to this VSI or the
5646 * VSI ID is not in the VSI map for this filter
5648 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5651 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5653 ice_release_lock(rule_lock);
5659 * ice_get_vsi_promisc - get promiscuous mode of given VSI
5660 * @hw: pointer to the hardware structure
5661 * @vsi_handle: VSI handle to retrieve info from
5662 * @promisc_mask: pointer to mask to be filled in
5663 * @vid: VLAN ID of promisc VLAN VSI
/* Thin public wrapper around _ice_get_vsi_promisc(). */
5666 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5669 return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
5670 vid, hw->switch_info);
5674 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5675 * @hw: pointer to the hardware structure
5676 * @vsi_handle: VSI handle to retrieve info from
5677 * @promisc_mask: pointer to mask to be filled in
5678 * @vid: VLAN ID of promisc VLAN VSI
5679 * @sw: pointer to switch info struct for which function add rule
/* Same accumulation as _ice_get_vsi_promisc(), but over the
 * PROMISC_VLAN recipe list.
 */
5681 static enum ice_status
5682 _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5683 u16 *vid, struct ice_switch_info *sw)
5685 struct ice_fltr_mgmt_list_entry *itr;
5686 struct LIST_HEAD_TYPE *rule_head;
5687 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5689 if (!ice_is_vsi_valid(hw, vsi_handle))
5690 return ICE_ERR_PARAM;
/* NOTE(review): the lines zeroing *vid and *promisc_mask were dropped by
 * extraction — TODO confirm against the full source.
 */
5694 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
5695 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
5697 ice_acquire_lock(rule_lock);
5698 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
5700 /* Continue if this filter doesn't apply to this VSI or the
5701 * VSI ID is not in the VSI map for this filter
5703 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5706 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5708 ice_release_lock(rule_lock);
5714 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5715 * @hw: pointer to the hardware structure
5716 * @vsi_handle: VSI handle to retrieve info from
5717 * @promisc_mask: pointer to mask to be filled in
5718 * @vid: VLAN ID of promisc VLAN VSI
/* Thin public wrapper around _ice_get_vsi_vlan_promisc(). */
5721 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5724 return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
5725 vid, hw->switch_info);
5729  * ice_remove_promisc - Remove promisc based filter rules
5730  * @hw: pointer to the hardware structure
5731  * @recp_id: recipe ID for which the rule needs to removed
5732  * @v_list: list of promisc entries
5734 static enum ice_status
5735 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
5736 		   struct LIST_HEAD_TYPE *v_list)
5738 	struct ice_fltr_list_entry *v_list_itr, *tmp;
5739 	struct ice_sw_recipe *recp_list;
5741 	recp_list = &hw->switch_info->recp_list[recp_id];
5742 	LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
/* Each entry's removal status is recorded; bail out on first failure so
 * the caller can see which entry failed.
 */
5744 		v_list_itr->status =
5745 			ice_remove_rule_internal(hw, recp_list, v_list_itr);
5746 		if (v_list_itr->status)
5747 			return v_list_itr->status;
5753  * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
5754  * @hw: pointer to the hardware structure
5755  * @vsi_handle: VSI handle to clear mode
5756  * @promisc_mask: mask of promiscuous config bits to clear
5757  * @vid: VLAN ID to clear VLAN promiscuous
5758  * @sw: pointer to switch info struct for which function add rule
5760 static enum ice_status
5761 _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5762 		       u16 vid, struct ice_switch_info *sw)
5764 	struct ice_fltr_list_entry *fm_entry, *tmp;
5765 	struct LIST_HEAD_TYPE remove_list_head;
5766 	struct ice_fltr_mgmt_list_entry *itr;
5767 	struct LIST_HEAD_TYPE *rule_head;
5768 	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5769 	enum ice_status status = ICE_SUCCESS;
5772 	if (!ice_is_vsi_valid(hw, vsi_handle))
5773 		return ICE_ERR_PARAM;
/* VLAN promisc bits live in the PROMISC_VLAN recipe; all other promisc
 * bits live in the plain PROMISC recipe.
 */
5775 	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
5776 		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
5778 		recipe_id = ICE_SW_LKUP_PROMISC;
5780 	rule_head = &sw->recp_list[recipe_id].filt_rules;
5781 	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
5783 	INIT_LIST_HEAD(&remove_list_head);
5785 	ice_acquire_lock(rule_lock);
5786 	LIST_FOR_EACH_ENTRY(itr, rule_head,
5787 			    ice_fltr_mgmt_list_entry, list_entry) {
5788 		struct ice_fltr_info *fltr_info;
5789 		u8 fltr_promisc_mask = 0;
5791 		if (!ice_vsi_uses_fltr(itr, vsi_handle))
5793 		fltr_info = &itr->fltr_info;
/* For VLAN promisc, only rules matching the requested VID qualify */
5795 		if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
5796 		    vid != fltr_info->l_data.mac_vlan.vlan_id)
5799 		fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
5801 		/* Skip if filter is not completely specified by given mask */
5802 		if (fltr_promisc_mask & ~promisc_mask)
/* Collect matching rules while holding the lock; actual removal happens
 * after the lock is dropped, via ice_remove_promisc() below.
 */
5805 		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5809 			ice_release_lock(rule_lock);
5810 			goto free_fltr_list;
5813 	ice_release_lock(rule_lock);
5815 	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
/* Free the temporary remove list regardless of removal status */
5818 	LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
5819 				 ice_fltr_list_entry, list_entry) {
5820 		LIST_DEL(&fm_entry->list_entry);
5821 		ice_free(hw, fm_entry);
5828  * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
5829  * @hw: pointer to the hardware structure
5830  * @vsi_handle: VSI handle to clear mode
5831  * @promisc_mask: mask of promiscuous config bits to clear
5832  * @vid: VLAN ID to clear VLAN promiscuous
/* Public wrapper over _ice_clear_vsi_promisc() using hw->switch_info */
5835 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
5836 		      u8 promisc_mask, u16 vid)
5838 	return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
5839 				      vid, hw->switch_info);
5843  * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5844  * @hw: pointer to the hardware structure
5845  * @vsi_handle: VSI handle to configure
5846  * @promisc_mask: mask of promiscuous config bits
5847  * @vid: VLAN ID to set VLAN promiscuous
5848  * @lport: logical port number to configure promisc mode
5849  * @sw: pointer to switch info struct for which function add rule
5851 static enum ice_status
5852 _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5853 		     u16 vid, u8 lport, struct ice_switch_info *sw)
5855 	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
5856 	struct ice_fltr_list_entry f_list_entry;
5857 	struct ice_fltr_info new_fltr;
5858 	enum ice_status status = ICE_SUCCESS;
5864 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
5866 	if (!ice_is_vsi_valid(hw, vsi_handle))
5867 		return ICE_ERR_PARAM;
5868 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5870 	ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
/* VLAN promisc requests target the PROMISC_VLAN recipe and carry the VID
 * in the lookup data; otherwise use the plain PROMISC recipe.
 */
5872 	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
5873 		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
5874 		new_fltr.l_data.mac_vlan.vlan_id = vid;
5875 		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
5877 		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
5878 		recipe_id = ICE_SW_LKUP_PROMISC;
5881 	/* Separate filters must be set for each direction/packet type
5882 	 * combination, so we will loop over the mask value, store the
5883 	 * individual type, and clear it out in the input mask as it
5886 	while (promisc_mask) {
5887 		struct ice_sw_recipe *recp_list;
/* Peel off exactly one direction/packet-type bit per iteration */
5893 		if (promisc_mask & ICE_PROMISC_UCAST_RX) {
5894 			promisc_mask &= ~ICE_PROMISC_UCAST_RX;
5895 			pkt_type = UCAST_FLTR;
5896 		} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
5897 			promisc_mask &= ~ICE_PROMISC_UCAST_TX;
5898 			pkt_type = UCAST_FLTR;
5900 		} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
5901 			promisc_mask &= ~ICE_PROMISC_MCAST_RX;
5902 			pkt_type = MCAST_FLTR;
5903 		} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
5904 			promisc_mask &= ~ICE_PROMISC_MCAST_TX;
5905 			pkt_type = MCAST_FLTR;
5907 		} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
5908 			promisc_mask &= ~ICE_PROMISC_BCAST_RX;
5909 			pkt_type = BCAST_FLTR;
5910 		} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
5911 			promisc_mask &= ~ICE_PROMISC_BCAST_TX;
5912 			pkt_type = BCAST_FLTR;
5916 		/* Check for VLAN promiscuous flag */
5917 		if (promisc_mask & ICE_PROMISC_VLAN_RX) {
5918 			promisc_mask &= ~ICE_PROMISC_VLAN_RX;
5919 		} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
5920 			promisc_mask &= ~ICE_PROMISC_VLAN_TX;
5924 		/* Set filter DA based on packet type */
5925 		mac_addr = new_fltr.l_data.mac.mac_addr;
5926 		if (pkt_type == BCAST_FLTR) {
5927 			ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
5928 		} else if (pkt_type == MCAST_FLTR ||
5929 			   pkt_type == UCAST_FLTR) {
5930 			/* Use the dummy ether header DA */
5931 			ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
5932 				   ICE_NONDMA_TO_NONDMA);
5933 			if (pkt_type == MCAST_FLTR)
5934 				mac_addr[0] |= 0x1; /* Set multicast bit */
5937 		/* Need to reset this to zero for all iterations */
/* TX-direction filters source from the VSI; RX-direction filters source
 * from the logical port.
 */
5940 			new_fltr.flag |= ICE_FLTR_TX;
5941 			new_fltr.src = hw_vsi_id;
5943 			new_fltr.flag |= ICE_FLTR_RX;
5944 			new_fltr.src = lport;
5947 		new_fltr.fltr_act = ICE_FWD_TO_VSI;
5948 		new_fltr.vsi_handle = vsi_handle;
5949 		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
5950 		f_list_entry.fltr_info = new_fltr;
5951 		recp_list = &sw->recp_list[recipe_id];
5953 		status = ice_add_rule_internal(hw, recp_list, lport,
5955 		if (status != ICE_SUCCESS)
5956 			goto set_promisc_exit;
5964  * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5965  * @hw: pointer to the hardware structure
5966  * @vsi_handle: VSI handle to configure
5967  * @promisc_mask: mask of promiscuous config bits
5968  * @vid: VLAN ID to set VLAN promiscuous
/* Public wrapper: uses the adapter's own lport and switch_info */
5971 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5974 	return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
5975 				    hw->port_info->lport,
5980  * _ice_set_vlan_vsi_promisc
5981  * @hw: pointer to the hardware structure
5982  * @vsi_handle: VSI handle to configure
5983  * @promisc_mask: mask of promiscuous config bits
5984  * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5985  * @lport: logical port number to configure promisc mode
5986  * @sw: pointer to switch info struct for which function add rule
5988  * Configure VSI with all associated VLANs to given promiscuous mode(s)
5990 static enum ice_status
5991 _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5992 			  bool rm_vlan_promisc, u8 lport,
5993 			  struct ice_switch_info *sw)
5995 	struct ice_fltr_list_entry *list_itr, *tmp;
5996 	struct LIST_HEAD_TYPE vsi_list_head;
5997 	struct LIST_HEAD_TYPE *vlan_head;
5998 	struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
5999 	enum ice_status status;
/* Snapshot all VLAN rules used by this VSI into a local list under lock */
6002 	INIT_LIST_HEAD(&vsi_list_head);
6003 	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
6004 	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
6005 	ice_acquire_lock(vlan_lock);
6006 	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
6008 	ice_release_lock(vlan_lock);
6010 		goto free_fltr_list;
/* Apply (or clear) the requested promisc mode for every associated VLAN */
6012 	LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
6014 		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
6015 		if (rm_vlan_promisc)
6016 			status = _ice_clear_vsi_promisc(hw, vsi_handle,
6020 			status = _ice_set_vsi_promisc(hw, vsi_handle,
6021 						      promisc_mask, vlan_id,
/* Free the temporary VLAN snapshot list */
6028 	LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
6029 				 ice_fltr_list_entry, list_entry) {
6030 		LIST_DEL(&list_itr->list_entry);
6031 		ice_free(hw, list_itr);
6037  * ice_set_vlan_vsi_promisc
6038  * @hw: pointer to the hardware structure
6039  * @vsi_handle: VSI handle to configure
6040  * @promisc_mask: mask of promiscuous config bits
6041  * @rm_vlan_promisc: Clear VLANs VSI promisc mode
6043  * Configure VSI with all associated VLANs to given promiscuous mode(s)
/* Public wrapper: uses the adapter's own lport and switch_info */
6046 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
6047 			 bool rm_vlan_promisc)
6049 	return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
6050 					 rm_vlan_promisc, hw->port_info->lport,
6055  * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
6056  * @hw: pointer to the hardware structure
6057  * @vsi_handle: VSI handle to remove filters from
6058  * @recp_list: recipe list from which function remove fltr
6059  * @lkup: switch rule filter lookup type
6062 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
6063 			 struct ice_sw_recipe *recp_list,
6064 			 enum ice_sw_lkup_type lkup)
6066 	struct ice_fltr_list_entry *fm_entry;
6067 	struct LIST_HEAD_TYPE remove_list_head;
6068 	struct LIST_HEAD_TYPE *rule_head;
6069 	struct ice_fltr_list_entry *tmp;
6070 	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6071 	enum ice_status status;
/* Collect this VSI's rules of the given lookup type under lock, then
 * dispatch to the type-specific remove routine without holding the lock.
 */
6073 	INIT_LIST_HEAD(&remove_list_head);
6074 	rule_lock = &recp_list[lkup].filt_rule_lock;
6075 	rule_head = &recp_list[lkup].filt_rules;
6076 	ice_acquire_lock(rule_lock);
6077 	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
6079 	ice_release_lock(rule_lock);
6081 		goto free_fltr_list;
6084 	case ICE_SW_LKUP_MAC:
6085 		ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
6087 	case ICE_SW_LKUP_VLAN:
6088 		ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
6090 	case ICE_SW_LKUP_PROMISC:
6091 	case ICE_SW_LKUP_PROMISC_VLAN:
6092 		ice_remove_promisc(hw, lkup, &remove_list_head);
6094 	case ICE_SW_LKUP_MAC_VLAN:
6095 		ice_remove_mac_vlan(hw, &remove_list_head);
6097 	case ICE_SW_LKUP_ETHERTYPE:
6098 	case ICE_SW_LKUP_ETHERTYPE_MAC:
6099 		ice_remove_eth_mac(hw, &remove_list_head);
6101 	case ICE_SW_LKUP_DFLT:
6102 		ice_debug(hw, ICE_DBG_SW, "Remove filters for this lookup type hasn't been implemented yet\n");
6104 	case ICE_SW_LKUP_LAST:
6105 		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
/* Free the temporary collection list */
6110 	LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
6111 				 ice_fltr_list_entry, list_entry) {
6112 		LIST_DEL(&fm_entry->list_entry);
6113 		ice_free(hw, fm_entry);
6118  * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
6119  * @hw: pointer to the hardware structure
6120  * @vsi_handle: VSI handle to remove filters from
6121  * @sw: pointer to switch info struct
6124 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
6125 			 struct ice_switch_info *sw)
6127 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
/* Walk every lookup type and drop this VSI's rules from each */
6129 	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6130 				 sw->recp_list, ICE_SW_LKUP_MAC);
6131 	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6132 				 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
6133 	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6134 				 sw->recp_list, ICE_SW_LKUP_PROMISC);
6135 	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6136 				 sw->recp_list, ICE_SW_LKUP_VLAN);
6137 	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6138 				 sw->recp_list, ICE_SW_LKUP_DFLT);
6139 	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6140 				 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
6141 	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6142 				 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
6143 	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6144 				 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
6148  * ice_remove_vsi_fltr - Remove all filters for a VSI
6149  * @hw: pointer to the hardware structure
6150  * @vsi_handle: VSI handle to remove filters from
/* Public wrapper over ice_remove_vsi_fltr_rule() using hw->switch_info */
6152 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
6154 	ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
6158  * ice_alloc_res_cntr - allocating resource counter
6159  * @hw: pointer to the hardware structure
6160  * @type: type of resource
6161  * @alloc_shared: if set it is shared else dedicated
6162  * @num_items: number of entries requested for FD resource type
6163  * @counter_id: counter index returned by AQ call
6166 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
6169 	struct ice_aqc_alloc_free_res_elem *buf;
6170 	enum ice_status status;
6173 	/* Allocate resource */
6174 	buf_len = ice_struct_size(buf, elem, 1);
6175 	buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6177 		return ICE_ERR_NO_MEMORY;
6179 	buf->num_elems = CPU_TO_LE16(num_items);
6180 	buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
6181 				      ICE_AQC_RES_TYPE_M) | alloc_shared);
/* Issue the alloc-resource admin queue command and hand back the index */
6183 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
6184 				       ice_aqc_opc_alloc_res, NULL);
6188 	*counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
6196  * ice_free_res_cntr - free resource counter
6197  * @hw: pointer to the hardware structure
6198  * @type: type of resource
6199  * @alloc_shared: if set it is shared else dedicated
6200  * @num_items: number of entries to be freed for FD resource type
6201  * @counter_id: counter ID resource which needs to be freed
6204 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
6207 	struct ice_aqc_alloc_free_res_elem *buf;
6208 	enum ice_status status;
6212 	buf_len = ice_struct_size(buf, elem, 1);
6213 	buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6215 		return ICE_ERR_NO_MEMORY;
6217 	buf->num_elems = CPU_TO_LE16(num_items);
6218 	buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
6219 				      ICE_AQC_RES_TYPE_M) | alloc_shared);
/* The counter to release is passed back to FW in the element's sw_resp */
6220 	buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
6222 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
6223 				       ice_aqc_opc_free_res, NULL);
6225 		ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
6232  * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
6233  * @hw: pointer to the hardware structure
6234  * @counter_id: returns counter index
/* Convenience wrapper: allocate one dedicated VLAN counter */
6236 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
6238 	return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
6239 				  ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
6244  * ice_free_vlan_res_counter - Free counter resource for VLAN type
6245  * @hw: pointer to the hardware structure
6246  * @counter_id: counter index to be freed
/* Convenience wrapper: free one dedicated VLAN counter */
6248 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
6250 	return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
6251 				 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
6256  * ice_alloc_res_lg_act - add large action resource
6257  * @hw: pointer to the hardware structure
6258  * @l_id: large action ID to fill it in
6259  * @num_acts: number of actions to hold with a large action entry
6261 static enum ice_status
6262 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
6264 	struct ice_aqc_alloc_free_res_elem *sw_buf;
6265 	enum ice_status status;
6268 	if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
6269 		return ICE_ERR_PARAM;
6271 	/* Allocate resource for large action */
6272 	buf_len = ice_struct_size(sw_buf, elem, 1);
6273 	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6275 		return ICE_ERR_NO_MEMORY;
6277 	sw_buf->num_elems = CPU_TO_LE16(1);
6279 	/* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
6280 	 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
6281 	 * If num_acts is greater than 2, then use
6282 	 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
6283 	 * The num_acts cannot exceed 4. This was ensured at the
6284 	 * beginning of the function.
6287 		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
6288 	else if (num_acts == 2)
6289 		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
6291 		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
6293 	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
6294 				       ice_aqc_opc_alloc_res, NULL);
6296 		*l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
6298 	ice_free(hw, sw_buf);
6303  * ice_add_mac_with_sw_marker - add filter with sw marker
6304  * @hw: pointer to the hardware structure
6305  * @f_info: filter info structure containing the MAC filter information
6306  * @sw_marker: sw marker to tag the Rx descriptor with
6309 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
6312 	struct ice_fltr_mgmt_list_entry *m_entry;
6313 	struct ice_fltr_list_entry fl_info;
6314 	struct ice_sw_recipe *recp_list;
6315 	struct LIST_HEAD_TYPE l_head;
6316 	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6317 	enum ice_status ret;
/* Only forward-to-VSI MAC filters with a valid marker/VSI are accepted */
6321 	if (f_info->fltr_act != ICE_FWD_TO_VSI)
6322 		return ICE_ERR_PARAM;
6324 	if (f_info->lkup_type != ICE_SW_LKUP_MAC)
6325 		return ICE_ERR_PARAM;
6327 	if (sw_marker == ICE_INVAL_SW_MARKER_ID)
6328 		return ICE_ERR_PARAM;
6330 	if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
6331 		return ICE_ERR_PARAM;
6332 	f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
6334 	/* Add filter if it doesn't exist so then the adding of large
6335 	 * action always results in update
6338 	INIT_LIST_HEAD(&l_head);
6339 	fl_info.fltr_info = *f_info;
6340 	LIST_ADD(&fl_info.list_entry, &l_head);
6342 	entry_exists = false;
6343 	ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
6344 			       hw->port_info->lport);
6345 	if (ret == ICE_ERR_ALREADY_EXISTS)
6346 		entry_exists = true;
6350 	recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
6351 	rule_lock = &recp_list->filt_rule_lock;
6352 	ice_acquire_lock(rule_lock);
6353 	/* Get the book keeping entry for the filter */
6354 	m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
6358 	/* If counter action was enabled for this rule then don't enable
6359 	 * sw marker large action
6361 	if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
6362 		ret = ICE_ERR_PARAM;
6366 	/* if same marker was added before */
6367 	if (m_entry->sw_marker_id == sw_marker) {
6368 		ret = ICE_ERR_ALREADY_EXISTS;
6372 	/* Allocate a hardware table entry to hold large act. Three actions
6373 	 * for marker based large action
6375 	ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
6379 	if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
6382 	/* Update the switch rule to add the marker action */
6383 	ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
6385 		ice_release_lock(rule_lock);
6390 	ice_release_lock(rule_lock);
6391 	/* only remove entry if it did not exist previously */
6393 		ret = ice_remove_mac(hw, &l_head);
6399  * ice_add_mac_with_counter - add filter with counter enabled
6400  * @hw: pointer to the hardware structure
6401  * @f_info: pointer to filter info structure containing the MAC filter
6405 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
6407 	struct ice_fltr_mgmt_list_entry *m_entry;
6408 	struct ice_fltr_list_entry fl_info;
6409 	struct ice_sw_recipe *recp_list;
6410 	struct LIST_HEAD_TYPE l_head;
6411 	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6412 	enum ice_status ret;
/* Only forward-to-VSI MAC filters on a valid VSI are accepted */
6417 	if (f_info->fltr_act != ICE_FWD_TO_VSI)
6418 		return ICE_ERR_PARAM;
6420 	if (f_info->lkup_type != ICE_SW_LKUP_MAC)
6421 		return ICE_ERR_PARAM;
6423 	if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
6424 		return ICE_ERR_PARAM;
6425 	f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
6426 	recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
6428 	entry_exist = false;
6430 	rule_lock = &recp_list->filt_rule_lock;
6432 	/* Add filter if it doesn't exist so then the adding of large
6433 	 * action always results in update
6435 	INIT_LIST_HEAD(&l_head);
6437 	fl_info.fltr_info = *f_info;
6438 	LIST_ADD(&fl_info.list_entry, &l_head);
6440 	ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
6441 			       hw->port_info->lport);
6442 	if (ret == ICE_ERR_ALREADY_EXISTS)
6447 	ice_acquire_lock(rule_lock);
6448 	m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
6450 		ret = ICE_ERR_BAD_PTR;
6454 	/* Don't enable counter for a filter for which sw marker was enabled */
6455 	if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
6456 		ret = ICE_ERR_PARAM;
6460 	/* If a counter was already enabled then don't need to add again */
6461 	if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
6462 		ret = ICE_ERR_ALREADY_EXISTS;
6466 	/* Allocate a hardware table entry to VLAN counter */
6467 	ret = ice_alloc_vlan_res_counter(hw, &counter_id);
6471 	/* Allocate a hardware table entry to hold large act. Two actions for
6472 	 * counter based large action
6474 	ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
6478 	if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
6481 	/* Update the switch rule to add the counter action */
6482 	ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
6484 		ice_release_lock(rule_lock);
6489 	ice_release_lock(rule_lock);
6490 	/* only remove entry if it did not exist previously */
6492 		ret = ice_remove_mac(hw, &l_head);
6497 /* This is mapping table entry that maps every word within a given protocol
6498  * structure to the real byte offset as per the specification of that
6500  * for example dst address is 3 words in ethertype header and corresponding
6501  * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
6502  * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
6503  * matching entry describing its field. This needs to be updated if new
6504  * structure is added to that union.
/* Per-protocol list of 16-bit-word byte offsets within the packet header */
6506 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
6507 	{ ICE_MAC_OFOS,		{ 0, 2, 4, 6, 8, 10, 12 } },
6508 	{ ICE_MAC_IL,		{ 0, 2, 4, 6, 8, 10, 12 } },
6509 	{ ICE_ETYPE_OL,		{ 0 } },
6510 	{ ICE_VLAN_OFOS,	{ 0, 2 } },
6511 	{ ICE_IPV4_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
6512 	{ ICE_IPV4_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
6513 	{ ICE_IPV6_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
6514 				 26, 28, 30, 32, 34, 36, 38 } },
6515 	{ ICE_IPV6_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
6516 				 26, 28, 30, 32, 34, 36, 38 } },
6517 	{ ICE_TCP_IL,		{ 0, 2 } },
6518 	{ ICE_UDP_OF,		{ 0, 2 } },
6519 	{ ICE_UDP_ILOS,		{ 0, 2 } },
6520 	{ ICE_SCTP_IL,		{ 0, 2 } },
6521 	{ ICE_VXLAN,		{ 8, 10, 12, 14 } },
6522 	{ ICE_GENEVE,		{ 8, 10, 12, 14 } },
6523 	{ ICE_VXLAN_GPE,	{ 8, 10, 12, 14 } },
6524 	{ ICE_NVGRE,		{ 0, 2, 4, 6 } },
6525 	{ ICE_GTP,		{ 8, 10, 12, 14, 16, 18, 20 } },
6526 	{ ICE_PPPOE,		{ 0, 2, 4, 6 } },
6527 	{ ICE_PFCP,		{ 8, 10, 12, 14, 16, 18, 20, 22 } },
6528 	{ ICE_L2TPV3,		{ 0, 2, 4, 6, 8, 10 } },
6529 	{ ICE_ESP,		{ 0, 2, 4, 6 } },
6530 	{ ICE_AH,		{ 0, 2, 4, 6, 8, 10 } },
6531 	{ ICE_NAT_T,		{ 8, 10, 12, 14 } },
6532 	{ ICE_GTP_NO_PAY,	{ 8, 10, 12, 14 } },
6533 	{ ICE_VLAN_EX,		{ 0, 2 } },
6536 /* The following table describes preferred grouping of recipes.
6537  * If a recipe that needs to be programmed is a superset or matches one of the
6538  * following combinations, then the recipe needs to be chained as per the
/* Maps software protocol types to the HW protocol IDs used in recipes.
 * Tunnel headers (VXLAN/GENEVE/GTP/...) map onto the carrying L4 HW ID.
 */
6542 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
6543 	{ ICE_MAC_OFOS,		ICE_MAC_OFOS_HW },
6544 	{ ICE_MAC_IL,		ICE_MAC_IL_HW },
6545 	{ ICE_ETYPE_OL,		ICE_ETYPE_OL_HW },
6546 	{ ICE_VLAN_OFOS,	ICE_VLAN_OL_HW },
6547 	{ ICE_IPV4_OFOS,	ICE_IPV4_OFOS_HW },
6548 	{ ICE_IPV4_IL,		ICE_IPV4_IL_HW },
6549 	{ ICE_IPV6_OFOS,	ICE_IPV6_OFOS_HW },
6550 	{ ICE_IPV6_IL,		ICE_IPV6_IL_HW },
6551 	{ ICE_TCP_IL,		ICE_TCP_IL_HW },
6552 	{ ICE_UDP_OF,		ICE_UDP_OF_HW },
6553 	{ ICE_UDP_ILOS,		ICE_UDP_ILOS_HW },
6554 	{ ICE_SCTP_IL,		ICE_SCTP_IL_HW },
6555 	{ ICE_VXLAN,		ICE_UDP_OF_HW },
6556 	{ ICE_GENEVE,		ICE_UDP_OF_HW },
6557 	{ ICE_VXLAN_GPE,	ICE_UDP_OF_HW },
6558 	{ ICE_NVGRE,		ICE_GRE_OF_HW },
6559 	{ ICE_GTP,		ICE_UDP_OF_HW },
6560 	{ ICE_PPPOE,		ICE_PPPOE_HW },
6561 	{ ICE_PFCP,		ICE_UDP_ILOS_HW },
6562 	{ ICE_L2TPV3,		ICE_L2TPV3_HW },
6563 	{ ICE_ESP,		ICE_ESP_HW },
6564 	{ ICE_AH,		ICE_AH_HW },
6565 	{ ICE_NAT_T,		ICE_UDP_ILOS_HW },
6566 	{ ICE_GTP_NO_PAY,	ICE_UDP_ILOS_HW },
6567 	{ ICE_VLAN_EX,		ICE_VLAN_OF_HW },
6571  * ice_find_recp - find a recipe
6572  * @hw: pointer to the hardware structure
6573  * @lkup_exts: extension sequence to match
6575  * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
6577 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
6578 			 enum ice_sw_tunnel_type tun_type)
6580 	bool refresh_required = true;
6581 	struct ice_sw_recipe *recp;
6584 	/* Walk through existing recipes to find a match */
6585 	recp = hw->switch_info->recp_list;
6586 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6587 		/* If recipe was not created for this ID, in SW bookkeeping,
6588 		 * check if FW has an entry for this recipe. If the FW has an
6589 		 * entry update it in our SW bookkeeping and continue with the
6592 		if (!recp[i].recp_created)
6593 			if (ice_get_recp_frm_fw(hw,
6594 						hw->switch_info->recp_list, i,
6598 		/* Skip inverse action recipes */
6599 		if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
6600 		    ICE_AQ_RECIPE_ACT_INV_ACT)
6603 		/* if number of words we are looking for match */
6604 		if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
6605 			struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
6606 			struct ice_fv_word *be = lkup_exts->fv_words;
6607 			u16 *cr = recp[i].lkup_exts.field_mask;
6608 			u16 *de = lkup_exts->field_mask;
6612 			/* ar, cr, and qr are related to the recipe words, while
6613 			 * be, de, and pe are related to the lookup words
6615 			for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
6616 				for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
/* Match each lookup word (offset + protocol ID) against a recipe word */
6618 					if (ar[qr].off == be[pe].off &&
6619 					    ar[qr].prot_id == be[pe].prot_id &&
6621 						/* Found the "pe"th word in the
6626 				/* After walking through all the words in the
6627 				 * "i"th recipe if "p"th word was not found then
6628 				 * this recipe is not what we are looking for.
6629 				 * So break out from this loop and try the next
6632 				if (qr >= recp[i].lkup_exts.n_val_words) {
6637 			/* If for "i"th recipe the found was never set to false
6638 			 * then it means we found our match
6640 			if (tun_type == recp[i].tun_type && found)
6641 				return i; /* Return the recipe ID */
6644 	return ICE_MAX_NUM_RECIPES;
6648  * ice_prot_type_to_id - get protocol ID from protocol type
6649  * @type: protocol type
6650  * @id: pointer to variable that will receive the ID
6652  * Returns true if found, false otherwise
6654 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
/* Linear scan of ice_prot_id_tbl for the requested software type */
6658 	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
6659 		if (ice_prot_id_tbl[i].type == type) {
6660 			*id = ice_prot_id_tbl[i].protocol_id;
6667  * ice_fill_valid_words - count valid words
6668  * @rule: advanced rule with lookup information
6669  * @lkup_exts: byte offset extractions of the words that are valid
6671  * calculate valid words in a lookup rule using mask value
6674 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
6675 		     struct ice_prot_lkup_ext *lkup_exts)
6677 	u8 j, word, prot_id, ret_val;
6679 	if (!ice_prot_type_to_id(rule->type, &prot_id))
6682 	word = lkup_exts->n_val_words;
/* Every 16-bit word of the rule's mask that is non-zero contributes one
 * extraction entry (protocol ID + byte offset + mask).
 */
6684 	for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
6685 		if (((u16 *)&rule->m_u)[j] &&
6686 		    rule->type < ARRAY_SIZE(ice_prot_ext)) {
6687 			/* No more space to accommodate */
6688 			if (word >= ICE_MAX_CHAIN_WORDS)
6690 			lkup_exts->fv_words[word].off =
6691 				ice_prot_ext[rule->type].offs[j];
6692 			lkup_exts->fv_words[word].prot_id =
6693 				ice_prot_id_tbl[rule->type].protocol_id;
6694 			lkup_exts->field_mask[word] =
6695 				BE16_TO_CPU(((_FORCE_ __be16 *)&rule->m_u)[j]);
/* ret_val is the number of words newly added by this rule */
6699 	ret_val = word - lkup_exts->n_val_words;
6700 	lkup_exts->n_val_words = word;
6706  * ice_create_first_fit_recp_def - Create a recipe grouping
6707  * @hw: pointer to the hardware structure
6708  * @lkup_exts: an array of protocol header extractions
6709  * @rg_list: pointer to a list that stores new recipe groups
6710  * @recp_cnt: pointer to a variable that stores returned number of recipe groups
6712  * Using first fit algorithm, take all the words that are still not done
6713  * and start grouping them in 4-word groups. Each group makes up one
6716 static enum ice_status
6717 ice_create_first_fit_recp_def(struct ice_hw *hw,
6718 			      struct ice_prot_lkup_ext *lkup_exts,
6719 			      struct LIST_HEAD_TYPE *rg_list,
6722 	struct ice_pref_recipe_group *grp = NULL;
/* Even a rule with zero valid words gets one (empty) group entry */
6727 	if (!lkup_exts->n_val_words) {
6728 		struct ice_recp_grp_entry *entry;
6730 		entry = (struct ice_recp_grp_entry *)
6731 			ice_malloc(hw, sizeof(*entry));
6733 			return ICE_ERR_NO_MEMORY;
6734 		LIST_ADD(&entry->l_entry, rg_list);
6735 		grp = &entry->r_group;
6737 		grp->n_val_pairs = 0;
6740 	/* Walk through every word in the rule to check if it is not done. If so
6741 	 * then this word needs to be part of a new recipe.
6743 	for (j = 0; j < lkup_exts->n_val_words; j++)
6744 		if (!ice_is_bit_set(lkup_exts->done, j)) {
/* Start a fresh group when the current one is full */
6746 			    grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
6747 				struct ice_recp_grp_entry *entry;
6749 				entry = (struct ice_recp_grp_entry *)
6750 					ice_malloc(hw, sizeof(*entry));
6752 					return ICE_ERR_NO_MEMORY;
6753 				LIST_ADD(&entry->l_entry, rg_list);
6754 				grp = &entry->r_group;
/* Append this word's protocol/offset/mask to the current group */
6758 			grp->pairs[grp->n_val_pairs].prot_id =
6759 				lkup_exts->fv_words[j].prot_id;
6760 			grp->pairs[grp->n_val_pairs].off =
6761 				lkup_exts->fv_words[j].off;
6762 			grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
6770  * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
6771  * @hw: pointer to the hardware structure
6772  * @fv_list: field vector with the extraction sequence information
6773  * @rg_list: recipe groupings with protocol-offset pairs
6775  * Helper function to fill in the field vector indices for protocol-offset
6776  * pairs. These indexes are then ultimately programmed into a recipe.
6778 static enum ice_status
6779 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
6780 		       struct LIST_HEAD_TYPE *rg_list)
6782 	struct ice_sw_fv_list_entry *fv;
6783 	struct ice_recp_grp_entry *rg;
6784 	struct ice_fv_word *fv_ext;
6786 	if (LIST_EMPTY(fv_list))
/* Only the first field vector's extraction words are consulted */
6789 	fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
6790 	fv_ext = fv->fv_ptr->ew;
6792 	LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
6795 		for (i = 0; i < rg->r_group.n_val_pairs; i++) {
6796 			struct ice_fv_word *pr;
6801 			pr = &rg->r_group.pairs[i];
6802 			mask = rg->r_group.mask[i];
6804 			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
6805 				if (fv_ext[j].prot_id == pr->prot_id &&
6806 				    fv_ext[j].off == pr->off) {
6809 					/* Store index of field vector */
6811 					rg->fv_mask[i] = mask;
6815 			/* Protocol/offset could not be found, caller gave an
6819 				return ICE_ERR_PARAM;
6827  * ice_find_free_recp_res_idx - find free result indexes for recipe
6828  * @hw: pointer to hardware structure
6829  * @profiles: bitmap of profiles that will be associated with the new recipe
6830  * @free_idx: pointer to variable to receive the free index bitmap
6832  * The algorithm used here is:
6833  *	1. When creating a new recipe, create a set P which contains all
6834  *	   Profiles that will be associated with our new recipe
6836  *	2. For each Profile p in set P:
6837  *	    a. Add all recipes associated with Profile p into set R
6838  *	    b. Optional : PossibleIndexes &= profile[p].possibleIndexes
6839  *		[initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
6840  *		i. Or just assume they all have the same possible indexes:
6842  *			i.e., PossibleIndexes = 0x0000F00000000000
6844  *	3. For each Recipe r in set R:
6845  *	    a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
6846  *	    b. FreeIndexes = UsedIndexes ^ PossibleIndexes
6848  * FreeIndexes will contain the bits indicating the indexes free for use,
6849  * then the code needs to update the recipe[r].used_result_idx_bits to
6850  * indicate which indexes were selected for use by this recipe.
6853 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
6854 			   ice_bitmap_t *free_idx)
6856 	ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6857 	ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6858 	ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
6861 	ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6862 	ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6863 	ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
6864 	ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
/* Start from "all indexes possible" and narrow via each profile's bitmap */
6866 	ice_bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
6868 	/* For each profile we are going to associate the recipe with, add the
6869 	 * recipes that are associated with that profile. This will give us
6870 	 * the set of recipes that our recipe may collide with. Also, determine
6871 	 * what possible result indexes are usable given this set of profiles.
6873 	ice_for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
6874 		ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
6875 			      ICE_MAX_NUM_RECIPES);
6876 		ice_and_bitmap(possible_idx, possible_idx,
6877 			       hw->switch_info->prof_res_bm[bit],
6881 	/* For each recipe that our new recipe may collide with, determine
6882 	 * which indexes have been used.
6884 	ice_for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
6885 		ice_or_bitmap(used_idx, used_idx,
6886 			      hw->switch_info->recp_list[bit].res_idxs,
/* free = possible XOR used (used is always a subset of possible here) */
6889 	ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
6891 	/* return number of free indexes */
6892 	return (u16)ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS);
6896 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
6897 * @hw: pointer to hardware structure
6898 * @rm: recipe management list entry
6899 * @profiles: bitmap of profiles that will be associated.
6901 static enum ice_status
6902 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
6903 ice_bitmap_t *profiles)
6905 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6906 struct ice_aqc_recipe_data_elem *tmp;
6907 struct ice_aqc_recipe_data_elem *buf;
6908 struct ice_recp_grp_entry *entry;
6909 enum ice_status status;
6915 /* When more than one recipe are required, another recipe is needed to
6916 * chain them together. Matching a tunnel metadata ID takes up one of
6917 * the match fields in the chaining recipe reducing the number of
6918 * chained recipes by one.
6920 /* check number of free result indices */
6921 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6922 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
6924 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
6925 free_res_idx, rm->n_grp_count);
6927 if (rm->n_grp_count > 1) {
/* A chained (multi-group) recipe needs one result index per
 * group; fail early if the associated profiles cannot supply
 * enough free indexes.
 */
6928 if (rm->n_grp_count > free_res_idx)
6929 return ICE_ERR_MAX_LIMIT;
6934 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
6935 return ICE_ERR_MAX_LIMIT;
6937 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
6938 ICE_MAX_NUM_RECIPES,
6941 return ICE_ERR_NO_MEMORY;
6943 buf = (struct ice_aqc_recipe_data_elem *)
6944 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
6946 status = ICE_ERR_NO_MEMORY;
/* Read back the existing recipes from firmware; tmp[0] is then used
 * below as a template for every recipe element we build in buf[].
 */
6950 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
6951 recipe_count = ICE_MAX_NUM_RECIPES;
6952 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
6954 if (status || recipe_count == 0)
6957 /* Allocate the recipe resources, and configure them according to the
6958 * match fields from protocol headers and extracted field vectors.
6960 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
6961 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
6964 status = ice_alloc_recipe(hw, &entry->rid);
6968 /* Clear the result index of the located recipe, as this will be
6969 * updated, if needed, later in the recipe creation process.
6971 tmp[0].content.result_indx = 0;
6973 buf[recps] = tmp[0];
6974 buf[recps].recipe_indx = (u8)entry->rid;
6975 /* if the recipe is a non-root recipe RID should be programmed
6976 * as 0 for the rules to be applied correctly.
6978 buf[recps].content.rid = 0;
6979 ice_memset(&buf[recps].content.lkup_indx, 0,
6980 sizeof(buf[recps].content.lkup_indx),
6983 /* All recipes use look-up index 0 to match switch ID. */
6984 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
6985 buf[recps].content.mask[0] =
6986 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
6987 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
6990 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
6991 buf[recps].content.lkup_indx[i] = 0x80;
6992 buf[recps].content.mask[i] = 0;
/* Now overwrite the ignore defaults with the real FV word
 * indexes/masks for this group (offset by 1 past the
 * switch-ID word).
 */
6995 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
6996 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
6997 buf[recps].content.mask[i + 1] =
6998 CPU_TO_LE16(entry->fv_mask[i]);
7001 if (rm->n_grp_count > 1) {
7002 /* Checks to see if there really is a valid result index
7005 if (chain_idx >= ICE_MAX_FV_WORDS) {
7006 ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
7007 status = ICE_ERR_MAX_LIMIT;
/* Publish this group's result in the chosen result index
 * so the chaining (root) recipe can match on it; then
 * consume the index and pick the next free one.
 */
7011 entry->chain_idx = chain_idx;
7012 buf[recps].content.result_indx =
7013 ICE_AQ_RECIPE_RESULT_EN |
7014 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
7015 ICE_AQ_RECIPE_RESULT_DATA_M);
7016 ice_clear_bit(chain_idx, result_idx_bm);
7017 chain_idx = ice_find_first_bit(result_idx_bm,
7021 /* fill recipe dependencies */
7022 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
7023 ICE_MAX_NUM_RECIPES);
7024 ice_set_bit(buf[recps].recipe_indx,
7025 (ice_bitmap_t *)buf[recps].recipe_bitmap);
7026 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
7030 if (rm->n_grp_count == 1) {
/* Single-group recipe: it is its own root, no chaining needed. */
7031 rm->root_rid = buf[0].recipe_indx;
7032 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
7033 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
7034 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
7035 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
7036 sizeof(buf[0].recipe_bitmap),
7037 ICE_NONDMA_TO_NONDMA);
7039 status = ICE_ERR_BAD_PTR;
7042 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
7043 * the recipe which is getting created if specified
7044 * by user. Usually any advanced switch filter, which results
7045 * into new extraction sequence, ended up creating a new recipe
7046 * of type ROOT and usually recipes are associated with profiles
7047 * Switch rule referring newly created recipe, needs to have
7048 * either/or 'fwd' or 'join' priority, otherwise switch rule
7049 * evaluation will not happen correctly. In other words, if
7050 * switch rule to be evaluated on priority basis, then recipe
7051 * needs to have priority, otherwise it will be evaluated last.
7053 buf[0].content.act_ctrl_fwd_priority = rm->priority;
7055 struct ice_recp_grp_entry *last_chain_entry;
7058 /* Allocate the last recipe that will chain the outcomes of the
7059 * other recipes together
7061 status = ice_alloc_recipe(hw, &rid);
7065 buf[recps].recipe_indx = (u8)rid;
7066 buf[recps].content.rid = (u8)rid;
7067 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
7068 /* the new entry created should also be part of rg_list to
7069 * make sure we have complete recipe
7071 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
7072 sizeof(*last_chain_entry));
7073 if (!last_chain_entry) {
7074 status = ICE_ERR_NO_MEMORY;
7077 last_chain_entry->rid = rid;
7078 ice_memset(&buf[recps].content.lkup_indx, 0,
7079 sizeof(buf[recps].content.lkup_indx),
7081 /* All recipes use look-up index 0 to match switch ID. */
7082 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
7083 buf[recps].content.mask[0] =
7084 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
7085 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
7086 buf[recps].content.lkup_indx[i] =
7087 ICE_AQ_RECIPE_LKUP_IGNORE;
7088 buf[recps].content.mask[i] = 0;
7092 /* update r_bitmap with the recp that is used for chaining */
7093 ice_set_bit(rid, rm->r_bitmap);
7094 /* this is the recipe that chains all the other recipes so it
7095 * should not have a chaining ID to indicate the same
7097 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
/* The root recipe matches each sub-recipe's result index
 * (chain_idx) with a full 16-bit mask, tying the chain
 * together.
 */
7098 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
7100 last_chain_entry->fv_idx[i] = entry->chain_idx;
7101 buf[recps].content.lkup_indx[i] = entry->chain_idx;
7102 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
7103 ice_set_bit(entry->rid, rm->r_bitmap);
7105 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
7106 if (sizeof(buf[recps].recipe_bitmap) >=
7107 sizeof(rm->r_bitmap)) {
7108 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
7109 sizeof(buf[recps].recipe_bitmap),
7110 ICE_NONDMA_TO_NONDMA);
7112 status = ICE_ERR_BAD_PTR;
7115 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
7118 rm->root_rid = (u8)rid;
/* Program the assembled recipe elements into firmware under the
 * global change lock.
 */
7120 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7124 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
7125 ice_release_change_lock(hw);
7129 /* Every recipe that just got created add it to the recipe
7132 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
7133 struct ice_switch_info *sw = hw->switch_info;
7134 bool is_root, idx_found = false;
7135 struct ice_sw_recipe *recp;
7136 u16 idx, buf_idx = 0;
7138 /* find buffer index for copying some data */
7139 for (idx = 0; idx < rm->n_grp_count; idx++)
7140 if (buf[idx].recipe_indx == entry->rid) {
7146 status = ICE_ERR_OUT_OF_RANGE;
/* Mirror the programmed recipe into the software recp_list so
 * later lookups (ice_find_recp) and rule adds can reuse it.
 */
7150 recp = &sw->recp_list[entry->rid];
7151 is_root = (rm->root_rid == entry->rid);
7152 recp->is_root = is_root;
7154 recp->root_rid = entry->rid;
7155 recp->big_recp = (is_root && rm->n_grp_count > 1);
7157 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
7158 entry->r_group.n_val_pairs *
7159 sizeof(struct ice_fv_word),
7160 ICE_NONDMA_TO_NONDMA);
7162 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
7163 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
7165 /* Copy non-result fv index values and masks to recipe. This
7166 * call will also update the result recipe bitmask.
7168 ice_collect_result_idx(&buf[buf_idx], recp);
7170 /* for non-root recipes, also copy to the root, this allows
7171 * easier matching of a complete chained recipe
7174 ice_collect_result_idx(&buf[buf_idx],
7175 &sw->recp_list[rm->root_rid]);
7177 recp->n_ext_words = entry->r_group.n_val_pairs;
7178 recp->chain_idx = entry->chain_idx;
7179 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
7180 recp->n_grp_count = rm->n_grp_count;
7181 recp->tun_type = rm->tun_type;
7182 recp->recp_created = true;
7196 * ice_create_recipe_group - creates recipe group
7197 * @hw: pointer to hardware structure
7198 * @rm: recipe management list entry
7199 * @lkup_exts: lookup elements
7201 static enum ice_status
7202 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
7203 struct ice_prot_lkup_ext *lkup_exts)
7205 enum ice_status status;
7208 rm->n_grp_count = 0;
7210 /* Create recipes for words that are marked not done by packing them
7213 status = ice_create_first_fit_recp_def(hw, lkup_exts,
7214 &rm->rg_list, &recp_count);
/* Cache the lookup words and masks on rm so later recipe programming
 * and duplicate-recipe matching can use them without lkup_exts.
 */
7216 rm->n_grp_count += recp_count;
7217 rm->n_ext_words = lkup_exts->n_val_words;
7218 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
7219 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
7220 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
7221 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
7228 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
7229 * @hw: pointer to hardware structure
7230 * @lkups: lookup elements or match criteria for the advanced recipe, one
7231 * structure per protocol header
7232 * @lkups_cnt: number of protocols
7233 * @bm: bitmap of field vectors to consider
7234 * @fv_list: pointer to a list that holds the returned field vectors
7236 static enum ice_status
7237 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7238 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
7240 enum ice_status status;
/* Scratch array of HW protocol IDs, one per lookup; freed below on
 * every path.
 */
7247 prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
7249 return ICE_ERR_NO_MEMORY;
/* Translate each software lookup type to its hardware protocol ID;
 * an unknown type aborts with ICE_ERR_CFG.
 */
7251 for (i = 0; i < lkups_cnt; i++)
7252 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
7253 status = ICE_ERR_CFG;
7257 /* Find field vectors that include all specified protocol types */
7258 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
7261 ice_free(hw, prot_ids);
7266 * ice_tun_type_match_word - determine if tun type needs a match mask
7267 * @tun_type: tunnel type
7268 * @mask: mask to be used for the tunnel
7270 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
/* These tunnel/QinQ types match the full tunnel-flag metadata word. */
7273 case ICE_SW_TUN_VXLAN_GPE:
7274 case ICE_SW_TUN_GENEVE:
7275 case ICE_SW_TUN_VXLAN:
7276 case ICE_SW_TUN_NVGRE:
7277 case ICE_SW_TUN_UDP:
7278 case ICE_ALL_TUNNELS:
7279 case ICE_SW_TUN_AND_NON_TUN_QINQ:
7280 case ICE_NON_TUN_QINQ:
7281 case ICE_SW_TUN_PPPOE_QINQ:
7282 case ICE_SW_TUN_PPPOE_PAY_QINQ:
7283 case ICE_SW_TUN_PPPOE_IPV4_QINQ:
7284 case ICE_SW_TUN_PPPOE_IPV6_QINQ:
7285 *mask = ICE_TUN_FLAG_MASK;
/* VLAN-tagged tunnel variants ignore the VLAN bit in the flag word. */
7288 case ICE_SW_TUN_GENEVE_VLAN:
7289 case ICE_SW_TUN_VXLAN_VLAN:
7290 *mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
7300 * ice_add_special_words - Add words that are not protocols, such as metadata
7301 * @rinfo: other information regarding the rule e.g. priority and action info
7302 * @lkup_exts: lookup word structure
7304 static enum ice_status
7305 ice_add_special_words(struct ice_adv_rule_info *rinfo,
7306 struct ice_prot_lkup_ext *lkup_exts)
7310 /* If this is a tunneled packet, then add recipe index to match the
7311 * tunnel bit in the packet metadata flags.
7313 if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
/* Consume one lookup-word slot for the metadata flag match;
 * fail with ICE_ERR_MAX_LIMIT when no slot remains.
 */
7314 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
7315 u8 word = lkup_exts->n_val_words++;
7317 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
7318 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
7319 lkup_exts->field_mask[word] = mask;
7321 return ICE_ERR_MAX_LIMIT;
7328 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
7329 * @hw: pointer to hardware structure
7330 * @rinfo: other information regarding the rule e.g. priority and action info
7331 * @bm: pointer to memory for returning the bitmap of field vectors
7334 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
7337 enum ice_prof_type prof_type;
7339 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
/* Translate the tunnel type either into a broad profile class
 * (prof_type, resolved to a bitmap at the end) or into an explicit
 * set of profile IDs set directly in bm. NOTE(review): the cases that
 * set profile IDs directly presumably return before the final
 * ice_get_sw_fv_bitmap() call (those lines are not visible here) —
 * confirm.
 */
7341 switch (rinfo->tun_type) {
7343 case ICE_NON_TUN_QINQ:
7344 prof_type = ICE_PROF_NON_TUN;
7346 case ICE_ALL_TUNNELS:
7347 prof_type = ICE_PROF_TUN_ALL;
7349 case ICE_SW_TUN_VXLAN_GPE:
7350 case ICE_SW_TUN_GENEVE:
7351 case ICE_SW_TUN_GENEVE_VLAN:
7352 case ICE_SW_TUN_VXLAN:
7353 case ICE_SW_TUN_VXLAN_VLAN:
7354 case ICE_SW_TUN_UDP:
7355 case ICE_SW_TUN_GTP:
7356 prof_type = ICE_PROF_TUN_UDP;
7358 case ICE_SW_TUN_NVGRE:
7359 prof_type = ICE_PROF_TUN_GRE;
7361 case ICE_SW_TUN_PPPOE:
7362 case ICE_SW_TUN_PPPOE_QINQ:
7363 prof_type = ICE_PROF_TUN_PPPOE;
7365 case ICE_SW_TUN_PPPOE_PAY:
7366 case ICE_SW_TUN_PPPOE_PAY_QINQ:
7367 ice_set_bit(ICE_PROFID_PPPOE_PAY, bm);
7369 case ICE_SW_TUN_PPPOE_IPV4:
7370 case ICE_SW_TUN_PPPOE_IPV4_QINQ:
7371 ice_set_bit(ICE_PROFID_PPPOE_IPV4_OTHER, bm);
7372 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
7373 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
7375 case ICE_SW_TUN_PPPOE_IPV4_TCP:
7376 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
7378 case ICE_SW_TUN_PPPOE_IPV4_UDP:
7379 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
7381 case ICE_SW_TUN_PPPOE_IPV6:
7382 case ICE_SW_TUN_PPPOE_IPV6_QINQ:
7383 ice_set_bit(ICE_PROFID_PPPOE_IPV6_OTHER, bm);
7384 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
7385 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
7387 case ICE_SW_TUN_PPPOE_IPV6_TCP:
7388 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
7390 case ICE_SW_TUN_PPPOE_IPV6_UDP:
7391 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
7393 case ICE_SW_TUN_PROFID_IPV6_ESP:
7394 case ICE_SW_TUN_IPV6_ESP:
7395 ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
7397 case ICE_SW_TUN_PROFID_IPV6_AH:
7398 case ICE_SW_TUN_IPV6_AH:
7399 ice_set_bit(ICE_PROFID_IPV6_AH, bm);
7401 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
7402 case ICE_SW_TUN_IPV6_L2TPV3:
7403 ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
7405 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
7406 case ICE_SW_TUN_IPV6_NAT_T:
7407 ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
7409 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
7410 ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
7412 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
7413 ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
7415 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
7416 ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
7418 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
7419 ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
7421 case ICE_SW_TUN_IPV4_NAT_T:
7422 ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
7424 case ICE_SW_TUN_IPV4_L2TPV3:
7425 ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
7427 case ICE_SW_TUN_IPV4_ESP:
7428 ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
7430 case ICE_SW_TUN_IPV4_AH:
7431 ice_set_bit(ICE_PROFID_IPV4_AH, bm);
7433 case ICE_SW_IPV4_TCP:
7434 ice_set_bit(ICE_PROFID_IPV4_TCP, bm);
7436 case ICE_SW_IPV4_UDP:
7437 ice_set_bit(ICE_PROFID_IPV4_UDP, bm);
7439 case ICE_SW_IPV6_TCP:
7440 ice_set_bit(ICE_PROFID_IPV6_TCP, bm);
7442 case ICE_SW_IPV6_UDP:
7443 ice_set_bit(ICE_PROFID_IPV6_UDP, bm);
7445 case ICE_SW_TUN_IPV4_GTPU_IPV4:
7446 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, bm);
7447 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_OTHER, bm);
7448 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
7449 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
7450 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
7451 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
7453 case ICE_SW_TUN_IPV6_GTPU_IPV4:
7454 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, bm);
7455 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_OTHER, bm);
7456 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
7457 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
7458 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
7459 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
7461 case ICE_SW_TUN_IPV4_GTPU_IPV6:
7462 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, bm);
7463 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_OTHER, bm);
7464 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
7465 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
7466 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
7467 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
7469 case ICE_SW_TUN_IPV6_GTPU_IPV6:
7470 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, bm);
7471 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_OTHER, bm);
7472 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
7473 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
7474 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
7475 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
7477 case ICE_SW_TUN_AND_NON_TUN:
7478 case ICE_SW_TUN_AND_NON_TUN_QINQ:
7480 prof_type = ICE_PROF_ALL;
/* Resolve the selected profile class into the bitmap of matching
 * field vectors (profiles).
 */
7484 ice_get_sw_fv_bitmap(hw, prof_type, bm);
7488 * ice_is_prof_rule - determine if rule type is a profile rule
7489 * @type: the rule type
7491 * If the rule type is a profile rule, there is no field value
7492 * match required; in this case just a profile hit is required.
7494 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
/* All PROFID tunnel types match on a profile hit alone. */
7497 case ICE_SW_TUN_PROFID_IPV6_ESP:
7498 case ICE_SW_TUN_PROFID_IPV6_AH:
7499 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
7500 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
7501 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
7502 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
7503 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
7504 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
7514 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
7515 * @hw: pointer to hardware structure
7516 * @lkups: lookup elements or match criteria for the advanced recipe, one
7517 * structure per protocol header
7518 * @lkups_cnt: number of protocols
7519 * @rinfo: other information regarding the rule e.g. priority and action info
7520 * @rid: return the recipe ID of the recipe created
7522 static enum ice_status
7523 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7524 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
7526 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
7527 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7528 struct ice_prot_lkup_ext *lkup_exts;
7529 struct ice_recp_grp_entry *r_entry;
7530 struct ice_sw_fv_list_entry *fvit;
7531 struct ice_recp_grp_entry *r_tmp;
7532 struct ice_sw_fv_list_entry *tmp;
7533 enum ice_status status = ICE_SUCCESS;
7534 struct ice_sw_recipe *rm;
/* Non-profile rules must supply at least one lookup element;
 * profile rules (ice_is_prof_rule) match on a profile hit alone.
 */
7537 if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
7538 return ICE_ERR_PARAM;
7540 lkup_exts = (struct ice_prot_lkup_ext *)
7541 ice_malloc(hw, sizeof(*lkup_exts))
7543 return ICE_ERR_NO_MEMORY;
7545 /* Determine the number of words to be matched and if it exceeds a
7546 * recipe's restrictions
7548 for (i = 0; i < lkups_cnt; i++) {
7551 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
7552 status = ICE_ERR_CFG;
7553 goto err_free_lkup_exts;
7556 count = ice_fill_valid_words(&lkups[i], lkup_exts);
7558 status = ICE_ERR_CFG;
7559 goto err_free_lkup_exts;
7563 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
7565 status = ICE_ERR_NO_MEMORY;
7566 goto err_free_lkup_exts;
7569 /* Get field vectors that contain fields extracted from all the protocol
7570 * headers being programmed.
7572 INIT_LIST_HEAD(&rm->fv_list);
7573 INIT_LIST_HEAD(&rm->rg_list);
7575 /* Get bitmap of field vectors (profiles) that are compatible with the
7576 * rule request; only these will be searched in the subsequent call to
7579 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
7581 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
7585 /* Create any special protocol/offset pairs, such as looking at tunnel
7586 * bits by extracting metadata
7588 status = ice_add_special_words(rinfo, lkup_exts);
7590 goto err_free_lkup_exts;
7592 /* Group match words into recipes using preferred recipe grouping
7595 status = ice_create_recipe_group(hw, rm, lkup_exts);
7599 /* set the recipe priority if specified */
7600 rm->priority = (u8)rinfo->priority;
7602 /* Find offsets from the field vector. Pick the first one for all the
7605 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
7609 /* An empty FV list means to use all the profiles returned in the
7612 if (LIST_EMPTY(&rm->fv_list)) {
7615 ice_for_each_set_bit(j, fv_bitmap, ICE_MAX_NUM_PROFILES) {
7616 struct ice_sw_fv_list_entry *fvl;
7618 fvl = (struct ice_sw_fv_list_entry *)
7619 ice_malloc(hw, sizeof(*fvl));
7623 fvl->profile_id = j;
7624 LIST_ADD(&fvl->list_entry, &rm->fv_list);
7628 /* get bitmap of all profiles the recipe will be associated with */
7629 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7630 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7632 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
7633 ice_set_bit((u16)fvit->profile_id, profiles);
7636 /* Look for a recipe which matches our requested fv / mask list */
7637 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
7638 if (*rid < ICE_MAX_NUM_RECIPES)
7639 /* Success if found a recipe that match the existing criteria */
7642 rm->tun_type = rinfo->tun_type;
7643 /* Recipe we need does not exist, add a recipe */
7644 status = ice_add_sw_recipe(hw, rm, profiles);
7648 /* Associate all the recipes created with all the profiles in the
7649 * common field vector.
7651 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7653 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Read-modify-write the profile's recipe association so the
 * new recipes are added without dropping existing ones; the
 * write is done under the change lock.
 */
7656 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
7657 (u8 *)r_bitmap, NULL);
7661 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
7662 ICE_MAX_NUM_RECIPES);
7663 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7667 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
7670 ice_release_change_lock(hw);
7675 /* Update profile to recipe bitmap array */
7676 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
7677 ICE_MAX_NUM_RECIPES);
7679 /* Update recipe to profile bitmap array */
7680 ice_for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
7681 ice_set_bit((u16)fvit->profile_id,
7682 recipe_to_profile[j]);
7685 *rid = rm->root_rid;
7686 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
7687 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
/* Common cleanup: tear down the recipe-group list and FV list built
 * on rm, then release the temporary allocations.
 */
7689 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
7690 ice_recp_grp_entry, l_entry) {
7691 LIST_DEL(&r_entry->l_entry);
7692 ice_free(hw, r_entry);
7695 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
7697 LIST_DEL(&fvit->list_entry);
7702 ice_free(hw, rm->root_buf);
7707 ice_free(hw, lkup_exts);
7713 * ice_find_dummy_packet - find dummy packet by tunnel type
7715 * @lkups: lookup elements or match criteria for the advanced recipe, one
7716 * structure per protocol header
7717 * @lkups_cnt: number of protocols
7718 * @tun_type: tunnel type from the match criteria
7719 * @pkt: dummy packet to fill according to filter match criteria
7720 * @pkt_len: packet length of dummy packet
7721 * @offsets: pointer to receive the pointer to the offsets for the packet
7724 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7725 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
7727 const struct ice_dummy_pkt_offsets **offsets)
7729 bool tcp = false, udp = false, ipv6 = false, vlan = false;
7733 for (i = 0; i < lkups_cnt; i++) {
7734 if (lkups[i].type == ICE_UDP_ILOS)
7736 else if (lkups[i].type == ICE_TCP_IL)
7738 else if (lkups[i].type == ICE_IPV6_OFOS)
7740 else if (lkups[i].type == ICE_VLAN_OFOS)
7742 else if (lkups[i].type == ICE_IPV4_OFOS &&
7743 lkups[i].h_u.ipv4_hdr.protocol ==
7744 ICE_IPV4_NVGRE_PROTO_ID &&
7745 lkups[i].m_u.ipv4_hdr.protocol ==
7748 else if (lkups[i].type == ICE_PPPOE &&
7749 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
7750 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
7751 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
7754 else if (lkups[i].type == ICE_ETYPE_OL &&
7755 lkups[i].h_u.ethertype.ethtype_id ==
7756 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
7757 lkups[i].m_u.ethertype.ethtype_id ==
7760 else if (lkups[i].type == ICE_IPV4_IL &&
7761 lkups[i].h_u.ipv4_hdr.protocol ==
7763 lkups[i].m_u.ipv4_hdr.protocol ==
7768 if ((tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7769 tun_type == ICE_NON_TUN_QINQ) && ipv6) {
7770 *pkt = dummy_qinq_ipv6_pkt;
7771 *pkt_len = sizeof(dummy_qinq_ipv6_pkt);
7772 *offsets = dummy_qinq_ipv6_packet_offsets;
7774 } else if (tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7775 tun_type == ICE_NON_TUN_QINQ) {
7776 *pkt = dummy_qinq_ipv4_pkt;
7777 *pkt_len = sizeof(dummy_qinq_ipv4_pkt);
7778 *offsets = dummy_qinq_ipv4_packet_offsets;
7782 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_QINQ) {
7783 *pkt = dummy_qinq_pppoe_ipv6_packet;
7784 *pkt_len = sizeof(dummy_qinq_pppoe_ipv6_packet);
7785 *offsets = dummy_qinq_pppoe_packet_ipv6_offsets;
7787 } else if (tun_type == ICE_SW_TUN_PPPOE_IPV4_QINQ) {
7788 *pkt = dummy_qinq_pppoe_ipv4_pkt;
7789 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7790 *offsets = dummy_qinq_pppoe_ipv4_packet_offsets;
7792 } else if (tun_type == ICE_SW_TUN_PPPOE_QINQ ||
7793 tun_type == ICE_SW_TUN_PPPOE_PAY_QINQ) {
7794 *pkt = dummy_qinq_pppoe_ipv4_pkt;
7795 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7796 *offsets = dummy_qinq_pppoe_packet_offsets;
7800 if (tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY) {
7801 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7802 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7803 *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
7805 } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
7806 *pkt = dummy_ipv6_gtpu_ipv6_packet;
7807 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
7808 *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
7812 if (tun_type == ICE_SW_TUN_IPV4_ESP) {
7813 *pkt = dummy_ipv4_esp_pkt;
7814 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
7815 *offsets = dummy_ipv4_esp_packet_offsets;
7819 if (tun_type == ICE_SW_TUN_IPV6_ESP) {
7820 *pkt = dummy_ipv6_esp_pkt;
7821 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
7822 *offsets = dummy_ipv6_esp_packet_offsets;
7826 if (tun_type == ICE_SW_TUN_IPV4_AH) {
7827 *pkt = dummy_ipv4_ah_pkt;
7828 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
7829 *offsets = dummy_ipv4_ah_packet_offsets;
7833 if (tun_type == ICE_SW_TUN_IPV6_AH) {
7834 *pkt = dummy_ipv6_ah_pkt;
7835 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
7836 *offsets = dummy_ipv6_ah_packet_offsets;
7840 if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
7841 *pkt = dummy_ipv4_nat_pkt;
7842 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
7843 *offsets = dummy_ipv4_nat_packet_offsets;
7847 if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
7848 *pkt = dummy_ipv6_nat_pkt;
7849 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
7850 *offsets = dummy_ipv6_nat_packet_offsets;
7854 if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
7855 *pkt = dummy_ipv4_l2tpv3_pkt;
7856 *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
7857 *offsets = dummy_ipv4_l2tpv3_packet_offsets;
7861 if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
7862 *pkt = dummy_ipv6_l2tpv3_pkt;
7863 *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
7864 *offsets = dummy_ipv6_l2tpv3_packet_offsets;
7868 if (tun_type == ICE_SW_TUN_GTP) {
7869 *pkt = dummy_udp_gtp_packet;
7870 *pkt_len = sizeof(dummy_udp_gtp_packet);
7871 *offsets = dummy_udp_gtp_packet_offsets;
7875 if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
7876 *pkt = dummy_pppoe_ipv6_packet;
7877 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
7878 *offsets = dummy_pppoe_packet_offsets;
7880 } else if (tun_type == ICE_SW_TUN_PPPOE ||
7881 tun_type == ICE_SW_TUN_PPPOE_PAY) {
7882 *pkt = dummy_pppoe_ipv4_packet;
7883 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
7884 *offsets = dummy_pppoe_packet_offsets;
7888 if (tun_type == ICE_SW_TUN_PPPOE_IPV4) {
7889 *pkt = dummy_pppoe_ipv4_packet;
7890 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
7891 *offsets = dummy_pppoe_packet_ipv4_offsets;
7895 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_TCP) {
7896 *pkt = dummy_pppoe_ipv4_tcp_packet;
7897 *pkt_len = sizeof(dummy_pppoe_ipv4_tcp_packet);
7898 *offsets = dummy_pppoe_ipv4_tcp_packet_offsets;
7902 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_UDP) {
7903 *pkt = dummy_pppoe_ipv4_udp_packet;
7904 *pkt_len = sizeof(dummy_pppoe_ipv4_udp_packet);
7905 *offsets = dummy_pppoe_ipv4_udp_packet_offsets;
7909 if (tun_type == ICE_SW_TUN_PPPOE_IPV6) {
7910 *pkt = dummy_pppoe_ipv6_packet;
7911 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
7912 *offsets = dummy_pppoe_packet_ipv6_offsets;
7916 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_TCP) {
7917 *pkt = dummy_pppoe_ipv6_tcp_packet;
7918 *pkt_len = sizeof(dummy_pppoe_ipv6_tcp_packet);
7919 *offsets = dummy_pppoe_packet_ipv6_tcp_offsets;
7923 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_UDP) {
7924 *pkt = dummy_pppoe_ipv6_udp_packet;
7925 *pkt_len = sizeof(dummy_pppoe_ipv6_udp_packet);
7926 *offsets = dummy_pppoe_packet_ipv6_udp_offsets;
7930 if (tun_type == ICE_SW_IPV4_TCP) {
7931 *pkt = dummy_tcp_packet;
7932 *pkt_len = sizeof(dummy_tcp_packet);
7933 *offsets = dummy_tcp_packet_offsets;
7937 if (tun_type == ICE_SW_IPV4_UDP) {
7938 *pkt = dummy_udp_packet;
7939 *pkt_len = sizeof(dummy_udp_packet);
7940 *offsets = dummy_udp_packet_offsets;
7944 if (tun_type == ICE_SW_IPV6_TCP) {
7945 *pkt = dummy_tcp_ipv6_packet;
7946 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
7947 *offsets = dummy_tcp_ipv6_packet_offsets;
7951 if (tun_type == ICE_SW_IPV6_UDP) {
7952 *pkt = dummy_udp_ipv6_packet;
7953 *pkt_len = sizeof(dummy_udp_ipv6_packet);
7954 *offsets = dummy_udp_ipv6_packet_offsets;
7958 /* Support GTP tunnel + L3 */
7959 if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4) {
7960 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7961 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7962 *offsets = dummy_ipv4_gtpu_ipv4_packet_offsets;
7965 if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6) {
7966 *pkt = dummy_ipv4_gtpu_ipv6_packet;
7967 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_packet);
7968 *offsets = dummy_ipv4_gtpu_ipv6_packet_offsets;
7971 if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4) {
7972 *pkt = dummy_ipv6_gtpu_ipv4_packet;
7973 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_packet);
7974 *offsets = dummy_ipv6_gtpu_ipv4_packet_offsets;
7977 if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6) {
7978 *pkt = dummy_ipv6_gtpu_ipv6_packet;
7979 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
7980 *offsets = dummy_ipv6_gtpu_ipv6_packet_offsets;
7984 if (tun_type == ICE_ALL_TUNNELS) {
7985 *pkt = dummy_gre_udp_packet;
7986 *pkt_len = sizeof(dummy_gre_udp_packet);
7987 *offsets = dummy_gre_udp_packet_offsets;
7991 if (tun_type == ICE_SW_TUN_NVGRE || gre) {
7993 *pkt = dummy_gre_tcp_packet;
7994 *pkt_len = sizeof(dummy_gre_tcp_packet);
7995 *offsets = dummy_gre_tcp_packet_offsets;
7999 *pkt = dummy_gre_udp_packet;
8000 *pkt_len = sizeof(dummy_gre_udp_packet);
8001 *offsets = dummy_gre_udp_packet_offsets;
8005 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
8006 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
8007 tun_type == ICE_SW_TUN_GENEVE_VLAN ||
8008 tun_type == ICE_SW_TUN_VXLAN_VLAN) {
8010 *pkt = dummy_udp_tun_tcp_packet;
8011 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
8012 *offsets = dummy_udp_tun_tcp_packet_offsets;
8016 *pkt = dummy_udp_tun_udp_packet;
8017 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
8018 *offsets = dummy_udp_tun_udp_packet_offsets;
8024 *pkt = dummy_vlan_udp_packet;
8025 *pkt_len = sizeof(dummy_vlan_udp_packet);
8026 *offsets = dummy_vlan_udp_packet_offsets;
8029 *pkt = dummy_udp_packet;
8030 *pkt_len = sizeof(dummy_udp_packet);
8031 *offsets = dummy_udp_packet_offsets;
8033 } else if (udp && ipv6) {
8035 *pkt = dummy_vlan_udp_ipv6_packet;
8036 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
8037 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
8040 *pkt = dummy_udp_ipv6_packet;
8041 *pkt_len = sizeof(dummy_udp_ipv6_packet);
8042 *offsets = dummy_udp_ipv6_packet_offsets;
8044 } else if ((tcp && ipv6) || ipv6) {
8046 *pkt = dummy_vlan_tcp_ipv6_packet;
8047 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
8048 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
8051 *pkt = dummy_tcp_ipv6_packet;
8052 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
8053 *offsets = dummy_tcp_ipv6_packet_offsets;
8058 *pkt = dummy_vlan_tcp_packet;
8059 *pkt_len = sizeof(dummy_vlan_tcp_packet);
8060 *offsets = dummy_vlan_tcp_packet_offsets;
8062 *pkt = dummy_tcp_packet;
8063 *pkt_len = sizeof(dummy_tcp_packet);
8064 *offsets = dummy_tcp_packet_offsets;
8069 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
8071 * @lkups: lookup elements or match criteria for the advanced recipe, one
8072 * structure per protocol header
8073 * @lkups_cnt: number of protocols
8074 * @s_rule: stores rule information from the match criteria
8075 * @dummy_pkt: dummy packet to fill according to filter match criteria
8076 * @pkt_len: packet length of dummy packet
8077 * @offsets: offset info for the dummy packet
8079 static enum ice_status
8080 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
8081 struct ice_aqc_sw_rules_elem *s_rule,
8082 const u8 *dummy_pkt, u16 pkt_len,
8083 const struct ice_dummy_pkt_offsets *offsets)
/* Copies the pre-built dummy packet into the switch-rule buffer, then
 * overlays the caller's match values (h_u) wherever the corresponding
 * mask words (m_u) are non-zero, so only the matched fields differ from
 * the template. Returns ICE_ERR_PARAM on an unknown protocol type or a
 * protocol missing from the offsets table.
 */
8088 /* Start with a packet with a pre-defined/dummy content. Then, fill
8089 * in the header values to be looked up or matched.
8091 pkt = s_rule->pdata.lkup_tx_rx.hdr;
8093 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
8095 for (i = 0; i < lkups_cnt; i++) {
8096 enum ice_protocol_type type;
8097 u16 offset = 0, len = 0, j;
8100 /* find the start of this layer; it should be found since this
8101 * was already checked when search for the dummy packet
8103 type = lkups[i].type;
8104 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
8105 if (type == offsets[j].type) {
8106 offset = offsets[j].offset;
8111 /* this should never happen in a correct calling sequence */
8113 return ICE_ERR_PARAM;
/* Map each lookup's protocol type to the size of the header it covers;
 * 'len' bounds the masked-overlay loop below.
 * NOTE(review): case labels and break statements between these
 * assignments are elided in this excerpt.
 */
8115 switch (lkups[i].type) {
8118 len = sizeof(struct ice_ether_hdr);
8121 len = sizeof(struct ice_ethtype_hdr);
8125 len = sizeof(struct ice_vlan_hdr);
8129 len = sizeof(struct ice_ipv4_hdr);
8133 len = sizeof(struct ice_ipv6_hdr);
8138 len = sizeof(struct ice_l4_hdr);
8141 len = sizeof(struct ice_sctp_hdr);
8144 len = sizeof(struct ice_nvgre);
8149 len = sizeof(struct ice_udp_tnl_hdr);
8153 case ICE_GTP_NO_PAY:
8154 len = sizeof(struct ice_udp_gtp_hdr);
8157 len = sizeof(struct ice_pppoe_hdr);
8160 len = sizeof(struct ice_esp_hdr);
8163 len = sizeof(struct ice_nat_t_hdr);
8166 len = sizeof(struct ice_ah_hdr);
8169 len = sizeof(struct ice_l2tpv3_sess_hdr);
8172 return ICE_ERR_PARAM;
8175 /* the length should be a word multiple */
8176 if (len % ICE_BYTES_PER_WORD)
8179 /* We have the offset to the header start, the length, the
8180 * caller's header values and mask. Use this information to
8181 * copy the data into the dummy packet appropriately based on
8182 * the mask. Note that we need to only write the bits as
8183 * indicated by the mask to make sure we don't improperly write
8184 * over any significant packet data.
/* Per 16-bit word: keep template bits where the mask is clear, take the
 * caller's header bits where the mask is set. Words with an all-zero
 * mask are left untouched.
 */
8186 for (j = 0; j < len / sizeof(u16); j++)
8187 if (((u16 *)&lkups[i].m_u)[j])
8188 ((u16 *)(pkt + offset))[j] =
8189 (((u16 *)(pkt + offset))[j] &
8190 ~((u16 *)&lkups[i].m_u)[j]) |
8191 (((u16 *)&lkups[i].h_u)[j] &
8192 ((u16 *)&lkups[i].m_u)[j]);
/* Record the final header length in the rule (little-endian for HW). */
8195 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
8201 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
8202 * @hw: pointer to the hardware structure
8203 * @tun_type: tunnel type
8204 * @pkt: dummy packet to fill in
8205 * @offsets: offset info for the dummy packet
8207 static enum ice_status
8208 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
8209 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
/* Patches the dummy packet's outer UDP destination port with the
 * currently-open tunnel port for UDP-based tunnel types (VXLAN family
 * and GENEVE). Other tunnel types need no patching.
 */
8214 case ICE_SW_TUN_AND_NON_TUN:
8215 case ICE_SW_TUN_VXLAN_GPE:
8216 case ICE_SW_TUN_VXLAN:
8217 case ICE_SW_TUN_VXLAN_VLAN:
8218 case ICE_SW_TUN_UDP:
/* VXLAN-family tunnels use the open VXLAN tunnel port. */
8219 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
8223 case ICE_SW_TUN_GENEVE:
8224 case ICE_SW_TUN_GENEVE_VLAN:
8225 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
8230 /* Nothing needs to be done for this tunnel type */
8234 /* Find the outer UDP protocol header and insert the port number */
8235 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
8236 if (offsets[i].type == ICE_UDP_OF) {
8237 struct ice_l4_hdr *hdr;
8240 offset = offsets[i].offset;
8241 hdr = (struct ice_l4_hdr *)&pkt[offset];
/* Tunnel port is written big-endian, as it appears on the wire. */
8242 hdr->dst_port = CPU_TO_BE16(open_port);
8252 * ice_find_adv_rule_entry - Search a rule entry
8253 * @hw: pointer to the hardware structure
8254 * @lkups: lookup elements or match criteria for the advanced recipe, one
8255 * structure per protocol header
8256 * @lkups_cnt: number of protocols
8257 * @recp_id: recipe ID for which we are finding the rule
8258 * @rinfo: other information regarding the rule e.g. priority and action info
8260 * Helper function to search for a given advance rule entry
8261 * Returns pointer to entry storing the rule if found
8263 static struct ice_adv_fltr_mgmt_list_entry *
8264 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8265 u16 lkups_cnt, u16 recp_id,
8266 struct ice_adv_rule_info *rinfo)
/* Linear search of the recipe's filter-rule list for an entry whose
 * lookup array (count and contents, via memcmp) and rule info
 * (sw_act.flag, tun_type) match the caller's; returns the matching
 * entry, or NULL if none is found (return path elided in this excerpt).
 */
8268 struct ice_adv_fltr_mgmt_list_entry *list_itr;
8269 struct ice_switch_info *sw = hw->switch_info;
8272 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
8273 ice_adv_fltr_mgmt_list_entry, list_entry) {
8274 bool lkups_matched = true;
/* Different lookup counts can never match; skip early. */
8276 if (lkups_cnt != list_itr->lkups_cnt)
8278 for (i = 0; i < list_itr->lkups_cnt; i++)
8279 if (memcmp(&list_itr->lkups[i], &lkups[i],
8281 lkups_matched = false;
8284 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
8285 rinfo->tun_type == list_itr->rule_info.tun_type &&
8293 * ice_adv_add_update_vsi_list
8294 * @hw: pointer to the hardware structure
8295 * @m_entry: pointer to current adv filter management list entry
8296 * @cur_fltr: filter information from the book keeping entry
8297 * @new_fltr: filter information with the new VSI to be added
8299 * Call AQ command to add or update previously created VSI list with new VSI.
8301 * Helper function to do book keeping associated with adding filter information
8302 * The algorithm to do the bookkeeping is described below:
8303 * When a VSI needs to subscribe to a given advanced filter
8304 * if only one VSI has been added till now
8305 * Allocate a new VSI list and add two VSIs
8306 * to this list using switch rule command
8307 * Update the previously created switch rule with the
8308 * newly created VSI list ID
8309 * if a VSI list was previously created
8310 * Add the new VSI to the previously created VSI list set
8311 * using the update switch rule command
8313 static enum ice_status
8314 ice_adv_add_update_vsi_list(struct ice_hw *hw,
8315 struct ice_adv_fltr_mgmt_list_entry *m_entry,
8316 struct ice_adv_rule_info *cur_fltr,
8317 struct ice_adv_rule_info *new_fltr)
/* Subscribes an additional VSI to an existing advanced filter: on the
 * second VSI a VSI list is created and the rule is re-pointed at it;
 * afterwards new VSIs are simply added to that list.
 */
8319 enum ice_status status;
8320 u16 vsi_list_id = 0;
/* Queue/queue-group/drop actions cannot be converted to a VSI list. */
8322 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
8323 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
8324 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
8325 return ICE_ERR_NOT_IMPL;
/* Mixing a queue-forward request with an existing VSI-forward rule
 * (or vice versa) is not supported either.
 */
8327 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
8328 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
8329 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
8330 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
8331 return ICE_ERR_NOT_IMPL;
8333 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
8334 /* Only one entry existed in the mapping and it was not already
8335 * a part of a VSI list. So, create a VSI list with the old and
8338 struct ice_fltr_info tmp_fltr;
8339 u16 vsi_handle_arr[2];
8341 /* A rule already exists with the new VSI being added */
8342 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
8343 new_fltr->sw_act.fwd_id.hw_vsi_id)
8344 return ICE_ERR_ALREADY_EXISTS;
/* Build a two-entry VSI list from the existing and the new VSI. */
8346 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
8347 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
8348 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
8354 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
8355 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
8356 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
8357 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
8358 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
8359 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
8361 /* Update the previous switch rule of "forward to VSI" to
8364 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Record the new VSI-list forwarding in the book-keeping entry. */
8368 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
8369 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
8370 m_entry->vsi_list_info =
8371 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
8374 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
8376 if (!m_entry->vsi_list_info)
8379 /* A rule already exists with the new VSI being added */
8380 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
8383 /* Update the previously created VSI list set with
8384 * the new VSI ID passed in
8386 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
8388 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
8390 ice_aqc_opc_update_sw_rules,
8392 /* update VSI list mapping info with new VSI ID */
8394 ice_set_bit(vsi_handle,
8395 m_entry->vsi_list_info->vsi_map);
/* One more VSI now subscribes to this filter. */
8398 m_entry->vsi_count++;
8403 * ice_add_adv_rule - helper function to create an advanced switch rule
8404 * @hw: pointer to the hardware structure
8405 * @lkups: information on the words that needs to be looked up. All words
8406 * together makes one recipe
8407 * @lkups_cnt: num of entries in the lkups array
8408 * @rinfo: other information related to the rule that needs to be programmed
8409 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
8410 * ignored in case of error.
8412 * This function can program only 1 rule at a time. The lkups is used to
8413 * describe all the words that form the "lookup" portion of the recipe.
8414 * These words can span multiple protocols. Callers to this function need to
8415 * pass in a list of protocol headers with lookup information along and mask
8416 * that determines which words are valid from the given protocol header.
8417 * rinfo describes other information related to this rule such as forwarding
8418 * IDs, priority of this rule, etc.
8421 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8422 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
8423 struct ice_rule_query_data *added_entry)
/* Programs one advanced switch rule: validates the lookups, finds a
 * matching dummy packet, (re)uses or creates a recipe, builds the AQ
 * switch-rule buffer, sends it to FW, and records the rule in the
 * driver's book-keeping list. If an identical rule already exists the
 * VSI is added to that rule's VSI list instead.
 */
8425 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
8426 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
8427 const struct ice_dummy_pkt_offsets *pkt_offsets;
8428 struct ice_aqc_sw_rules_elem *s_rule = NULL;
8429 struct LIST_HEAD_TYPE *rule_head;
8430 struct ice_switch_info *sw;
8431 enum ice_status status;
8432 const u8 *pkt = NULL;
8438 /* Initialize profile to result index bitmap */
8439 if (!hw->switch_info->prof_res_bm_init) {
8440 hw->switch_info->prof_res_bm_init = 1;
8441 ice_init_prof_result_bm(hw);
/* Profile rules are allowed to carry no lookups; others are not. */
8444 prof_rule = ice_is_prof_rule(rinfo->tun_type);
8445 if (!prof_rule && !lkups_cnt)
8446 return ICE_ERR_PARAM;
8448 /* get # of words we need to match */
/* Count the non-zero 16-bit mask words across all lookups; HW limits
 * the total match words per recipe chain.
 */
8450 for (i = 0; i < lkups_cnt; i++) {
8453 ptr = (u16 *)&lkups[i].m_u;
8454 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
8460 if (word_cnt > ICE_MAX_CHAIN_WORDS)
8461 return ICE_ERR_PARAM;
8463 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
8464 return ICE_ERR_PARAM;
8467 /* make sure that we can locate a dummy packet */
8468 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
8471 status = ICE_ERR_PARAM;
8472 goto err_ice_add_adv_rule;
/* Only VSI/queue/queue-group forwarding and drop are supported. */
8475 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
8476 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
8477 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
8478 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
8481 vsi_handle = rinfo->sw_act.vsi_handle;
8482 if (!ice_is_vsi_valid(hw, vsi_handle))
8483 return ICE_ERR_PARAM;
/* Resolve the driver VSI handle to the HW VSI number where needed. */
8485 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
8486 rinfo->sw_act.fwd_id.hw_vsi_id =
8487 ice_get_hw_vsi_num(hw, vsi_handle);
8488 if (rinfo->sw_act.flag & ICE_FLTR_TX)
8489 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
8491 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
8494 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
8496 /* we have to add VSI to VSI_LIST and increment vsi_count.
8497 * Also Update VSI list so that we can change forwarding rule
8498 * if the rule already exists, we will check if it exists with
8499 * same vsi_id, if not then add it to the VSI list if it already
8500 * exists if not then create a VSI list and add the existing VSI
8501 * ID and the new VSI ID to the list
8502 * We will add that VSI to the list
8504 status = ice_adv_add_update_vsi_list(hw, m_entry,
8505 &m_entry->rule_info,
8508 added_entry->rid = rid;
8509 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
8510 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* No existing rule matched: build a fresh AQ rule buffer sized for the
 * header-less rule element plus the dummy packet.
 */
8514 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
8515 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
8517 return ICE_ERR_NO_MEMORY;
8518 act |= ICE_SINGLE_ACT_LAN_ENABLE;
/* Encode the forwarding action bits for the HW rule. */
8519 switch (rinfo->sw_act.fltr_act) {
8520 case ICE_FWD_TO_VSI:
8521 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
8522 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
8523 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
8526 act |= ICE_SINGLE_ACT_TO_Q;
8527 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
8528 ICE_SINGLE_ACT_Q_INDEX_M;
8530 case ICE_FWD_TO_QGRP:
/* Queue region size is encoded as log2 of the group size. */
8531 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
8532 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
8533 act |= ICE_SINGLE_ACT_TO_Q;
8534 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
8535 ICE_SINGLE_ACT_Q_INDEX_M;
8536 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
8537 ICE_SINGLE_ACT_Q_REGION_M;
8539 case ICE_DROP_PACKET:
8540 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
8541 ICE_SINGLE_ACT_VALID_BIT;
8544 status = ICE_ERR_CFG;
8545 goto err_ice_add_adv_rule;
8548 /* set the rule LOOKUP type based on caller specified 'RX'
8549 * instead of hardcoding it to be either LOOKUP_TX/RX
8551 * for 'RX' set the source to be the port number
8552 * for 'TX' set the source to be the source HW VSI number (determined
8556 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
8557 s_rule->pdata.lkup_tx_rx.src =
8558 CPU_TO_LE16(hw->port_info->lport);
8560 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
8561 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
8564 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
8565 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* Fill the dummy packet in the rule with the caller's match data. */
8567 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
8568 pkt_len, pkt_offsets);
8570 goto err_ice_add_adv_rule;
/* For real tunnel types, patch the outer UDP tunnel port in-place. */
8572 if (rinfo->tun_type != ICE_NON_TUN &&
8573 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
8574 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
8575 s_rule->pdata.lkup_tx_rx.hdr,
8578 goto err_ice_add_adv_rule;
/* Submit the rule to FW via the admin queue. */
8581 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
8582 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
8585 goto err_ice_add_adv_rule;
8586 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
8587 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
8589 status = ICE_ERR_NO_MEMORY;
8590 goto err_ice_add_adv_rule;
/* Keep a private copy of the lookups for later matching/replay. */
8593 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
8594 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
8595 ICE_NONDMA_TO_NONDMA);
8596 if (!adv_fltr->lkups && !prof_rule) {
8597 status = ICE_ERR_NO_MEMORY;
8598 goto err_ice_add_adv_rule;
8601 adv_fltr->lkups_cnt = lkups_cnt;
8602 adv_fltr->rule_info = *rinfo;
/* FW returns the assigned rule index in the response buffer. */
8603 adv_fltr->rule_info.fltr_rule_id =
8604 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
8605 sw = hw->switch_info;
8606 sw->recp_list[rid].adv_rule = true;
8607 rule_head = &sw->recp_list[rid].filt_rules;
8609 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
8610 adv_fltr->vsi_count = 1;
8612 /* Add rule entry to book keeping list */
8613 LIST_ADD(&adv_fltr->list_entry, rule_head);
8615 added_entry->rid = rid;
8616 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
8617 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
8619 err_ice_add_adv_rule:
/* On failure, release the partially built book-keeping entry; the AQ
 * rule buffer is freed on every path.
 */
8620 if (status && adv_fltr) {
8621 ice_free(hw, adv_fltr->lkups);
8622 ice_free(hw, adv_fltr);
8625 ice_free(hw, s_rule);
8631 * ice_adv_rem_update_vsi_list
8632 * @hw: pointer to the hardware structure
8633 * @vsi_handle: VSI handle of the VSI to remove
8634 * @fm_list: filter management entry for which the VSI list management needs to
8637 static enum ice_status
8638 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
8639 struct ice_adv_fltr_mgmt_list_entry *fm_list)
/* Removes one VSI from an advanced rule's VSI list. When only one VSI
 * remains afterwards, the rule is converted back to plain
 * forward-to-VSI and the now-redundant VSI list is destroyed.
 */
8641 struct ice_vsi_list_map_info *vsi_list_info;
8642 enum ice_sw_lkup_type lkup_type;
8643 enum ice_status status;
/* Only rules that currently forward to a VSI list can be trimmed. */
8646 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
8647 fm_list->vsi_count == 0)
8648 return ICE_ERR_PARAM;
8650 /* A rule with the VSI being removed does not exist */
8651 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
8652 return ICE_ERR_DOES_NOT_EXIST;
8654 lkup_type = ICE_SW_LKUP_LAST;
8655 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
/* 'true' requests removal of the VSI from the HW VSI list. */
8656 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
8657 ice_aqc_opc_update_sw_rules,
8662 fm_list->vsi_count--;
8663 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
8664 vsi_list_info = fm_list->vsi_list_info;
8665 if (fm_list->vsi_count == 1) {
8666 struct ice_fltr_info tmp_fltr;
/* Exactly one VSI is left: find it and fold the rule back to a
 * direct forward-to-VSI action.
 */
8669 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
8671 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
8672 return ICE_ERR_OUT_OF_RANGE;
8674 /* Make sure VSI list is empty before removing it below */
8675 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
8677 ice_aqc_opc_update_sw_rules,
8682 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
8683 tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
8684 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
8685 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
8686 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
8687 tmp_fltr.fwd_id.hw_vsi_id =
8688 ice_get_hw_vsi_num(hw, rem_vsi_handle);
8689 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
8690 ice_get_hw_vsi_num(hw, rem_vsi_handle);
8691 fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
8693 /* Update the previous switch rule of "MAC forward to VSI" to
8694 * "MAC fwd to VSI list"
8696 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
8698 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
8699 tmp_fltr.fwd_id.hw_vsi_id, status);
8702 fm_list->vsi_list_info->ref_cnt--;
8704 /* Remove the VSI list since it is no longer used */
8705 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
8707 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
8708 vsi_list_id, status);
/* Drop the driver-side map entry for the removed VSI list. */
8712 LIST_DEL(&vsi_list_info->list_entry);
8713 ice_free(hw, vsi_list_info);
8714 fm_list->vsi_list_info = NULL;
8721 * ice_rem_adv_rule - removes existing advanced switch rule
8722 * @hw: pointer to the hardware structure
8723 * @lkups: information on the words that needs to be looked up. All words
8724 * together makes one recipe
8725 * @lkups_cnt: num of entries in the lkups array
8726 * @rinfo: pointer to the rule information for the rule
8728 * This function can be used to remove 1 rule at a time. The lkups is
8729 * used to describe all the words that forms the "lookup" portion of the
8730 * rule. These words can span multiple protocols. Callers to this function
8731 * need to pass in a list of protocol headers with lookup information along
8732 * and mask that determines which words are valid from the given protocol
8733 * header. rinfo describes other information related to this rule such as
8734 * forwarding IDs, priority of this rule, etc.
8737 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8738 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
/* Removes one advanced rule: rebuilds the lookup-extraction words to
 * locate the recipe, finds the matching book-keeping entry, detaches
 * the VSI (possibly shrinking a VSI list), and deletes the HW rule and
 * the entry once no VSI uses it.
 */
8740 struct ice_adv_fltr_mgmt_list_entry *list_elem;
8741 struct ice_prot_lkup_ext lkup_exts;
8742 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
8743 enum ice_status status = ICE_SUCCESS;
8744 bool remove_rule = false;
8745 u16 i, rid, vsi_handle;
8747 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
/* Re-derive the protocol/offset words from the caller's lookups so the
 * owning recipe can be found.
 */
8748 for (i = 0; i < lkups_cnt; i++) {
8751 if (lkups[i].type >= ICE_PROTOCOL_LAST)
8754 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
8759 /* Create any special protocol/offset pairs, such as looking at tunnel
8760 * bits by extracting metadata
8762 status = ice_add_special_words(rinfo, &lkup_exts);
8766 rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
8767 /* If did not find a recipe that match the existing criteria */
8768 if (rid == ICE_MAX_NUM_RECIPES)
8769 return ICE_ERR_PARAM;
8771 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
8772 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
8773 /* the rule is already removed */
/* Decide under the rule lock whether the HW rule itself must go or
 * only this VSI's subscription to it.
 */
8776 ice_acquire_lock(rule_lock);
8777 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
8779 } else if (list_elem->vsi_count > 1) {
/* More subscribers remain: just drop this VSI from the list. */
8780 remove_rule = false;
8781 vsi_handle = rinfo->sw_act.vsi_handle;
8782 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
8784 vsi_handle = rinfo->sw_act.vsi_handle;
8785 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
8787 ice_release_lock(rule_lock);
8790 if (list_elem->vsi_count == 0)
8793 ice_release_lock(rule_lock);
8795 struct ice_aqc_sw_rules_elem *s_rule;
/* Delete the HW rule: a header-less rule element carrying only the
 * rule index is enough for the remove opcode.
 */
8798 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
8799 s_rule = (struct ice_aqc_sw_rules_elem *)
8800 ice_malloc(hw, rule_buf_sz);
8802 return ICE_ERR_NO_MEMORY;
8803 s_rule->pdata.lkup_tx_rx.act = 0;
8804 s_rule->pdata.lkup_tx_rx.index =
8805 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
8806 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
8807 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
8809 ice_aqc_opc_remove_sw_rules, NULL);
/* Treat "does not exist" like success: the rule is gone either way,
 * so the book-keeping entry must still be cleaned up.
 */
8810 if (status == ICE_SUCCESS || status == ICE_ERR_DOES_NOT_EXIST) {
8811 struct ice_switch_info *sw = hw->switch_info;
8813 ice_acquire_lock(rule_lock);
8814 LIST_DEL(&list_elem->list_entry);
8815 ice_free(hw, list_elem->lkups);
8816 ice_free(hw, list_elem);
8817 ice_release_lock(rule_lock);
8818 if (LIST_EMPTY(&sw->recp_list[rid].filt_rules))
8819 sw->recp_list[rid].adv_rule = false;
8821 ice_free(hw, s_rule);
8827 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
8828 * @hw: pointer to the hardware structure
8829 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
8831 * This function is used to remove 1 rule at a time. The removal is based on
8832 * the remove_entry parameter. This function will remove rule for a given
8833 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
8836 ice_rem_adv_rule_by_id(struct ice_hw *hw,
8837 struct ice_rule_query_data *remove_entry)
/* Looks up the rule with the given rule_id in the recipe's filter list
 * and delegates its removal to ice_rem_adv_rule(), substituting the
 * caller's VSI handle into the stored rule info.
 */
8839 struct ice_adv_fltr_mgmt_list_entry *list_itr;
8840 struct LIST_HEAD_TYPE *list_head;
8841 struct ice_adv_rule_info rinfo;
8842 struct ice_switch_info *sw;
8844 sw = hw->switch_info;
/* The recipe must have been created before any of its rules can be
 * removed by ID.
 */
8845 if (!sw->recp_list[remove_entry->rid].recp_created)
8846 return ICE_ERR_PARAM;
8847 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
8848 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
8850 if (list_itr->rule_info.fltr_rule_id ==
8851 remove_entry->rule_id) {
/* Use a local copy so the stored rule info is not mutated. */
8852 rinfo = list_itr->rule_info;
8853 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
8854 return ice_rem_adv_rule(hw, list_itr->lkups,
8855 list_itr->lkups_cnt, &rinfo);
8858 /* either list is empty or unable to find rule */
8859 return ICE_ERR_DOES_NOT_EXIST;
8863 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
8865 * @hw: pointer to the hardware structure
8866 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
8868 * This function is used to remove all the rules for a given VSI and as soon
8869 * as removing a rule fails, it will return immediately with the error code,
8870 * else it will return ICE_SUCCESS
8872 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
/* Walks every created recipe that holds advanced rules and removes each
 * rule the given VSI subscribes to, either directly (vsi_handle match)
 * or via membership in the rule's VSI list.
 */
8874 struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
8875 struct ice_vsi_list_map_info *map_info;
8876 struct LIST_HEAD_TYPE *list_head;
8877 struct ice_adv_rule_info rinfo;
8878 struct ice_switch_info *sw;
8879 enum ice_status status;
8882 sw = hw->switch_info;
8883 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
/* Skip recipes that were never created or hold no advanced rules. */
8884 if (!sw->recp_list[rid].recp_created)
8886 if (!sw->recp_list[rid].adv_rule)
8889 list_head = &sw->recp_list[rid].filt_rules;
/* SAFE iteration: ice_rem_adv_rule() below deletes entries from the
 * list while we traverse it.
 */
8890 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp_entry, list_head,
8891 ice_adv_fltr_mgmt_list_entry,
8893 rinfo = list_itr->rule_info;
8895 if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
/* For VSI-list rules, check membership via the VSI bitmap. */
8896 map_info = list_itr->vsi_list_info;
8900 if (!ice_is_bit_set(map_info->vsi_map,
8903 } else if (rinfo.sw_act.vsi_handle != vsi_handle) {
8907 rinfo.sw_act.vsi_handle = vsi_handle;
8908 status = ice_rem_adv_rule(hw, list_itr->lkups,
8909 list_itr->lkups_cnt, &rinfo);
8919 * ice_replay_fltr - Replay all the filters stored by a specific list head
8920 * @hw: pointer to the hardware structure
8921 * @list_head: list for which filters needs to be replayed
8922 * @recp_id: Recipe ID for which rules need to be replayed
8924 static enum ice_status
8925 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
/* Re-programs every stored filter of one recipe after a reset: the
 * existing book-keeping entries are moved to a temporary list (so
 * re-adding does not report "already exists") and replayed one by one,
 * per-VSI for entries that had a VSI list.
 */
8927 struct ice_fltr_mgmt_list_entry *itr;
8928 enum ice_status status = ICE_SUCCESS;
8929 struct ice_sw_recipe *recp_list;
8930 u8 lport = hw->port_info->lport;
8931 struct LIST_HEAD_TYPE l_head;
8933 if (LIST_EMPTY(list_head))
8936 recp_list = &hw->switch_info->recp_list[recp_id];
8937 /* Move entries from the given list_head to a temporary l_head so that
8938 * they can be replayed. Otherwise when trying to re-add the same
8939 * filter, the function will return already exists
8941 LIST_REPLACE_INIT(list_head, &l_head);
8943 /* Mark the given list_head empty by reinitializing it so filters
8944 * could be added again by *handler
8946 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
8948 struct ice_fltr_list_entry f_entry;
8951 f_entry.fltr_info = itr->fltr_info;
/* Single-VSI, non-VLAN entries replay as one direct rule. */
8952 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
8953 status = ice_add_rule_internal(hw, recp_list, lport,
8955 if (status != ICE_SUCCESS)
8960 /* Add a filter per VSI separately */
8961 ice_for_each_set_bit(vsi_handle, itr->vsi_list_info->vsi_map,
8963 if (!ice_is_vsi_valid(hw, vsi_handle))
/* Clear the bit first; the re-add path sets it again. */
8966 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
8967 f_entry.fltr_info.vsi_handle = vsi_handle;
8968 f_entry.fltr_info.fwd_id.hw_vsi_id =
8969 ice_get_hw_vsi_num(hw, vsi_handle);
8970 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
/* VLAN filters have a dedicated add path. */
8971 if (recp_id == ICE_SW_LKUP_VLAN)
8972 status = ice_add_vlan_internal(hw, recp_list,
8975 status = ice_add_rule_internal(hw, recp_list,
8978 if (status != ICE_SUCCESS)
8983 /* Clear the filter management list */
8984 ice_rem_sw_rule_info(hw, &l_head);
8989 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
8990 * @hw: pointer to the hardware structure
8992 * NOTE: This function does not clean up partially added filters on error.
8993 * It is up to caller of the function to issue a reset or fail early.
8995 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
/* Replays the stored filters of every recipe in order, stopping at the
 * first failure (partially replayed state is left for the caller to
 * handle, per the note above).
 */
8997 struct ice_switch_info *sw = hw->switch_info;
8998 enum ice_status status = ICE_SUCCESS;
9001 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9002 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
9004 status = ice_replay_fltr(hw, i, head);
9005 if (status != ICE_SUCCESS)
9012 * ice_replay_vsi_fltr - Replay filters for requested VSI
9013 * @hw: pointer to the hardware structure
9014 * @pi: pointer to port information structure
9015 * @sw: pointer to switch info struct for which function replays filters
9016 * @vsi_handle: driver VSI handle
9017 * @recp_id: Recipe ID for which rules need to be replayed
9018 * @list_head: list for which filters need to be replayed
9020 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
9021 * It is required to pass valid VSI handle.
9023 static enum ice_status
9024 ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
9025 struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
9026 struct LIST_HEAD_TYPE *list_head)
/* Replays the filters of one recipe for a single VSI: direct rules that
 * name this VSI are re-added as-is; rules whose VSI list contains this
 * VSI are re-added as forward-to-VSI for it.
 */
9028 struct ice_fltr_mgmt_list_entry *itr;
9029 enum ice_status status = ICE_SUCCESS;
9030 struct ice_sw_recipe *recp_list;
9033 if (LIST_EMPTY(list_head))
9035 recp_list = &sw->recp_list[recp_id];
9036 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
9038 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
9040 struct ice_fltr_list_entry f_entry;
9042 f_entry.fltr_info = itr->fltr_info;
/* Direct (non-list, non-VLAN) rule owned by this VSI. */
9043 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
9044 itr->fltr_info.vsi_handle == vsi_handle) {
9045 /* update the src in case it is VSI num */
9046 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
9047 f_entry.fltr_info.src = hw_vsi_id;
9048 status = ice_add_rule_internal(hw, recp_list,
9051 if (status != ICE_SUCCESS)
/* Otherwise, the rule is only relevant if its VSI list includes
 * this VSI.
 */
9055 if (!itr->vsi_list_info ||
9056 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
9058 /* Clearing it so that the logic can add it back */
9059 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map)
9060 f_entry.fltr_info.vsi_handle = vsi_handle;
9061 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
9062 /* update the src in case it is VSI num */
9063 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
9064 f_entry.fltr_info.src = hw_vsi_id;
9065 if (recp_id == ICE_SW_LKUP_VLAN)
9066 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
9068 status = ice_add_rule_internal(hw, recp_list,
9071 if (status != ICE_SUCCESS)
9079 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
9080 * @hw: pointer to the hardware structure
9081 * @vsi_handle: driver VSI handle
9082 * @list_head: list for which filters need to be replayed
9084 * Replay the advanced rule for the given VSI.
9086 static enum ice_status
9087 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
9088 struct LIST_HEAD_TYPE *list_head)
9090 struct ice_rule_query_data added_entry = { 0 };
9091 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
9092 enum ice_status status = ICE_SUCCESS;
9094 if (LIST_EMPTY(list_head))
9096 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
9098 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
9099 u16 lk_cnt = adv_fltr->lkups_cnt;
9101 if (vsi_handle != rinfo->sw_act.vsi_handle)
9103 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
9112 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
9113 * @hw: pointer to the hardware structure
9114 * @pi: pointer to port information structure
9115 * @vsi_handle: driver VSI handle
9117 * Replays filters for requested VSI via vsi_handle.
9120 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
9123 struct ice_switch_info *sw = hw->switch_info;
9124 enum ice_status status;
9127 /* Update the recipes that were created */
9128 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9129 struct LIST_HEAD_TYPE *head;
9131 head = &sw->recp_list[i].filt_replay_rules;
9132 if (!sw->recp_list[i].adv_rule)
9133 status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
9136 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
9137 if (status != ICE_SUCCESS)
9145 * ice_rm_all_sw_replay_rule - helper function to delete filter replay rules
9146 * @hw: pointer to the HW struct
9147 * @sw: pointer to switch info struct for which function removes filters
9149 * Deletes the filter replay rules for given switch
9151 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
9158 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9159 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
9160 struct LIST_HEAD_TYPE *l_head;
9162 l_head = &sw->recp_list[i].filt_replay_rules;
9163 if (!sw->recp_list[i].adv_rule)
9164 ice_rem_sw_rule_info(hw, l_head);
9166 ice_rem_adv_rule_info(hw, l_head);
9172 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
9173 * @hw: pointer to the HW struct
9175 * Deletes the filter replay rules.
9177 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
9179 ice_rm_sw_replay_rule_info(hw, hw->switch_info);