1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2021 Intel Corporation
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
12 #define ICE_MAX_VLAN_ID 0xFFF
13 #define ICE_IPV4_NVGRE_PROTO_ID 0x002F
14 #define ICE_PPP_IPV6_PROTO_ID 0x0057
15 #define ICE_IPV6_ETHER_ID 0x86DD
16 #define ICE_TCP_PROTO_ID 0x06
17 #define ICE_ETH_P_8021Q 0x8100
19 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
20 * struct to configure any switch filter rules.
21 * {DA (6 bytes), SA(6 bytes),
22 * Ether type (2 bytes for header without VLAN tag) OR
23 * VLAN tag (4 bytes for header with VLAN tag) }
25 * Word on Hardcoded values
26 * byte 0 = 0x2: to identify it as locally administered DA MAC
27 * byte 6 = 0x2: to identify it as locally administered SA MAC
28 * byte 12 = 0x81 & byte 13 = 0x00:
29 * In case of VLAN filter first two bytes defines ether type (0x8100)
30 * and remaining two bytes are placeholder for programming a given VLAN ID
31 * In case of Ether type filter it is treated as header without VLAN tag
32 * and byte 12 and 13 is used to program a given Ether type instead
34 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
38 struct ice_dummy_pkt_offsets {
39 enum ice_protocol_type type;
40 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
43 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
46 { ICE_IPV4_OFOS, 14 },
51 { ICE_PROTOCOL_LAST, 0 },
54 static const u8 dummy_gre_tcp_packet[] = {
55 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
56 0x00, 0x00, 0x00, 0x00,
57 0x00, 0x00, 0x00, 0x00,
59 0x08, 0x00, /* ICE_ETYPE_OL 12 */
61 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
62 0x00, 0x00, 0x00, 0x00,
63 0x00, 0x2F, 0x00, 0x00,
64 0x00, 0x00, 0x00, 0x00,
65 0x00, 0x00, 0x00, 0x00,
67 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
68 0x00, 0x00, 0x00, 0x00,
70 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
71 0x00, 0x00, 0x00, 0x00,
72 0x00, 0x00, 0x00, 0x00,
75 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
76 0x00, 0x00, 0x00, 0x00,
77 0x00, 0x06, 0x00, 0x00,
78 0x00, 0x00, 0x00, 0x00,
79 0x00, 0x00, 0x00, 0x00,
81 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
82 0x00, 0x00, 0x00, 0x00,
83 0x00, 0x00, 0x00, 0x00,
84 0x50, 0x02, 0x20, 0x00,
85 0x00, 0x00, 0x00, 0x00
88 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
91 { ICE_IPV4_OFOS, 14 },
96 { ICE_PROTOCOL_LAST, 0 },
99 static const u8 dummy_gre_udp_packet[] = {
100 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
101 0x00, 0x00, 0x00, 0x00,
102 0x00, 0x00, 0x00, 0x00,
104 0x08, 0x00, /* ICE_ETYPE_OL 12 */
106 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
107 0x00, 0x00, 0x00, 0x00,
108 0x00, 0x2F, 0x00, 0x00,
109 0x00, 0x00, 0x00, 0x00,
110 0x00, 0x00, 0x00, 0x00,
112 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
113 0x00, 0x00, 0x00, 0x00,
115 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
116 0x00, 0x00, 0x00, 0x00,
117 0x00, 0x00, 0x00, 0x00,
120 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
121 0x00, 0x00, 0x00, 0x00,
122 0x00, 0x11, 0x00, 0x00,
123 0x00, 0x00, 0x00, 0x00,
124 0x00, 0x00, 0x00, 0x00,
126 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
127 0x00, 0x08, 0x00, 0x00,
130 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
132 { ICE_ETYPE_OL, 12 },
133 { ICE_IPV4_OFOS, 14 },
137 { ICE_VXLAN_GPE, 42 },
141 { ICE_PROTOCOL_LAST, 0 },
144 static const u8 dummy_udp_tun_tcp_packet[] = {
145 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
146 0x00, 0x00, 0x00, 0x00,
147 0x00, 0x00, 0x00, 0x00,
149 0x08, 0x00, /* ICE_ETYPE_OL 12 */
151 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
152 0x00, 0x01, 0x00, 0x00,
153 0x40, 0x11, 0x00, 0x00,
154 0x00, 0x00, 0x00, 0x00,
155 0x00, 0x00, 0x00, 0x00,
157 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
158 0x00, 0x46, 0x00, 0x00,
160 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
161 0x00, 0x00, 0x00, 0x00,
163 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
164 0x00, 0x00, 0x00, 0x00,
165 0x00, 0x00, 0x00, 0x00,
168 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
169 0x00, 0x01, 0x00, 0x00,
170 0x40, 0x06, 0x00, 0x00,
171 0x00, 0x00, 0x00, 0x00,
172 0x00, 0x00, 0x00, 0x00,
174 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
175 0x00, 0x00, 0x00, 0x00,
176 0x00, 0x00, 0x00, 0x00,
177 0x50, 0x02, 0x20, 0x00,
178 0x00, 0x00, 0x00, 0x00
181 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
183 { ICE_ETYPE_OL, 12 },
184 { ICE_IPV4_OFOS, 14 },
188 { ICE_VXLAN_GPE, 42 },
191 { ICE_UDP_ILOS, 84 },
192 { ICE_PROTOCOL_LAST, 0 },
195 static const u8 dummy_udp_tun_udp_packet[] = {
196 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
197 0x00, 0x00, 0x00, 0x00,
198 0x00, 0x00, 0x00, 0x00,
200 0x08, 0x00, /* ICE_ETYPE_OL 12 */
202 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
203 0x00, 0x01, 0x00, 0x00,
204 0x00, 0x11, 0x00, 0x00,
205 0x00, 0x00, 0x00, 0x00,
206 0x00, 0x00, 0x00, 0x00,
208 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
209 0x00, 0x3a, 0x00, 0x00,
211 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
212 0x00, 0x00, 0x00, 0x00,
214 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
215 0x00, 0x00, 0x00, 0x00,
216 0x00, 0x00, 0x00, 0x00,
219 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
220 0x00, 0x01, 0x00, 0x00,
221 0x00, 0x11, 0x00, 0x00,
222 0x00, 0x00, 0x00, 0x00,
223 0x00, 0x00, 0x00, 0x00,
225 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
226 0x00, 0x08, 0x00, 0x00,
229 /* offset info for MAC + IPv4 + UDP dummy packet */
230 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
232 { ICE_ETYPE_OL, 12 },
233 { ICE_IPV4_OFOS, 14 },
234 { ICE_UDP_ILOS, 34 },
235 { ICE_PROTOCOL_LAST, 0 },
238 /* Dummy packet for MAC + IPv4 + UDP */
239 static const u8 dummy_udp_packet[] = {
240 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
241 0x00, 0x00, 0x00, 0x00,
242 0x00, 0x00, 0x00, 0x00,
244 0x08, 0x00, /* ICE_ETYPE_OL 12 */
246 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
247 0x00, 0x01, 0x00, 0x00,
248 0x00, 0x11, 0x00, 0x00,
249 0x00, 0x00, 0x00, 0x00,
250 0x00, 0x00, 0x00, 0x00,
252 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
253 0x00, 0x08, 0x00, 0x00,
255 0x00, 0x00, /* 2 bytes for 4 byte alignment */
258 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
259 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
261 { ICE_ETYPE_OL, 12 },
262 { ICE_VLAN_OFOS, 14 },
263 { ICE_IPV4_OFOS, 18 },
264 { ICE_UDP_ILOS, 38 },
265 { ICE_PROTOCOL_LAST, 0 },
268 /* C-tag (801.1Q), IPv4:UDP dummy packet */
269 static const u8 dummy_vlan_udp_packet[] = {
270 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
271 0x00, 0x00, 0x00, 0x00,
272 0x00, 0x00, 0x00, 0x00,
274 0x81, 0x00, /* ICE_ETYPE_OL 12 */
276 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
278 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
279 0x00, 0x01, 0x00, 0x00,
280 0x00, 0x11, 0x00, 0x00,
281 0x00, 0x00, 0x00, 0x00,
282 0x00, 0x00, 0x00, 0x00,
284 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
285 0x00, 0x08, 0x00, 0x00,
287 0x00, 0x00, /* 2 bytes for 4 byte alignment */
290 /* offset info for MAC + IPv4 + TCP dummy packet */
291 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
293 { ICE_ETYPE_OL, 12 },
294 { ICE_IPV4_OFOS, 14 },
296 { ICE_PROTOCOL_LAST, 0 },
299 /* Dummy packet for MAC + IPv4 + TCP */
300 static const u8 dummy_tcp_packet[] = {
301 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
302 0x00, 0x00, 0x00, 0x00,
303 0x00, 0x00, 0x00, 0x00,
305 0x08, 0x00, /* ICE_ETYPE_OL 12 */
307 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
308 0x00, 0x01, 0x00, 0x00,
309 0x00, 0x06, 0x00, 0x00,
310 0x00, 0x00, 0x00, 0x00,
311 0x00, 0x00, 0x00, 0x00,
313 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
314 0x00, 0x00, 0x00, 0x00,
315 0x00, 0x00, 0x00, 0x00,
316 0x50, 0x00, 0x00, 0x00,
317 0x00, 0x00, 0x00, 0x00,
319 0x00, 0x00, /* 2 bytes for 4 byte alignment */
322 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
323 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
325 { ICE_ETYPE_OL, 12 },
326 { ICE_VLAN_OFOS, 14 },
327 { ICE_IPV4_OFOS, 18 },
329 { ICE_PROTOCOL_LAST, 0 },
332 /* C-tag (801.1Q), IPv4:TCP dummy packet */
333 static const u8 dummy_vlan_tcp_packet[] = {
334 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
335 0x00, 0x00, 0x00, 0x00,
336 0x00, 0x00, 0x00, 0x00,
338 0x81, 0x00, /* ICE_ETYPE_OL 12 */
340 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
342 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
343 0x00, 0x01, 0x00, 0x00,
344 0x00, 0x06, 0x00, 0x00,
345 0x00, 0x00, 0x00, 0x00,
346 0x00, 0x00, 0x00, 0x00,
348 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
349 0x00, 0x00, 0x00, 0x00,
350 0x00, 0x00, 0x00, 0x00,
351 0x50, 0x00, 0x00, 0x00,
352 0x00, 0x00, 0x00, 0x00,
354 0x00, 0x00, /* 2 bytes for 4 byte alignment */
357 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
359 { ICE_ETYPE_OL, 12 },
360 { ICE_IPV6_OFOS, 14 },
362 { ICE_PROTOCOL_LAST, 0 },
365 static const u8 dummy_tcp_ipv6_packet[] = {
366 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
367 0x00, 0x00, 0x00, 0x00,
368 0x00, 0x00, 0x00, 0x00,
370 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
372 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
373 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00,
379 0x00, 0x00, 0x00, 0x00,
380 0x00, 0x00, 0x00, 0x00,
381 0x00, 0x00, 0x00, 0x00,
383 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
384 0x00, 0x00, 0x00, 0x00,
385 0x00, 0x00, 0x00, 0x00,
386 0x50, 0x00, 0x00, 0x00,
387 0x00, 0x00, 0x00, 0x00,
389 0x00, 0x00, /* 2 bytes for 4 byte alignment */
392 /* C-tag (802.1Q): IPv6 + TCP */
393 static const struct ice_dummy_pkt_offsets
394 dummy_vlan_tcp_ipv6_packet_offsets[] = {
396 { ICE_ETYPE_OL, 12 },
397 { ICE_VLAN_OFOS, 14 },
398 { ICE_IPV6_OFOS, 18 },
400 { ICE_PROTOCOL_LAST, 0 },
403 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
404 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
405 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
406 0x00, 0x00, 0x00, 0x00,
407 0x00, 0x00, 0x00, 0x00,
409 0x81, 0x00, /* ICE_ETYPE_OL 12 */
411 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
413 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
414 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
415 0x00, 0x00, 0x00, 0x00,
416 0x00, 0x00, 0x00, 0x00,
417 0x00, 0x00, 0x00, 0x00,
418 0x00, 0x00, 0x00, 0x00,
419 0x00, 0x00, 0x00, 0x00,
420 0x00, 0x00, 0x00, 0x00,
421 0x00, 0x00, 0x00, 0x00,
422 0x00, 0x00, 0x00, 0x00,
424 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
425 0x00, 0x00, 0x00, 0x00,
426 0x00, 0x00, 0x00, 0x00,
427 0x50, 0x00, 0x00, 0x00,
428 0x00, 0x00, 0x00, 0x00,
430 0x00, 0x00, /* 2 bytes for 4 byte alignment */
434 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
436 { ICE_ETYPE_OL, 12 },
437 { ICE_IPV6_OFOS, 14 },
438 { ICE_UDP_ILOS, 54 },
439 { ICE_PROTOCOL_LAST, 0 },
442 /* IPv6 + UDP dummy packet */
443 static const u8 dummy_udp_ipv6_packet[] = {
444 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
445 0x00, 0x00, 0x00, 0x00,
446 0x00, 0x00, 0x00, 0x00,
448 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
450 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
451 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
452 0x00, 0x00, 0x00, 0x00,
453 0x00, 0x00, 0x00, 0x00,
454 0x00, 0x00, 0x00, 0x00,
455 0x00, 0x00, 0x00, 0x00,
456 0x00, 0x00, 0x00, 0x00,
457 0x00, 0x00, 0x00, 0x00,
458 0x00, 0x00, 0x00, 0x00,
459 0x00, 0x00, 0x00, 0x00,
461 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
462 0x00, 0x10, 0x00, 0x00,
464 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
465 0x00, 0x00, 0x00, 0x00,
467 0x00, 0x00, /* 2 bytes for 4 byte alignment */
470 /* C-tag (802.1Q): IPv6 + UDP */
471 static const struct ice_dummy_pkt_offsets
472 dummy_vlan_udp_ipv6_packet_offsets[] = {
474 { ICE_ETYPE_OL, 12 },
475 { ICE_VLAN_OFOS, 14 },
476 { ICE_IPV6_OFOS, 18 },
477 { ICE_UDP_ILOS, 58 },
478 { ICE_PROTOCOL_LAST, 0 },
481 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
482 static const u8 dummy_vlan_udp_ipv6_packet[] = {
483 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
484 0x00, 0x00, 0x00, 0x00,
485 0x00, 0x00, 0x00, 0x00,
487 0x81, 0x00, /* ICE_ETYPE_OL 12 */
489 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
491 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
492 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
493 0x00, 0x00, 0x00, 0x00,
494 0x00, 0x00, 0x00, 0x00,
495 0x00, 0x00, 0x00, 0x00,
496 0x00, 0x00, 0x00, 0x00,
497 0x00, 0x00, 0x00, 0x00,
498 0x00, 0x00, 0x00, 0x00,
499 0x00, 0x00, 0x00, 0x00,
500 0x00, 0x00, 0x00, 0x00,
502 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
503 0x00, 0x08, 0x00, 0x00,
505 0x00, 0x00, /* 2 bytes for 4 byte alignment */
508 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
509 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_tcp_packet_offsets[] = {
511 { ICE_IPV4_OFOS, 14 },
516 { ICE_PROTOCOL_LAST, 0 },
519 static const u8 dummy_ipv4_gtpu_ipv4_tcp_packet[] = {
520 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
521 0x00, 0x00, 0x00, 0x00,
522 0x00, 0x00, 0x00, 0x00,
525 0x45, 0x00, 0x00, 0x58, /* IP 14 */
526 0x00, 0x00, 0x00, 0x00,
527 0x00, 0x11, 0x00, 0x00,
528 0x00, 0x00, 0x00, 0x00,
529 0x00, 0x00, 0x00, 0x00,
531 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
532 0x00, 0x44, 0x00, 0x00,
534 0x34, 0xff, 0x00, 0x34, /* GTP-U Header 42 */
535 0x00, 0x00, 0x00, 0x00,
536 0x00, 0x00, 0x00, 0x85,
538 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
539 0x00, 0x00, 0x00, 0x00,
541 0x45, 0x00, 0x00, 0x28, /* IP 62 */
542 0x00, 0x00, 0x00, 0x00,
543 0x00, 0x06, 0x00, 0x00,
544 0x00, 0x00, 0x00, 0x00,
545 0x00, 0x00, 0x00, 0x00,
547 0x00, 0x00, 0x00, 0x00, /* TCP 82 */
548 0x00, 0x00, 0x00, 0x00,
549 0x00, 0x00, 0x00, 0x00,
550 0x50, 0x00, 0x00, 0x00,
551 0x00, 0x00, 0x00, 0x00,
553 0x00, 0x00, /* 2 bytes for 4 byte alignment */
556 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
557 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_udp_packet_offsets[] = {
559 { ICE_IPV4_OFOS, 14 },
563 { ICE_UDP_ILOS, 82 },
564 { ICE_PROTOCOL_LAST, 0 },
567 static const u8 dummy_ipv4_gtpu_ipv4_udp_packet[] = {
568 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
569 0x00, 0x00, 0x00, 0x00,
570 0x00, 0x00, 0x00, 0x00,
573 0x45, 0x00, 0x00, 0x4c, /* IP 14 */
574 0x00, 0x00, 0x00, 0x00,
575 0x00, 0x11, 0x00, 0x00,
576 0x00, 0x00, 0x00, 0x00,
577 0x00, 0x00, 0x00, 0x00,
579 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
580 0x00, 0x38, 0x00, 0x00,
582 0x34, 0xff, 0x00, 0x28, /* GTP-U Header 42 */
583 0x00, 0x00, 0x00, 0x00,
584 0x00, 0x00, 0x00, 0x85,
586 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
587 0x00, 0x00, 0x00, 0x00,
589 0x45, 0x00, 0x00, 0x1c, /* IP 62 */
590 0x00, 0x00, 0x00, 0x00,
591 0x00, 0x11, 0x00, 0x00,
592 0x00, 0x00, 0x00, 0x00,
593 0x00, 0x00, 0x00, 0x00,
595 0x00, 0x00, 0x00, 0x00, /* UDP 82 */
596 0x00, 0x08, 0x00, 0x00,
598 0x00, 0x00, /* 2 bytes for 4 byte alignment */
601 /* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
602 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_tcp_packet_offsets[] = {
604 { ICE_IPV4_OFOS, 14 },
609 { ICE_PROTOCOL_LAST, 0 },
612 static const u8 dummy_ipv4_gtpu_ipv6_tcp_packet[] = {
613 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
614 0x00, 0x00, 0x00, 0x00,
615 0x00, 0x00, 0x00, 0x00,
618 0x45, 0x00, 0x00, 0x6c, /* IP 14 */
619 0x00, 0x00, 0x00, 0x00,
620 0x00, 0x11, 0x00, 0x00,
621 0x00, 0x00, 0x00, 0x00,
622 0x00, 0x00, 0x00, 0x00,
624 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
625 0x00, 0x58, 0x00, 0x00,
627 0x34, 0xff, 0x00, 0x48, /* GTP-U Header 42 */
628 0x00, 0x00, 0x00, 0x00,
629 0x00, 0x00, 0x00, 0x85,
631 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
632 0x00, 0x00, 0x00, 0x00,
634 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
635 0x00, 0x14, 0x06, 0x00,
636 0x00, 0x00, 0x00, 0x00,
637 0x00, 0x00, 0x00, 0x00,
638 0x00, 0x00, 0x00, 0x00,
639 0x00, 0x00, 0x00, 0x00,
640 0x00, 0x00, 0x00, 0x00,
641 0x00, 0x00, 0x00, 0x00,
642 0x00, 0x00, 0x00, 0x00,
643 0x00, 0x00, 0x00, 0x00,
645 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
646 0x00, 0x00, 0x00, 0x00,
647 0x00, 0x00, 0x00, 0x00,
648 0x50, 0x00, 0x00, 0x00,
649 0x00, 0x00, 0x00, 0x00,
651 0x00, 0x00, /* 2 bytes for 4 byte alignment */
654 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_udp_packet_offsets[] = {
656 { ICE_IPV4_OFOS, 14 },
660 { ICE_UDP_ILOS, 102 },
661 { ICE_PROTOCOL_LAST, 0 },
664 static const u8 dummy_ipv4_gtpu_ipv6_udp_packet[] = {
665 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
666 0x00, 0x00, 0x00, 0x00,
667 0x00, 0x00, 0x00, 0x00,
670 0x45, 0x00, 0x00, 0x60, /* IP 14 */
671 0x00, 0x00, 0x00, 0x00,
672 0x00, 0x11, 0x00, 0x00,
673 0x00, 0x00, 0x00, 0x00,
674 0x00, 0x00, 0x00, 0x00,
676 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
677 0x00, 0x4c, 0x00, 0x00,
679 0x34, 0xff, 0x00, 0x3c, /* GTP-U Header 42 */
680 0x00, 0x00, 0x00, 0x00,
681 0x00, 0x00, 0x00, 0x85,
683 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
684 0x00, 0x00, 0x00, 0x00,
686 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
687 0x00, 0x08, 0x11, 0x00,
688 0x00, 0x00, 0x00, 0x00,
689 0x00, 0x00, 0x00, 0x00,
690 0x00, 0x00, 0x00, 0x00,
691 0x00, 0x00, 0x00, 0x00,
692 0x00, 0x00, 0x00, 0x00,
693 0x00, 0x00, 0x00, 0x00,
694 0x00, 0x00, 0x00, 0x00,
695 0x00, 0x00, 0x00, 0x00,
697 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
698 0x00, 0x08, 0x00, 0x00,
700 0x00, 0x00, /* 2 bytes for 4 byte alignment */
703 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_tcp_packet_offsets[] = {
705 { ICE_IPV6_OFOS, 14 },
710 { ICE_PROTOCOL_LAST, 0 },
713 static const u8 dummy_ipv6_gtpu_ipv4_tcp_packet[] = {
714 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
715 0x00, 0x00, 0x00, 0x00,
716 0x00, 0x00, 0x00, 0x00,
719 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
720 0x00, 0x44, 0x11, 0x00,
721 0x00, 0x00, 0x00, 0x00,
722 0x00, 0x00, 0x00, 0x00,
723 0x00, 0x00, 0x00, 0x00,
724 0x00, 0x00, 0x00, 0x00,
725 0x00, 0x00, 0x00, 0x00,
726 0x00, 0x00, 0x00, 0x00,
727 0x00, 0x00, 0x00, 0x00,
728 0x00, 0x00, 0x00, 0x00,
730 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
731 0x00, 0x44, 0x00, 0x00,
733 0x34, 0xff, 0x00, 0x34, /* GTP-U Header 62 */
734 0x00, 0x00, 0x00, 0x00,
735 0x00, 0x00, 0x00, 0x85,
737 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
738 0x00, 0x00, 0x00, 0x00,
740 0x45, 0x00, 0x00, 0x28, /* IP 82 */
741 0x00, 0x00, 0x00, 0x00,
742 0x00, 0x06, 0x00, 0x00,
743 0x00, 0x00, 0x00, 0x00,
744 0x00, 0x00, 0x00, 0x00,
746 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
747 0x00, 0x00, 0x00, 0x00,
748 0x00, 0x00, 0x00, 0x00,
749 0x50, 0x00, 0x00, 0x00,
750 0x00, 0x00, 0x00, 0x00,
752 0x00, 0x00, /* 2 bytes for 4 byte alignment */
755 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_udp_packet_offsets[] = {
757 { ICE_IPV6_OFOS, 14 },
761 { ICE_UDP_ILOS, 102 },
762 { ICE_PROTOCOL_LAST, 0 },
765 static const u8 dummy_ipv6_gtpu_ipv4_udp_packet[] = {
766 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
767 0x00, 0x00, 0x00, 0x00,
768 0x00, 0x00, 0x00, 0x00,
771 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
772 0x00, 0x38, 0x11, 0x00,
773 0x00, 0x00, 0x00, 0x00,
774 0x00, 0x00, 0x00, 0x00,
775 0x00, 0x00, 0x00, 0x00,
776 0x00, 0x00, 0x00, 0x00,
777 0x00, 0x00, 0x00, 0x00,
778 0x00, 0x00, 0x00, 0x00,
779 0x00, 0x00, 0x00, 0x00,
780 0x00, 0x00, 0x00, 0x00,
782 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
783 0x00, 0x38, 0x00, 0x00,
785 0x34, 0xff, 0x00, 0x28, /* GTP-U Header 62 */
786 0x00, 0x00, 0x00, 0x00,
787 0x00, 0x00, 0x00, 0x85,
789 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
790 0x00, 0x00, 0x00, 0x00,
792 0x45, 0x00, 0x00, 0x1c, /* IP 82 */
793 0x00, 0x00, 0x00, 0x00,
794 0x00, 0x11, 0x00, 0x00,
795 0x00, 0x00, 0x00, 0x00,
796 0x00, 0x00, 0x00, 0x00,
798 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
799 0x00, 0x08, 0x00, 0x00,
801 0x00, 0x00, /* 2 bytes for 4 byte alignment */
804 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_tcp_packet_offsets[] = {
806 { ICE_IPV6_OFOS, 14 },
811 { ICE_PROTOCOL_LAST, 0 },
814 static const u8 dummy_ipv6_gtpu_ipv6_tcp_packet[] = {
815 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
816 0x00, 0x00, 0x00, 0x00,
817 0x00, 0x00, 0x00, 0x00,
820 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
821 0x00, 0x58, 0x11, 0x00,
822 0x00, 0x00, 0x00, 0x00,
823 0x00, 0x00, 0x00, 0x00,
824 0x00, 0x00, 0x00, 0x00,
825 0x00, 0x00, 0x00, 0x00,
826 0x00, 0x00, 0x00, 0x00,
827 0x00, 0x00, 0x00, 0x00,
828 0x00, 0x00, 0x00, 0x00,
829 0x00, 0x00, 0x00, 0x00,
831 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
832 0x00, 0x58, 0x00, 0x00,
834 0x34, 0xff, 0x00, 0x48, /* GTP-U Header 62 */
835 0x00, 0x00, 0x00, 0x00,
836 0x00, 0x00, 0x00, 0x85,
838 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
839 0x00, 0x00, 0x00, 0x00,
841 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
842 0x00, 0x14, 0x06, 0x00,
843 0x00, 0x00, 0x00, 0x00,
844 0x00, 0x00, 0x00, 0x00,
845 0x00, 0x00, 0x00, 0x00,
846 0x00, 0x00, 0x00, 0x00,
847 0x00, 0x00, 0x00, 0x00,
848 0x00, 0x00, 0x00, 0x00,
849 0x00, 0x00, 0x00, 0x00,
850 0x00, 0x00, 0x00, 0x00,
852 0x00, 0x00, 0x00, 0x00, /* TCP 122 */
853 0x00, 0x00, 0x00, 0x00,
854 0x00, 0x00, 0x00, 0x00,
855 0x50, 0x00, 0x00, 0x00,
856 0x00, 0x00, 0x00, 0x00,
858 0x00, 0x00, /* 2 bytes for 4 byte alignment */
861 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_udp_packet_offsets[] = {
863 { ICE_IPV6_OFOS, 14 },
867 { ICE_UDP_ILOS, 102 },
868 { ICE_PROTOCOL_LAST, 0 },
871 static const u8 dummy_ipv6_gtpu_ipv6_udp_packet[] = {
872 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
873 0x00, 0x00, 0x00, 0x00,
874 0x00, 0x00, 0x00, 0x00,
877 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
878 0x00, 0x4c, 0x11, 0x00,
879 0x00, 0x00, 0x00, 0x00,
880 0x00, 0x00, 0x00, 0x00,
881 0x00, 0x00, 0x00, 0x00,
882 0x00, 0x00, 0x00, 0x00,
883 0x00, 0x00, 0x00, 0x00,
884 0x00, 0x00, 0x00, 0x00,
885 0x00, 0x00, 0x00, 0x00,
886 0x00, 0x00, 0x00, 0x00,
888 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
889 0x00, 0x4c, 0x00, 0x00,
891 0x34, 0xff, 0x00, 0x3c, /* GTP-U Header 62 */
892 0x00, 0x00, 0x00, 0x00,
893 0x00, 0x00, 0x00, 0x85,
895 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
896 0x00, 0x00, 0x00, 0x00,
898 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
899 0x00, 0x08, 0x11, 0x00,
900 0x00, 0x00, 0x00, 0x00,
901 0x00, 0x00, 0x00, 0x00,
902 0x00, 0x00, 0x00, 0x00,
903 0x00, 0x00, 0x00, 0x00,
904 0x00, 0x00, 0x00, 0x00,
905 0x00, 0x00, 0x00, 0x00,
906 0x00, 0x00, 0x00, 0x00,
907 0x00, 0x00, 0x00, 0x00,
909 0x00, 0x00, 0x00, 0x00, /* UDP 122 */
910 0x00, 0x08, 0x00, 0x00,
912 0x00, 0x00, /* 2 bytes for 4 byte alignment */
915 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_packet_offsets[] = {
917 { ICE_IPV4_OFOS, 14 },
921 { ICE_PROTOCOL_LAST, 0 },
924 static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
925 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
926 0x00, 0x00, 0x00, 0x00,
927 0x00, 0x00, 0x00, 0x00,
930 0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
931 0x00, 0x00, 0x40, 0x00,
932 0x40, 0x11, 0x00, 0x00,
933 0x00, 0x00, 0x00, 0x00,
934 0x00, 0x00, 0x00, 0x00,
936 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
937 0x00, 0x00, 0x00, 0x00,
939 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
940 0x00, 0x00, 0x00, 0x00,
941 0x00, 0x00, 0x00, 0x85,
943 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
944 0x00, 0x00, 0x00, 0x00,
946 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
947 0x00, 0x00, 0x40, 0x00,
948 0x40, 0x00, 0x00, 0x00,
949 0x00, 0x00, 0x00, 0x00,
950 0x00, 0x00, 0x00, 0x00,
955 struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_packet_offsets[] = {
957 { ICE_IPV4_OFOS, 14 },
961 { ICE_PROTOCOL_LAST, 0 },
964 static const u8 dummy_ipv4_gtpu_ipv6_packet[] = {
965 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
966 0x00, 0x00, 0x00, 0x00,
967 0x00, 0x00, 0x00, 0x00,
970 0x45, 0x00, 0x00, 0x58, /* ICE_IPV4_OFOS 14 */
971 0x00, 0x00, 0x40, 0x00,
972 0x40, 0x11, 0x00, 0x00,
973 0x00, 0x00, 0x00, 0x00,
974 0x00, 0x00, 0x00, 0x00,
976 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
977 0x00, 0x00, 0x00, 0x00,
979 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
980 0x00, 0x00, 0x00, 0x00,
981 0x00, 0x00, 0x00, 0x85,
983 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
984 0x00, 0x00, 0x00, 0x00,
986 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 62 */
987 0x00, 0x00, 0x3b, 0x00,
988 0x00, 0x00, 0x00, 0x00,
989 0x00, 0x00, 0x00, 0x00,
990 0x00, 0x00, 0x00, 0x00,
991 0x00, 0x00, 0x00, 0x00,
992 0x00, 0x00, 0x00, 0x00,
993 0x00, 0x00, 0x00, 0x00,
994 0x00, 0x00, 0x00, 0x00,
995 0x00, 0x00, 0x00, 0x00,
1001 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_packet_offsets[] = {
1002 { ICE_MAC_OFOS, 0 },
1003 { ICE_IPV6_OFOS, 14 },
1006 { ICE_IPV4_IL, 82 },
1007 { ICE_PROTOCOL_LAST, 0 },
1010 static const u8 dummy_ipv6_gtpu_ipv4_packet[] = {
1011 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1012 0x00, 0x00, 0x00, 0x00,
1013 0x00, 0x00, 0x00, 0x00,
1016 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1017 0x00, 0x58, 0x11, 0x00, /* Next header UDP*/
1018 0x00, 0x00, 0x00, 0x00,
1019 0x00, 0x00, 0x00, 0x00,
1020 0x00, 0x00, 0x00, 0x00,
1021 0x00, 0x00, 0x00, 0x00,
1022 0x00, 0x00, 0x00, 0x00,
1023 0x00, 0x00, 0x00, 0x00,
1024 0x00, 0x00, 0x00, 0x00,
1025 0x00, 0x00, 0x00, 0x00,
1027 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1028 0x00, 0x00, 0x00, 0x00,
1030 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 62 */
1031 0x00, 0x00, 0x00, 0x00,
1032 0x00, 0x00, 0x00, 0x85,
1034 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1035 0x00, 0x00, 0x00, 0x00,
1037 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 82 */
1038 0x00, 0x00, 0x40, 0x00,
1039 0x40, 0x00, 0x00, 0x00,
1040 0x00, 0x00, 0x00, 0x00,
1041 0x00, 0x00, 0x00, 0x00,
1047 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_packet_offsets[] = {
1048 { ICE_MAC_OFOS, 0 },
1049 { ICE_IPV6_OFOS, 14 },
1052 { ICE_IPV6_IL, 82 },
1053 { ICE_PROTOCOL_LAST, 0 },
1056 static const u8 dummy_ipv6_gtpu_ipv6_packet[] = {
1057 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1058 0x00, 0x00, 0x00, 0x00,
1059 0x00, 0x00, 0x00, 0x00,
1062 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1063 0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
1064 0x00, 0x00, 0x00, 0x00,
1065 0x00, 0x00, 0x00, 0x00,
1066 0x00, 0x00, 0x00, 0x00,
1067 0x00, 0x00, 0x00, 0x00,
1068 0x00, 0x00, 0x00, 0x00,
1069 0x00, 0x00, 0x00, 0x00,
1070 0x00, 0x00, 0x00, 0x00,
1071 0x00, 0x00, 0x00, 0x00,
1073 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1074 0x00, 0x00, 0x00, 0x00,
1076 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 62 */
1077 0x00, 0x00, 0x00, 0x00,
1078 0x00, 0x00, 0x00, 0x85,
1080 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1081 0x00, 0x00, 0x00, 0x00,
1083 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFIL 82 */
1084 0x00, 0x00, 0x3b, 0x00,
1085 0x00, 0x00, 0x00, 0x00,
1086 0x00, 0x00, 0x00, 0x00,
1087 0x00, 0x00, 0x00, 0x00,
1088 0x00, 0x00, 0x00, 0x00,
1089 0x00, 0x00, 0x00, 0x00,
1090 0x00, 0x00, 0x00, 0x00,
1091 0x00, 0x00, 0x00, 0x00,
1092 0x00, 0x00, 0x00, 0x00,
1097 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
1098 { ICE_MAC_OFOS, 0 },
1099 { ICE_IPV4_OFOS, 14 },
1102 { ICE_PROTOCOL_LAST, 0 },
1105 static const u8 dummy_udp_gtp_packet[] = {
1106 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1107 0x00, 0x00, 0x00, 0x00,
1108 0x00, 0x00, 0x00, 0x00,
1111 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
1112 0x00, 0x00, 0x00, 0x00,
1113 0x00, 0x11, 0x00, 0x00,
1114 0x00, 0x00, 0x00, 0x00,
1115 0x00, 0x00, 0x00, 0x00,
1117 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
1118 0x00, 0x1c, 0x00, 0x00,
1120 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
1121 0x00, 0x00, 0x00, 0x00,
1122 0x00, 0x00, 0x00, 0x85,
1124 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1125 0x00, 0x00, 0x00, 0x00,
1128 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
1129 { ICE_MAC_OFOS, 0 },
1130 { ICE_IPV4_OFOS, 14 },
1132 { ICE_GTP_NO_PAY, 42 },
1133 { ICE_PROTOCOL_LAST, 0 },
1137 struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
1138 { ICE_MAC_OFOS, 0 },
1139 { ICE_IPV6_OFOS, 14 },
1141 { ICE_GTP_NO_PAY, 62 },
1142 { ICE_PROTOCOL_LAST, 0 },
1145 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
1146 { ICE_MAC_OFOS, 0 },
1147 { ICE_ETYPE_OL, 12 },
1148 { ICE_VLAN_OFOS, 14},
1150 { ICE_PROTOCOL_LAST, 0 },
1153 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv4_offsets[] = {
1154 { ICE_MAC_OFOS, 0 },
1155 { ICE_ETYPE_OL, 12 },
1156 { ICE_VLAN_OFOS, 14},
1158 { ICE_IPV4_OFOS, 26 },
1159 { ICE_PROTOCOL_LAST, 0 },
1162 static const u8 dummy_pppoe_ipv4_packet[] = {
1163 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1164 0x00, 0x00, 0x00, 0x00,
1165 0x00, 0x00, 0x00, 0x00,
1167 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1169 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1171 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1174 0x00, 0x21, /* PPP Link Layer 24 */
1176 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
1177 0x00, 0x00, 0x00, 0x00,
1178 0x00, 0x00, 0x00, 0x00,
1179 0x00, 0x00, 0x00, 0x00,
1180 0x00, 0x00, 0x00, 0x00,
1182 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1186 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_tcp_packet_offsets[] = {
1187 { ICE_MAC_OFOS, 0 },
1188 { ICE_ETYPE_OL, 12 },
1189 { ICE_VLAN_OFOS, 14},
1191 { ICE_IPV4_OFOS, 26 },
1193 { ICE_PROTOCOL_LAST, 0 },
1196 static const u8 dummy_pppoe_ipv4_tcp_packet[] = {
1197 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1198 0x00, 0x00, 0x00, 0x00,
1199 0x00, 0x00, 0x00, 0x00,
1201 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1203 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1205 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1208 0x00, 0x21, /* PPP Link Layer 24 */
1210 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 26 */
1211 0x00, 0x01, 0x00, 0x00,
1212 0x00, 0x06, 0x00, 0x00,
1213 0x00, 0x00, 0x00, 0x00,
1214 0x00, 0x00, 0x00, 0x00,
1216 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 46 */
1217 0x00, 0x00, 0x00, 0x00,
1218 0x00, 0x00, 0x00, 0x00,
1219 0x50, 0x00, 0x00, 0x00,
1220 0x00, 0x00, 0x00, 0x00,
1222 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1226 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_udp_packet_offsets[] = {
1227 { ICE_MAC_OFOS, 0 },
1228 { ICE_ETYPE_OL, 12 },
1229 { ICE_VLAN_OFOS, 14},
1231 { ICE_IPV4_OFOS, 26 },
1232 { ICE_UDP_ILOS, 46 },
1233 { ICE_PROTOCOL_LAST, 0 },
1236 static const u8 dummy_pppoe_ipv4_udp_packet[] = {
1237 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1238 0x00, 0x00, 0x00, 0x00,
1239 0x00, 0x00, 0x00, 0x00,
1241 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1243 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1245 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1248 0x00, 0x21, /* PPP Link Layer 24 */
1250 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 26 */
1251 0x00, 0x01, 0x00, 0x00,
1252 0x00, 0x11, 0x00, 0x00,
1253 0x00, 0x00, 0x00, 0x00,
1254 0x00, 0x00, 0x00, 0x00,
1256 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 46 */
1257 0x00, 0x08, 0x00, 0x00,
1259 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1262 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_offsets[] = {
1263 { ICE_MAC_OFOS, 0 },
1264 { ICE_ETYPE_OL, 12 },
1265 { ICE_VLAN_OFOS, 14},
1267 { ICE_IPV6_OFOS, 26 },
1268 { ICE_PROTOCOL_LAST, 0 },
1271 static const u8 dummy_pppoe_ipv6_packet[] = {
1272 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1273 0x00, 0x00, 0x00, 0x00,
1274 0x00, 0x00, 0x00, 0x00,
1276 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1278 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1280 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1283 0x00, 0x57, /* PPP Link Layer 24 */
1285 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
1286 0x00, 0x00, 0x3b, 0x00,
1287 0x00, 0x00, 0x00, 0x00,
1288 0x00, 0x00, 0x00, 0x00,
1289 0x00, 0x00, 0x00, 0x00,
1290 0x00, 0x00, 0x00, 0x00,
1291 0x00, 0x00, 0x00, 0x00,
1292 0x00, 0x00, 0x00, 0x00,
1293 0x00, 0x00, 0x00, 0x00,
1294 0x00, 0x00, 0x00, 0x00,
1296 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1300 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_tcp_offsets[] = {
1301 { ICE_MAC_OFOS, 0 },
1302 { ICE_ETYPE_OL, 12 },
1303 { ICE_VLAN_OFOS, 14},
1305 { ICE_IPV6_OFOS, 26 },
1307 { ICE_PROTOCOL_LAST, 0 },
1310 static const u8 dummy_pppoe_ipv6_tcp_packet[] = {
1311 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1312 0x00, 0x00, 0x00, 0x00,
1313 0x00, 0x00, 0x00, 0x00,
1315 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1317 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1319 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1322 0x00, 0x57, /* PPP Link Layer 24 */
1324 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
1325 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
1326 0x00, 0x00, 0x00, 0x00,
1327 0x00, 0x00, 0x00, 0x00,
1328 0x00, 0x00, 0x00, 0x00,
1329 0x00, 0x00, 0x00, 0x00,
1330 0x00, 0x00, 0x00, 0x00,
1331 0x00, 0x00, 0x00, 0x00,
1332 0x00, 0x00, 0x00, 0x00,
1333 0x00, 0x00, 0x00, 0x00,
1335 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 66 */
1336 0x00, 0x00, 0x00, 0x00,
1337 0x00, 0x00, 0x00, 0x00,
1338 0x50, 0x00, 0x00, 0x00,
1339 0x00, 0x00, 0x00, 0x00,
1341 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1345 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_udp_offsets[] = {
1346 { ICE_MAC_OFOS, 0 },
1347 { ICE_ETYPE_OL, 12 },
1348 { ICE_VLAN_OFOS, 14},
1350 { ICE_IPV6_OFOS, 26 },
1351 { ICE_UDP_ILOS, 66 },
1352 { ICE_PROTOCOL_LAST, 0 },
1355 static const u8 dummy_pppoe_ipv6_udp_packet[] = {
1356 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1357 0x00, 0x00, 0x00, 0x00,
1358 0x00, 0x00, 0x00, 0x00,
1360 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1362 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1364 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1367 0x00, 0x57, /* PPP Link Layer 24 */
1369 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
1370 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
1371 0x00, 0x00, 0x00, 0x00,
1372 0x00, 0x00, 0x00, 0x00,
1373 0x00, 0x00, 0x00, 0x00,
1374 0x00, 0x00, 0x00, 0x00,
1375 0x00, 0x00, 0x00, 0x00,
1376 0x00, 0x00, 0x00, 0x00,
1377 0x00, 0x00, 0x00, 0x00,
1378 0x00, 0x00, 0x00, 0x00,
1380 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 66 */
1381 0x00, 0x08, 0x00, 0x00,
1383 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1386 static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
1387 { ICE_MAC_OFOS, 0 },
1388 { ICE_IPV4_OFOS, 14 },
1390 { ICE_PROTOCOL_LAST, 0 },
1393 static const u8 dummy_ipv4_esp_pkt[] = {
1394 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1395 0x00, 0x00, 0x00, 0x00,
1396 0x00, 0x00, 0x00, 0x00,
1399 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 14 */
1400 0x00, 0x00, 0x40, 0x00,
1401 0x40, 0x32, 0x00, 0x00,
1402 0x00, 0x00, 0x00, 0x00,
1403 0x00, 0x00, 0x00, 0x00,
1405 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
1406 0x00, 0x00, 0x00, 0x00,
1407 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1410 static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
1411 { ICE_MAC_OFOS, 0 },
1412 { ICE_IPV6_OFOS, 14 },
1414 { ICE_PROTOCOL_LAST, 0 },
1417 static const u8 dummy_ipv6_esp_pkt[] = {
1418 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1419 0x00, 0x00, 0x00, 0x00,
1420 0x00, 0x00, 0x00, 0x00,
1423 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1424 0x00, 0x08, 0x32, 0x00, /* Next header ESP */
1425 0x00, 0x00, 0x00, 0x00,
1426 0x00, 0x00, 0x00, 0x00,
1427 0x00, 0x00, 0x00, 0x00,
1428 0x00, 0x00, 0x00, 0x00,
1429 0x00, 0x00, 0x00, 0x00,
1430 0x00, 0x00, 0x00, 0x00,
1431 0x00, 0x00, 0x00, 0x00,
1432 0x00, 0x00, 0x00, 0x00,
1434 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
1435 0x00, 0x00, 0x00, 0x00,
1436 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1439 static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
1440 { ICE_MAC_OFOS, 0 },
1441 { ICE_IPV4_OFOS, 14 },
1443 { ICE_PROTOCOL_LAST, 0 },
1446 static const u8 dummy_ipv4_ah_pkt[] = {
1447 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1448 0x00, 0x00, 0x00, 0x00,
1449 0x00, 0x00, 0x00, 0x00,
1452 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1453 0x00, 0x00, 0x40, 0x00,
1454 0x40, 0x33, 0x00, 0x00,
1455 0x00, 0x00, 0x00, 0x00,
1456 0x00, 0x00, 0x00, 0x00,
1458 0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
1459 0x00, 0x00, 0x00, 0x00,
1460 0x00, 0x00, 0x00, 0x00,
1461 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1464 static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
1465 { ICE_MAC_OFOS, 0 },
1466 { ICE_IPV6_OFOS, 14 },
1468 { ICE_PROTOCOL_LAST, 0 },
1471 static const u8 dummy_ipv6_ah_pkt[] = {
1472 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1473 0x00, 0x00, 0x00, 0x00,
1474 0x00, 0x00, 0x00, 0x00,
1477 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1478 0x00, 0x0c, 0x33, 0x00, /* Next header AH */
1479 0x00, 0x00, 0x00, 0x00,
1480 0x00, 0x00, 0x00, 0x00,
1481 0x00, 0x00, 0x00, 0x00,
1482 0x00, 0x00, 0x00, 0x00,
1483 0x00, 0x00, 0x00, 0x00,
1484 0x00, 0x00, 0x00, 0x00,
1485 0x00, 0x00, 0x00, 0x00,
1486 0x00, 0x00, 0x00, 0x00,
1488 0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
1489 0x00, 0x00, 0x00, 0x00,
1490 0x00, 0x00, 0x00, 0x00,
1491 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1494 static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
1495 { ICE_MAC_OFOS, 0 },
1496 { ICE_IPV4_OFOS, 14 },
1497 { ICE_UDP_ILOS, 34 },
1499 { ICE_PROTOCOL_LAST, 0 },
1502 static const u8 dummy_ipv4_nat_pkt[] = {
1503 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1504 0x00, 0x00, 0x00, 0x00,
1505 0x00, 0x00, 0x00, 0x00,
1508 0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_IL 14 */
1509 0x00, 0x00, 0x40, 0x00,
1510 0x40, 0x11, 0x00, 0x00,
1511 0x00, 0x00, 0x00, 0x00,
1512 0x00, 0x00, 0x00, 0x00,
1514 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34 */
1515 0x00, 0x00, 0x00, 0x00,
1517 0x00, 0x00, 0x00, 0x00,
1518 0x00, 0x00, 0x00, 0x00,
1519 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1522 static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
1523 { ICE_MAC_OFOS, 0 },
1524 { ICE_IPV6_OFOS, 14 },
1525 { ICE_UDP_ILOS, 54 },
1527 { ICE_PROTOCOL_LAST, 0 },
1530 static const u8 dummy_ipv6_nat_pkt[] = {
1531 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1532 0x00, 0x00, 0x00, 0x00,
1533 0x00, 0x00, 0x00, 0x00,
1536 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1537 0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */
1538 0x00, 0x00, 0x00, 0x00,
1539 0x00, 0x00, 0x00, 0x00,
1540 0x00, 0x00, 0x00, 0x00,
1541 0x00, 0x00, 0x00, 0x00,
1542 0x00, 0x00, 0x00, 0x00,
1543 0x00, 0x00, 0x00, 0x00,
1544 0x00, 0x00, 0x00, 0x00,
1545 0x00, 0x00, 0x00, 0x00,
1547 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54 */
1548 0x00, 0x00, 0x00, 0x00,
1550 0x00, 0x00, 0x00, 0x00,
1551 0x00, 0x00, 0x00, 0x00,
1552 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1556 static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
1557 { ICE_MAC_OFOS, 0 },
1558 { ICE_IPV4_OFOS, 14 },
1560 { ICE_PROTOCOL_LAST, 0 },
1563 static const u8 dummy_ipv4_l2tpv3_pkt[] = {
1564 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1565 0x00, 0x00, 0x00, 0x00,
1566 0x00, 0x00, 0x00, 0x00,
1569 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1570 0x00, 0x00, 0x40, 0x00,
1571 0x40, 0x73, 0x00, 0x00,
1572 0x00, 0x00, 0x00, 0x00,
1573 0x00, 0x00, 0x00, 0x00,
1575 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
1576 0x00, 0x00, 0x00, 0x00,
1577 0x00, 0x00, 0x00, 0x00,
1578 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1581 static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
1582 { ICE_MAC_OFOS, 0 },
1583 { ICE_IPV6_OFOS, 14 },
1585 { ICE_PROTOCOL_LAST, 0 },
1588 static const u8 dummy_ipv6_l2tpv3_pkt[] = {
1589 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1590 0x00, 0x00, 0x00, 0x00,
1591 0x00, 0x00, 0x00, 0x00,
1594 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
1595 0x00, 0x0c, 0x73, 0x40,
1596 0x00, 0x00, 0x00, 0x00,
1597 0x00, 0x00, 0x00, 0x00,
1598 0x00, 0x00, 0x00, 0x00,
1599 0x00, 0x00, 0x00, 0x00,
1600 0x00, 0x00, 0x00, 0x00,
1601 0x00, 0x00, 0x00, 0x00,
1602 0x00, 0x00, 0x00, 0x00,
1603 0x00, 0x00, 0x00, 0x00,
1605 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
1606 0x00, 0x00, 0x00, 0x00,
1607 0x00, 0x00, 0x00, 0x00,
1608 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1611 static const struct ice_dummy_pkt_offsets dummy_qinq_ipv4_packet_offsets[] = {
1612 { ICE_MAC_OFOS, 0 },
1613 { ICE_ETYPE_OL, 12 },
1614 { ICE_VLAN_EX, 14 },
1615 { ICE_VLAN_OFOS, 18 },
1616 { ICE_IPV4_OFOS, 22 },
1617 { ICE_PROTOCOL_LAST, 0 },
1620 static const u8 dummy_qinq_ipv4_pkt[] = {
1621 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1622 0x00, 0x00, 0x00, 0x00,
1623 0x00, 0x00, 0x00, 0x00,
1625 0x91, 0x00, /* ICE_ETYPE_OL 12 */
1627 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1628 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 18 */
1630 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
1631 0x00, 0x01, 0x00, 0x00,
1632 0x00, 0x11, 0x00, 0x00,
1633 0x00, 0x00, 0x00, 0x00,
1634 0x00, 0x00, 0x00, 0x00,
1636 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
1637 0x00, 0x08, 0x00, 0x00,
1639 0x00, 0x00, /* 2 bytes for 4 byte alignment */
1642 static const struct ice_dummy_pkt_offsets dummy_qinq_ipv6_packet_offsets[] = {
1643 { ICE_MAC_OFOS, 0 },
1644 { ICE_ETYPE_OL, 12 },
1645 { ICE_VLAN_EX, 14 },
1646 { ICE_VLAN_OFOS, 18 },
1647 { ICE_IPV6_OFOS, 22 },
1648 { ICE_PROTOCOL_LAST, 0 },
1651 static const u8 dummy_qinq_ipv6_pkt[] = {
1652 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1653 0x00, 0x00, 0x00, 0x00,
1654 0x00, 0x00, 0x00, 0x00,
1656 0x91, 0x00, /* ICE_ETYPE_OL 12 */
1658 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1659 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 18 */
1661 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
1662 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
1663 0x00, 0x00, 0x00, 0x00,
1664 0x00, 0x00, 0x00, 0x00,
1665 0x00, 0x00, 0x00, 0x00,
1666 0x00, 0x00, 0x00, 0x00,
1667 0x00, 0x00, 0x00, 0x00,
1668 0x00, 0x00, 0x00, 0x00,
1669 0x00, 0x00, 0x00, 0x00,
1670 0x00, 0x00, 0x00, 0x00,
1672 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
1673 0x00, 0x10, 0x00, 0x00,
1675 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
1676 0x00, 0x00, 0x00, 0x00,
1678 0x00, 0x00, /* 2 bytes for 4 byte alignment */
1681 static const struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_offsets[] = {
1682 { ICE_MAC_OFOS, 0 },
1683 { ICE_ETYPE_OL, 12 },
1684 { ICE_VLAN_EX, 14 },
1685 { ICE_VLAN_OFOS, 18 },
1687 { ICE_PROTOCOL_LAST, 0 },
1691 struct ice_dummy_pkt_offsets dummy_qinq_pppoe_ipv4_packet_offsets[] = {
1692 { ICE_MAC_OFOS, 0 },
1693 { ICE_ETYPE_OL, 12 },
1694 { ICE_VLAN_EX, 14 },
1695 { ICE_VLAN_OFOS, 18 },
1697 { ICE_IPV4_OFOS, 30 },
1698 { ICE_PROTOCOL_LAST, 0 },
1701 static const u8 dummy_qinq_pppoe_ipv4_pkt[] = {
1702 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1703 0x00, 0x00, 0x00, 0x00,
1704 0x00, 0x00, 0x00, 0x00,
1706 0x91, 0x00, /* ICE_ETYPE_OL 12 */
1708 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1709 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 18 */
1711 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
1714 0x00, 0x21, /* PPP Link Layer 28 */
1716 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 30 */
1717 0x00, 0x00, 0x00, 0x00,
1718 0x00, 0x00, 0x00, 0x00,
1719 0x00, 0x00, 0x00, 0x00,
1720 0x00, 0x00, 0x00, 0x00,
1722 0x00, 0x00, /* 2 bytes for 4 byte alignment */
1726 struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_ipv6_offsets[] = {
1727 { ICE_MAC_OFOS, 0 },
1728 { ICE_ETYPE_OL, 12 },
1730 { ICE_VLAN_OFOS, 18 },
1732 { ICE_IPV6_OFOS, 30 },
1733 { ICE_PROTOCOL_LAST, 0 },
1736 static const u8 dummy_qinq_pppoe_ipv6_packet[] = {
1737 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1738 0x00, 0x00, 0x00, 0x00,
1739 0x00, 0x00, 0x00, 0x00,
1741 0x91, 0x00, /* ICE_ETYPE_OL 12 */
1743 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1744 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 18 */
1746 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
1749 0x00, 0x57, /* PPP Link Layer 28*/
1751 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 30 */
1752 0x00, 0x00, 0x3b, 0x00,
1753 0x00, 0x00, 0x00, 0x00,
1754 0x00, 0x00, 0x00, 0x00,
1755 0x00, 0x00, 0x00, 0x00,
1756 0x00, 0x00, 0x00, 0x00,
1757 0x00, 0x00, 0x00, 0x00,
1758 0x00, 0x00, 0x00, 0x00,
1759 0x00, 0x00, 0x00, 0x00,
1760 0x00, 0x00, 0x00, 0x00,
1762 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1765 /* this is a recipe to profile association bitmap */
1766 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
1767 ICE_MAX_NUM_PROFILES);
1769 /* this is a profile to recipe association bitmap */
1770 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
1771 ICE_MAX_NUM_RECIPES);
1773 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
1776 * ice_collect_result_idx - copy result index values
1777 * @buf: buffer that contains the result index
1778 * @recp: the recipe struct to copy data into
1780 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
1781 struct ice_sw_recipe *recp)
1783 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1784 ice_set_bit(buf->content.result_indx &
1785 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
1789 * ice_get_tun_type_for_recipe - get tunnel type for the recipe
1790 * @rid: recipe ID that we are populating
1792 static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid, bool vlan)
1794 u8 vxlan_profile[12] = {10, 11, 12, 16, 17, 18, 22, 23, 24, 25, 26, 27};
1795 u8 gre_profile[12] = {13, 14, 15, 19, 20, 21, 28, 29, 30, 31, 32, 33};
1796 u8 pppoe_profile[7] = {34, 35, 36, 37, 38, 39, 40};
1797 u8 non_tun_profile[6] = {4, 5, 6, 7, 8, 9};
1798 enum ice_sw_tunnel_type tun_type;
1799 u16 i, j, profile_num = 0;
1800 bool non_tun_valid = false;
1801 bool pppoe_valid = false;
1802 bool vxlan_valid = false;
1803 bool gre_valid = false;
1804 bool gtp_valid = false;
1805 bool flag_valid = false;
1807 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1808 if (!ice_is_bit_set(recipe_to_profile[rid], j))
1813 for (i = 0; i < 12; i++) {
1814 if (gre_profile[i] == j)
1818 for (i = 0; i < 12; i++) {
1819 if (vxlan_profile[i] == j)
1823 for (i = 0; i < 7; i++) {
1824 if (pppoe_profile[i] == j)
1828 for (i = 0; i < 6; i++) {
1829 if (non_tun_profile[i] == j)
1830 non_tun_valid = true;
1833 if (j >= ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER &&
1834 j <= ICE_PROFID_IPV6_GTPU_IPV6_TCP)
1837 if ((j >= ICE_PROFID_IPV4_ESP &&
1838 j <= ICE_PROFID_IPV6_PFCP_SESSION) ||
1839 (j >= ICE_PROFID_IPV4_GTPC_TEID &&
1840 j <= ICE_PROFID_IPV6_GTPU_TEID))
1844 if (!non_tun_valid && vxlan_valid)
1845 tun_type = ICE_SW_TUN_VXLAN;
1846 else if (!non_tun_valid && gre_valid)
1847 tun_type = ICE_SW_TUN_NVGRE;
1848 else if (!non_tun_valid && pppoe_valid)
1849 tun_type = ICE_SW_TUN_PPPOE;
1850 else if (!non_tun_valid && gtp_valid)
1851 tun_type = ICE_SW_TUN_GTP;
1852 else if (non_tun_valid &&
1853 (vxlan_valid || gre_valid || gtp_valid || pppoe_valid))
1854 tun_type = ICE_SW_TUN_AND_NON_TUN;
1855 else if (non_tun_valid && !vxlan_valid && !gre_valid && !gtp_valid &&
1857 tun_type = ICE_NON_TUN;
1859 tun_type = ICE_NON_TUN;
1861 if (profile_num > 1 && tun_type == ICE_SW_TUN_PPPOE) {
1862 i = ice_is_bit_set(recipe_to_profile[rid],
1863 ICE_PROFID_PPPOE_IPV4_OTHER);
1864 j = ice_is_bit_set(recipe_to_profile[rid],
1865 ICE_PROFID_PPPOE_IPV6_OTHER);
1867 tun_type = ICE_SW_TUN_PPPOE_IPV4;
1869 tun_type = ICE_SW_TUN_PPPOE_IPV6;
1872 if (tun_type == ICE_SW_TUN_GTP) {
1873 if (ice_is_bit_set(recipe_to_profile[rid],
1874 ICE_PROFID_IPV4_GTPU_IPV4_OTHER))
1875 tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4;
1876 else if (ice_is_bit_set(recipe_to_profile[rid],
1877 ICE_PROFID_IPV4_GTPU_IPV6_OTHER))
1878 tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6;
1879 else if (ice_is_bit_set(recipe_to_profile[rid],
1880 ICE_PROFID_IPV6_GTPU_IPV4_OTHER))
1881 tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4;
1882 else if (ice_is_bit_set(recipe_to_profile[rid],
1883 ICE_PROFID_IPV6_GTPU_IPV6_OTHER))
1884 tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6;
1887 if (profile_num == 1 && (flag_valid || non_tun_valid || pppoe_valid)) {
1888 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1889 if (ice_is_bit_set(recipe_to_profile[rid], j)) {
1891 case ICE_PROFID_IPV4_TCP:
1892 tun_type = ICE_SW_IPV4_TCP;
1894 case ICE_PROFID_IPV4_UDP:
1895 tun_type = ICE_SW_IPV4_UDP;
1897 case ICE_PROFID_IPV6_TCP:
1898 tun_type = ICE_SW_IPV6_TCP;
1900 case ICE_PROFID_IPV6_UDP:
1901 tun_type = ICE_SW_IPV6_UDP;
1903 case ICE_PROFID_PPPOE_PAY:
1904 tun_type = ICE_SW_TUN_PPPOE_PAY;
1906 case ICE_PROFID_PPPOE_IPV4_TCP:
1907 tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1909 case ICE_PROFID_PPPOE_IPV4_UDP:
1910 tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1912 case ICE_PROFID_PPPOE_IPV4_OTHER:
1913 tun_type = ICE_SW_TUN_PPPOE_IPV4;
1915 case ICE_PROFID_PPPOE_IPV6_TCP:
1916 tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1918 case ICE_PROFID_PPPOE_IPV6_UDP:
1919 tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1921 case ICE_PROFID_PPPOE_IPV6_OTHER:
1922 tun_type = ICE_SW_TUN_PPPOE_IPV6;
1924 case ICE_PROFID_IPV4_ESP:
1925 tun_type = ICE_SW_TUN_IPV4_ESP;
1927 case ICE_PROFID_IPV6_ESP:
1928 tun_type = ICE_SW_TUN_IPV6_ESP;
1930 case ICE_PROFID_IPV4_AH:
1931 tun_type = ICE_SW_TUN_IPV4_AH;
1933 case ICE_PROFID_IPV6_AH:
1934 tun_type = ICE_SW_TUN_IPV6_AH;
1936 case ICE_PROFID_IPV4_NAT_T:
1937 tun_type = ICE_SW_TUN_IPV4_NAT_T;
1939 case ICE_PROFID_IPV6_NAT_T:
1940 tun_type = ICE_SW_TUN_IPV6_NAT_T;
1942 case ICE_PROFID_IPV4_PFCP_NODE:
1944 ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1946 case ICE_PROFID_IPV6_PFCP_NODE:
1948 ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1950 case ICE_PROFID_IPV4_PFCP_SESSION:
1952 ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1954 case ICE_PROFID_IPV6_PFCP_SESSION:
1956 ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1958 case ICE_PROFID_MAC_IPV4_L2TPV3:
1959 tun_type = ICE_SW_TUN_IPV4_L2TPV3;
1961 case ICE_PROFID_MAC_IPV6_L2TPV3:
1962 tun_type = ICE_SW_TUN_IPV6_L2TPV3;
1964 case ICE_PROFID_IPV4_GTPU_TEID:
1965 tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1967 case ICE_PROFID_IPV6_GTPU_TEID:
1968 tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
1979 if (vlan && tun_type == ICE_SW_TUN_PPPOE)
1980 tun_type = ICE_SW_TUN_PPPOE_QINQ;
1981 else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV6)
1982 tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
1983 else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV4)
1984 tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
1985 else if (vlan && tun_type == ICE_SW_TUN_PPPOE_PAY)
1986 tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
1987 else if (vlan && tun_type == ICE_SW_TUN_AND_NON_TUN)
1988 tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1989 else if (vlan && tun_type == ICE_NON_TUN)
1990 tun_type = ICE_NON_TUN_QINQ;
1996 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
1997 * @hw: pointer to hardware structure
1998 * @recps: struct that we need to populate
1999 * @rid: recipe ID that we are populating
2000 * @refresh_required: true if we should get recipe to profile mapping from FW
2002 * This function is used to populate all the necessary entries into our
2003 * bookkeeping so that we have a current list of all the recipes that are
2004 * programmed in the firmware.
2006 static enum ice_status
2007 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
2008 bool *refresh_required)
2010 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
2011 struct ice_aqc_recipe_data_elem *tmp;
2012 u16 num_recps = ICE_MAX_NUM_RECIPES;
2013 struct ice_prot_lkup_ext *lkup_exts;
2014 enum ice_status status;
2019 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
2021 /* we need a buffer big enough to accommodate all the recipes */
2022 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
2023 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
2025 return ICE_ERR_NO_MEMORY;
2027 tmp[0].recipe_indx = rid;
2028 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
2029 /* non-zero status meaning recipe doesn't exist */
2033 /* Get recipe to profile map so that we can get the fv from lkups that
2034 * we read for a recipe from FW. Since we want to minimize the number of
2035 * times we make this FW call, just make one call and cache the copy
2036 * until a new recipe is added. This operation is only required the
2037 * first time to get the changes from FW. Then to search existing
2038 * entries we don't need to update the cache again until another recipe
2041 if (*refresh_required) {
2042 ice_get_recp_to_prof_map(hw);
2043 *refresh_required = false;
2046 /* Start populating all the entries for recps[rid] based on lkups from
2047 * firmware. Note that we are only creating the root recipe in our
2050 lkup_exts = &recps[rid].lkup_exts;
2052 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
2053 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
2054 struct ice_recp_grp_entry *rg_entry;
2055 u8 i, prof, idx, prot = 0;
2059 rg_entry = (struct ice_recp_grp_entry *)
2060 ice_malloc(hw, sizeof(*rg_entry));
2062 status = ICE_ERR_NO_MEMORY;
2066 idx = root_bufs.recipe_indx;
2067 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
2069 /* Mark all result indices in this chain */
2070 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
2071 ice_set_bit(root_bufs.content.result_indx &
2072 ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
2074 /* get the first profile that is associated with rid */
2075 prof = ice_find_first_bit(recipe_to_profile[idx],
2076 ICE_MAX_NUM_PROFILES);
2077 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
2078 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
2080 rg_entry->fv_idx[i] = lkup_indx;
2081 rg_entry->fv_mask[i] =
2082 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
2084 /* If the recipe is a chained recipe then all its
2085 * child recipe's result will have a result index.
2086 * To fill fv_words we should not use those result
2087 * index, we only need the protocol ids and offsets.
2088 * We will skip all the fv_idx which stores result
2089 * index in them. We also need to skip any fv_idx which
2090 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
2091 * valid offset value.
2093 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
2094 rg_entry->fv_idx[i]) ||
2095 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
2096 rg_entry->fv_idx[i] == 0)
2099 ice_find_prot_off(hw, ICE_BLK_SW, prof,
2100 rg_entry->fv_idx[i], &prot, &off);
2101 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
2102 lkup_exts->fv_words[fv_word_idx].off = off;
2103 lkup_exts->field_mask[fv_word_idx] =
2104 rg_entry->fv_mask[i];
2105 if (prot == ICE_META_DATA_ID_HW &&
2106 off == ICE_TUN_FLAG_MDID_OFF)
2110 /* populate rg_list with the data from the child entry of this
2113 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
2115 /* Propagate some data to the recipe database */
2116 recps[idx].is_root = !!is_root;
2117 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2118 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
2119 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
2120 recps[idx].chain_idx = root_bufs.content.result_indx &
2121 ~ICE_AQ_RECIPE_RESULT_EN;
2122 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
2124 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
2130 /* Only do the following for root recipes entries */
2131 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
2132 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
2133 recps[idx].root_rid = root_bufs.content.rid &
2134 ~ICE_AQ_RECIPE_ID_IS_ROOT;
2135 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2138 /* Complete initialization of the root recipe entry */
2139 lkup_exts->n_val_words = fv_word_idx;
2140 recps[rid].big_recp = (num_recps > 1);
2141 recps[rid].n_grp_count = (u8)num_recps;
2142 recps[rid].tun_type = ice_get_tun_type_for_recipe(rid, vlan);
2143 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
2144 ice_memdup(hw, tmp, recps[rid].n_grp_count *
2145 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
2146 if (!recps[rid].root_buf)
2149 /* Copy result indexes */
2150 ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
2151 recps[rid].recp_created = true;
2159 * ice_get_recp_to_prof_map - updates recipe to profile mapping
2160 * @hw: pointer to hardware structure
2162 * This function is used to populate recipe_to_profile matrix where index to
2163 * this array is the recipe ID and the element is the mapping of which profiles
2164 * is this recipe mapped to.
2166 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
2168 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
2171 for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
2174 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
2175 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
2176 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
2178 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
2179 ICE_MAX_NUM_RECIPES);
2180 ice_for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
2181 ice_set_bit(i, recipe_to_profile[j]);
2186 * ice_init_def_sw_recp - initialize the recipe book keeping tables
2187 * @hw: pointer to the HW struct
2188 * @recp_list: pointer to sw recipe list
2190 * Allocate memory for the entire recipe table and initialize the structures/
2191 * entries corresponding to basic recipes.
2194 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
2196 struct ice_sw_recipe *recps;
2199 recps = (struct ice_sw_recipe *)
2200 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
2202 return ICE_ERR_NO_MEMORY;
2204 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
2205 recps[i].root_rid = i;
2206 INIT_LIST_HEAD(&recps[i].filt_rules);
2207 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
2208 INIT_LIST_HEAD(&recps[i].rg_list);
2209 ice_init_lock(&recps[i].filt_rule_lock);
2218 * ice_aq_get_sw_cfg - get switch configuration
2219 * @hw: pointer to the hardware structure
2220 * @buf: pointer to the result buffer
2221 * @buf_size: length of the buffer available for response
2222 * @req_desc: pointer to requested descriptor
2223 * @num_elems: pointer to number of elements
2224 * @cd: pointer to command details structure or NULL
2226 * Get switch configuration (0x0200) to be placed in buf.
2227 * This admin command returns information such as initial VSI/port number
2228 * and switch ID it belongs to.
2230 * NOTE: *req_desc is both an input/output parameter.
2231 * The caller of this function first calls this function with *request_desc set
2232 * to 0. If the response from f/w has *req_desc set to 0, all the switch
2233 * configuration information has been returned; if non-zero (meaning not all
2234 * the information was returned), the caller should call this function again
2235 * with *req_desc set to the previous value returned by f/w to get the
2236 * next block of switch configuration information.
2238 * *num_elems is output only parameter. This reflects the number of elements
2239 * in response buffer. The caller of this function to use *num_elems while
2240 * parsing the response buffer.
2242 static enum ice_status
2243 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
2244 u16 buf_size, u16 *req_desc, u16 *num_elems,
2245 struct ice_sq_cd *cd)
2247 struct ice_aqc_get_sw_cfg *cmd;
2248 struct ice_aq_desc desc;
2249 enum ice_status status;
2251 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
2252 cmd = &desc.params.get_sw_conf;
2253 cmd->element = CPU_TO_LE16(*req_desc);
2255 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2257 *req_desc = LE16_TO_CPU(cmd->element);
2258 *num_elems = LE16_TO_CPU(cmd->num_elems);
2265 * ice_alloc_rss_global_lut - allocate a RSS global LUT
2266 * @hw: pointer to the HW struct
2267 * @shared_res: true to allocate as a shared resource and false to allocate as a dedicated resource
2268 * @global_lut_id: output parameter for the RSS global LUT's ID
2270 enum ice_status ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id)
2272 struct ice_aqc_alloc_free_res_elem *sw_buf;
2273 enum ice_status status;
2276 buf_len = ice_struct_size(sw_buf, elem, 1);
2277 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2279 return ICE_ERR_NO_MEMORY;
2281 sw_buf->num_elems = CPU_TO_LE16(1);
2282 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH |
2283 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
2284 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
2286 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, ice_aqc_opc_alloc_res, NULL);
2288 ice_debug(hw, ICE_DBG_RES, "Failed to allocate %s RSS global LUT, status %d\n",
2289 shared_res ? "shared" : "dedicated", status);
2290 goto ice_alloc_global_lut_exit;
2293 *global_lut_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2295 ice_alloc_global_lut_exit:
2296 ice_free(hw, sw_buf);
2301 * ice_free_global_lut - free a RSS global LUT
2302 * @hw: pointer to the HW struct
2303 * @global_lut_id: ID of the RSS global LUT to free
2305 enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id)
2307 struct ice_aqc_alloc_free_res_elem *sw_buf;
2308 u16 buf_len, num_elems = 1;
2309 enum ice_status status;
2311 buf_len = ice_struct_size(sw_buf, elem, num_elems);
2312 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2314 return ICE_ERR_NO_MEMORY;
2316 sw_buf->num_elems = CPU_TO_LE16(num_elems);
2317 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH);
2318 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(global_lut_id);
2320 status = ice_aq_alloc_free_res(hw, num_elems, sw_buf, buf_len, ice_aqc_opc_free_res, NULL);
2322 ice_debug(hw, ICE_DBG_RES, "Failed to free RSS global LUT %d, status %d\n",
2323 global_lut_id, status);
2325 ice_free(hw, sw_buf);
2330 * ice_alloc_sw - allocate resources specific to switch
2331 * @hw: pointer to the HW struct
2332 * @ena_stats: true to turn on VEB stats
2333 * @shared_res: true for shared resource, false for dedicated resource
2334 * @sw_id: switch ID returned
2335 * @counter_id: VEB counter ID returned
2337 * allocates switch resources (SWID and VEB counter) (0x0208)
2340 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
2343 struct ice_aqc_alloc_free_res_elem *sw_buf;
2344 struct ice_aqc_res_elem *sw_ele;
2345 enum ice_status status;
2348 buf_len = ice_struct_size(sw_buf, elem, 1);
2349 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2351 return ICE_ERR_NO_MEMORY;
2353 /* Prepare buffer for switch ID.
2354 * The number of resource entries in buffer is passed as 1 since only a
2355 * single switch/VEB instance is allocated, and hence a single sw_id
2358 sw_buf->num_elems = CPU_TO_LE16(1);
2360 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
2361 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
2362 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
2364 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2365 ice_aqc_opc_alloc_res, NULL);
2368 goto ice_alloc_sw_exit;
2370 sw_ele = &sw_buf->elem[0];
2371 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
2374 /* Prepare buffer for VEB Counter */
2375 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
2376 struct ice_aqc_alloc_free_res_elem *counter_buf;
2377 struct ice_aqc_res_elem *counter_ele;
2379 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
2380 ice_malloc(hw, buf_len);
2382 status = ICE_ERR_NO_MEMORY;
2383 goto ice_alloc_sw_exit;
2386 /* The number of resource entries in buffer is passed as 1 since
2387 * only a single switch/VEB instance is allocated, and hence a
2388 * single VEB counter is requested.
2390 counter_buf->num_elems = CPU_TO_LE16(1);
2391 counter_buf->res_type =
2392 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
2393 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
2394 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
2398 ice_free(hw, counter_buf);
2399 goto ice_alloc_sw_exit;
2401 counter_ele = &counter_buf->elem[0];
2402 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
2403 ice_free(hw, counter_buf);
2407 ice_free(hw, sw_buf);
2412 * ice_free_sw - free resources specific to switch
2413 * @hw: pointer to the HW struct
2414 * @sw_id: switch ID returned
2415 * @counter_id: VEB counter ID returned
2417 * free switch resources (SWID and VEB counter) (0x0209)
2419 * NOTE: This function frees multiple resources. It continues
2420 * releasing other resources even after it encounters error.
2421 * The error code returned is the last error it encountered.
2423 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
2425 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
2426 enum ice_status status, ret_status;
2429 buf_len = ice_struct_size(sw_buf, elem, 1);
2430 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2432 return ICE_ERR_NO_MEMORY;
2434 /* Prepare buffer to free for switch ID res.
2435 * The number of resource entries in buffer is passed as 1 since only a
2436 * single switch/VEB instance is freed, and hence a single sw_id
2439 sw_buf->num_elems = CPU_TO_LE16(1);
2440 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
2441 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
2443 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2444 ice_aqc_opc_free_res, NULL);
2447 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
2449 /* Prepare buffer to free for VEB Counter resource */
2450 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
2451 ice_malloc(hw, buf_len);
2453 ice_free(hw, sw_buf);
2454 return ICE_ERR_NO_MEMORY;
2457 /* The number of resource entries in buffer is passed as 1 since only a
2458 * single switch/VEB instance is freed, and hence a single VEB counter
2461 counter_buf->num_elems = CPU_TO_LE16(1);
2462 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
2463 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
2465 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
2466 ice_aqc_opc_free_res, NULL);
2468 ice_debug(hw, ICE_DBG_SW, "VEB counter resource could not be freed\n");
2469 ret_status = status;
2472 ice_free(hw, counter_buf);
2473 ice_free(hw, sw_buf);
2479 * @hw: pointer to the HW struct
2480 * @vsi_ctx: pointer to a VSI context struct
2481 * @cd: pointer to command details structure or NULL
2483 * Add a VSI context to the hardware (0x0210)
2486 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2487 struct ice_sq_cd *cd)
2489 struct ice_aqc_add_update_free_vsi_resp *res;
2490 struct ice_aqc_add_get_update_free_vsi *cmd;
2491 struct ice_aq_desc desc;
2492 enum ice_status status;
2494 cmd = &desc.params.vsi_cmd;
2495 res = &desc.params.add_update_free_vsi_res;
2497 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
2499 if (!vsi_ctx->alloc_from_pool)
2500 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
2501 ICE_AQ_VSI_IS_VALID);
2503 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
2505 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2507 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2508 sizeof(vsi_ctx->info), cd);
2511 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
2512 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
2513 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
2521 * @hw: pointer to the HW struct
2522 * @vsi_ctx: pointer to a VSI context struct
2523 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2524 * @cd: pointer to command details structure or NULL
2526 * Free VSI context info from hardware (0x0213)
2529 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2530 bool keep_vsi_alloc, struct ice_sq_cd *cd)
2532 struct ice_aqc_add_update_free_vsi_resp *resp;
2533 struct ice_aqc_add_get_update_free_vsi *cmd;
2534 struct ice_aq_desc desc;
2535 enum ice_status status;
2537 cmd = &desc.params.vsi_cmd;
2538 resp = &desc.params.add_update_free_vsi_res;
2540 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
2542 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2544 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
2546 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2548 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2549 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2557 * @hw: pointer to the HW struct
2558 * @vsi_ctx: pointer to a VSI context struct
2559 * @cd: pointer to command details structure or NULL
2561 * Update VSI context in the hardware (0x0211)
2564 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2565 struct ice_sq_cd *cd)
2567 struct ice_aqc_add_update_free_vsi_resp *resp;
2568 struct ice_aqc_add_get_update_free_vsi *cmd;
2569 struct ice_aq_desc desc;
2570 enum ice_status status;
2572 cmd = &desc.params.vsi_cmd;
2573 resp = &desc.params.add_update_free_vsi_res;
2575 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
2577 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2579 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2581 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2582 sizeof(vsi_ctx->info), cd);
2585 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2586 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2593 * ice_is_vsi_valid - check whether the VSI is valid or not
2594 * @hw: pointer to the HW struct
2595 * @vsi_handle: VSI handle
2597 * check whether the VSI is valid or not
2599 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
2601 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
2605 * ice_get_hw_vsi_num - return the HW VSI number
2606 * @hw: pointer to the HW struct
2607 * @vsi_handle: VSI handle
2609 * return the HW VSI number
2610 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
2612 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
2614 return hw->vsi_ctx[vsi_handle]->vsi_num;
2618 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
2619 * @hw: pointer to the HW struct
2620 * @vsi_handle: VSI handle
2622 * return the VSI context entry for a given VSI handle
2624 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2626 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
2630 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
2631 * @hw: pointer to the HW struct
2632 * @vsi_handle: VSI handle
2633 * @vsi: VSI context pointer
2635 * save the VSI context entry for a given VSI handle
2638 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
2640 hw->vsi_ctx[vsi_handle] = vsi;
2644 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
2645 * @hw: pointer to the HW struct
2646 * @vsi_handle: VSI handle
2648 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
2650 struct ice_vsi_ctx *vsi;
2653 vsi = ice_get_vsi_ctx(hw, vsi_handle);
2656 ice_for_each_traffic_class(i) {
2657 if (vsi->lan_q_ctx[i]) {
2658 ice_free(hw, vsi->lan_q_ctx[i]);
2659 vsi->lan_q_ctx[i] = NULL;
2665 * ice_clear_vsi_ctx - clear the VSI context entry
2666 * @hw: pointer to the HW struct
2667 * @vsi_handle: VSI handle
2669 * clear the VSI context entry
2671 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2673 struct ice_vsi_ctx *vsi;
2675 vsi = ice_get_vsi_ctx(hw, vsi_handle);
2677 ice_clear_vsi_q_ctx(hw, vsi_handle);
2679 hw->vsi_ctx[vsi_handle] = NULL;
2684 * ice_clear_all_vsi_ctx - clear all the VSI context entries
2685 * @hw: pointer to the HW struct
2687 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
2691 for (i = 0; i < ICE_MAX_VSI; i++)
2692 ice_clear_vsi_ctx(hw, i);
2696 * ice_add_vsi - add VSI context to the hardware and VSI handle list
2697 * @hw: pointer to the HW struct
2698 * @vsi_handle: unique VSI handle provided by drivers
2699 * @vsi_ctx: pointer to a VSI context struct
2700 * @cd: pointer to command details structure or NULL
2702 * Add a VSI context to the hardware also add it into the VSI handle list.
2703 * If this function gets called after reset for existing VSIs then update
2704 * with the new HW VSI number in the corresponding VSI handle list entry.
2707 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2708 struct ice_sq_cd *cd)
2710 struct ice_vsi_ctx *tmp_vsi_ctx;
2711 enum ice_status status;
2713 if (vsi_handle >= ICE_MAX_VSI)
2714 return ICE_ERR_PARAM;
2715 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
2718 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
2720 /* Create a new VSI context */
2721 tmp_vsi_ctx = (struct ice_vsi_ctx *)
2722 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
2724 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
2725 return ICE_ERR_NO_MEMORY;
2727 *tmp_vsi_ctx = *vsi_ctx;
2729 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
2731 /* update with new HW VSI num */
2732 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
2739 * ice_free_vsi- free VSI context from hardware and VSI handle list
2740 * @hw: pointer to the HW struct
2741 * @vsi_handle: unique VSI handle
2742 * @vsi_ctx: pointer to a VSI context struct
2743 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2744 * @cd: pointer to command details structure or NULL
2746 * Free VSI context info from hardware as well as from VSI handle list
2749 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2750 bool keep_vsi_alloc, struct ice_sq_cd *cd)
2752 enum ice_status status;
2754 if (!ice_is_vsi_valid(hw, vsi_handle))
2755 return ICE_ERR_PARAM;
2756 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2757 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
2759 ice_clear_vsi_ctx(hw, vsi_handle);
2765 * @hw: pointer to the HW struct
2766 * @vsi_handle: unique VSI handle
2767 * @vsi_ctx: pointer to a VSI context struct
2768 * @cd: pointer to command details structure or NULL
2770 * Update VSI context in the hardware
2773 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2774 struct ice_sq_cd *cd)
2776 if (!ice_is_vsi_valid(hw, vsi_handle))
2777 return ICE_ERR_PARAM;
2778 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2779 return ice_aq_update_vsi(hw, vsi_ctx, cd);
2783 * ice_aq_get_vsi_params
2784 * @hw: pointer to the HW struct
2785 * @vsi_ctx: pointer to a VSI context struct
2786 * @cd: pointer to command details structure or NULL
2788 * Get VSI context info from hardware (0x0212)
2791 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2792 struct ice_sq_cd *cd)
2794 struct ice_aqc_add_get_update_free_vsi *cmd;
2795 struct ice_aqc_get_vsi_resp *resp;
2796 struct ice_aq_desc desc;
2797 enum ice_status status;
2799 cmd = &desc.params.vsi_cmd;
2800 resp = &desc.params.get_vsi_resp;
2802 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
2804 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2806 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2807 sizeof(vsi_ctx->info), cd);
2809 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
2811 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2812 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2819 * ice_aq_add_update_mir_rule - add/update a mirror rule
2820 * @hw: pointer to the HW struct
2821 * @rule_type: Rule Type
2822 * @dest_vsi: VSI number to which packets will be mirrored
2823 * @count: length of the list
2824 * @mr_buf: buffer for list of mirrored VSI numbers
2825 * @cd: pointer to command details structure or NULL
2828 * Add/Update Mirror Rule (0x260).
2831 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
2832 u16 count, struct ice_mir_rule_buf *mr_buf,
2833 struct ice_sq_cd *cd, u16 *rule_id)
2835 struct ice_aqc_add_update_mir_rule *cmd;
2836 struct ice_aq_desc desc;
2837 enum ice_status status;
2838 __le16 *mr_list = NULL;
2841 switch (rule_type) {
2842 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
2843 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
2844 /* Make sure count and mr_buf are set for these rule_types */
2845 if (!(count && mr_buf))
2846 return ICE_ERR_PARAM;
2848 buf_size = count * sizeof(__le16);
2849 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
2851 return ICE_ERR_NO_MEMORY;
2853 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
2854 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
2855 /* Make sure count and mr_buf are not set for these
2858 if (count || mr_buf)
2859 return ICE_ERR_PARAM;
2862 ice_debug(hw, ICE_DBG_SW, "Error due to unsupported rule_type %u\n", rule_type);
2863 return ICE_ERR_OUT_OF_RANGE;
2866 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
2868 /* Pre-process 'mr_buf' items for add/update of virtual port
2869 * ingress/egress mirroring (but not physical port ingress/egress
2875 for (i = 0; i < count; i++) {
2878 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
2880 /* Validate specified VSI number, make sure it is less
2881 * than ICE_MAX_VSI, if not return with error.
2883 if (id >= ICE_MAX_VSI) {
2884 ice_debug(hw, ICE_DBG_SW, "Error VSI index (%u) out-of-range\n",
2886 ice_free(hw, mr_list);
2887 return ICE_ERR_OUT_OF_RANGE;
2890 /* add VSI to mirror rule */
2893 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
2894 else /* remove VSI from mirror rule */
2895 mr_list[i] = CPU_TO_LE16(id);
2899 cmd = &desc.params.add_update_rule;
2900 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
2901 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
2902 ICE_AQC_RULE_ID_VALID_M);
2903 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
2904 cmd->num_entries = CPU_TO_LE16(count);
2905 cmd->dest = CPU_TO_LE16(dest_vsi);
2907 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
2909 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
2911 ice_free(hw, mr_list);
2917 * ice_aq_delete_mir_rule - delete a mirror rule
2918 * @hw: pointer to the HW struct
2919 * @rule_id: Mirror rule ID (to be deleted)
2920 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
2921 * otherwise it is returned to the shared pool
2922 * @cd: pointer to command details structure or NULL
2924 * Delete Mirror Rule (0x261).
2927 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
2928 struct ice_sq_cd *cd)
2930 struct ice_aqc_delete_mir_rule *cmd;
2931 struct ice_aq_desc desc;
2933 /* rule_id should be in the range 0...63 */
2934 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
2935 return ICE_ERR_OUT_OF_RANGE;
2937 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
2939 cmd = &desc.params.del_rule;
2940 rule_id |= ICE_AQC_RULE_ID_VALID_M;
2941 cmd->rule_id = CPU_TO_LE16(rule_id);
2944 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
2946 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2950 * ice_aq_alloc_free_vsi_list
2951 * @hw: pointer to the HW struct
2952 * @vsi_list_id: VSI list ID returned or used for lookup
2953 * @lkup_type: switch rule filter lookup type
2954 * @opc: switch rules population command type - pass in the command opcode
2956 * allocates or free a VSI list resource
2958 static enum ice_status
2959 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
2960 enum ice_sw_lkup_type lkup_type,
2961 enum ice_adminq_opc opc)
2963 struct ice_aqc_alloc_free_res_elem *sw_buf;
2964 struct ice_aqc_res_elem *vsi_ele;
2965 enum ice_status status;
2968 buf_len = ice_struct_size(sw_buf, elem, 1);
2969 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2971 return ICE_ERR_NO_MEMORY;
2972 sw_buf->num_elems = CPU_TO_LE16(1);
2974 if (lkup_type == ICE_SW_LKUP_MAC ||
2975 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2976 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2977 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2978 lkup_type == ICE_SW_LKUP_PROMISC ||
2979 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2980 lkup_type == ICE_SW_LKUP_LAST) {
2981 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
2982 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
2984 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
2986 status = ICE_ERR_PARAM;
2987 goto ice_aq_alloc_free_vsi_list_exit;
2990 if (opc == ice_aqc_opc_free_res)
2991 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
2993 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
2995 goto ice_aq_alloc_free_vsi_list_exit;
2997 if (opc == ice_aqc_opc_alloc_res) {
2998 vsi_ele = &sw_buf->elem[0];
2999 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
3002 ice_aq_alloc_free_vsi_list_exit:
3003 ice_free(hw, sw_buf);
3008 * ice_aq_set_storm_ctrl - Sets storm control configuration
3009 * @hw: pointer to the HW struct
3010 * @bcast_thresh: represents the upper threshold for broadcast storm control
3011 * @mcast_thresh: represents the upper threshold for multicast storm control
3012 * @ctl_bitmask: storm control knobs
3014 * Sets the storm control configuration (0x0280)
3017 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
3020 struct ice_aqc_storm_cfg *cmd;
3021 struct ice_aq_desc desc;
3023 cmd = &desc.params.storm_conf;
3025 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
3027 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
3028 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
3029 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
3031 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3035 * ice_aq_get_storm_ctrl - gets storm control configuration
3036 * @hw: pointer to the HW struct
3037 * @bcast_thresh: represents the upper threshold for broadcast storm control
3038 * @mcast_thresh: represents the upper threshold for multicast storm control
3039 * @ctl_bitmask: storm control knobs
3041 * Gets the storm control configuration (0x0281)
3044 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
3047 enum ice_status status;
3048 struct ice_aq_desc desc;
3050 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
3052 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3054 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
3057 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
3060 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
3063 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
3070 * ice_aq_sw_rules - add/update/remove switch rules
3071 * @hw: pointer to the HW struct
3072 * @rule_list: pointer to switch rule population list
3073 * @rule_list_sz: total size of the rule list in bytes
3074 * @num_rules: number of switch rules in the rule_list
3075 * @opc: switch rules population command type - pass in the command opcode
3076 * @cd: pointer to command details structure or NULL
3078 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
3080 static enum ice_status
3081 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
3082 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
3084 struct ice_aq_desc desc;
3085 enum ice_status status;
3087 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3089 if (opc != ice_aqc_opc_add_sw_rules &&
3090 opc != ice_aqc_opc_update_sw_rules &&
3091 opc != ice_aqc_opc_remove_sw_rules)
3092 return ICE_ERR_PARAM;
3094 ice_fill_dflt_direct_cmd_desc(&desc, opc);
3096 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3097 desc.params.sw_rules.num_rules_fltr_entry_index =
3098 CPU_TO_LE16(num_rules);
3099 status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
3100 if (opc != ice_aqc_opc_add_sw_rules &&
3101 hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
3102 status = ICE_ERR_DOES_NOT_EXIST;
3108 * ice_aq_add_recipe - add switch recipe
3109 * @hw: pointer to the HW struct
3110 * @s_recipe_list: pointer to switch rule population list
3111 * @num_recipes: number of switch recipes in the list
3112 * @cd: pointer to command details structure or NULL
3117 ice_aq_add_recipe(struct ice_hw *hw,
3118 struct ice_aqc_recipe_data_elem *s_recipe_list,
3119 u16 num_recipes, struct ice_sq_cd *cd)
3121 struct ice_aqc_add_get_recipe *cmd;
3122 struct ice_aq_desc desc;
3125 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3126 cmd = &desc.params.add_get_recipe;
3127 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
3129 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
3130 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3132 buf_size = num_recipes * sizeof(*s_recipe_list);
3134 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
3138 * ice_aq_get_recipe - get switch recipe
3139 * @hw: pointer to the HW struct
3140 * @s_recipe_list: pointer to switch rule population list
3141 * @num_recipes: pointer to the number of recipes (input and output)
3142 * @recipe_root: root recipe number of recipe(s) to retrieve
3143 * @cd: pointer to command details structure or NULL
3147 * On input, *num_recipes should equal the number of entries in s_recipe_list.
3148 * On output, *num_recipes will equal the number of entries returned in
3151 * The caller must supply enough space in s_recipe_list to hold all possible
3152 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
3155 ice_aq_get_recipe(struct ice_hw *hw,
3156 struct ice_aqc_recipe_data_elem *s_recipe_list,
3157 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
3159 struct ice_aqc_add_get_recipe *cmd;
3160 struct ice_aq_desc desc;
3161 enum ice_status status;
3164 if (*num_recipes != ICE_MAX_NUM_RECIPES)
3165 return ICE_ERR_PARAM;
3167 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3168 cmd = &desc.params.add_get_recipe;
3169 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
3171 cmd->return_index = CPU_TO_LE16(recipe_root);
3172 cmd->num_sub_recipes = 0;
3174 buf_size = *num_recipes * sizeof(*s_recipe_list);
3176 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
3177 /* cppcheck-suppress constArgument */
3178 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
3184 * ice_update_recipe_lkup_idx - update a default recipe based on the lkup_idx
3185 * @hw: pointer to the HW struct
3186 * @params: parameters used to update the default recipe
3188 * This function only supports updating default recipes and it only supports
3189 * updating a single recipe based on the lkup_idx at a time.
3191 * This is done as a read-modify-write operation. First, get the current recipe
3192 * contents based on the recipe's ID. Then modify the field vector index and
3193 * mask if it's valid at the lkup_idx. Finally, use the add recipe AQ to update
3194 * the pre-existing recipe with the modifications.
3197 ice_update_recipe_lkup_idx(struct ice_hw *hw,
3198 struct ice_update_recipe_lkup_idx_params *params)
3200 struct ice_aqc_recipe_data_elem *rcp_list;
3201 u16 num_recps = ICE_MAX_NUM_RECIPES;
3202 enum ice_status status;
3204 rcp_list = (struct ice_aqc_recipe_data_elem *)ice_malloc(hw, num_recps * sizeof(*rcp_list));
3206 return ICE_ERR_NO_MEMORY;
3208 /* read current recipe list from firmware */
3209 rcp_list->recipe_indx = params->rid;
3210 status = ice_aq_get_recipe(hw, rcp_list, &num_recps, params->rid, NULL);
3212 ice_debug(hw, ICE_DBG_SW, "Failed to get recipe %d, status %d\n",
3213 params->rid, status);
3217 /* only modify existing recipe's lkup_idx and mask if valid, while
3218 * leaving all other fields the same, then update the recipe firmware
3220 rcp_list->content.lkup_indx[params->lkup_idx] = params->fv_idx;
3221 if (params->mask_valid)
3222 rcp_list->content.mask[params->lkup_idx] =
3223 CPU_TO_LE16(params->mask);
3225 if (params->ignore_valid)
3226 rcp_list->content.lkup_indx[params->lkup_idx] |=
3227 ICE_AQ_RECIPE_LKUP_IGNORE;
3229 status = ice_aq_add_recipe(hw, &rcp_list[0], 1, NULL);
3231 ice_debug(hw, ICE_DBG_SW, "Failed to update recipe %d lkup_idx %d fv_idx %d mask %d mask_valid %s, status %d\n",
3232 params->rid, params->lkup_idx, params->fv_idx,
3233 params->mask, params->mask_valid ? "true" : "false",
3237 ice_free(hw, rcp_list);
3242 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
3243 * @hw: pointer to the HW struct
3244 * @profile_id: package profile ID to associate the recipe with
3245 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
3246 * @cd: pointer to command details structure or NULL
3247 * Recipe to profile association (0x0291)
3250 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
3251 struct ice_sq_cd *cd)
3253 struct ice_aqc_recipe_to_profile *cmd;
3254 struct ice_aq_desc desc;
3256 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3257 cmd = &desc.params.recipe_to_profile;
3258 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
3259 cmd->profile_id = CPU_TO_LE16(profile_id);
3260 /* Set the recipe ID bit in the bitmask to let the device know which
3261 * profile we are associating the recipe to
3263 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
3264 ICE_NONDMA_TO_NONDMA);
3266 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3270 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
3271 * @hw: pointer to the HW struct
3272 * @profile_id: package profile ID to associate the recipe with
3273 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
3274 * @cd: pointer to command details structure or NULL
3275 * Associate profile ID with given recipe (0x0293)
3278 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
3279 struct ice_sq_cd *cd)
3281 struct ice_aqc_recipe_to_profile *cmd;
3282 struct ice_aq_desc desc;
3283 enum ice_status status;
3285 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3286 cmd = &desc.params.recipe_to_profile;
3287 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
3288 cmd->profile_id = CPU_TO_LE16(profile_id);
3290 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3292 ice_memcpy(r_bitmap, cmd->recipe_assoc,
3293 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
3299 * ice_alloc_recipe - add recipe resource
3300 * @hw: pointer to the hardware structure
3301 * @rid: recipe ID returned as response to AQ call
3303 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
3305 struct ice_aqc_alloc_free_res_elem *sw_buf;
3306 enum ice_status status;
3309 buf_len = ice_struct_size(sw_buf, elem, 1);
3310 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
3312 return ICE_ERR_NO_MEMORY;
3314 sw_buf->num_elems = CPU_TO_LE16(1);
3315 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
3316 ICE_AQC_RES_TYPE_S) |
3317 ICE_AQC_RES_TYPE_FLAG_SHARED);
3318 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
3319 ice_aqc_opc_alloc_res, NULL);
3321 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
3322 ice_free(hw, sw_buf);
3327 /* ice_init_port_info - Initialize port_info with switch configuration data
3328 * @pi: pointer to port_info
3329 * @vsi_port_num: VSI number or port number
3330 * @type: Type of switch element (port or VSI)
3331 * @swid: switch ID of the switch the element is attached to
3332 * @pf_vf_num: PF or VF number
3333 * @is_vf: true if the element is a VF, false otherwise
3336 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
3337 u16 swid, u16 pf_vf_num, bool is_vf)
3340 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
3341 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
3343 pi->pf_vf_num = pf_vf_num;
3345 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3346 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3349 ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
3354 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
3355 * @hw: pointer to the hardware structure
3357 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
3359 struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
3360 enum ice_status status;
3367 num_total_ports = 1;
3369 rbuf = (struct ice_aqc_get_sw_cfg_resp_elem *)
3370 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
3373 return ICE_ERR_NO_MEMORY;
3375 /* Multiple calls to ice_aq_get_sw_cfg may be required
3376 * to get all the switch configuration information. The need
3377 * for additional calls is indicated by ice_aq_get_sw_cfg
3378 * writing a non-zero value in req_desc
3381 struct ice_aqc_get_sw_cfg_resp_elem *ele;
3383 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
3384 &req_desc, &num_elems, NULL);
3389 for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
3390 u16 pf_vf_num, swid, vsi_port_num;
3394 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
3395 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
3397 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
3398 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
3400 swid = LE16_TO_CPU(ele->swid);
3402 if (LE16_TO_CPU(ele->pf_vf_num) &
3403 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
3406 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
3407 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
3410 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
3411 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
3412 if (j == num_total_ports) {
3413 ice_debug(hw, ICE_DBG_SW, "more ports than expected\n");
3414 status = ICE_ERR_CFG;
3417 ice_init_port_info(hw->port_info,
3418 vsi_port_num, res_type, swid,
3426 } while (req_desc && !status);
3434 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
3435 * @hw: pointer to the hardware structure
3436 * @fi: filter info structure to fill/update
3438 * This helper function populates the lb_en and lan_en elements of the provided
3439 * ice_fltr_info struct using the switch's type and characteristics of the
3440 * switch rule being configured.
3442 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
3444 if ((fi->flag & ICE_FLTR_RX) &&
3445 (fi->fltr_act == ICE_FWD_TO_VSI ||
3446 fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
3447 fi->lkup_type == ICE_SW_LKUP_LAST)
3451 if ((fi->flag & ICE_FLTR_TX) &&
3452 (fi->fltr_act == ICE_FWD_TO_VSI ||
3453 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
3454 fi->fltr_act == ICE_FWD_TO_Q ||
3455 fi->fltr_act == ICE_FWD_TO_QGRP)) {
3456 /* Setting LB for prune actions will result in replicated
3457 * packets to the internal switch that will be dropped.
3459 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
3462 /* Set lan_en to TRUE if
3463 * 1. The switch is a VEB AND
3465 * 2.1 The lookup is a directional lookup like ethertype,
3466 * promiscuous, ethertype-MAC, promiscuous-VLAN
3467 * and default-port OR
3468 * 2.2 The lookup is VLAN, OR
3469 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
3470 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
3474 * The switch is a VEPA.
3476 * In all other cases, the LAN enable has to be set to false.
3479 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3480 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
3481 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3482 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3483 fi->lkup_type == ICE_SW_LKUP_DFLT ||
3484 fi->lkup_type == ICE_SW_LKUP_VLAN ||
3485 (fi->lkup_type == ICE_SW_LKUP_MAC &&
3486 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
3487 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
3488 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
3497 * ice_fill_sw_rule - Helper function to fill switch rule structure
3498 * @hw: pointer to the hardware structure
3499 * @f_info: entry containing packet forwarding information
3500 * @s_rule: switch rule structure to be filled in based on mac_entry
3501 * @opc: switch rules population command type - pass in the command opcode
3504 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
3505 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
3507 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
3508 u16 vlan_tpid = ICE_ETH_P_8021Q;
3516 if (opc == ice_aqc_opc_remove_sw_rules) {
3517 s_rule->pdata.lkup_tx_rx.act = 0;
3518 s_rule->pdata.lkup_tx_rx.index =
3519 CPU_TO_LE16(f_info->fltr_rule_id);
3520 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
3524 eth_hdr_sz = sizeof(dummy_eth_header);
3525 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
3527 /* initialize the ether header with a dummy header */
3528 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
3529 ice_fill_sw_info(hw, f_info);
3531 switch (f_info->fltr_act) {
3532 case ICE_FWD_TO_VSI:
3533 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
3534 ICE_SINGLE_ACT_VSI_ID_M;
3535 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
3536 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3537 ICE_SINGLE_ACT_VALID_BIT;
3539 case ICE_FWD_TO_VSI_LIST:
3540 act |= ICE_SINGLE_ACT_VSI_LIST;
3541 act |= (f_info->fwd_id.vsi_list_id <<
3542 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
3543 ICE_SINGLE_ACT_VSI_LIST_ID_M;
3544 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
3545 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3546 ICE_SINGLE_ACT_VALID_BIT;
3549 act |= ICE_SINGLE_ACT_TO_Q;
3550 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3551 ICE_SINGLE_ACT_Q_INDEX_M;
3553 case ICE_DROP_PACKET:
3554 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
3555 ICE_SINGLE_ACT_VALID_BIT;
3557 case ICE_FWD_TO_QGRP:
3558 q_rgn = f_info->qgrp_size > 0 ?
3559 (u8)ice_ilog2(f_info->qgrp_size) : 0;
3560 act |= ICE_SINGLE_ACT_TO_Q;
3561 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3562 ICE_SINGLE_ACT_Q_INDEX_M;
3563 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
3564 ICE_SINGLE_ACT_Q_REGION_M;
3571 act |= ICE_SINGLE_ACT_LB_ENABLE;
3573 act |= ICE_SINGLE_ACT_LAN_ENABLE;
3575 switch (f_info->lkup_type) {
3576 case ICE_SW_LKUP_MAC:
3577 daddr = f_info->l_data.mac.mac_addr;
3579 case ICE_SW_LKUP_VLAN:
3580 vlan_id = f_info->l_data.vlan.vlan_id;
3581 if (f_info->l_data.vlan.tpid_valid)
3582 vlan_tpid = f_info->l_data.vlan.tpid;
3583 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
3584 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
3585 act |= ICE_SINGLE_ACT_PRUNE;
3586 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
3589 case ICE_SW_LKUP_ETHERTYPE_MAC:
3590 daddr = f_info->l_data.ethertype_mac.mac_addr;
3592 case ICE_SW_LKUP_ETHERTYPE:
3593 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
3594 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
3596 case ICE_SW_LKUP_MAC_VLAN:
3597 daddr = f_info->l_data.mac_vlan.mac_addr;
3598 vlan_id = f_info->l_data.mac_vlan.vlan_id;
3600 case ICE_SW_LKUP_PROMISC_VLAN:
3601 vlan_id = f_info->l_data.mac_vlan.vlan_id;
3603 case ICE_SW_LKUP_PROMISC:
3604 daddr = f_info->l_data.mac_vlan.mac_addr;
3610 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
3611 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
3612 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
3614 /* Recipe set depending on lookup type */
3615 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
3616 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
3617 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
3620 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
3621 ICE_NONDMA_TO_NONDMA);
3623 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
3624 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
3625 *off = CPU_TO_BE16(vlan_id);
3626 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
3627 *off = CPU_TO_BE16(vlan_tpid);
3630 /* Create the switch rule with the final dummy Ethernet header */
3631 if (opc != ice_aqc_opc_update_sw_rules)
3632 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
3636 * ice_add_marker_act
3637 * @hw: pointer to the hardware structure
3638 * @m_ent: the management entry for which sw marker needs to be added
3639 * @sw_marker: sw marker to tag the Rx descriptor with
3640 * @l_id: large action resource ID
3642 * Create a large action to hold software marker and update the switch rule
3643 * entry pointed by m_ent with newly created large action
3645 static enum ice_status
3646 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3647 u16 sw_marker, u16 l_id)
3649 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
3650 /* For software marker we need 3 large actions
3651 * 1. FWD action: FWD TO VSI or VSI LIST
3652 * 2. GENERIC VALUE action to hold the profile ID
3653 * 3. GENERIC VALUE action to hold the software marker ID
3655 const u16 num_lg_acts = 3;
3656 enum ice_status status;
3662 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3663 return ICE_ERR_PARAM;
3665 /* Create two back-to-back switch rules and submit them to the HW using
3666 * one memory buffer:
3670 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
3671 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3672 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3674 return ICE_ERR_NO_MEMORY;
3676 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3678 /* Fill in the first switch rule i.e. large action */
3679 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3680 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3681 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
3683 /* First action VSI forwarding or VSI list forwarding depending on how
3686 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
3687 m_ent->fltr_info.fwd_id.hw_vsi_id;
3689 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3690 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
3691 if (m_ent->vsi_count > 1)
3692 act |= ICE_LG_ACT_VSI_LIST;
3693 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3695 /* Second action descriptor type */
3696 act = ICE_LG_ACT_GENERIC;
3698 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
3699 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3701 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
3702 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
3704 /* Third action Marker value */
3705 act |= ICE_LG_ACT_GENERIC;
3706 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
3707 ICE_LG_ACT_GENERIC_VALUE_M;
3709 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
3711 /* call the fill switch rule to fill the lookup Tx Rx structure */
3712 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3713 ice_aqc_opc_update_sw_rules);
3715 /* Update the action to point to the large action ID */
3716 rx_tx->pdata.lkup_tx_rx.act =
3717 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
3718 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
3719 ICE_SINGLE_ACT_PTR_VAL_M));
3721 /* Use the filter rule ID of the previously created rule with single
3722 * act. Once the update happens, hardware will treat this as large
3725 rx_tx->pdata.lkup_tx_rx.index =
3726 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
3728 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3729 ice_aqc_opc_update_sw_rules, NULL);
3731 m_ent->lg_act_idx = l_id;
3732 m_ent->sw_marker_id = sw_marker;
3735 ice_free(hw, lg_act);
3740 * ice_add_counter_act - add/update filter rule with counter action
3741 * @hw: pointer to the hardware structure
3742 * @m_ent: the management entry for which counter needs to be added
3743 * @counter_id: VLAN counter ID returned as part of allocate resource
3744 * @l_id: large action resource ID
3746 static enum ice_status
3747 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3748 u16 counter_id, u16 l_id)
3750 struct ice_aqc_sw_rules_elem *lg_act;
3751 struct ice_aqc_sw_rules_elem *rx_tx;
3752 enum ice_status status;
3753 /* 2 actions will be added while adding a large action counter */
3754 const int num_acts = 2;
3761 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3762 return ICE_ERR_PARAM;
3764 /* Create two back-to-back switch rules and submit them to the HW using
3765 * one memory buffer:
3769 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
3770 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3771 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3773 return ICE_ERR_NO_MEMORY;
3775 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3777 /* Fill in the first switch rule i.e. large action */
3778 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3779 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3780 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
3782 /* First action VSI forwarding or VSI list forwarding depending on how
3785 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
3786 m_ent->fltr_info.fwd_id.hw_vsi_id;
3788 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3789 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
3790 ICE_LG_ACT_VSI_LIST_ID_M;
3791 if (m_ent->vsi_count > 1)
3792 act |= ICE_LG_ACT_VSI_LIST;
3793 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3795 /* Second action counter ID */
3796 act = ICE_LG_ACT_STAT_COUNT;
3797 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
3798 ICE_LG_ACT_STAT_COUNT_M;
3799 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3801 /* call the fill switch rule to fill the lookup Tx Rx structure */
3802 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3803 ice_aqc_opc_update_sw_rules);
3805 act = ICE_SINGLE_ACT_PTR;
3806 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
3807 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
3809 /* Use the filter rule ID of the previously created rule with single
3810 * act. Once the update happens, hardware will treat this as large
3813 f_rule_id = m_ent->fltr_info.fltr_rule_id;
3814 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
3816 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3817 ice_aqc_opc_update_sw_rules, NULL);
3819 m_ent->lg_act_idx = l_id;
3820 m_ent->counter_index = counter_id;
3823 ice_free(hw, lg_act);
3828 * ice_create_vsi_list_map
3829 * @hw: pointer to the hardware structure
3830 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
3831 * @num_vsi: number of VSI handles in the array
3832 * @vsi_list_id: VSI list ID generated as part of allocate resource
3834 * Helper function to create a new entry of VSI list ID to VSI mapping
3835 * using the given VSI list ID
3837 static struct ice_vsi_list_map_info *
3838 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3841 struct ice_switch_info *sw = hw->switch_info;
3842 struct ice_vsi_list_map_info *v_map;
3845 v_map = (struct ice_vsi_list_map_info *)ice_malloc(hw, sizeof(*v_map));
3849 v_map->vsi_list_id = vsi_list_id;
3851 for (i = 0; i < num_vsi; i++)
3852 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
3854 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
3859 * ice_update_vsi_list_rule
3860 * @hw: pointer to the hardware structure
3861 * @vsi_handle_arr: array of VSI handles to form a VSI list
3862 * @num_vsi: number of VSI handles in the array
3863 * @vsi_list_id: VSI list ID generated as part of allocate resource
3864 * @remove: Boolean value to indicate if this is a remove action
3865 * @opc: switch rules population command type - pass in the command opcode
3866 * @lkup_type: lookup type of the filter
3868 * Call AQ command to add a new switch rule or update existing switch rule
3869 * using the given VSI list ID
3871 static enum ice_status
3872 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3873 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
3874 enum ice_sw_lkup_type lkup_type)
3876 struct ice_aqc_sw_rules_elem *s_rule;
3877 enum ice_status status;
3883 return ICE_ERR_PARAM;
3885 if (lkup_type == ICE_SW_LKUP_MAC ||
3886 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
3887 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3888 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3889 lkup_type == ICE_SW_LKUP_PROMISC ||
3890 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3891 lkup_type == ICE_SW_LKUP_LAST)
3892 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
3893 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
3894 else if (lkup_type == ICE_SW_LKUP_VLAN)
3895 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
3896 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
3898 return ICE_ERR_PARAM;
3900 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
3901 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3903 return ICE_ERR_NO_MEMORY;
3904 for (i = 0; i < num_vsi; i++) {
3905 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
3906 status = ICE_ERR_PARAM;
3909 /* AQ call requires hw_vsi_id(s) */
3910 s_rule->pdata.vsi_list.vsi[i] =
3911 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
3914 s_rule->type = CPU_TO_LE16(rule_type);
3915 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
3916 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
3918 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
3921 ice_free(hw, s_rule);
3926 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
3927 * @hw: pointer to the HW struct
3928 * @vsi_handle_arr: array of VSI handles to form a VSI list
3929 * @num_vsi: number of VSI handles in the array
3930 * @vsi_list_id: stores the ID of the VSI list to be created
3931 * @lkup_type: switch rule filter's lookup type
3933 static enum ice_status
3934 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3935 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
3937 enum ice_status status;
3939 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
3940 ice_aqc_opc_alloc_res);
3944 /* Update the newly created VSI list to include the specified VSIs */
3945 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
3946 *vsi_list_id, false,
3947 ice_aqc_opc_add_sw_rules, lkup_type);
3951 * ice_create_pkt_fwd_rule
3952 * @hw: pointer to the hardware structure
3953 * @recp_list: corresponding filter management list
3954 * @f_entry: entry containing packet forwarding information
3956 * Create switch rule with given filter information and add an entry
3957 * to the corresponding filter management list to track this switch rule
3960 static enum ice_status
3961 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3962 struct ice_fltr_list_entry *f_entry)
3964 struct ice_fltr_mgmt_list_entry *fm_entry;
3965 struct ice_aqc_sw_rules_elem *s_rule;
3966 enum ice_status status;
3968 s_rule = (struct ice_aqc_sw_rules_elem *)
3969 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
3971 return ICE_ERR_NO_MEMORY;
3972 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3973 ice_malloc(hw, sizeof(*fm_entry));
3975 status = ICE_ERR_NO_MEMORY;
3976 goto ice_create_pkt_fwd_rule_exit;
3979 fm_entry->fltr_info = f_entry->fltr_info;
3981 /* Initialize all the fields for the management entry */
3982 fm_entry->vsi_count = 1;
3983 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
3984 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
3985 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
3987 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
3988 ice_aqc_opc_add_sw_rules);
3990 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3991 ice_aqc_opc_add_sw_rules, NULL);
3993 ice_free(hw, fm_entry);
3994 goto ice_create_pkt_fwd_rule_exit;
3997 f_entry->fltr_info.fltr_rule_id =
3998 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3999 fm_entry->fltr_info.fltr_rule_id =
4000 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4002 /* The book keeping entries will get removed when base driver
4003 * calls remove filter AQ command
4005 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
4007 ice_create_pkt_fwd_rule_exit:
4008 ice_free(hw, s_rule);
4013 * ice_update_pkt_fwd_rule
4014 * @hw: pointer to the hardware structure
4015 * @f_info: filter information for switch rule
4017 * Call AQ command to update a previously created switch rule with a
4020 static enum ice_status
4021 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
4023 struct ice_aqc_sw_rules_elem *s_rule;
4024 enum ice_status status;
4026 s_rule = (struct ice_aqc_sw_rules_elem *)
4027 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
4029 return ICE_ERR_NO_MEMORY;
4031 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
4033 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
4035 /* Update switch rule with new rule set to forward VSI list */
4036 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
4037 ice_aqc_opc_update_sw_rules, NULL);
4039 ice_free(hw, s_rule);
4044 * ice_update_sw_rule_bridge_mode
4045 * @hw: pointer to the HW struct
4047 * Updates unicast switch filter rules based on VEB/VEPA mode
4049 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
4051 struct ice_switch_info *sw = hw->switch_info;
4052 struct ice_fltr_mgmt_list_entry *fm_entry;
4053 enum ice_status status = ICE_SUCCESS;
4054 struct LIST_HEAD_TYPE *rule_head;
4055 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4057 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4058 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
4060 ice_acquire_lock(rule_lock);
4061 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
4063 struct ice_fltr_info *fi = &fm_entry->fltr_info;
4064 u8 *addr = fi->l_data.mac.mac_addr;
4066 /* Update unicast Tx rules to reflect the selected
4069 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
4070 (fi->fltr_act == ICE_FWD_TO_VSI ||
4071 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
4072 fi->fltr_act == ICE_FWD_TO_Q ||
4073 fi->fltr_act == ICE_FWD_TO_QGRP)) {
4074 status = ice_update_pkt_fwd_rule(hw, fi);
4080 ice_release_lock(rule_lock);
4086 * ice_add_update_vsi_list
4087 * @hw: pointer to the hardware structure
4088 * @m_entry: pointer to current filter management list entry
4089 * @cur_fltr: filter information from the book keeping entry
4090 * @new_fltr: filter information with the new VSI to be added
4092 * Call AQ command to add or update previously created VSI list with new VSI.
4094 * Helper function to do book keeping associated with adding filter information
4095 * The algorithm to do the book keeping is described below :
4096 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
4097 * if only one VSI has been added till now
4098 * Allocate a new VSI list and add two VSIs
4099 * to this list using switch rule command
4100 * Update the previously created switch rule with the
4101 * newly created VSI list ID
4102 * if a VSI list was previously created
4103 * Add the new VSI to the previously created VSI list set
4104 * using the update switch rule command
4106 static enum ice_status
4107 ice_add_update_vsi_list(struct ice_hw *hw,
4108 struct ice_fltr_mgmt_list_entry *m_entry,
4109 struct ice_fltr_info *cur_fltr,
4110 struct ice_fltr_info *new_fltr)
4112 enum ice_status status = ICE_SUCCESS;
4113 u16 vsi_list_id = 0;
4115 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
4116 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
4117 return ICE_ERR_NOT_IMPL;
4119 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
4120 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
4121 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
4122 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
4123 return ICE_ERR_NOT_IMPL;
4125 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
4126 /* Only one entry existed in the mapping and it was not already
4127 * a part of a VSI list. So, create a VSI list with the old and
4130 struct ice_fltr_info tmp_fltr;
4131 u16 vsi_handle_arr[2];
4133 /* A rule already exists with the new VSI being added */
4134 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
4135 return ICE_ERR_ALREADY_EXISTS;
4137 vsi_handle_arr[0] = cur_fltr->vsi_handle;
4138 vsi_handle_arr[1] = new_fltr->vsi_handle;
4139 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
4141 new_fltr->lkup_type);
4145 tmp_fltr = *new_fltr;
4146 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
4147 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4148 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
4149 /* Update the previous switch rule of "MAC forward to VSI" to
4150 * "MAC fwd to VSI list"
4152 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
4156 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
4157 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
4158 m_entry->vsi_list_info =
4159 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4162 if (!m_entry->vsi_list_info)
4163 return ICE_ERR_NO_MEMORY;
4165 /* If this entry was large action then the large action needs
4166 * to be updated to point to FWD to VSI list
4168 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
4170 ice_add_marker_act(hw, m_entry,
4171 m_entry->sw_marker_id,
4172 m_entry->lg_act_idx);
4174 u16 vsi_handle = new_fltr->vsi_handle;
4175 enum ice_adminq_opc opcode;
4177 if (!m_entry->vsi_list_info)
4180 /* A rule already exists with the new VSI being added */
4181 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
4184 /* Update the previously created VSI list set with
4185 * the new VSI ID passed in
4187 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
4188 opcode = ice_aqc_opc_update_sw_rules;
4190 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
4191 vsi_list_id, false, opcode,
4192 new_fltr->lkup_type);
4193 /* update VSI list mapping info with new VSI ID */
4195 ice_set_bit(vsi_handle,
4196 m_entry->vsi_list_info->vsi_map);
4199 m_entry->vsi_count++;
4204 * ice_find_rule_entry - Search a rule entry
4205 * @list_head: head of rule list
4206 * @f_info: rule information
4208 * Helper function to search for a given rule entry
4209 * Returns pointer to entry storing the rule if found
4211 static struct ice_fltr_mgmt_list_entry *
4212 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
4213 struct ice_fltr_info *f_info)
4215 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
4217 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4219 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4220 sizeof(f_info->l_data)) &&
4221 f_info->flag == list_itr->fltr_info.flag) {
4230 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
4231 * @recp_list: VSI lists needs to be searched
4232 * @vsi_handle: VSI handle to be found in VSI list
4233 * @vsi_list_id: VSI list ID found containing vsi_handle
4235 * Helper function to search a VSI list with single entry containing given VSI
4236 * handle element. This can be extended further to search VSI list with more
4237 * than 1 vsi_count. Returns pointer to VSI list entry if found.
4239 static struct ice_vsi_list_map_info *
4240 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
4243 struct ice_vsi_list_map_info *map_info = NULL;
4244 struct LIST_HEAD_TYPE *list_head;
4246 list_head = &recp_list->filt_rules;
4247 if (recp_list->adv_rule) {
4248 struct ice_adv_fltr_mgmt_list_entry *list_itr;
4250 LIST_FOR_EACH_ENTRY(list_itr, list_head,
4251 ice_adv_fltr_mgmt_list_entry,
4253 if (list_itr->vsi_list_info) {
4254 map_info = list_itr->vsi_list_info;
4255 if (ice_is_bit_set(map_info->vsi_map,
4257 *vsi_list_id = map_info->vsi_list_id;
4263 struct ice_fltr_mgmt_list_entry *list_itr;
4265 LIST_FOR_EACH_ENTRY(list_itr, list_head,
4266 ice_fltr_mgmt_list_entry,
4268 if (list_itr->vsi_count == 1 &&
4269 list_itr->vsi_list_info) {
4270 map_info = list_itr->vsi_list_info;
4271 if (ice_is_bit_set(map_info->vsi_map,
4273 *vsi_list_id = map_info->vsi_list_id;
4283 * ice_add_rule_internal - add rule for a given lookup type
4284 * @hw: pointer to the hardware structure
4285 * @recp_list: recipe list for which rule has to be added
4286 * @lport: logic port number on which function add rule
4287 * @f_entry: structure containing MAC forwarding information
4289 * Adds or updates the rule lists for a given recipe
4291 static enum ice_status
4292 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4293 u8 lport, struct ice_fltr_list_entry *f_entry)
4295 struct ice_fltr_info *new_fltr, *cur_fltr;
4296 struct ice_fltr_mgmt_list_entry *m_entry;
4297 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4298 enum ice_status status = ICE_SUCCESS;
4300 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4301 return ICE_ERR_PARAM;
4303 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
4304 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
4305 f_entry->fltr_info.fwd_id.hw_vsi_id =
4306 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4308 rule_lock = &recp_list->filt_rule_lock;
4310 ice_acquire_lock(rule_lock);
4311 new_fltr = &f_entry->fltr_info;
4312 if (new_fltr->flag & ICE_FLTR_RX)
4313 new_fltr->src = lport;
4314 else if (new_fltr->flag & ICE_FLTR_TX)
4316 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4318 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
4320 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
4321 goto exit_add_rule_internal;
4324 cur_fltr = &m_entry->fltr_info;
4325 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
4327 exit_add_rule_internal:
4328 ice_release_lock(rule_lock);
4333 * ice_remove_vsi_list_rule
4334 * @hw: pointer to the hardware structure
4335 * @vsi_list_id: VSI list ID generated as part of allocate resource
4336 * @lkup_type: switch rule filter lookup type
4338 * The VSI list should be emptied before this function is called to remove the
4341 static enum ice_status
4342 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
4343 enum ice_sw_lkup_type lkup_type)
4345 /* Free the vsi_list resource that we allocated. It is assumed that the
4346 * list is empty at this point.
4348 return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
4349 ice_aqc_opc_free_res);
4353 * ice_rem_update_vsi_list
4354 * @hw: pointer to the hardware structure
4355 * @vsi_handle: VSI handle of the VSI to remove
4356 * @fm_list: filter management entry for which the VSI list management needs to
4359 static enum ice_status
4360 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
4361 struct ice_fltr_mgmt_list_entry *fm_list)
4363 enum ice_sw_lkup_type lkup_type;
4364 enum ice_status status = ICE_SUCCESS;
4367 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
4368 fm_list->vsi_count == 0)
4369 return ICE_ERR_PARAM;
4371 /* A rule with the VSI being removed does not exist */
4372 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
4373 return ICE_ERR_DOES_NOT_EXIST;
4375 lkup_type = fm_list->fltr_info.lkup_type;
4376 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
4377 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
4378 ice_aqc_opc_update_sw_rules,
4383 fm_list->vsi_count--;
4384 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
4386 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
4387 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
4388 struct ice_vsi_list_map_info *vsi_list_info =
4389 fm_list->vsi_list_info;
4392 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
4394 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
4395 return ICE_ERR_OUT_OF_RANGE;
4397 /* Make sure VSI list is empty before removing it below */
4398 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
4400 ice_aqc_opc_update_sw_rules,
4405 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
4406 tmp_fltr_info.fwd_id.hw_vsi_id =
4407 ice_get_hw_vsi_num(hw, rem_vsi_handle);
4408 tmp_fltr_info.vsi_handle = rem_vsi_handle;
4409 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
4411 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
4412 tmp_fltr_info.fwd_id.hw_vsi_id, status);
4416 fm_list->fltr_info = tmp_fltr_info;
4419 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
4420 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
4421 struct ice_vsi_list_map_info *vsi_list_info =
4422 fm_list->vsi_list_info;
4424 /* Remove the VSI list since it is no longer used */
4425 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
4427 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
4428 vsi_list_id, status);
4432 LIST_DEL(&vsi_list_info->list_entry);
4433 ice_free(hw, vsi_list_info);
4434 fm_list->vsi_list_info = NULL;
4441 * ice_remove_rule_internal - Remove a filter rule of a given type
4443 * @hw: pointer to the hardware structure
4444 * @recp_list: recipe list for which the rule needs to removed
4445 * @f_entry: rule entry containing filter information
4447 static enum ice_status
4448 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4449 struct ice_fltr_list_entry *f_entry)
4451 struct ice_fltr_mgmt_list_entry *list_elem;
4452 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4453 enum ice_status status = ICE_SUCCESS;
4454 bool remove_rule = false;
4457 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4458 return ICE_ERR_PARAM;
4459 f_entry->fltr_info.fwd_id.hw_vsi_id =
4460 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4462 rule_lock = &recp_list->filt_rule_lock;
4463 ice_acquire_lock(rule_lock);
4464 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
4465 &f_entry->fltr_info);
4467 status = ICE_ERR_DOES_NOT_EXIST;
4471 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
4473 } else if (!list_elem->vsi_list_info) {
4474 status = ICE_ERR_DOES_NOT_EXIST;
4476 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
4477 /* a ref_cnt > 1 indicates that the vsi_list is being
4478 * shared by multiple rules. Decrement the ref_cnt and
4479 * remove this rule, but do not modify the list, as it
4480 * is in-use by other rules.
4482 list_elem->vsi_list_info->ref_cnt--;
4485 /* a ref_cnt of 1 indicates the vsi_list is only used
4486 * by one rule. However, the original removal request is only
4487 * for a single VSI. Update the vsi_list first, and only
4488 * remove the rule if there are no further VSIs in this list.
4490 vsi_handle = f_entry->fltr_info.vsi_handle;
4491 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
4494 /* if VSI count goes to zero after updating the VSI list */
4495 if (list_elem->vsi_count == 0)
4500 /* Remove the lookup rule */
4501 struct ice_aqc_sw_rules_elem *s_rule;
4503 s_rule = (struct ice_aqc_sw_rules_elem *)
4504 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
4506 status = ICE_ERR_NO_MEMORY;
4510 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
4511 ice_aqc_opc_remove_sw_rules);
4513 status = ice_aq_sw_rules(hw, s_rule,
4514 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
4515 ice_aqc_opc_remove_sw_rules, NULL);
4517 /* Remove a book keeping from the list */
4518 ice_free(hw, s_rule);
4523 LIST_DEL(&list_elem->list_entry);
4524 ice_free(hw, list_elem);
4527 ice_release_lock(rule_lock);
4532 * ice_aq_get_res_alloc - get allocated resources
4533 * @hw: pointer to the HW struct
4534 * @num_entries: pointer to u16 to store the number of resource entries returned
4535 * @buf: pointer to buffer
4536 * @buf_size: size of buf
4537 * @cd: pointer to command details structure or NULL
4539 * The caller-supplied buffer must be large enough to store the resource
4540 * information for all resource types. Each resource type is an
4541 * ice_aqc_get_res_resp_elem structure.
4544 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
4545 struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
4546 struct ice_sq_cd *cd)
4548 struct ice_aqc_get_res_alloc *resp;
4549 enum ice_status status;
4550 struct ice_aq_desc desc;
4553 return ICE_ERR_BAD_PTR;
4555 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
4556 return ICE_ERR_INVAL_SIZE;
4558 resp = &desc.params.get_res;
4560 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
4561 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4563 if (!status && num_entries)
4564 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
4570 * ice_aq_get_res_descs - get allocated resource descriptors
4571 * @hw: pointer to the hardware structure
4572 * @num_entries: number of resource entries in buffer
4573 * @buf: structure to hold response data buffer
4574 * @buf_size: size of buffer
4575 * @res_type: resource type
4576 * @res_shared: is resource shared
4577 * @desc_id: input - first desc ID to start; output - next desc ID
4578 * @cd: pointer to command details structure or NULL
4581 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
4582 struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
4583 bool res_shared, u16 *desc_id, struct ice_sq_cd *cd)
4585 struct ice_aqc_get_allocd_res_desc *cmd;
4586 struct ice_aq_desc desc;
4587 enum ice_status status;
4589 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4591 cmd = &desc.params.get_res_desc;
4594 return ICE_ERR_PARAM;
4596 if (buf_size != (num_entries * sizeof(*buf)))
4597 return ICE_ERR_PARAM;
4599 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
4601 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
4602 ICE_AQC_RES_TYPE_M) | (res_shared ?
4603 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
4604 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
4606 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4608 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
4614 * ice_add_mac_rule - Add a MAC address based filter rule
4615 * @hw: pointer to the hardware structure
4616 * @m_list: list of MAC addresses and forwarding information
4617 * @sw: pointer to switch info struct for which function add rule
4618 * @lport: logic port number on which function add rule
4620 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
4621 * multiple unicast addresses, the function assumes that all the
4622 * addresses are unique in a given add_mac call. It doesn't
4623 * check for duplicates in this case, removing duplicates from a given
4624 * list should be taken care of in the caller of this function.
4626 static enum ice_status
4627 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4628 struct ice_switch_info *sw, u8 lport)
4630 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
4631 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
4632 struct ice_fltr_list_entry *m_list_itr;
4633 struct LIST_HEAD_TYPE *rule_head;
4634 u16 total_elem_left, s_rule_size;
4635 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4636 enum ice_status status = ICE_SUCCESS;
4637 u16 num_unicast = 0;
4641 rule_lock = &recp_list->filt_rule_lock;
4642 rule_head = &recp_list->filt_rules;
4644 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4646 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
4650 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
4651 vsi_handle = m_list_itr->fltr_info.vsi_handle;
4652 if (!ice_is_vsi_valid(hw, vsi_handle))
4653 return ICE_ERR_PARAM;
4654 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4655 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
4656 /* update the src in case it is VSI num */
4657 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
4658 return ICE_ERR_PARAM;
4659 m_list_itr->fltr_info.src = hw_vsi_id;
4660 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
4661 IS_ZERO_ETHER_ADDR(add))
4662 return ICE_ERR_PARAM;
4663 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4664 /* Don't overwrite the unicast address */
4665 ice_acquire_lock(rule_lock);
4666 if (ice_find_rule_entry(rule_head,
4667 &m_list_itr->fltr_info)) {
4668 ice_release_lock(rule_lock);
4669 return ICE_ERR_ALREADY_EXISTS;
4671 ice_release_lock(rule_lock);
4673 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
4674 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
4675 m_list_itr->status =
4676 ice_add_rule_internal(hw, recp_list, lport,
4678 if (m_list_itr->status)
4679 return m_list_itr->status;
4683 ice_acquire_lock(rule_lock);
4684 /* Exit if no suitable entries were found for adding bulk switch rule */
4686 status = ICE_SUCCESS;
4687 goto ice_add_mac_exit;
4690 /* Allocate switch rule buffer for the bulk update for unicast */
4691 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
4692 s_rule = (struct ice_aqc_sw_rules_elem *)
4693 ice_calloc(hw, num_unicast, s_rule_size);
4695 status = ICE_ERR_NO_MEMORY;
4696 goto ice_add_mac_exit;
4700 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4702 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4703 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4705 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4706 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
4707 ice_aqc_opc_add_sw_rules);
4708 r_iter = (struct ice_aqc_sw_rules_elem *)
4709 ((u8 *)r_iter + s_rule_size);
4713 /* Call AQ bulk switch rule update for all unicast addresses */
4715 /* Call AQ switch rule in AQ_MAX chunk */
4716 for (total_elem_left = num_unicast; total_elem_left > 0;
4717 total_elem_left -= elem_sent) {
4718 struct ice_aqc_sw_rules_elem *entry = r_iter;
4720 elem_sent = MIN_T(u8, total_elem_left,
4721 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
4722 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
4723 elem_sent, ice_aqc_opc_add_sw_rules,
4726 goto ice_add_mac_exit;
4727 r_iter = (struct ice_aqc_sw_rules_elem *)
4728 ((u8 *)r_iter + (elem_sent * s_rule_size));
4731 /* Fill up rule ID based on the value returned from FW */
4733 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4735 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4736 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4737 struct ice_fltr_mgmt_list_entry *fm_entry;
4739 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4740 f_info->fltr_rule_id =
4741 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
4742 f_info->fltr_act = ICE_FWD_TO_VSI;
4743 /* Create an entry to track this MAC address */
4744 fm_entry = (struct ice_fltr_mgmt_list_entry *)
4745 ice_malloc(hw, sizeof(*fm_entry));
4747 status = ICE_ERR_NO_MEMORY;
4748 goto ice_add_mac_exit;
4750 fm_entry->fltr_info = *f_info;
4751 fm_entry->vsi_count = 1;
4752 /* The book keeping entries will get removed when
4753 * base driver calls remove filter AQ command
4756 LIST_ADD(&fm_entry->list_entry, rule_head);
4757 r_iter = (struct ice_aqc_sw_rules_elem *)
4758 ((u8 *)r_iter + s_rule_size);
4763 ice_release_lock(rule_lock);
4765 ice_free(hw, s_rule);
4770 * ice_add_mac - Add a MAC address based filter rule
4771 * @hw: pointer to the hardware structure
4772 * @m_list: list of MAC addresses and forwarding information
4774 * Function add MAC rule for logical port from HW struct
4776 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4779 return ICE_ERR_PARAM;
4781 return ice_add_mac_rule(hw, m_list, hw->switch_info,
4782 hw->port_info->lport);
4786 * ice_add_vlan_internal - Add one VLAN based filter rule
4787 * @hw: pointer to the hardware structure
4788 * @recp_list: recipe list for which rule has to be added
4789 * @f_entry: filter entry containing one VLAN information
4791 static enum ice_status
4792 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4793 struct ice_fltr_list_entry *f_entry)
4795 struct ice_fltr_mgmt_list_entry *v_list_itr;
4796 struct ice_fltr_info *new_fltr, *cur_fltr;
4797 enum ice_sw_lkup_type lkup_type;
4798 u16 vsi_list_id = 0, vsi_handle;
4799 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4800 enum ice_status status = ICE_SUCCESS;
4802 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4803 return ICE_ERR_PARAM;
4805 f_entry->fltr_info.fwd_id.hw_vsi_id =
4806 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4807 new_fltr = &f_entry->fltr_info;
4809 /* VLAN ID should only be 12 bits */
4810 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
4811 return ICE_ERR_PARAM;
4813 if (new_fltr->src_id != ICE_SRC_ID_VSI)
4814 return ICE_ERR_PARAM;
4816 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
4817 lkup_type = new_fltr->lkup_type;
4818 vsi_handle = new_fltr->vsi_handle;
4819 rule_lock = &recp_list->filt_rule_lock;
4820 ice_acquire_lock(rule_lock);
4821 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
4823 struct ice_vsi_list_map_info *map_info = NULL;
4825 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
4826 /* All VLAN pruning rules use a VSI list. Check if
4827 * there is already a VSI list containing VSI that we
4828 * want to add. If found, use the same vsi_list_id for
4829 * this new VLAN rule or else create a new list.
4831 map_info = ice_find_vsi_list_entry(recp_list,
4835 status = ice_create_vsi_list_rule(hw,
4843 /* Convert the action to forwarding to a VSI list. */
4844 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
4845 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
4848 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
4850 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
4853 status = ICE_ERR_DOES_NOT_EXIST;
4856 /* reuse VSI list for new rule and increment ref_cnt */
4858 v_list_itr->vsi_list_info = map_info;
4859 map_info->ref_cnt++;
4861 v_list_itr->vsi_list_info =
4862 ice_create_vsi_list_map(hw, &vsi_handle,
4866 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
4867 /* Update existing VSI list to add new VSI ID only if it used
4870 cur_fltr = &v_list_itr->fltr_info;
4871 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
4874 /* If VLAN rule exists and VSI list being used by this rule is
4875 * referenced by more than 1 VLAN rule. Then create a new VSI
4876 * list appending previous VSI with new VSI and update existing
4877 * VLAN rule to point to new VSI list ID
4879 struct ice_fltr_info tmp_fltr;
4880 u16 vsi_handle_arr[2];
4883 /* Current implementation only supports reusing VSI list with
4884 * one VSI count. We should never hit below condition
4886 if (v_list_itr->vsi_count > 1 &&
4887 v_list_itr->vsi_list_info->ref_cnt > 1) {
4888 ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
4889 status = ICE_ERR_CFG;
4894 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
4897 /* A rule already exists with the new VSI being added */
4898 if (cur_handle == vsi_handle) {
4899 status = ICE_ERR_ALREADY_EXISTS;
4903 vsi_handle_arr[0] = cur_handle;
4904 vsi_handle_arr[1] = vsi_handle;
4905 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
4906 &vsi_list_id, lkup_type);
4910 tmp_fltr = v_list_itr->fltr_info;
4911 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
4912 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
4913 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4914 /* Update the previous switch rule to a new VSI list which
4915 * includes current VSI that is requested
4917 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
4921 /* before overriding VSI list map info. decrement ref_cnt of
4924 v_list_itr->vsi_list_info->ref_cnt--;
4926 /* now update to newly created list */
4927 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
4928 v_list_itr->vsi_list_info =
4929 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4931 v_list_itr->vsi_count++;
4935 ice_release_lock(rule_lock);
4940 * ice_add_vlan_rule - Add VLAN based filter rule
4941 * @hw: pointer to the hardware structure
4942 * @v_list: list of VLAN entries and forwarding information
4943 * @sw: pointer to switch info struct for which function add rule
4945 static enum ice_status
4946 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4947 struct ice_switch_info *sw)
4949 struct ice_fltr_list_entry *v_list_itr;
4950 struct ice_sw_recipe *recp_list;
4952 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
4953 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
4955 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
4956 return ICE_ERR_PARAM;
4957 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
4958 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
4960 if (v_list_itr->status)
4961 return v_list_itr->status;
4967 * ice_add_vlan - Add a VLAN based filter rule
4968 * @hw: pointer to the hardware structure
4969 * @v_list: list of VLAN and forwarding information
4971 * Function add VLAN rule for logical port from HW struct
4973 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4976 return ICE_ERR_PARAM;
4978 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
4982 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
4983 * @hw: pointer to the hardware structure
4984 * @mv_list: list of MAC and VLAN filters
4985 * @sw: pointer to switch info struct for which function add rule
4986 * @lport: logic port number on which function add rule
4988 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
4989 * pruning bits enabled, then it is the responsibility of the caller to make
4990 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
4991 * VLAN won't be received on that VSI otherwise.
4993 static enum ice_status
4994 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
4995 struct ice_switch_info *sw, u8 lport)
4997 struct ice_fltr_list_entry *mv_list_itr;
4998 struct ice_sw_recipe *recp_list;
5000 if (!mv_list || !hw)
5001 return ICE_ERR_PARAM;
5003 recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
5004 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
5006 enum ice_sw_lkup_type l_type =
5007 mv_list_itr->fltr_info.lkup_type;
5009 if (l_type != ICE_SW_LKUP_MAC_VLAN)
5010 return ICE_ERR_PARAM;
5011 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
5012 mv_list_itr->status =
5013 ice_add_rule_internal(hw, recp_list, lport,
5015 if (mv_list_itr->status)
5016 return mv_list_itr->status;
5022 * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
5023 * @hw: pointer to the hardware structure
5024 * @mv_list: list of MAC VLAN addresses and forwarding information
5026 * Function add MAC VLAN rule for logical port from HW struct
5029 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
5031 if (!mv_list || !hw)
5032 return ICE_ERR_PARAM;
5034 return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
5035 hw->port_info->lport);
5039 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
5040 * @hw: pointer to the hardware structure
5041 * @em_list: list of ether type MAC filter, MAC is optional
5042 * @sw: pointer to switch info struct for which function add rule
5043 * @lport: logic port number on which function add rule
5045 * This function requires the caller to populate the entries in
5046 * the filter list with the necessary fields (including flags to
5047 * indicate Tx or Rx rules).
5049 static enum ice_status
5050 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
5051 struct ice_switch_info *sw, u8 lport)
5053 struct ice_fltr_list_entry *em_list_itr;
5055 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
5057 struct ice_sw_recipe *recp_list;
5058 enum ice_sw_lkup_type l_type;
5060 l_type = em_list_itr->fltr_info.lkup_type;
5061 recp_list = &sw->recp_list[l_type];
5063 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
5064 l_type != ICE_SW_LKUP_ETHERTYPE)
5065 return ICE_ERR_PARAM;
5067 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
5070 if (em_list_itr->status)
5071 return em_list_itr->status;
5077 * ice_add_eth_mac - Add a ethertype based filter rule
5078 * @hw: pointer to the hardware structure
5079 * @em_list: list of ethertype and forwarding information
5081 * Function add ethertype rule for logical port from HW struct
5084 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
5086 if (!em_list || !hw)
5087 return ICE_ERR_PARAM;
5089 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
5090 hw->port_info->lport);
5094 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
5095 * @hw: pointer to the hardware structure
5096 * @em_list: list of ethertype or ethertype MAC entries
5097 * @sw: pointer to switch info struct for which function add rule
5099 static enum ice_status
5100 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
5101 struct ice_switch_info *sw)
5103 struct ice_fltr_list_entry *em_list_itr, *tmp;
5105 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
5107 struct ice_sw_recipe *recp_list;
5108 enum ice_sw_lkup_type l_type;
5110 l_type = em_list_itr->fltr_info.lkup_type;
5112 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
5113 l_type != ICE_SW_LKUP_ETHERTYPE)
5114 return ICE_ERR_PARAM;
5116 recp_list = &sw->recp_list[l_type];
5117 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
5119 if (em_list_itr->status)
5120 return em_list_itr->status;
5126 * ice_remove_eth_mac - remove a ethertype based filter rule
5127 * @hw: pointer to the hardware structure
5128 * @em_list: list of ethertype and forwarding information
5132 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
5134 if (!em_list || !hw)
5135 return ICE_ERR_PARAM;
5137 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
5141 * ice_rem_sw_rule_info
5142 * @hw: pointer to the hardware structure
5143 * @rule_head: pointer to the switch list structure that we want to delete
5146 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
5148 if (!LIST_EMPTY(rule_head)) {
5149 struct ice_fltr_mgmt_list_entry *entry;
5150 struct ice_fltr_mgmt_list_entry *tmp;
5152 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
5153 ice_fltr_mgmt_list_entry, list_entry) {
5154 LIST_DEL(&entry->list_entry);
5155 ice_free(hw, entry);
5161 * ice_rem_adv_rule_info
5162 * @hw: pointer to the hardware structure
5163 * @rule_head: pointer to the switch list structure that we want to delete
5166 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
5168 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
5169 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
5171 if (LIST_EMPTY(rule_head))
5174 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
5175 ice_adv_fltr_mgmt_list_entry, list_entry) {
5176 LIST_DEL(&lst_itr->list_entry);
5177 ice_free(hw, lst_itr->lkups);
5178 ice_free(hw, lst_itr);
5183 * ice_rem_all_sw_rules_info
5184 * @hw: pointer to the hardware structure
5186 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
5188 struct ice_switch_info *sw = hw->switch_info;
5191 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5192 struct LIST_HEAD_TYPE *rule_head;
5194 rule_head = &sw->recp_list[i].filt_rules;
5195 if (!sw->recp_list[i].adv_rule)
5196 ice_rem_sw_rule_info(hw, rule_head);
5198 ice_rem_adv_rule_info(hw, rule_head);
5199 if (sw->recp_list[i].adv_rule &&
5200 LIST_EMPTY(&sw->recp_list[i].filt_rules))
5201 sw->recp_list[i].adv_rule = false;
5206 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
5207 * @pi: pointer to the port_info structure
5208 * @vsi_handle: VSI handle to set as default
5209 * @set: true to add the above mentioned switch rule, false to remove it
5210 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
5212 * add filter rule to set/unset given VSI as default VSI for the switch
5213 * (represented by swid)
5216 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
5219 struct ice_aqc_sw_rules_elem *s_rule;
5220 struct ice_fltr_info f_info;
5221 struct ice_hw *hw = pi->hw;
5222 enum ice_adminq_opc opcode;
5223 enum ice_status status;
5227 if (!ice_is_vsi_valid(hw, vsi_handle))
5228 return ICE_ERR_PARAM;
5229 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5231 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
5232 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
5234 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
5236 return ICE_ERR_NO_MEMORY;
5238 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
5240 f_info.lkup_type = ICE_SW_LKUP_DFLT;
5241 f_info.flag = direction;
5242 f_info.fltr_act = ICE_FWD_TO_VSI;
5243 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
5245 if (f_info.flag & ICE_FLTR_RX) {
5246 f_info.src = pi->lport;
5247 f_info.src_id = ICE_SRC_ID_LPORT;
5249 f_info.fltr_rule_id =
5250 pi->dflt_rx_vsi_rule_id;
5251 } else if (f_info.flag & ICE_FLTR_TX) {
5252 f_info.src_id = ICE_SRC_ID_VSI;
5253 f_info.src = hw_vsi_id;
5255 f_info.fltr_rule_id =
5256 pi->dflt_tx_vsi_rule_id;
5260 opcode = ice_aqc_opc_add_sw_rules;
5262 opcode = ice_aqc_opc_remove_sw_rules;
5264 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
5266 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
5267 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
5270 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
5272 if (f_info.flag & ICE_FLTR_TX) {
5273 pi->dflt_tx_vsi_num = hw_vsi_id;
5274 pi->dflt_tx_vsi_rule_id = index;
5275 } else if (f_info.flag & ICE_FLTR_RX) {
5276 pi->dflt_rx_vsi_num = hw_vsi_id;
5277 pi->dflt_rx_vsi_rule_id = index;
5280 if (f_info.flag & ICE_FLTR_TX) {
5281 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
5282 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
5283 } else if (f_info.flag & ICE_FLTR_RX) {
5284 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
5285 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
5290 ice_free(hw, s_rule);
5295 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
5296 * @list_head: head of rule list
5297 * @f_info: rule information
5299 * Helper function to search for a unicast rule entry - this is to be used
5300 * to remove unicast MAC filter that is not shared with other VSIs on the
5303 * Returns pointer to entry storing the rule if found
5305 static struct ice_fltr_mgmt_list_entry *
5306 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
5307 struct ice_fltr_info *f_info)
5309 struct ice_fltr_mgmt_list_entry *list_itr;
5311 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
5313 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
5314 sizeof(f_info->l_data)) &&
5315 f_info->fwd_id.hw_vsi_id ==
5316 list_itr->fltr_info.fwd_id.hw_vsi_id &&
5317 f_info->flag == list_itr->fltr_info.flag)
5324 * ice_remove_mac_rule - remove a MAC based filter rule
5325 * @hw: pointer to the hardware structure
5326 * @m_list: list of MAC addresses and forwarding information
5327 * @recp_list: list from which function remove MAC address
5329 * This function removes either a MAC filter rule or a specific VSI from a
5330 * VSI list for a multicast MAC address.
5332 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
5333 * ice_add_mac. Caller should be aware that this call will only work if all
5334 * the entries passed into m_list were added previously. It will not attempt to
5335 * do a partial remove of entries that were found.
5337 static enum ice_status
5338 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
5339 struct ice_sw_recipe *recp_list)
5341 struct ice_fltr_list_entry *list_itr, *tmp;
5342 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5345 return ICE_ERR_PARAM;
5347 rule_lock = &recp_list->filt_rule_lock;
5348 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
5350 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
5351 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
5354 if (l_type != ICE_SW_LKUP_MAC)
5355 return ICE_ERR_PARAM;
5357 vsi_handle = list_itr->fltr_info.vsi_handle;
5358 if (!ice_is_vsi_valid(hw, vsi_handle))
5359 return ICE_ERR_PARAM;
5361 list_itr->fltr_info.fwd_id.hw_vsi_id =
5362 ice_get_hw_vsi_num(hw, vsi_handle);
5363 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
5364 /* Don't remove the unicast address that belongs to
5365 * another VSI on the switch, since it is not being
5368 ice_acquire_lock(rule_lock);
5369 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
5370 &list_itr->fltr_info)) {
5371 ice_release_lock(rule_lock);
5372 return ICE_ERR_DOES_NOT_EXIST;
5374 ice_release_lock(rule_lock);
5376 list_itr->status = ice_remove_rule_internal(hw, recp_list,
5378 if (list_itr->status)
5379 return list_itr->status;
5385 * ice_remove_mac - remove a MAC address based filter rule
5386 * @hw: pointer to the hardware structure
5387 * @m_list: list of MAC addresses and forwarding information
5390 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
5392 struct ice_sw_recipe *recp_list;
5394 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5395 return ice_remove_mac_rule(hw, m_list, recp_list);
5399 * ice_remove_vlan_rule - Remove VLAN based filter rule
5400 * @hw: pointer to the hardware structure
5401 * @v_list: list of VLAN entries and forwarding information
5402 * @recp_list: list from which function remove VLAN
5404 static enum ice_status
5405 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5406 struct ice_sw_recipe *recp_list)
5408 struct ice_fltr_list_entry *v_list_itr, *tmp;
5410 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5412 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
5414 if (l_type != ICE_SW_LKUP_VLAN)
5415 return ICE_ERR_PARAM;
5416 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
5418 if (v_list_itr->status)
5419 return v_list_itr->status;
5425 * ice_remove_vlan - remove a VLAN address based filter rule
5426 * @hw: pointer to the hardware structure
5427 * @v_list: list of VLAN and forwarding information
5431 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
5433 struct ice_sw_recipe *recp_list;
5436 return ICE_ERR_PARAM;
5438 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
5439 return ice_remove_vlan_rule(hw, v_list, recp_list);
5443 * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
5444 * @hw: pointer to the hardware structure
5445 * @v_list: list of MAC VLAN entries and forwarding information
5446 * @recp_list: list from which function remove MAC VLAN
5448 static enum ice_status
5449 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5450 struct ice_sw_recipe *recp_list)
5452 struct ice_fltr_list_entry *v_list_itr, *tmp;
5454 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5455 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5457 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
5459 if (l_type != ICE_SW_LKUP_MAC_VLAN)
5460 return ICE_ERR_PARAM;
5461 v_list_itr->status =
5462 ice_remove_rule_internal(hw, recp_list,
5464 if (v_list_itr->status)
5465 return v_list_itr->status;
5471 * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
5472 * @hw: pointer to the hardware structure
5473 * @mv_list: list of MAC VLAN and forwarding information
5476 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
5478 struct ice_sw_recipe *recp_list;
5480 if (!mv_list || !hw)
5481 return ICE_ERR_PARAM;
5483 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5484 return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
5488 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
5489 * @fm_entry: filter entry to inspect
5490 * @vsi_handle: VSI handle to compare with filter info
5493 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
5495 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
5496 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
5497 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
5498 fm_entry->vsi_list_info &&
5499 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
5504 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
5505 * @hw: pointer to the hardware structure
5506 * @vsi_handle: VSI handle to remove filters from
5507 * @vsi_list_head: pointer to the list to add entry to
5508 * @fi: pointer to fltr_info of filter entry to copy & add
5510 * Helper function, used when creating a list of filters to remove from
5511 * a specific VSI. The entry added to vsi_list_head is a COPY of the
5512 * original filter entry, with the exception of fltr_info.fltr_act and
5513 * fltr_info.fwd_id fields. These are set such that later logic can
5514 * extract which VSI to remove the fltr from, and pass on that information.
5516 static enum ice_status
5517 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5518 struct LIST_HEAD_TYPE *vsi_list_head,
5519 struct ice_fltr_info *fi)
5521 struct ice_fltr_list_entry *tmp;
5523 /* this memory is freed up in the caller function
5524 * once filters for this VSI are removed
5526 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
5528 return ICE_ERR_NO_MEMORY;
5530 tmp->fltr_info = *fi;
5532 /* Overwrite these fields to indicate which VSI to remove filter from,
5533 * so find and remove logic can extract the information from the
5534 * list entries. Note that original entries will still have proper
5537 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
5538 tmp->fltr_info.vsi_handle = vsi_handle;
5539 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5541 LIST_ADD(&tmp->list_entry, vsi_list_head);
5547 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
5548 * @hw: pointer to the hardware structure
5549 * @vsi_handle: VSI handle to remove filters from
5550 * @lkup_list_head: pointer to the list that has certain lookup type filters
5551 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
5553 * Locates all filters in lkup_list_head that are used by the given VSI,
5554 * and adds COPIES of those entries to vsi_list_head (intended to be used
5555 * to remove the listed filters).
5556 * Note that this means all entries in vsi_list_head must be explicitly
5557 * deallocated by the caller when done with list.
5559 static enum ice_status
5560 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5561 struct LIST_HEAD_TYPE *lkup_list_head,
5562 struct LIST_HEAD_TYPE *vsi_list_head)
5564 struct ice_fltr_mgmt_list_entry *fm_entry;
5565 enum ice_status status = ICE_SUCCESS;
5567 /* check to make sure VSI ID is valid and within boundary */
5568 if (!ice_is_vsi_valid(hw, vsi_handle))
5569 return ICE_ERR_PARAM;
5571 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
5572 ice_fltr_mgmt_list_entry, list_entry) {
5573 if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
5576 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5578 &fm_entry->fltr_info);
5586 * ice_determine_promisc_mask
5587 * @fi: filter info to parse
5589 * Helper function to determine which ICE_PROMISC_ mask corresponds
5590 * to given filter into.
5592 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
5594 u16 vid = fi->l_data.mac_vlan.vlan_id;
5595 u8 *macaddr = fi->l_data.mac.mac_addr;
5596 bool is_tx_fltr = false;
5597 u8 promisc_mask = 0;
5599 if (fi->flag == ICE_FLTR_TX)
5602 if (IS_BROADCAST_ETHER_ADDR(macaddr))
5603 promisc_mask |= is_tx_fltr ?
5604 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
5605 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
5606 promisc_mask |= is_tx_fltr ?
5607 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
5608 else if (IS_UNICAST_ETHER_ADDR(macaddr))
5609 promisc_mask |= is_tx_fltr ?
5610 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
5612 promisc_mask |= is_tx_fltr ?
5613 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
5615 return promisc_mask;
5619 * _ice_get_vsi_promisc - get promiscuous mode of given VSI
5620 * @hw: pointer to the hardware structure
5621 * @vsi_handle: VSI handle to retrieve info from
5622 * @promisc_mask: pointer to mask to be filled in
5623 * @vid: VLAN ID of promisc VLAN VSI
5624 * @sw: pointer to switch info struct for which function add rule
5626 static enum ice_status
5627 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5628 u16 *vid, struct ice_switch_info *sw)
5630 struct ice_fltr_mgmt_list_entry *itr;
5631 struct LIST_HEAD_TYPE *rule_head;
5632 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5634 if (!ice_is_vsi_valid(hw, vsi_handle))
5635 return ICE_ERR_PARAM;
5639 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
5640 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
5642 ice_acquire_lock(rule_lock);
5643 LIST_FOR_EACH_ENTRY(itr, rule_head,
5644 ice_fltr_mgmt_list_entry, list_entry) {
5645 /* Continue if this filter doesn't apply to this VSI or the
5646 * VSI ID is not in the VSI map for this filter
5648 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5651 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5653 ice_release_lock(rule_lock);
5659 * ice_get_vsi_promisc - get promiscuous mode of given VSI
5660 * @hw: pointer to the hardware structure
5661 * @vsi_handle: VSI handle to retrieve info from
5662 * @promisc_mask: pointer to mask to be filled in
5663 * @vid: VLAN ID of promisc VLAN VSI
5666 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5669 return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
5670 vid, hw->switch_info);
5674 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5675 * @hw: pointer to the hardware structure
5676 * @vsi_handle: VSI handle to retrieve info from
5677 * @promisc_mask: pointer to mask to be filled in
5678 * @vid: VLAN ID of promisc VLAN VSI
5679 * @sw: pointer to switch info struct for which function add rule
5681 static enum ice_status
5682 _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5683 u16 *vid, struct ice_switch_info *sw)
5685 struct ice_fltr_mgmt_list_entry *itr;
5686 struct LIST_HEAD_TYPE *rule_head;
5687 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5689 if (!ice_is_vsi_valid(hw, vsi_handle))
5690 return ICE_ERR_PARAM;
5694 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
5695 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
5697 ice_acquire_lock(rule_lock);
5698 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
5700 /* Continue if this filter doesn't apply to this VSI or the
5701 * VSI ID is not in the VSI map for this filter
5703 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5706 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5708 ice_release_lock(rule_lock);
5714 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5715 * @hw: pointer to the hardware structure
5716 * @vsi_handle: VSI handle to retrieve info from
5717 * @promisc_mask: pointer to mask to be filled in
5718 * @vid: VLAN ID of promisc VLAN VSI
5721 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5724 return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
5725 vid, hw->switch_info);
5729 * ice_remove_promisc - Remove promisc based filter rules
5730 * @hw: pointer to the hardware structure
5731 * @recp_id: recipe ID for which the rule needs to removed
5732 * @v_list: list of promisc entries
5734 static enum ice_status
5735 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
5736 struct LIST_HEAD_TYPE *v_list)
5738 struct ice_fltr_list_entry *v_list_itr, *tmp;
5739 struct ice_sw_recipe *recp_list;
5741 recp_list = &hw->switch_info->recp_list[recp_id];
5742 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5744 v_list_itr->status =
5745 ice_remove_rule_internal(hw, recp_list, v_list_itr);
5746 if (v_list_itr->status)
5747 return v_list_itr->status;
5753 * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
5754 * @hw: pointer to the hardware structure
5755 * @vsi_handle: VSI handle to clear mode
5756 * @promisc_mask: mask of promiscuous config bits to clear
5757 * @vid: VLAN ID to clear VLAN promiscuous
5758 * @sw: pointer to switch info struct for which function add rule
5760 static enum ice_status
5761 _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5762 u16 vid, struct ice_switch_info *sw)
5764 struct ice_fltr_list_entry *fm_entry, *tmp;
5765 struct LIST_HEAD_TYPE remove_list_head;
5766 struct ice_fltr_mgmt_list_entry *itr;
5767 struct LIST_HEAD_TYPE *rule_head;
5768 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5769 enum ice_status status = ICE_SUCCESS;
5772 if (!ice_is_vsi_valid(hw, vsi_handle))
5773 return ICE_ERR_PARAM;
5775 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
5776 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
5778 recipe_id = ICE_SW_LKUP_PROMISC;
5780 rule_head = &sw->recp_list[recipe_id].filt_rules;
5781 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
5783 INIT_LIST_HEAD(&remove_list_head);
5785 ice_acquire_lock(rule_lock);
5786 LIST_FOR_EACH_ENTRY(itr, rule_head,
5787 ice_fltr_mgmt_list_entry, list_entry) {
5788 struct ice_fltr_info *fltr_info;
5789 u8 fltr_promisc_mask = 0;
5791 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5793 fltr_info = &itr->fltr_info;
5795 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
5796 vid != fltr_info->l_data.mac_vlan.vlan_id)
5799 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
5801 /* Skip if filter is not completely specified by given mask */
5802 if (fltr_promisc_mask & ~promisc_mask)
5805 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5809 ice_release_lock(rule_lock);
5810 goto free_fltr_list;
5813 ice_release_lock(rule_lock);
5815 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
5818 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
5819 ice_fltr_list_entry, list_entry) {
5820 LIST_DEL(&fm_entry->list_entry);
5821 ice_free(hw, fm_entry);
5828 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
5829 * @hw: pointer to the hardware structure
5830 * @vsi_handle: VSI handle to clear mode
5831 * @promisc_mask: mask of promiscuous config bits to clear
5832 * @vid: VLAN ID to clear VLAN promiscuous
5835 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
5836 u8 promisc_mask, u16 vid)
5838 return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
5839 vid, hw->switch_info);
5843 * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5844 * @hw: pointer to the hardware structure
5845 * @vsi_handle: VSI handle to configure
5846 * @promisc_mask: mask of promiscuous config bits
5847 * @vid: VLAN ID to set VLAN promiscuous
5848 * @lport: logical port number to configure promisc mode
5849 * @sw: pointer to switch info struct for which function add rule
5851 static enum ice_status
5852 _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5853 u16 vid, u8 lport, struct ice_switch_info *sw)
5855 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
5856 struct ice_fltr_list_entry f_list_entry;
5857 struct ice_fltr_info new_fltr;
5858 enum ice_status status = ICE_SUCCESS;
5864 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
5866 if (!ice_is_vsi_valid(hw, vsi_handle))
5867 return ICE_ERR_PARAM;
5868 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5870 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
5872 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
5873 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
5874 new_fltr.l_data.mac_vlan.vlan_id = vid;
5875 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
5877 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
5878 recipe_id = ICE_SW_LKUP_PROMISC;
5881 /* Separate filters must be set for each direction/packet type
5882 * combination, so we will loop over the mask value, store the
5883 * individual type, and clear it out in the input mask as it
5886 while (promisc_mask) {
5887 struct ice_sw_recipe *recp_list;
5893 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
5894 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
5895 pkt_type = UCAST_FLTR;
5896 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
5897 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
5898 pkt_type = UCAST_FLTR;
5900 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
5901 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
5902 pkt_type = MCAST_FLTR;
5903 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
5904 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
5905 pkt_type = MCAST_FLTR;
5907 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
5908 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
5909 pkt_type = BCAST_FLTR;
5910 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
5911 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
5912 pkt_type = BCAST_FLTR;
5916 /* Check for VLAN promiscuous flag */
5917 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
5918 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
5919 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
5920 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
5924 /* Set filter DA based on packet type */
5925 mac_addr = new_fltr.l_data.mac.mac_addr;
5926 if (pkt_type == BCAST_FLTR) {
5927 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
5928 } else if (pkt_type == MCAST_FLTR ||
5929 pkt_type == UCAST_FLTR) {
5930 /* Use the dummy ether header DA */
5931 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
5932 ICE_NONDMA_TO_NONDMA);
5933 if (pkt_type == MCAST_FLTR)
5934 mac_addr[0] |= 0x1; /* Set multicast bit */
5937 /* Need to reset this to zero for all iterations */
5940 new_fltr.flag |= ICE_FLTR_TX;
5941 new_fltr.src = hw_vsi_id;
5943 new_fltr.flag |= ICE_FLTR_RX;
5944 new_fltr.src = lport;
5947 new_fltr.fltr_act = ICE_FWD_TO_VSI;
5948 new_fltr.vsi_handle = vsi_handle;
5949 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
5950 f_list_entry.fltr_info = new_fltr;
5951 recp_list = &sw->recp_list[recipe_id];
5953 status = ice_add_rule_internal(hw, recp_list, lport,
5955 if (status != ICE_SUCCESS)
5956 goto set_promisc_exit;
5964 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5965 * @hw: pointer to the hardware structure
5966 * @vsi_handle: VSI handle to configure
5967 * @promisc_mask: mask of promiscuous config bits
5968 * @vid: VLAN ID to set VLAN promiscuous
5971 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5974 return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
5975 hw->port_info->lport,
5980 * _ice_set_vlan_vsi_promisc
5981 * @hw: pointer to the hardware structure
5982 * @vsi_handle: VSI handle to configure
5983 * @promisc_mask: mask of promiscuous config bits
5984 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5985 * @lport: logical port number to configure promisc mode
5986 * @sw: pointer to switch info struct for which function add rule
5988 * Configure VSI with all associated VLANs to given promiscuous mode(s)
5990 static enum ice_status
5991 _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5992 bool rm_vlan_promisc, u8 lport,
5993 struct ice_switch_info *sw)
5995 struct ice_fltr_list_entry *list_itr, *tmp;
5996 struct LIST_HEAD_TYPE vsi_list_head;
5997 struct LIST_HEAD_TYPE *vlan_head;
5998 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
5999 enum ice_status status;
6002 INIT_LIST_HEAD(&vsi_list_head);
6003 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
6004 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
6005 ice_acquire_lock(vlan_lock);
6006 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
6008 ice_release_lock(vlan_lock);
6010 goto free_fltr_list;
6012 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
6014 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
6015 if (rm_vlan_promisc)
6016 status = _ice_clear_vsi_promisc(hw, vsi_handle,
6020 status = _ice_set_vsi_promisc(hw, vsi_handle,
6021 promisc_mask, vlan_id,
6028 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
6029 ice_fltr_list_entry, list_entry) {
6030 LIST_DEL(&list_itr->list_entry);
6031 ice_free(hw, list_itr);
6037 * ice_set_vlan_vsi_promisc
6038 * @hw: pointer to the hardware structure
6039 * @vsi_handle: VSI handle to configure
6040 * @promisc_mask: mask of promiscuous config bits
6041 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
6043 * Configure VSI with all associated VLANs to given promiscuous mode(s)
6046 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
6047 bool rm_vlan_promisc)
6049 return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
6050 rm_vlan_promisc, hw->port_info->lport,
6055 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
6056 * @hw: pointer to the hardware structure
6057 * @vsi_handle: VSI handle to remove filters from
6058 * @recp_list: recipe list from which function remove fltr
6059 * @lkup: switch rule filter lookup type
6062 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
6063 struct ice_sw_recipe *recp_list,
6064 enum ice_sw_lkup_type lkup)
6066 struct ice_fltr_list_entry *fm_entry;
6067 struct LIST_HEAD_TYPE remove_list_head;
6068 struct LIST_HEAD_TYPE *rule_head;
6069 struct ice_fltr_list_entry *tmp;
6070 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6071 enum ice_status status;
6073 INIT_LIST_HEAD(&remove_list_head);
6074 rule_lock = &recp_list[lkup].filt_rule_lock;
6075 rule_head = &recp_list[lkup].filt_rules;
6076 ice_acquire_lock(rule_lock);
6077 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
6079 ice_release_lock(rule_lock);
6084 case ICE_SW_LKUP_MAC:
6085 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
6087 case ICE_SW_LKUP_VLAN:
6088 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
6090 case ICE_SW_LKUP_PROMISC:
6091 case ICE_SW_LKUP_PROMISC_VLAN:
6092 ice_remove_promisc(hw, lkup, &remove_list_head);
6094 case ICE_SW_LKUP_MAC_VLAN:
6095 ice_remove_mac_vlan(hw, &remove_list_head);
6097 case ICE_SW_LKUP_ETHERTYPE:
6098 case ICE_SW_LKUP_ETHERTYPE_MAC:
6099 ice_remove_eth_mac(hw, &remove_list_head);
6101 case ICE_SW_LKUP_DFLT:
6102 ice_debug(hw, ICE_DBG_SW, "Remove filters for this lookup type hasn't been implemented yet\n");
6104 case ICE_SW_LKUP_LAST:
6105 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
6109 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
6110 ice_fltr_list_entry, list_entry) {
6111 LIST_DEL(&fm_entry->list_entry);
6112 ice_free(hw, fm_entry);
6117 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
6118 * @hw: pointer to the hardware structure
6119 * @vsi_handle: VSI handle to remove filters from
6120 * @sw: pointer to switch info struct
6123 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
6124 struct ice_switch_info *sw)
6126 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
6128 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6129 sw->recp_list, ICE_SW_LKUP_MAC);
6130 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6131 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
6132 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6133 sw->recp_list, ICE_SW_LKUP_PROMISC);
6134 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6135 sw->recp_list, ICE_SW_LKUP_VLAN);
6136 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6137 sw->recp_list, ICE_SW_LKUP_DFLT);
6138 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6139 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
6140 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6141 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
6142 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6143 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
6147 * ice_remove_vsi_fltr - Remove all filters for a VSI
6148 * @hw: pointer to the hardware structure
6149 * @vsi_handle: VSI handle to remove filters from
6151 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
6153 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
6157 * ice_alloc_res_cntr - allocating resource counter
6158 * @hw: pointer to the hardware structure
6159 * @type: type of resource
6160 * @alloc_shared: if set it is shared else dedicated
6161 * @num_items: number of entries requested for FD resource type
6162 * @counter_id: counter index returned by AQ call
6165 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
6168 struct ice_aqc_alloc_free_res_elem *buf;
6169 enum ice_status status;
6172 /* Allocate resource */
6173 buf_len = ice_struct_size(buf, elem, 1);
6174 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6176 return ICE_ERR_NO_MEMORY;
6178 buf->num_elems = CPU_TO_LE16(num_items);
6179 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
6180 ICE_AQC_RES_TYPE_M) | alloc_shared);
6182 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
6183 ice_aqc_opc_alloc_res, NULL);
6187 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
6195 * ice_free_res_cntr - free resource counter
6196 * @hw: pointer to the hardware structure
6197 * @type: type of resource
6198 * @alloc_shared: if set it is shared else dedicated
6199 * @num_items: number of entries to be freed for FD resource type
6200 * @counter_id: counter ID resource which needs to be freed
6203 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
6206 struct ice_aqc_alloc_free_res_elem *buf;
6207 enum ice_status status;
6211 buf_len = ice_struct_size(buf, elem, 1);
6212 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6214 return ICE_ERR_NO_MEMORY;
6216 buf->num_elems = CPU_TO_LE16(num_items);
6217 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
6218 ICE_AQC_RES_TYPE_M) | alloc_shared);
6219 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
6221 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
6222 ice_aqc_opc_free_res, NULL);
6224 ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
6231 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
6232 * @hw: pointer to the hardware structure
6233 * @counter_id: returns counter index
6235 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
6237 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
6238 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
6243 * ice_free_vlan_res_counter - Free counter resource for VLAN type
6244 * @hw: pointer to the hardware structure
6245 * @counter_id: counter index to be freed
6247 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
6249 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
6250 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
6255 * ice_alloc_res_lg_act - add large action resource
6256 * @hw: pointer to the hardware structure
6257 * @l_id: large action ID to fill it in
6258 * @num_acts: number of actions to hold with a large action entry
6260 static enum ice_status
6261 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
6263 struct ice_aqc_alloc_free_res_elem *sw_buf;
6264 enum ice_status status;
6267 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
6268 return ICE_ERR_PARAM;
6270 /* Allocate resource for large action */
6271 buf_len = ice_struct_size(sw_buf, elem, 1);
6272 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6274 return ICE_ERR_NO_MEMORY;
6276 sw_buf->num_elems = CPU_TO_LE16(1);
6278 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
6279 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
6280 * If num_acts is greater than 2, then use
6281 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
6282 * The num_acts cannot exceed 4. This was ensured at the
6283 * beginning of the function.
6286 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
6287 else if (num_acts == 2)
6288 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
6290 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
6292 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
6293 ice_aqc_opc_alloc_res, NULL);
6295 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
6297 ice_free(hw, sw_buf);
6302 * ice_add_mac_with_sw_marker - add filter with sw marker
6303 * @hw: pointer to the hardware structure
6304 * @f_info: filter info structure containing the MAC filter information
6305 * @sw_marker: sw marker to tag the Rx descriptor with
6308 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
6311 struct ice_fltr_mgmt_list_entry *m_entry;
6312 struct ice_fltr_list_entry fl_info;
6313 struct ice_sw_recipe *recp_list;
6314 struct LIST_HEAD_TYPE l_head;
6315 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6316 enum ice_status ret;
6320 if (f_info->fltr_act != ICE_FWD_TO_VSI)
6321 return ICE_ERR_PARAM;
6323 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
6324 return ICE_ERR_PARAM;
6326 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
6327 return ICE_ERR_PARAM;
6329 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
6330 return ICE_ERR_PARAM;
6331 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
6333 /* Add filter if it doesn't exist so then the adding of large
6334 * action always results in update
6337 INIT_LIST_HEAD(&l_head);
6338 fl_info.fltr_info = *f_info;
6339 LIST_ADD(&fl_info.list_entry, &l_head);
6341 entry_exists = false;
6342 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
6343 hw->port_info->lport);
6344 if (ret == ICE_ERR_ALREADY_EXISTS)
6345 entry_exists = true;
6349 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
6350 rule_lock = &recp_list->filt_rule_lock;
6351 ice_acquire_lock(rule_lock);
6352 /* Get the book keeping entry for the filter */
6353 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
6357 /* If counter action was enabled for this rule then don't enable
6358 * sw marker large action
6360 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
6361 ret = ICE_ERR_PARAM;
6365 /* if same marker was added before */
6366 if (m_entry->sw_marker_id == sw_marker) {
6367 ret = ICE_ERR_ALREADY_EXISTS;
6371 /* Allocate a hardware table entry to hold large act. Three actions
6372 * for marker based large action
6374 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
6378 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
6381 /* Update the switch rule to add the marker action */
6382 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
6384 ice_release_lock(rule_lock);
6389 ice_release_lock(rule_lock);
6390 /* only remove entry if it did not exist previously */
6392 ret = ice_remove_mac(hw, &l_head);
6398 * ice_add_mac_with_counter - add filter with counter enabled
6399 * @hw: pointer to the hardware structure
6400 * @f_info: pointer to filter info structure containing the MAC filter
6404 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
6406 struct ice_fltr_mgmt_list_entry *m_entry;
6407 struct ice_fltr_list_entry fl_info;
6408 struct ice_sw_recipe *recp_list;
6409 struct LIST_HEAD_TYPE l_head;
6410 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6411 enum ice_status ret;
6416 if (f_info->fltr_act != ICE_FWD_TO_VSI)
6417 return ICE_ERR_PARAM;
6419 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
6420 return ICE_ERR_PARAM;
6422 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
6423 return ICE_ERR_PARAM;
6424 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
6425 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
6427 entry_exist = false;
6429 rule_lock = &recp_list->filt_rule_lock;
6431 /* Add filter if it doesn't exist so then the adding of large
6432 * action always results in update
6434 INIT_LIST_HEAD(&l_head);
6436 fl_info.fltr_info = *f_info;
6437 LIST_ADD(&fl_info.list_entry, &l_head);
6439 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
6440 hw->port_info->lport);
6441 if (ret == ICE_ERR_ALREADY_EXISTS)
6446 ice_acquire_lock(rule_lock);
6447 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
6449 ret = ICE_ERR_BAD_PTR;
6453 /* Don't enable counter for a filter for which sw marker was enabled */
6454 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
6455 ret = ICE_ERR_PARAM;
6459 /* If a counter was already enabled then don't need to add again */
6460 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
6461 ret = ICE_ERR_ALREADY_EXISTS;
6465 /* Allocate a hardware table entry to VLAN counter */
6466 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
6470 /* Allocate a hardware table entry to hold large act. Two actions for
6471 * counter based large action
6473 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
6477 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
6480 /* Update the switch rule to add the counter action */
6481 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
6483 ice_release_lock(rule_lock);
6488 ice_release_lock(rule_lock);
6489 /* only remove entry if it did not exist previously */
6491 ret = ice_remove_mac(hw, &l_head);
6496 /* This is mapping table entry that maps every word within a given protocol
6497 * structure to the real byte offset as per the specification of that
6499 * for example dst address is 3 words in ethertype header and corresponding
6500 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
6501 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
6502 * matching entry describing its field. This needs to be updated if new
6503 * structure is added to that union.
6505 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
6506 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
6507 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
6508 { ICE_ETYPE_OL, { 0 } },
6509 { ICE_VLAN_OFOS, { 0, 2 } },
6510 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
6511 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
6512 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
6513 26, 28, 30, 32, 34, 36, 38 } },
6514 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
6515 26, 28, 30, 32, 34, 36, 38 } },
6516 { ICE_TCP_IL, { 0, 2 } },
6517 { ICE_UDP_OF, { 0, 2 } },
6518 { ICE_UDP_ILOS, { 0, 2 } },
6519 { ICE_SCTP_IL, { 0, 2 } },
6520 { ICE_VXLAN, { 8, 10, 12, 14 } },
6521 { ICE_GENEVE, { 8, 10, 12, 14 } },
6522 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
6523 { ICE_NVGRE, { 0, 2, 4, 6 } },
6524 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
6525 { ICE_PPPOE, { 0, 2, 4, 6 } },
6526 { ICE_PFCP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
6527 { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
6528 { ICE_ESP, { 0, 2, 4, 6 } },
6529 { ICE_AH, { 0, 2, 4, 6, 8, 10 } },
6530 { ICE_NAT_T, { 8, 10, 12, 14 } },
6531 { ICE_GTP_NO_PAY, { 8, 10, 12, 14 } },
6532 { ICE_VLAN_EX, { 0, 2 } },
6535 /* The following table describes preferred grouping of recipes.
6536 * If a recipe that needs to be programmed is a superset or matches one of the
6537 * following combinations, then the recipe needs to be chained as per the
6541 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
6542 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
6543 { ICE_MAC_IL, ICE_MAC_IL_HW },
6544 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
6545 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
6546 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
6547 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
6548 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
6549 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
6550 { ICE_TCP_IL, ICE_TCP_IL_HW },
6551 { ICE_UDP_OF, ICE_UDP_OF_HW },
6552 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
6553 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
6554 { ICE_VXLAN, ICE_UDP_OF_HW },
6555 { ICE_GENEVE, ICE_UDP_OF_HW },
6556 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
6557 { ICE_NVGRE, ICE_GRE_OF_HW },
6558 { ICE_GTP, ICE_UDP_OF_HW },
6559 { ICE_PPPOE, ICE_PPPOE_HW },
6560 { ICE_PFCP, ICE_UDP_ILOS_HW },
6561 { ICE_L2TPV3, ICE_L2TPV3_HW },
6562 { ICE_ESP, ICE_ESP_HW },
6563 { ICE_AH, ICE_AH_HW },
6564 { ICE_NAT_T, ICE_UDP_ILOS_HW },
6565 { ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
6566 { ICE_VLAN_EX, ICE_VLAN_OF_HW },
6570 * ice_find_recp - find a recipe
6571 * @hw: pointer to the hardware structure
6572 * @lkup_exts: extension sequence to match
6574 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
6576 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
6577 enum ice_sw_tunnel_type tun_type)
6579 bool refresh_required = true;
6580 struct ice_sw_recipe *recp;
6583 /* Walk through existing recipes to find a match */
6584 recp = hw->switch_info->recp_list;
6585 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6586 /* If recipe was not created for this ID, in SW bookkeeping,
6587 * check if FW has an entry for this recipe. If the FW has an
6588 * entry update it in our SW bookkeeping and continue with the
6591 if (!recp[i].recp_created)
6592 if (ice_get_recp_frm_fw(hw,
6593 hw->switch_info->recp_list, i,
6597 /* Skip inverse action recipes */
6598 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
6599 ICE_AQ_RECIPE_ACT_INV_ACT)
6602 /* if number of words we are looking for match */
6603 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
6604 struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
6605 struct ice_fv_word *be = lkup_exts->fv_words;
6606 u16 *cr = recp[i].lkup_exts.field_mask;
6607 u16 *de = lkup_exts->field_mask;
6611 /* ar, cr, and qr are related to the recipe words, while
6612 * be, de, and pe are related to the lookup words
6614 for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
6615 for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
6617 if (ar[qr].off == be[pe].off &&
6618 ar[qr].prot_id == be[pe].prot_id &&
6620 /* Found the "pe"th word in the
6625 /* After walking through all the words in the
6626 * "i"th recipe if "p"th word was not found then
6627 * this recipe is not what we are looking for.
6628 * So break out from this loop and try the next
6631 if (qr >= recp[i].lkup_exts.n_val_words) {
6636 /* If for "i"th recipe the found was never set to false
6637 * then it means we found our match
6639 if (tun_type == recp[i].tun_type && found)
6640 return i; /* Return the recipe ID */
6643 return ICE_MAX_NUM_RECIPES;
6647 * ice_prot_type_to_id - get protocol ID from protocol type
6648 * @type: protocol type
6649 * @id: pointer to variable that will receive the ID
6651 * Returns true if found, false otherwise
6653 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
6657 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
6658 if (ice_prot_id_tbl[i].type == type) {
6659 *id = ice_prot_id_tbl[i].protocol_id;
6666 * ice_find_valid_words - count valid words
6667 * @rule: advanced rule with lookup information
6668 * @lkup_exts: byte offset extractions of the words that are valid
6670 * calculate valid words in a lookup rule using mask value
6673 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
6674 struct ice_prot_lkup_ext *lkup_exts)
6676 u8 j, word, prot_id, ret_val;
6678 if (!ice_prot_type_to_id(rule->type, &prot_id))
6681 word = lkup_exts->n_val_words;
6683 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
6684 if (((u16 *)&rule->m_u)[j] &&
6685 rule->type < ARRAY_SIZE(ice_prot_ext)) {
6686 /* No more space to accommodate */
6687 if (word >= ICE_MAX_CHAIN_WORDS)
6689 lkup_exts->fv_words[word].off =
6690 ice_prot_ext[rule->type].offs[j];
6691 lkup_exts->fv_words[word].prot_id =
6692 ice_prot_id_tbl[rule->type].protocol_id;
6693 lkup_exts->field_mask[word] =
6694 BE16_TO_CPU(((_FORCE_ __be16 *)&rule->m_u)[j]);
6698 ret_val = word - lkup_exts->n_val_words;
6699 lkup_exts->n_val_words = word;
6705 * ice_create_first_fit_recp_def - Create a recipe grouping
6706 * @hw: pointer to the hardware structure
6707 * @lkup_exts: an array of protocol header extractions
6708 * @rg_list: pointer to a list that stores new recipe groups
6709 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
6711 * Using first fit algorithm, take all the words that are still not done
6712 * and start grouping them in 4-word groups. Each group makes up one
6715 static enum ice_status
6716 ice_create_first_fit_recp_def(struct ice_hw *hw,
6717 struct ice_prot_lkup_ext *lkup_exts,
6718 struct LIST_HEAD_TYPE *rg_list,
6721 struct ice_pref_recipe_group *grp = NULL;
6726 if (!lkup_exts->n_val_words) {
6727 struct ice_recp_grp_entry *entry;
6729 entry = (struct ice_recp_grp_entry *)
6730 ice_malloc(hw, sizeof(*entry));
6732 return ICE_ERR_NO_MEMORY;
6733 LIST_ADD(&entry->l_entry, rg_list);
6734 grp = &entry->r_group;
6736 grp->n_val_pairs = 0;
6739 /* Walk through every word in the rule to check if it is not done. If so
6740 * then this word needs to be part of a new recipe.
6742 for (j = 0; j < lkup_exts->n_val_words; j++)
6743 if (!ice_is_bit_set(lkup_exts->done, j)) {
6745 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
6746 struct ice_recp_grp_entry *entry;
6748 entry = (struct ice_recp_grp_entry *)
6749 ice_malloc(hw, sizeof(*entry));
6751 return ICE_ERR_NO_MEMORY;
6752 LIST_ADD(&entry->l_entry, rg_list);
6753 grp = &entry->r_group;
6757 grp->pairs[grp->n_val_pairs].prot_id =
6758 lkup_exts->fv_words[j].prot_id;
6759 grp->pairs[grp->n_val_pairs].off =
6760 lkup_exts->fv_words[j].off;
6761 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
6769 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
6770 * @hw: pointer to the hardware structure
6771 * @fv_list: field vector with the extraction sequence information
6772 * @rg_list: recipe groupings with protocol-offset pairs
6774 * Helper function to fill in the field vector indices for protocol-offset
6775 * pairs. These indexes are then ultimately programmed into a recipe.
6777 static enum ice_status
6778 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
6779 struct LIST_HEAD_TYPE *rg_list)
6781 struct ice_sw_fv_list_entry *fv;
6782 struct ice_recp_grp_entry *rg;
6783 struct ice_fv_word *fv_ext;
6785 if (LIST_EMPTY(fv_list))
6788 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
6789 fv_ext = fv->fv_ptr->ew;
6791 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
6794 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
6795 struct ice_fv_word *pr;
6800 pr = &rg->r_group.pairs[i];
6801 mask = rg->r_group.mask[i];
6803 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
6804 if (fv_ext[j].prot_id == pr->prot_id &&
6805 fv_ext[j].off == pr->off) {
6808 /* Store index of field vector */
6810 rg->fv_mask[i] = mask;
6814 /* Protocol/offset could not be found, caller gave an
6818 return ICE_ERR_PARAM;
6826 * ice_find_free_recp_res_idx - find free result indexes for recipe
6827 * @hw: pointer to hardware structure
6828 * @profiles: bitmap of profiles that will be associated with the new recipe
6829 * @free_idx: pointer to variable to receive the free index bitmap
6831 * The algorithm used here is:
6832 * 1. When creating a new recipe, create a set P which contains all
6833 * Profiles that will be associated with our new recipe
6835 * 2. For each Profile p in set P:
6836 * a. Add all recipes associated with Profile p into set R
6837 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
6838 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
6839 * i. Or just assume they all have the same possible indexes:
6841 * i.e., PossibleIndexes = 0x0000F00000000000
6843 * 3. For each Recipe r in set R:
6844 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
6845 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
6847 * FreeIndexes will contain the bits indicating the indexes free for use,
6848 * then the code needs to update the recipe[r].used_result_idx_bits to
6849 * indicate which indexes were selected for use by this recipe.
6852 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
6853 ice_bitmap_t *free_idx)
6855 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6856 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6857 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
6860 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6861 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6862 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
6863 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
6865 ice_bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
6867 /* For each profile we are going to associate the recipe with, add the
6868 * recipes that are associated with that profile. This will give us
6869 * the set of recipes that our recipe may collide with. Also, determine
6870 * what possible result indexes are usable given this set of profiles.
6872 ice_for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
6873 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
6874 ICE_MAX_NUM_RECIPES);
6875 ice_and_bitmap(possible_idx, possible_idx,
6876 hw->switch_info->prof_res_bm[bit],
6880 /* For each recipe that our new recipe may collide with, determine
6881 * which indexes have been used.
6883 ice_for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
6884 ice_or_bitmap(used_idx, used_idx,
6885 hw->switch_info->recp_list[bit].res_idxs,
6888 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
6890 /* return number of free indexes */
6891 return (u16)ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS);
6895 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
6896 * @hw: pointer to hardware structure
6897 * @rm: recipe management list entry
6898 * @profiles: bitmap of profiles that will be associated.
6900 static enum ice_status
6901 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
6902 ice_bitmap_t *profiles)
6904 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6905 struct ice_aqc_recipe_data_elem *tmp;
6906 struct ice_aqc_recipe_data_elem *buf;
6907 struct ice_recp_grp_entry *entry;
6908 enum ice_status status;
6914 /* When more than one recipe are required, another recipe is needed to
6915 * chain them together. Matching a tunnel metadata ID takes up one of
6916 * the match fields in the chaining recipe reducing the number of
6917 * chained recipes by one.
6919 /* check number of free result indices */
6920 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6921 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
6923 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
6924 free_res_idx, rm->n_grp_count);
6926 if (rm->n_grp_count > 1) {
6927 if (rm->n_grp_count > free_res_idx)
6928 return ICE_ERR_MAX_LIMIT;
6933 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
6934 return ICE_ERR_MAX_LIMIT;
6936 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
6937 ICE_MAX_NUM_RECIPES,
6940 return ICE_ERR_NO_MEMORY;
6942 buf = (struct ice_aqc_recipe_data_elem *)
6943 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
6945 status = ICE_ERR_NO_MEMORY;
6949 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
6950 recipe_count = ICE_MAX_NUM_RECIPES;
6951 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
6953 if (status || recipe_count == 0)
6956 /* Allocate the recipe resources, and configure them according to the
6957 * match fields from protocol headers and extracted field vectors.
6959 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
6960 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
6963 status = ice_alloc_recipe(hw, &entry->rid);
6967 /* Clear the result index of the located recipe, as this will be
6968 * updated, if needed, later in the recipe creation process.
6970 tmp[0].content.result_indx = 0;
6972 buf[recps] = tmp[0];
6973 buf[recps].recipe_indx = (u8)entry->rid;
6974 /* if the recipe is a non-root recipe RID should be programmed
6975 * as 0 for the rules to be applied correctly.
6977 buf[recps].content.rid = 0;
6978 ice_memset(&buf[recps].content.lkup_indx, 0,
6979 sizeof(buf[recps].content.lkup_indx),
6982 /* All recipes use look-up index 0 to match switch ID. */
6983 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
6984 buf[recps].content.mask[0] =
6985 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
6986 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
6989 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
6990 buf[recps].content.lkup_indx[i] = 0x80;
6991 buf[recps].content.mask[i] = 0;
6994 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
6995 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
6996 buf[recps].content.mask[i + 1] =
6997 CPU_TO_LE16(entry->fv_mask[i]);
7000 if (rm->n_grp_count > 1) {
7001 /* Checks to see if there really is a valid result index
7004 if (chain_idx >= ICE_MAX_FV_WORDS) {
7005 ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
7006 status = ICE_ERR_MAX_LIMIT;
7010 entry->chain_idx = chain_idx;
7011 buf[recps].content.result_indx =
7012 ICE_AQ_RECIPE_RESULT_EN |
7013 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
7014 ICE_AQ_RECIPE_RESULT_DATA_M);
7015 ice_clear_bit(chain_idx, result_idx_bm);
7016 chain_idx = ice_find_first_bit(result_idx_bm,
7020 /* fill recipe dependencies */
7021 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
7022 ICE_MAX_NUM_RECIPES);
7023 ice_set_bit(buf[recps].recipe_indx,
7024 (ice_bitmap_t *)buf[recps].recipe_bitmap);
7025 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
7029 if (rm->n_grp_count == 1) {
7030 rm->root_rid = buf[0].recipe_indx;
7031 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
7032 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
7033 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
7034 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
7035 sizeof(buf[0].recipe_bitmap),
7036 ICE_NONDMA_TO_NONDMA);
7038 status = ICE_ERR_BAD_PTR;
7041 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
7042 * the recipe which is getting created if specified
7043 * by user. Usually any advanced switch filter, which results
7044 * into new extraction sequence, ended up creating a new recipe
7045 * of type ROOT and usually recipes are associated with profiles
7046 * Switch rule referreing newly created recipe, needs to have
7047 * either/or 'fwd' or 'join' priority, otherwise switch rule
7048 * evaluation will not happen correctly. In other words, if
7049 * switch rule to be evaluated on priority basis, then recipe
7050 * needs to have priority, otherwise it will be evaluated last.
7052 buf[0].content.act_ctrl_fwd_priority = rm->priority;
7054 struct ice_recp_grp_entry *last_chain_entry;
7057 /* Allocate the last recipe that will chain the outcomes of the
7058 * other recipes together
7060 status = ice_alloc_recipe(hw, &rid);
7064 buf[recps].recipe_indx = (u8)rid;
7065 buf[recps].content.rid = (u8)rid;
7066 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
7067 /* the new entry created should also be part of rg_list to
7068 * make sure we have complete recipe
7070 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
7071 sizeof(*last_chain_entry));
7072 if (!last_chain_entry) {
7073 status = ICE_ERR_NO_MEMORY;
7076 last_chain_entry->rid = rid;
7077 ice_memset(&buf[recps].content.lkup_indx, 0,
7078 sizeof(buf[recps].content.lkup_indx),
7080 /* All recipes use look-up index 0 to match switch ID. */
7081 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
7082 buf[recps].content.mask[0] =
7083 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
7084 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
7085 buf[recps].content.lkup_indx[i] =
7086 ICE_AQ_RECIPE_LKUP_IGNORE;
7087 buf[recps].content.mask[i] = 0;
7091 /* update r_bitmap with the recp that is used for chaining */
7092 ice_set_bit(rid, rm->r_bitmap);
7093 /* this is the recipe that chains all the other recipes so it
7094 * should not have a chaining ID to indicate the same
7096 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
7097 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
7099 last_chain_entry->fv_idx[i] = entry->chain_idx;
7100 buf[recps].content.lkup_indx[i] = entry->chain_idx;
7101 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
7102 ice_set_bit(entry->rid, rm->r_bitmap);
7104 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
7105 if (sizeof(buf[recps].recipe_bitmap) >=
7106 sizeof(rm->r_bitmap)) {
7107 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
7108 sizeof(buf[recps].recipe_bitmap),
7109 ICE_NONDMA_TO_NONDMA);
7111 status = ICE_ERR_BAD_PTR;
7114 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
7117 rm->root_rid = (u8)rid;
7119 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7123 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
7124 ice_release_change_lock(hw);
7128 /* Every recipe that just got created add it to the recipe
7131 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
7132 struct ice_switch_info *sw = hw->switch_info;
7133 bool is_root, idx_found = false;
7134 struct ice_sw_recipe *recp;
7135 u16 idx, buf_idx = 0;
7137 /* find buffer index for copying some data */
7138 for (idx = 0; idx < rm->n_grp_count; idx++)
7139 if (buf[idx].recipe_indx == entry->rid) {
7145 status = ICE_ERR_OUT_OF_RANGE;
7149 recp = &sw->recp_list[entry->rid];
7150 is_root = (rm->root_rid == entry->rid);
7151 recp->is_root = is_root;
7153 recp->root_rid = entry->rid;
7154 recp->big_recp = (is_root && rm->n_grp_count > 1);
7156 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
7157 entry->r_group.n_val_pairs *
7158 sizeof(struct ice_fv_word),
7159 ICE_NONDMA_TO_NONDMA);
7161 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
7162 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
7164 /* Copy non-result fv index values and masks to recipe. This
7165 * call will also update the result recipe bitmask.
7167 ice_collect_result_idx(&buf[buf_idx], recp);
7169 /* for non-root recipes, also copy to the root, this allows
7170 * easier matching of a complete chained recipe
7173 ice_collect_result_idx(&buf[buf_idx],
7174 &sw->recp_list[rm->root_rid]);
7176 recp->n_ext_words = entry->r_group.n_val_pairs;
7177 recp->chain_idx = entry->chain_idx;
7178 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
7179 recp->n_grp_count = rm->n_grp_count;
7180 recp->tun_type = rm->tun_type;
7181 recp->recp_created = true;
7195 * ice_create_recipe_group - creates recipe group
7196 * @hw: pointer to hardware structure
7197 * @rm: recipe management list entry
7198 * @lkup_exts: lookup elements
7200 static enum ice_status
7201 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
7202 struct ice_prot_lkup_ext *lkup_exts)
7204 enum ice_status status;
7207 rm->n_grp_count = 0;
7209 /* Create recipes for words that are marked not done by packing them
7212 status = ice_create_first_fit_recp_def(hw, lkup_exts,
7213 &rm->rg_list, &recp_count);
7215 rm->n_grp_count += recp_count;
7216 rm->n_ext_words = lkup_exts->n_val_words;
7217 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
7218 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
7219 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
7220 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
7227 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
7228 * @hw: pointer to hardware structure
7229 * @lkups: lookup elements or match criteria for the advanced recipe, one
7230 * structure per protocol header
7231 * @lkups_cnt: number of protocols
7232 * @bm: bitmap of field vectors to consider
7233 * @fv_list: pointer to a list that holds the returned field vectors
7235 static enum ice_status
7236 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7237 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
7239 enum ice_status status;
7246 prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
7248 return ICE_ERR_NO_MEMORY;
7250 for (i = 0; i < lkups_cnt; i++)
7251 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
7252 status = ICE_ERR_CFG;
7256 /* Find field vectors that include all specified protocol types */
7257 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
7260 ice_free(hw, prot_ids);
7265 * ice_tun_type_match_mask - determine if tun type needs a match mask
7266 * @tun_type: tunnel type
7267 * @mask: mask to be used for the tunnel
7269 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
7272 case ICE_SW_TUN_VXLAN_GPE:
7273 case ICE_SW_TUN_GENEVE:
7274 case ICE_SW_TUN_VXLAN:
7275 case ICE_SW_TUN_NVGRE:
7276 case ICE_SW_TUN_UDP:
7277 case ICE_ALL_TUNNELS:
7278 case ICE_SW_TUN_AND_NON_TUN_QINQ:
7279 case ICE_NON_TUN_QINQ:
7280 case ICE_SW_TUN_PPPOE_QINQ:
7281 case ICE_SW_TUN_PPPOE_PAY_QINQ:
7282 case ICE_SW_TUN_PPPOE_IPV4_QINQ:
7283 case ICE_SW_TUN_PPPOE_IPV6_QINQ:
7284 *mask = ICE_TUN_FLAG_MASK;
7287 case ICE_SW_TUN_GENEVE_VLAN:
7288 case ICE_SW_TUN_VXLAN_VLAN:
7289 *mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
7299 * ice_add_special_words - Add words that are not protocols, such as metadata
7300 * @rinfo: other information regarding the rule e.g. priority and action info
7301 * @lkup_exts: lookup word structure
7303 static enum ice_status
7304 ice_add_special_words(struct ice_adv_rule_info *rinfo,
7305 struct ice_prot_lkup_ext *lkup_exts)
7309 /* If this is a tunneled packet, then add recipe index to match the
7310 * tunnel bit in the packet metadata flags.
7312 if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
7313 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
7314 u8 word = lkup_exts->n_val_words++;
7316 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
7317 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
7318 lkup_exts->field_mask[word] = mask;
7320 return ICE_ERR_MAX_LIMIT;
7327 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
7328 * @hw: pointer to hardware structure
7329 * @rinfo: other information regarding the rule e.g. priority and action info
7330 * @bm: pointer to memory for returning the bitmap of field vectors
7333 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
7336 enum ice_prof_type prof_type;
7338 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
7340 switch (rinfo->tun_type) {
7342 case ICE_NON_TUN_QINQ:
7343 prof_type = ICE_PROF_NON_TUN;
7345 case ICE_ALL_TUNNELS:
7346 prof_type = ICE_PROF_TUN_ALL;
7348 case ICE_SW_TUN_VXLAN_GPE:
7349 case ICE_SW_TUN_GENEVE:
7350 case ICE_SW_TUN_GENEVE_VLAN:
7351 case ICE_SW_TUN_VXLAN:
7352 case ICE_SW_TUN_VXLAN_VLAN:
7353 case ICE_SW_TUN_UDP:
7354 case ICE_SW_TUN_GTP:
7355 prof_type = ICE_PROF_TUN_UDP;
7357 case ICE_SW_TUN_NVGRE:
7358 prof_type = ICE_PROF_TUN_GRE;
7360 case ICE_SW_TUN_PPPOE:
7361 case ICE_SW_TUN_PPPOE_QINQ:
7362 prof_type = ICE_PROF_TUN_PPPOE;
7364 case ICE_SW_TUN_PPPOE_PAY:
7365 case ICE_SW_TUN_PPPOE_PAY_QINQ:
7366 ice_set_bit(ICE_PROFID_PPPOE_PAY, bm);
7368 case ICE_SW_TUN_PPPOE_IPV4:
7369 case ICE_SW_TUN_PPPOE_IPV4_QINQ:
7370 ice_set_bit(ICE_PROFID_PPPOE_IPV4_OTHER, bm);
7371 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
7372 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
7374 case ICE_SW_TUN_PPPOE_IPV4_TCP:
7375 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
7377 case ICE_SW_TUN_PPPOE_IPV4_UDP:
7378 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
7380 case ICE_SW_TUN_PPPOE_IPV6:
7381 case ICE_SW_TUN_PPPOE_IPV6_QINQ:
7382 ice_set_bit(ICE_PROFID_PPPOE_IPV6_OTHER, bm);
7383 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
7384 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
7386 case ICE_SW_TUN_PPPOE_IPV6_TCP:
7387 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
7389 case ICE_SW_TUN_PPPOE_IPV6_UDP:
7390 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
7392 case ICE_SW_TUN_PROFID_IPV6_ESP:
7393 case ICE_SW_TUN_IPV6_ESP:
7394 ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
7396 case ICE_SW_TUN_PROFID_IPV6_AH:
7397 case ICE_SW_TUN_IPV6_AH:
7398 ice_set_bit(ICE_PROFID_IPV6_AH, bm);
7400 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
7401 case ICE_SW_TUN_IPV6_L2TPV3:
7402 ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
7404 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
7405 case ICE_SW_TUN_IPV6_NAT_T:
7406 ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
7408 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
7409 ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
7411 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
7412 ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
7414 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
7415 ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
7417 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
7418 ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
7420 case ICE_SW_TUN_IPV4_NAT_T:
7421 ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
7423 case ICE_SW_TUN_IPV4_L2TPV3:
7424 ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
7426 case ICE_SW_TUN_IPV4_ESP:
7427 ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
7429 case ICE_SW_TUN_IPV4_AH:
7430 ice_set_bit(ICE_PROFID_IPV4_AH, bm);
7432 case ICE_SW_IPV4_TCP:
7433 ice_set_bit(ICE_PROFID_IPV4_TCP, bm);
7435 case ICE_SW_IPV4_UDP:
7436 ice_set_bit(ICE_PROFID_IPV4_UDP, bm);
7438 case ICE_SW_IPV6_TCP:
7439 ice_set_bit(ICE_PROFID_IPV6_TCP, bm);
7441 case ICE_SW_IPV6_UDP:
7442 ice_set_bit(ICE_PROFID_IPV6_UDP, bm);
7444 case ICE_SW_TUN_IPV4_GTPU_IPV4:
7445 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, bm);
7446 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_OTHER, bm);
7447 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
7448 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
7449 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
7450 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
7452 case ICE_SW_TUN_IPV6_GTPU_IPV4:
7453 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, bm);
7454 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_OTHER, bm);
7455 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
7456 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
7457 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
7458 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
7460 case ICE_SW_TUN_IPV4_GTPU_IPV6:
7461 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, bm);
7462 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_OTHER, bm);
7463 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
7464 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
7465 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
7466 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
7468 case ICE_SW_TUN_IPV6_GTPU_IPV6:
7469 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, bm);
7470 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_OTHER, bm);
7471 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
7472 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
7473 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
7474 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
7476 case ICE_SW_TUN_AND_NON_TUN:
7477 case ICE_SW_TUN_AND_NON_TUN_QINQ:
7479 prof_type = ICE_PROF_ALL;
7483 ice_get_sw_fv_bitmap(hw, prof_type, bm);
7487 * ice_is_prof_rule - determine if rule type is a profile rule
7488 * @type: the rule type
7490 * if the rule type is a profile rule, that means that there no field value
7491 * match required, in this case just a profile hit is required.
7493 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
7496 case ICE_SW_TUN_PROFID_IPV6_ESP:
7497 case ICE_SW_TUN_PROFID_IPV6_AH:
7498 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
7499 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
7500 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
7501 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
7502 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
7503 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
7513 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
7514 * @hw: pointer to hardware structure
7515 * @lkups: lookup elements or match criteria for the advanced recipe, one
7516 * structure per protocol header
7517 * @lkups_cnt: number of protocols
7518 * @rinfo: other information regarding the rule e.g. priority and action info
7519 * @rid: return the recipe ID of the recipe created
7521 static enum ice_status
7522 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7523 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
7525 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
7526 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7527 struct ice_prot_lkup_ext *lkup_exts;
7528 struct ice_recp_grp_entry *r_entry;
7529 struct ice_sw_fv_list_entry *fvit;
7530 struct ice_recp_grp_entry *r_tmp;
7531 struct ice_sw_fv_list_entry *tmp;
7532 enum ice_status status = ICE_SUCCESS;
7533 struct ice_sw_recipe *rm;
7536 if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
7537 return ICE_ERR_PARAM;
7539 lkup_exts = (struct ice_prot_lkup_ext *)
7540 ice_malloc(hw, sizeof(*lkup_exts));
7542 return ICE_ERR_NO_MEMORY;
7544 /* Determine the number of words to be matched and if it exceeds a
7545 * recipe's restrictions
7547 for (i = 0; i < lkups_cnt; i++) {
7550 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
7551 status = ICE_ERR_CFG;
7552 goto err_free_lkup_exts;
7555 count = ice_fill_valid_words(&lkups[i], lkup_exts);
7557 status = ICE_ERR_CFG;
7558 goto err_free_lkup_exts;
7562 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
7564 status = ICE_ERR_NO_MEMORY;
7565 goto err_free_lkup_exts;
7568 /* Get field vectors that contain fields extracted from all the protocol
7569 * headers being programmed.
7571 INIT_LIST_HEAD(&rm->fv_list);
7572 INIT_LIST_HEAD(&rm->rg_list);
7574 /* Get bitmap of field vectors (profiles) that are compatible with the
7575 * rule request; only these will be searched in the subsequent call to
7578 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
7580 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
7584 /* Create any special protocol/offset pairs, such as looking at tunnel
7585 * bits by extracting metadata
7587 status = ice_add_special_words(rinfo, lkup_exts);
7589 goto err_free_lkup_exts;
7591 /* Group match words into recipes using preferred recipe grouping
7594 status = ice_create_recipe_group(hw, rm, lkup_exts);
7598 /* set the recipe priority if specified */
7599 rm->priority = (u8)rinfo->priority;
7601 /* Find offsets from the field vector. Pick the first one for all the
7604 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
7608 /* An empty FV list means to use all the profiles returned in the
7611 if (LIST_EMPTY(&rm->fv_list)) {
7614 ice_for_each_set_bit(j, fv_bitmap, ICE_MAX_NUM_PROFILES) {
7615 struct ice_sw_fv_list_entry *fvl;
7617 fvl = (struct ice_sw_fv_list_entry *)
7618 ice_malloc(hw, sizeof(*fvl));
7622 fvl->profile_id = j;
7623 LIST_ADD(&fvl->list_entry, &rm->fv_list);
7627 /* get bitmap of all profiles the recipe will be associated with */
7628 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7629 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7631 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
7632 ice_set_bit((u16)fvit->profile_id, profiles);
7635 /* Look for a recipe which matches our requested fv / mask list */
7636 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
7637 if (*rid < ICE_MAX_NUM_RECIPES)
7638 /* Success if found a recipe that match the existing criteria */
7641 rm->tun_type = rinfo->tun_type;
7642 /* Recipe we need does not exist, add a recipe */
7643 status = ice_add_sw_recipe(hw, rm, profiles);
7647 /* Associate all the recipes created with all the profiles in the
7648 * common field vector.
7650 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7652 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
7655 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
7656 (u8 *)r_bitmap, NULL);
7660 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
7661 ICE_MAX_NUM_RECIPES);
7662 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7666 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
7669 ice_release_change_lock(hw);
7674 /* Update profile to recipe bitmap array */
7675 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
7676 ICE_MAX_NUM_RECIPES);
7678 /* Update recipe to profile bitmap array */
7679 ice_for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
7680 ice_set_bit((u16)fvit->profile_id,
7681 recipe_to_profile[j]);
7684 *rid = rm->root_rid;
7685 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
7686 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
7688 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
7689 ice_recp_grp_entry, l_entry) {
7690 LIST_DEL(&r_entry->l_entry);
7691 ice_free(hw, r_entry);
7694 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
7696 LIST_DEL(&fvit->list_entry);
7701 ice_free(hw, rm->root_buf);
7706 ice_free(hw, lkup_exts);
7712 * ice_find_dummy_packet - find dummy packet by tunnel type
7714 * @lkups: lookup elements or match criteria for the advanced recipe, one
7715 * structure per protocol header
7716 * @lkups_cnt: number of protocols
7717 * @tun_type: tunnel type from the match criteria
7718 * @pkt: dummy packet to fill according to filter match criteria
7719 * @pkt_len: packet length of dummy packet
7720 * @offsets: pointer to receive the pointer to the offsets for the packet
7723 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7724 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
7726 const struct ice_dummy_pkt_offsets **offsets)
7728 bool tcp = false, udp = false, ipv6 = false, vlan = false;
7732 for (i = 0; i < lkups_cnt; i++) {
7733 if (lkups[i].type == ICE_UDP_ILOS)
7735 else if (lkups[i].type == ICE_TCP_IL)
7737 else if (lkups[i].type == ICE_IPV6_OFOS)
7739 else if (lkups[i].type == ICE_VLAN_OFOS)
7741 else if (lkups[i].type == ICE_IPV4_OFOS &&
7742 lkups[i].h_u.ipv4_hdr.protocol ==
7743 ICE_IPV4_NVGRE_PROTO_ID &&
7744 lkups[i].m_u.ipv4_hdr.protocol ==
7747 else if (lkups[i].type == ICE_PPPOE &&
7748 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
7749 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
7750 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
7753 else if (lkups[i].type == ICE_ETYPE_OL &&
7754 lkups[i].h_u.ethertype.ethtype_id ==
7755 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
7756 lkups[i].m_u.ethertype.ethtype_id ==
7759 else if (lkups[i].type == ICE_IPV4_IL &&
7760 lkups[i].h_u.ipv4_hdr.protocol ==
7762 lkups[i].m_u.ipv4_hdr.protocol ==
7767 if ((tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7768 tun_type == ICE_NON_TUN_QINQ) && ipv6) {
7769 *pkt = dummy_qinq_ipv6_pkt;
7770 *pkt_len = sizeof(dummy_qinq_ipv6_pkt);
7771 *offsets = dummy_qinq_ipv6_packet_offsets;
7773 } else if (tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7774 tun_type == ICE_NON_TUN_QINQ) {
7775 *pkt = dummy_qinq_ipv4_pkt;
7776 *pkt_len = sizeof(dummy_qinq_ipv4_pkt);
7777 *offsets = dummy_qinq_ipv4_packet_offsets;
7781 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_QINQ) {
7782 *pkt = dummy_qinq_pppoe_ipv6_packet;
7783 *pkt_len = sizeof(dummy_qinq_pppoe_ipv6_packet);
7784 *offsets = dummy_qinq_pppoe_packet_ipv6_offsets;
7786 } else if (tun_type == ICE_SW_TUN_PPPOE_IPV4_QINQ) {
7787 *pkt = dummy_qinq_pppoe_ipv4_pkt;
7788 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7789 *offsets = dummy_qinq_pppoe_ipv4_packet_offsets;
7791 } else if (tun_type == ICE_SW_TUN_PPPOE_QINQ ||
7792 tun_type == ICE_SW_TUN_PPPOE_PAY_QINQ) {
7793 *pkt = dummy_qinq_pppoe_ipv4_pkt;
7794 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7795 *offsets = dummy_qinq_pppoe_packet_offsets;
7799 if (tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY) {
7800 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7801 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7802 *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
7804 } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
7805 *pkt = dummy_ipv6_gtpu_ipv6_packet;
7806 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
7807 *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
7811 if (tun_type == ICE_SW_TUN_IPV4_ESP) {
7812 *pkt = dummy_ipv4_esp_pkt;
7813 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
7814 *offsets = dummy_ipv4_esp_packet_offsets;
7818 if (tun_type == ICE_SW_TUN_IPV6_ESP) {
7819 *pkt = dummy_ipv6_esp_pkt;
7820 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
7821 *offsets = dummy_ipv6_esp_packet_offsets;
7825 if (tun_type == ICE_SW_TUN_IPV4_AH) {
7826 *pkt = dummy_ipv4_ah_pkt;
7827 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
7828 *offsets = dummy_ipv4_ah_packet_offsets;
7832 if (tun_type == ICE_SW_TUN_IPV6_AH) {
7833 *pkt = dummy_ipv6_ah_pkt;
7834 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
7835 *offsets = dummy_ipv6_ah_packet_offsets;
7839 if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
7840 *pkt = dummy_ipv4_nat_pkt;
7841 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
7842 *offsets = dummy_ipv4_nat_packet_offsets;
7846 if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
7847 *pkt = dummy_ipv6_nat_pkt;
7848 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
7849 *offsets = dummy_ipv6_nat_packet_offsets;
7853 if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
7854 *pkt = dummy_ipv4_l2tpv3_pkt;
7855 *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
7856 *offsets = dummy_ipv4_l2tpv3_packet_offsets;
7860 if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
7861 *pkt = dummy_ipv6_l2tpv3_pkt;
7862 *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
7863 *offsets = dummy_ipv6_l2tpv3_packet_offsets;
7867 if (tun_type == ICE_SW_TUN_GTP) {
7868 *pkt = dummy_udp_gtp_packet;
7869 *pkt_len = sizeof(dummy_udp_gtp_packet);
7870 *offsets = dummy_udp_gtp_packet_offsets;
7874 if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
7875 *pkt = dummy_pppoe_ipv6_packet;
7876 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
7877 *offsets = dummy_pppoe_packet_offsets;
7879 } else if (tun_type == ICE_SW_TUN_PPPOE ||
7880 tun_type == ICE_SW_TUN_PPPOE_PAY) {
7881 *pkt = dummy_pppoe_ipv4_packet;
7882 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
7883 *offsets = dummy_pppoe_packet_offsets;
7887 if (tun_type == ICE_SW_TUN_PPPOE_IPV4) {
7888 *pkt = dummy_pppoe_ipv4_packet;
7889 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
7890 *offsets = dummy_pppoe_packet_ipv4_offsets;
7894 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_TCP) {
7895 *pkt = dummy_pppoe_ipv4_tcp_packet;
7896 *pkt_len = sizeof(dummy_pppoe_ipv4_tcp_packet);
7897 *offsets = dummy_pppoe_ipv4_tcp_packet_offsets;
7901 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_UDP) {
7902 *pkt = dummy_pppoe_ipv4_udp_packet;
7903 *pkt_len = sizeof(dummy_pppoe_ipv4_udp_packet);
7904 *offsets = dummy_pppoe_ipv4_udp_packet_offsets;
7908 if (tun_type == ICE_SW_TUN_PPPOE_IPV6) {
7909 *pkt = dummy_pppoe_ipv6_packet;
7910 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
7911 *offsets = dummy_pppoe_packet_ipv6_offsets;
7915 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_TCP) {
7916 *pkt = dummy_pppoe_ipv6_tcp_packet;
7917 *pkt_len = sizeof(dummy_pppoe_ipv6_tcp_packet);
7918 *offsets = dummy_pppoe_packet_ipv6_tcp_offsets;
7922 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_UDP) {
7923 *pkt = dummy_pppoe_ipv6_udp_packet;
7924 *pkt_len = sizeof(dummy_pppoe_ipv6_udp_packet);
7925 *offsets = dummy_pppoe_packet_ipv6_udp_offsets;
7929 if (tun_type == ICE_SW_IPV4_TCP) {
7930 *pkt = dummy_tcp_packet;
7931 *pkt_len = sizeof(dummy_tcp_packet);
7932 *offsets = dummy_tcp_packet_offsets;
7936 if (tun_type == ICE_SW_IPV4_UDP) {
7937 *pkt = dummy_udp_packet;
7938 *pkt_len = sizeof(dummy_udp_packet);
7939 *offsets = dummy_udp_packet_offsets;
7943 if (tun_type == ICE_SW_IPV6_TCP) {
7944 *pkt = dummy_tcp_ipv6_packet;
7945 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
7946 *offsets = dummy_tcp_ipv6_packet_offsets;
7950 if (tun_type == ICE_SW_IPV6_UDP) {
7951 *pkt = dummy_udp_ipv6_packet;
7952 *pkt_len = sizeof(dummy_udp_ipv6_packet);
7953 *offsets = dummy_udp_ipv6_packet_offsets;
7957 /* Support GTP tunnel + L3 */
7958 if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4) {
7959 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7960 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7961 *offsets = dummy_ipv4_gtpu_ipv4_packet_offsets;
7964 if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6) {
7965 *pkt = dummy_ipv4_gtpu_ipv6_packet;
7966 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_packet);
7967 *offsets = dummy_ipv4_gtpu_ipv6_packet_offsets;
7970 if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4) {
7971 *pkt = dummy_ipv6_gtpu_ipv4_packet;
7972 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_packet);
7973 *offsets = dummy_ipv6_gtpu_ipv4_packet_offsets;
7976 if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6) {
7977 *pkt = dummy_ipv6_gtpu_ipv6_packet;
7978 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
7979 *offsets = dummy_ipv6_gtpu_ipv6_packet_offsets;
7983 if (tun_type == ICE_ALL_TUNNELS) {
7984 *pkt = dummy_gre_udp_packet;
7985 *pkt_len = sizeof(dummy_gre_udp_packet);
7986 *offsets = dummy_gre_udp_packet_offsets;
7990 if (tun_type == ICE_SW_TUN_NVGRE || gre) {
7992 *pkt = dummy_gre_tcp_packet;
7993 *pkt_len = sizeof(dummy_gre_tcp_packet);
7994 *offsets = dummy_gre_tcp_packet_offsets;
7998 *pkt = dummy_gre_udp_packet;
7999 *pkt_len = sizeof(dummy_gre_udp_packet);
8000 *offsets = dummy_gre_udp_packet_offsets;
8004 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
8005 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
8006 tun_type == ICE_SW_TUN_GENEVE_VLAN ||
8007 tun_type == ICE_SW_TUN_VXLAN_VLAN) {
8009 *pkt = dummy_udp_tun_tcp_packet;
8010 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
8011 *offsets = dummy_udp_tun_tcp_packet_offsets;
8015 *pkt = dummy_udp_tun_udp_packet;
8016 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
8017 *offsets = dummy_udp_tun_udp_packet_offsets;
8023 *pkt = dummy_vlan_udp_packet;
8024 *pkt_len = sizeof(dummy_vlan_udp_packet);
8025 *offsets = dummy_vlan_udp_packet_offsets;
8028 *pkt = dummy_udp_packet;
8029 *pkt_len = sizeof(dummy_udp_packet);
8030 *offsets = dummy_udp_packet_offsets;
8032 } else if (udp && ipv6) {
8034 *pkt = dummy_vlan_udp_ipv6_packet;
8035 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
8036 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
8039 *pkt = dummy_udp_ipv6_packet;
8040 *pkt_len = sizeof(dummy_udp_ipv6_packet);
8041 *offsets = dummy_udp_ipv6_packet_offsets;
8043 } else if ((tcp && ipv6) || ipv6) {
8045 *pkt = dummy_vlan_tcp_ipv6_packet;
8046 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
8047 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
8050 *pkt = dummy_tcp_ipv6_packet;
8051 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
8052 *offsets = dummy_tcp_ipv6_packet_offsets;
8057 *pkt = dummy_vlan_tcp_packet;
8058 *pkt_len = sizeof(dummy_vlan_tcp_packet);
8059 *offsets = dummy_vlan_tcp_packet_offsets;
8061 *pkt = dummy_tcp_packet;
8062 *pkt_len = sizeof(dummy_tcp_packet);
8063 *offsets = dummy_tcp_packet_offsets;
8068 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
8070 * @lkups: lookup elements or match criteria for the advanced recipe, one
8071 * structure per protocol header
8072 * @lkups_cnt: number of protocols
8073 * @s_rule: stores rule information from the match criteria
8074 * @dummy_pkt: dummy packet to fill according to filter match criteria
8075 * @pkt_len: packet length of dummy packet
8076 * @offsets: offset info for the dummy packet
8078 static enum ice_status
8079 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
8080 struct ice_aqc_sw_rules_elem *s_rule,
8081 const u8 *dummy_pkt, u16 pkt_len,
8082 const struct ice_dummy_pkt_offsets *offsets)
8087 /* Start with a packet with a pre-defined/dummy content. Then, fill
8088 * in the header values to be looked up or matched.
8090 pkt = s_rule->pdata.lkup_tx_rx.hdr;
8092 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
8094 for (i = 0; i < lkups_cnt; i++) {
8095 enum ice_protocol_type type;
8096 u16 offset = 0, len = 0, j;
8099 /* find the start of this layer; it should be found since this
8100 * was already checked when search for the dummy packet
8102 type = lkups[i].type;
8103 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
8104 if (type == offsets[j].type) {
8105 offset = offsets[j].offset;
8110 /* this should never happen in a correct calling sequence */
8112 return ICE_ERR_PARAM;
8114 switch (lkups[i].type) {
8117 len = sizeof(struct ice_ether_hdr);
8120 len = sizeof(struct ice_ethtype_hdr);
8124 len = sizeof(struct ice_vlan_hdr);
8128 len = sizeof(struct ice_ipv4_hdr);
8132 len = sizeof(struct ice_ipv6_hdr);
8137 len = sizeof(struct ice_l4_hdr);
8140 len = sizeof(struct ice_sctp_hdr);
8143 len = sizeof(struct ice_nvgre);
8148 len = sizeof(struct ice_udp_tnl_hdr);
8152 case ICE_GTP_NO_PAY:
8153 len = sizeof(struct ice_udp_gtp_hdr);
8156 len = sizeof(struct ice_pppoe_hdr);
8159 len = sizeof(struct ice_esp_hdr);
8162 len = sizeof(struct ice_nat_t_hdr);
8165 len = sizeof(struct ice_ah_hdr);
8168 len = sizeof(struct ice_l2tpv3_sess_hdr);
8171 return ICE_ERR_PARAM;
8174 /* the length should be a word multiple */
8175 if (len % ICE_BYTES_PER_WORD)
8178 /* We have the offset to the header start, the length, the
8179 * caller's header values and mask. Use this information to
8180 * copy the data into the dummy packet appropriately based on
8181 * the mask. Note that we need to only write the bits as
8182 * indicated by the mask to make sure we don't improperly write
8183 * over any significant packet data.
8185 for (j = 0; j < len / sizeof(u16); j++)
8186 if (((u16 *)&lkups[i].m_u)[j])
8187 ((u16 *)(pkt + offset))[j] =
8188 (((u16 *)(pkt + offset))[j] &
8189 ~((u16 *)&lkups[i].m_u)[j]) |
8190 (((u16 *)&lkups[i].h_u)[j] &
8191 ((u16 *)&lkups[i].m_u)[j]);
8194 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
8200 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
8201 * @hw: pointer to the hardware structure
8202 * @tun_type: tunnel type
8203 * @pkt: dummy packet to fill in
8204 * @offsets: offset info for the dummy packet
8206 static enum ice_status
8207 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
8208 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
8213 case ICE_SW_TUN_AND_NON_TUN:
8214 case ICE_SW_TUN_VXLAN_GPE:
8215 case ICE_SW_TUN_VXLAN:
8216 case ICE_SW_TUN_VXLAN_VLAN:
8217 case ICE_SW_TUN_UDP:
8218 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
8222 case ICE_SW_TUN_GENEVE:
8223 case ICE_SW_TUN_GENEVE_VLAN:
8224 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
8229 /* Nothing needs to be done for this tunnel type */
8233 /* Find the outer UDP protocol header and insert the port number */
8234 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
8235 if (offsets[i].type == ICE_UDP_OF) {
8236 struct ice_l4_hdr *hdr;
8239 offset = offsets[i].offset;
8240 hdr = (struct ice_l4_hdr *)&pkt[offset];
8241 hdr->dst_port = CPU_TO_BE16(open_port);
8251 * ice_find_adv_rule_entry - Search a rule entry
8252 * @hw: pointer to the hardware structure
8253 * @lkups: lookup elements or match criteria for the advanced recipe, one
8254 * structure per protocol header
8255 * @lkups_cnt: number of protocols
8256 * @recp_id: recipe ID for which we are finding the rule
8257 * @rinfo: other information regarding the rule e.g. priority and action info
8259 * Helper function to search for a given advance rule entry
8260 * Returns pointer to entry storing the rule if found
8262 static struct ice_adv_fltr_mgmt_list_entry *
8263 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8264 u16 lkups_cnt, u16 recp_id,
8265 struct ice_adv_rule_info *rinfo)
8267 struct ice_adv_fltr_mgmt_list_entry *list_itr;
8268 struct ice_switch_info *sw = hw->switch_info;
8271 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
8272 ice_adv_fltr_mgmt_list_entry, list_entry) {
8273 bool lkups_matched = true;
8275 if (lkups_cnt != list_itr->lkups_cnt)
8277 for (i = 0; i < list_itr->lkups_cnt; i++)
8278 if (memcmp(&list_itr->lkups[i], &lkups[i],
8280 lkups_matched = false;
8283 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
8284 rinfo->tun_type == list_itr->rule_info.tun_type &&
8292 * ice_adv_add_update_vsi_list
8293 * @hw: pointer to the hardware structure
8294 * @m_entry: pointer to current adv filter management list entry
8295 * @cur_fltr: filter information from the book keeping entry
8296 * @new_fltr: filter information with the new VSI to be added
8298 * Call AQ command to add or update previously created VSI list with new VSI.
8300 * Helper function to do book keeping associated with adding filter information
8301 * The algorithm to do the booking keeping is described below :
8302 * When a VSI needs to subscribe to a given advanced filter
8303 * if only one VSI has been added till now
8304 * Allocate a new VSI list and add two VSIs
8305 * to this list using switch rule command
8306 * Update the previously created switch rule with the
8307 * newly created VSI list ID
8308 * if a VSI list was previously created
8309 * Add the new VSI to the previously created VSI list set
8310 * using the update switch rule command
8312 static enum ice_status
8313 ice_adv_add_update_vsi_list(struct ice_hw *hw,
8314 struct ice_adv_fltr_mgmt_list_entry *m_entry,
8315 struct ice_adv_rule_info *cur_fltr,
8316 struct ice_adv_rule_info *new_fltr)
8318 enum ice_status status;
8319 u16 vsi_list_id = 0;
8321 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
8322 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
8323 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
8324 return ICE_ERR_NOT_IMPL;
8326 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
8327 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
8328 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
8329 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
8330 return ICE_ERR_NOT_IMPL;
8332 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
8333 /* Only one entry existed in the mapping and it was not already
8334 * a part of a VSI list. So, create a VSI list with the old and
8337 struct ice_fltr_info tmp_fltr;
8338 u16 vsi_handle_arr[2];
8340 /* A rule already exists with the new VSI being added */
8341 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
8342 new_fltr->sw_act.fwd_id.hw_vsi_id)
8343 return ICE_ERR_ALREADY_EXISTS;
8345 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
8346 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
8347 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
8353 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
8354 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
8355 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
8356 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
8357 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
8358 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
8360 /* Update the previous switch rule of "forward to VSI" to
8363 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
8367 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
8368 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
8369 m_entry->vsi_list_info =
8370 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
8373 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
8375 if (!m_entry->vsi_list_info)
8378 /* A rule already exists with the new VSI being added */
8379 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
8382 /* Update the previously created VSI list set with
8383 * the new VSI ID passed in
8385 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
8387 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
8389 ice_aqc_opc_update_sw_rules,
8391 /* update VSI list mapping info with new VSI ID */
8393 ice_set_bit(vsi_handle,
8394 m_entry->vsi_list_info->vsi_map);
8397 m_entry->vsi_count++;
8402 * ice_add_adv_rule - helper function to create an advanced switch rule
8403 * @hw: pointer to the hardware structure
8404 * @lkups: information on the words that needs to be looked up. All words
8405 * together makes one recipe
8406 * @lkups_cnt: num of entries in the lkups array
8407 * @rinfo: other information related to the rule that needs to be programmed
8408 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
8409 * ignored is case of error.
8411 * This function can program only 1 rule at a time. The lkups is used to
8412 * describe the all the words that forms the "lookup" portion of the recipe.
8413 * These words can span multiple protocols. Callers to this function need to
8414 * pass in a list of protocol headers with lookup information along and mask
8415 * that determines which words are valid from the given protocol header.
8416 * rinfo describes other information related to this rule such as forwarding
8417 * IDs, priority of this rule, etc.
8420 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8421 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
8422 struct ice_rule_query_data *added_entry)
8424 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
8425 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
8426 const struct ice_dummy_pkt_offsets *pkt_offsets;
8427 struct ice_aqc_sw_rules_elem *s_rule = NULL;
8428 struct LIST_HEAD_TYPE *rule_head;
8429 struct ice_switch_info *sw;
8430 enum ice_status status;
8431 const u8 *pkt = NULL;
8437 /* Initialize profile to result index bitmap */
8438 if (!hw->switch_info->prof_res_bm_init) {
8439 hw->switch_info->prof_res_bm_init = 1;
8440 ice_init_prof_result_bm(hw);
8443 prof_rule = ice_is_prof_rule(rinfo->tun_type);
8444 if (!prof_rule && !lkups_cnt)
8445 return ICE_ERR_PARAM;
8447 /* get # of words we need to match */
8449 for (i = 0; i < lkups_cnt; i++) {
8452 ptr = (u16 *)&lkups[i].m_u;
8453 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
8459 if (word_cnt > ICE_MAX_CHAIN_WORDS)
8460 return ICE_ERR_PARAM;
8462 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
8463 return ICE_ERR_PARAM;
8466 /* make sure that we can locate a dummy packet */
8467 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
8470 status = ICE_ERR_PARAM;
8471 goto err_ice_add_adv_rule;
8474 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
8475 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
8476 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
8477 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
8480 vsi_handle = rinfo->sw_act.vsi_handle;
8481 if (!ice_is_vsi_valid(hw, vsi_handle))
8482 return ICE_ERR_PARAM;
8484 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
8485 rinfo->sw_act.fwd_id.hw_vsi_id =
8486 ice_get_hw_vsi_num(hw, vsi_handle);
8487 if (rinfo->sw_act.flag & ICE_FLTR_TX)
8488 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
8490 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
8493 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
8495 /* we have to add VSI to VSI_LIST and increment vsi_count.
8496 * Also Update VSI list so that we can change forwarding rule
8497 * if the rule already exists, we will check if it exists with
8498 * same vsi_id, if not then add it to the VSI list if it already
8499 * exists if not then create a VSI list and add the existing VSI
8500 * ID and the new VSI ID to the list
8501 * We will add that VSI to the list
8503 status = ice_adv_add_update_vsi_list(hw, m_entry,
8504 &m_entry->rule_info,
8507 added_entry->rid = rid;
8508 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
8509 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
8513 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
8514 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
8516 return ICE_ERR_NO_MEMORY;
8517 act |= ICE_SINGLE_ACT_LAN_ENABLE;
8518 switch (rinfo->sw_act.fltr_act) {
8519 case ICE_FWD_TO_VSI:
8520 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
8521 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
8522 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
8525 act |= ICE_SINGLE_ACT_TO_Q;
8526 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
8527 ICE_SINGLE_ACT_Q_INDEX_M;
8529 case ICE_FWD_TO_QGRP:
8530 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
8531 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
8532 act |= ICE_SINGLE_ACT_TO_Q;
8533 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
8534 ICE_SINGLE_ACT_Q_INDEX_M;
8535 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
8536 ICE_SINGLE_ACT_Q_REGION_M;
8538 case ICE_DROP_PACKET:
8539 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
8540 ICE_SINGLE_ACT_VALID_BIT;
8543 status = ICE_ERR_CFG;
8544 goto err_ice_add_adv_rule;
8547 /* set the rule LOOKUP type based on caller specified 'RX'
8548 * instead of hardcoding it to be either LOOKUP_TX/RX
8550 * for 'RX' set the source to be the port number
8551 * for 'TX' set the source to be the source HW VSI number (determined
8555 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
8556 s_rule->pdata.lkup_tx_rx.src =
8557 CPU_TO_LE16(hw->port_info->lport);
8559 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
8560 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
8563 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
8564 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
8566 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
8567 pkt_len, pkt_offsets);
8569 goto err_ice_add_adv_rule;
8571 if (rinfo->tun_type != ICE_NON_TUN &&
8572 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
8573 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
8574 s_rule->pdata.lkup_tx_rx.hdr,
8577 goto err_ice_add_adv_rule;
8580 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
8581 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
8584 goto err_ice_add_adv_rule;
8585 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
8586 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
8588 status = ICE_ERR_NO_MEMORY;
8589 goto err_ice_add_adv_rule;
8592 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
8593 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
8594 ICE_NONDMA_TO_NONDMA);
8595 if (!adv_fltr->lkups && !prof_rule) {
8596 status = ICE_ERR_NO_MEMORY;
8597 goto err_ice_add_adv_rule;
8600 adv_fltr->lkups_cnt = lkups_cnt;
8601 adv_fltr->rule_info = *rinfo;
8602 adv_fltr->rule_info.fltr_rule_id =
8603 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
8604 sw = hw->switch_info;
8605 sw->recp_list[rid].adv_rule = true;
8606 rule_head = &sw->recp_list[rid].filt_rules;
8608 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
8609 adv_fltr->vsi_count = 1;
8611 /* Add rule entry to book keeping list */
8612 LIST_ADD(&adv_fltr->list_entry, rule_head);
8614 added_entry->rid = rid;
8615 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
8616 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
8618 err_ice_add_adv_rule:
8619 if (status && adv_fltr) {
8620 ice_free(hw, adv_fltr->lkups);
8621 ice_free(hw, adv_fltr);
8624 ice_free(hw, s_rule);
8630 * ice_adv_rem_update_vsi_list
8631 * @hw: pointer to the hardware structure
8632 * @vsi_handle: VSI handle of the VSI to remove
8633 * @fm_list: filter management entry for which the VSI list management needs to
8636 static enum ice_status
8637 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
8638 struct ice_adv_fltr_mgmt_list_entry *fm_list)
8640 struct ice_vsi_list_map_info *vsi_list_info;
8641 enum ice_sw_lkup_type lkup_type;
8642 enum ice_status status;
8645 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
8646 fm_list->vsi_count == 0)
8647 return ICE_ERR_PARAM;
8649 /* A rule with the VSI being removed does not exist */
8650 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
8651 return ICE_ERR_DOES_NOT_EXIST;
8653 lkup_type = ICE_SW_LKUP_LAST;
8654 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
8655 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
8656 ice_aqc_opc_update_sw_rules,
8661 fm_list->vsi_count--;
8662 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
8663 vsi_list_info = fm_list->vsi_list_info;
8664 if (fm_list->vsi_count == 1) {
8665 struct ice_fltr_info tmp_fltr;
8668 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
8670 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
8671 return ICE_ERR_OUT_OF_RANGE;
8673 /* Make sure VSI list is empty before removing it below */
8674 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
8676 ice_aqc_opc_update_sw_rules,
8681 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
8682 tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
8683 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
8684 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
8685 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
8686 tmp_fltr.fwd_id.hw_vsi_id =
8687 ice_get_hw_vsi_num(hw, rem_vsi_handle);
8688 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
8689 ice_get_hw_vsi_num(hw, rem_vsi_handle);
8690 fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
8692 /* Update the previous switch rule of "MAC forward to VSI" to
8693 * "MAC fwd to VSI list"
8695 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
8697 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
8698 tmp_fltr.fwd_id.hw_vsi_id, status);
8701 fm_list->vsi_list_info->ref_cnt--;
8703 /* Remove the VSI list since it is no longer used */
8704 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
8706 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
8707 vsi_list_id, status);
8711 LIST_DEL(&vsi_list_info->list_entry);
8712 ice_free(hw, vsi_list_info);
8713 fm_list->vsi_list_info = NULL;
8720 * ice_rem_adv_rule - removes existing advanced switch rule
8721 * @hw: pointer to the hardware structure
8722 * @lkups: information on the words that needs to be looked up. All words
8723 * together makes one recipe
8724 * @lkups_cnt: num of entries in the lkups array
8725 * @rinfo: Its the pointer to the rule information for the rule
8727 * This function can be used to remove 1 rule at a time. The lkups is
8728 * used to describe all the words that forms the "lookup" portion of the
8729 * rule. These words can span multiple protocols. Callers to this function
8730 * need to pass in a list of protocol headers with lookup information along
8731 * and mask that determines which words are valid from the given protocol
8732 * header. rinfo describes other information related to this rule such as
8733 * forwarding IDs, priority of this rule, etc.
8736 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8737 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
8739 struct ice_adv_fltr_mgmt_list_entry *list_elem;
8740 struct ice_prot_lkup_ext lkup_exts;
8741 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
8742 enum ice_status status = ICE_SUCCESS;
8743 bool remove_rule = false;
8744 u16 i, rid, vsi_handle;
8746 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
8747 for (i = 0; i < lkups_cnt; i++) {
8750 if (lkups[i].type >= ICE_PROTOCOL_LAST)
8753 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
8758 /* Create any special protocol/offset pairs, such as looking at tunnel
8759 * bits by extracting metadata
8761 status = ice_add_special_words(rinfo, &lkup_exts);
8765 rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
8766 /* If did not find a recipe that match the existing criteria */
8767 if (rid == ICE_MAX_NUM_RECIPES)
8768 return ICE_ERR_PARAM;
8770 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
8771 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
8772 /* the rule is already removed */
8775 ice_acquire_lock(rule_lock);
8776 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
8778 } else if (list_elem->vsi_count > 1) {
8779 remove_rule = false;
8780 vsi_handle = rinfo->sw_act.vsi_handle;
8781 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
8783 vsi_handle = rinfo->sw_act.vsi_handle;
8784 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
8786 ice_release_lock(rule_lock);
8789 if (list_elem->vsi_count == 0)
8792 ice_release_lock(rule_lock);
8794 struct ice_aqc_sw_rules_elem *s_rule;
8797 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
8798 s_rule = (struct ice_aqc_sw_rules_elem *)
8799 ice_malloc(hw, rule_buf_sz);
8801 return ICE_ERR_NO_MEMORY;
8802 s_rule->pdata.lkup_tx_rx.act = 0;
8803 s_rule->pdata.lkup_tx_rx.index =
8804 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
8805 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
8806 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
8808 ice_aqc_opc_remove_sw_rules, NULL);
8809 if (status == ICE_SUCCESS || status == ICE_ERR_DOES_NOT_EXIST) {
8810 struct ice_switch_info *sw = hw->switch_info;
8812 ice_acquire_lock(rule_lock);
8813 LIST_DEL(&list_elem->list_entry);
8814 ice_free(hw, list_elem->lkups);
8815 ice_free(hw, list_elem);
8816 ice_release_lock(rule_lock);
8817 if (LIST_EMPTY(&sw->recp_list[rid].filt_rules))
8818 sw->recp_list[rid].adv_rule = false;
8820 ice_free(hw, s_rule);
8826 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
8827 * @hw: pointer to the hardware structure
8828 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
8830 * This function is used to remove 1 rule at a time. The removal is based on
8831 * the remove_entry parameter. This function will remove rule for a given
8832 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
8835 ice_rem_adv_rule_by_id(struct ice_hw *hw,
8836 struct ice_rule_query_data *remove_entry)
8838 struct ice_adv_fltr_mgmt_list_entry *list_itr;
8839 struct LIST_HEAD_TYPE *list_head;
8840 struct ice_adv_rule_info rinfo;
8841 struct ice_switch_info *sw;
8843 sw = hw->switch_info;
8844 if (!sw->recp_list[remove_entry->rid].recp_created)
8845 return ICE_ERR_PARAM;
8846 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
8847 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
8849 if (list_itr->rule_info.fltr_rule_id ==
8850 remove_entry->rule_id) {
8851 rinfo = list_itr->rule_info;
8852 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
8853 return ice_rem_adv_rule(hw, list_itr->lkups,
8854 list_itr->lkups_cnt, &rinfo);
8857 /* either list is empty or unable to find rule */
8858 return ICE_ERR_DOES_NOT_EXIST;
8862 * ice_rem_adv_for_vsi - removes existing advanced switch rules for a
8864 * @hw: pointer to the hardware structure
8865 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
8867 * This function is used to remove all the rules for a given VSI and as soon
8868 * as removing a rule fails, it will return immediately with the error code,
8869 * else it will return ICE_SUCCESS
8871 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
8873 struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
8874 struct ice_vsi_list_map_info *map_info;
8875 struct LIST_HEAD_TYPE *list_head;
8876 struct ice_adv_rule_info rinfo;
8877 struct ice_switch_info *sw;
8878 enum ice_status status;
8881 sw = hw->switch_info;
8882 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
8883 if (!sw->recp_list[rid].recp_created)
8885 if (!sw->recp_list[rid].adv_rule)
8888 list_head = &sw->recp_list[rid].filt_rules;
8889 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp_entry, list_head,
8890 ice_adv_fltr_mgmt_list_entry,
8892 rinfo = list_itr->rule_info;
8894 if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
8895 map_info = list_itr->vsi_list_info;
8899 if (!ice_is_bit_set(map_info->vsi_map,
8902 } else if (rinfo.sw_act.vsi_handle != vsi_handle) {
8906 rinfo.sw_act.vsi_handle = vsi_handle;
8907 status = ice_rem_adv_rule(hw, list_itr->lkups,
8908 list_itr->lkups_cnt, &rinfo);
8918 * ice_replay_fltr - Replay all the filters stored by a specific list head
8919 * @hw: pointer to the hardware structure
8920 * @list_head: list for which filters needs to be replayed
8921 * @recp_id: Recipe ID for which rules need to be replayed
8923 static enum ice_status
8924 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
8926 struct ice_fltr_mgmt_list_entry *itr;
8927 enum ice_status status = ICE_SUCCESS;
8928 struct ice_sw_recipe *recp_list;
8929 u8 lport = hw->port_info->lport;
8930 struct LIST_HEAD_TYPE l_head;
8932 if (LIST_EMPTY(list_head))
8935 recp_list = &hw->switch_info->recp_list[recp_id];
8936 /* Move entries from the given list_head to a temporary l_head so that
8937 * they can be replayed. Otherwise when trying to re-add the same
8938 * filter, the function will return already exists
8940 LIST_REPLACE_INIT(list_head, &l_head);
8942 /* Mark the given list_head empty by reinitializing it so filters
8943 * could be added again by *handler
8945 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
8947 struct ice_fltr_list_entry f_entry;
8950 f_entry.fltr_info = itr->fltr_info;
8951 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
8952 status = ice_add_rule_internal(hw, recp_list, lport,
8954 if (status != ICE_SUCCESS)
8959 /* Add a filter per VSI separately */
8960 ice_for_each_set_bit(vsi_handle, itr->vsi_list_info->vsi_map,
8962 if (!ice_is_vsi_valid(hw, vsi_handle))
8965 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
8966 f_entry.fltr_info.vsi_handle = vsi_handle;
8967 f_entry.fltr_info.fwd_id.hw_vsi_id =
8968 ice_get_hw_vsi_num(hw, vsi_handle);
8969 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
8970 if (recp_id == ICE_SW_LKUP_VLAN)
8971 status = ice_add_vlan_internal(hw, recp_list,
8974 status = ice_add_rule_internal(hw, recp_list,
8977 if (status != ICE_SUCCESS)
8982 /* Clear the filter management list */
8983 ice_rem_sw_rule_info(hw, &l_head);
8988 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
8989 * @hw: pointer to the hardware structure
8991 * NOTE: This function does not clean up partially added filters on error.
8992 * It is up to caller of the function to issue a reset or fail early.
8994 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
8996 struct ice_switch_info *sw = hw->switch_info;
8997 enum ice_status status = ICE_SUCCESS;
9000 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9001 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
9003 status = ice_replay_fltr(hw, i, head);
9004 if (status != ICE_SUCCESS)
9011 * ice_replay_vsi_fltr - Replay filters for requested VSI
9012 * @hw: pointer to the hardware structure
9013 * @pi: pointer to port information structure
9014 * @sw: pointer to switch info struct for which function replays filters
9015 * @vsi_handle: driver VSI handle
9016 * @recp_id: Recipe ID for which rules need to be replayed
9017 * @list_head: list for which filters need to be replayed
9019 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
9020 * It is required to pass valid VSI handle.
9022 static enum ice_status
9023 ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
9024 struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
9025 struct LIST_HEAD_TYPE *list_head)
9027 struct ice_fltr_mgmt_list_entry *itr;
9028 enum ice_status status = ICE_SUCCESS;
9029 struct ice_sw_recipe *recp_list;
9032 if (LIST_EMPTY(list_head))
9034 recp_list = &sw->recp_list[recp_id];
9035 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
9037 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
9039 struct ice_fltr_list_entry f_entry;
9041 f_entry.fltr_info = itr->fltr_info;
9042 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
9043 itr->fltr_info.vsi_handle == vsi_handle) {
9044 /* update the src in case it is VSI num */
9045 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
9046 f_entry.fltr_info.src = hw_vsi_id;
9047 status = ice_add_rule_internal(hw, recp_list,
9050 if (status != ICE_SUCCESS)
9054 if (!itr->vsi_list_info ||
9055 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
9057 /* Clearing it so that the logic can add it back */
9058 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
9059 f_entry.fltr_info.vsi_handle = vsi_handle;
9060 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
9061 /* update the src in case it is VSI num */
9062 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
9063 f_entry.fltr_info.src = hw_vsi_id;
9064 if (recp_id == ICE_SW_LKUP_VLAN)
9065 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
9067 status = ice_add_rule_internal(hw, recp_list,
9070 if (status != ICE_SUCCESS)
9078 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
9079 * @hw: pointer to the hardware structure
9080 * @vsi_handle: driver VSI handle
9081 * @list_head: list for which filters need to be replayed
9083 * Replay the advanced rule for the given VSI.
9085 static enum ice_status
9086 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
9087 struct LIST_HEAD_TYPE *list_head)
9089 struct ice_rule_query_data added_entry = { 0 };
9090 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
9091 enum ice_status status = ICE_SUCCESS;
9093 if (LIST_EMPTY(list_head))
9095 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
9097 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
9098 u16 lk_cnt = adv_fltr->lkups_cnt;
9100 if (vsi_handle != rinfo->sw_act.vsi_handle)
9102 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
9111 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
9112 * @hw: pointer to the hardware structure
9113 * @pi: pointer to port information structure
9114 * @vsi_handle: driver VSI handle
9116 * Replays filters for requested VSI via vsi_handle.
9119 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
9122 struct ice_switch_info *sw = hw->switch_info;
9123 enum ice_status status;
9126 /* Update the recipes that were created */
9127 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9128 struct LIST_HEAD_TYPE *head;
9130 head = &sw->recp_list[i].filt_replay_rules;
9131 if (!sw->recp_list[i].adv_rule)
9132 status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
9135 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
9136 if (status != ICE_SUCCESS)
9144 * ice_rm_all_sw_replay_rule - helper function to delete filter replay rules
9145 * @hw: pointer to the HW struct
9146 * @sw: pointer to switch info struct for which function removes filters
9148 * Deletes the filter replay rules for given switch
9150 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
9157 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9158 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
9159 struct LIST_HEAD_TYPE *l_head;
9161 l_head = &sw->recp_list[i].filt_replay_rules;
9162 if (!sw->recp_list[i].adv_rule)
9163 ice_rem_sw_rule_info(hw, l_head);
9165 ice_rem_adv_rule_info(hw, l_head);
9171 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
9172 * @hw: pointer to the HW struct
9174 * Deletes the filter replay rules.
9176 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
9178 ice_rm_sw_replay_rule_info(hw, hw->switch_info);