1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2021 Intel Corporation
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
12 #define ICE_MAX_VLAN_ID 0xFFF
13 #define ICE_IPV4_NVGRE_PROTO_ID 0x002F
14 #define ICE_PPP_IPV6_PROTO_ID 0x0057
15 #define ICE_IPV6_ETHER_ID 0x86DD
16 #define ICE_TCP_PROTO_ID 0x06
17 #define ICE_ETH_P_8021Q 0x8100
19 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
20 * struct to configure any switch filter rules.
21 * {DA (6 bytes), SA(6 bytes),
22 * Ether type (2 bytes for header without VLAN tag) OR
23 * VLAN tag (4 bytes for header with VLAN tag) }
25 * Word on Hardcoded values
26 * byte 0 = 0x2: to identify it as locally administered DA MAC
27 * byte 6 = 0x2: to identify it as locally administered SA MAC
28 * byte 12 = 0x81 & byte 13 = 0x00:
29 * In case of VLAN filter first two bytes defines ether type (0x8100)
30 * and remaining two bytes are placeholder for programming a given VLAN ID
31 * In case of Ether type filter it is treated as header without VLAN tag
32 * and byte 12 and 13 is used to program a given Ether type instead
34 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
38 struct ice_dummy_pkt_offsets {
39 enum ice_protocol_type type;
40 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
43 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
46 { ICE_IPV4_OFOS, 14 },
51 { ICE_PROTOCOL_LAST, 0 },
54 static const u8 dummy_gre_tcp_packet[] = {
55 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
56 0x00, 0x00, 0x00, 0x00,
57 0x00, 0x00, 0x00, 0x00,
59 0x08, 0x00, /* ICE_ETYPE_OL 12 */
61 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
62 0x00, 0x00, 0x00, 0x00,
63 0x00, 0x2F, 0x00, 0x00,
64 0x00, 0x00, 0x00, 0x00,
65 0x00, 0x00, 0x00, 0x00,
67 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
68 0x00, 0x00, 0x00, 0x00,
70 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
71 0x00, 0x00, 0x00, 0x00,
72 0x00, 0x00, 0x00, 0x00,
75 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
76 0x00, 0x00, 0x00, 0x00,
77 0x00, 0x06, 0x00, 0x00,
78 0x00, 0x00, 0x00, 0x00,
79 0x00, 0x00, 0x00, 0x00,
81 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
82 0x00, 0x00, 0x00, 0x00,
83 0x00, 0x00, 0x00, 0x00,
84 0x50, 0x02, 0x20, 0x00,
85 0x00, 0x00, 0x00, 0x00
88 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
91 { ICE_IPV4_OFOS, 14 },
96 { ICE_PROTOCOL_LAST, 0 },
99 static const u8 dummy_gre_udp_packet[] = {
100 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
101 0x00, 0x00, 0x00, 0x00,
102 0x00, 0x00, 0x00, 0x00,
104 0x08, 0x00, /* ICE_ETYPE_OL 12 */
106 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
107 0x00, 0x00, 0x00, 0x00,
108 0x00, 0x2F, 0x00, 0x00,
109 0x00, 0x00, 0x00, 0x00,
110 0x00, 0x00, 0x00, 0x00,
112 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
113 0x00, 0x00, 0x00, 0x00,
115 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
116 0x00, 0x00, 0x00, 0x00,
117 0x00, 0x00, 0x00, 0x00,
120 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
121 0x00, 0x00, 0x00, 0x00,
122 0x00, 0x11, 0x00, 0x00,
123 0x00, 0x00, 0x00, 0x00,
124 0x00, 0x00, 0x00, 0x00,
126 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
127 0x00, 0x08, 0x00, 0x00,
130 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
132 { ICE_ETYPE_OL, 12 },
133 { ICE_IPV4_OFOS, 14 },
137 { ICE_VXLAN_GPE, 42 },
141 { ICE_PROTOCOL_LAST, 0 },
144 static const u8 dummy_udp_tun_tcp_packet[] = {
145 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
146 0x00, 0x00, 0x00, 0x00,
147 0x00, 0x00, 0x00, 0x00,
149 0x08, 0x00, /* ICE_ETYPE_OL 12 */
151 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
152 0x00, 0x01, 0x00, 0x00,
153 0x40, 0x11, 0x00, 0x00,
154 0x00, 0x00, 0x00, 0x00,
155 0x00, 0x00, 0x00, 0x00,
157 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
158 0x00, 0x46, 0x00, 0x00,
160 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
161 0x00, 0x00, 0x00, 0x00,
163 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
164 0x00, 0x00, 0x00, 0x00,
165 0x00, 0x00, 0x00, 0x00,
168 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
169 0x00, 0x01, 0x00, 0x00,
170 0x40, 0x06, 0x00, 0x00,
171 0x00, 0x00, 0x00, 0x00,
172 0x00, 0x00, 0x00, 0x00,
174 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
175 0x00, 0x00, 0x00, 0x00,
176 0x00, 0x00, 0x00, 0x00,
177 0x50, 0x02, 0x20, 0x00,
178 0x00, 0x00, 0x00, 0x00
181 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
183 { ICE_ETYPE_OL, 12 },
184 { ICE_IPV4_OFOS, 14 },
188 { ICE_VXLAN_GPE, 42 },
191 { ICE_UDP_ILOS, 84 },
192 { ICE_PROTOCOL_LAST, 0 },
195 static const u8 dummy_udp_tun_udp_packet[] = {
196 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
197 0x00, 0x00, 0x00, 0x00,
198 0x00, 0x00, 0x00, 0x00,
200 0x08, 0x00, /* ICE_ETYPE_OL 12 */
202 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
203 0x00, 0x01, 0x00, 0x00,
204 0x00, 0x11, 0x00, 0x00,
205 0x00, 0x00, 0x00, 0x00,
206 0x00, 0x00, 0x00, 0x00,
208 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
209 0x00, 0x3a, 0x00, 0x00,
211 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
212 0x00, 0x00, 0x00, 0x00,
214 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
215 0x00, 0x00, 0x00, 0x00,
216 0x00, 0x00, 0x00, 0x00,
219 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
220 0x00, 0x01, 0x00, 0x00,
221 0x00, 0x11, 0x00, 0x00,
222 0x00, 0x00, 0x00, 0x00,
223 0x00, 0x00, 0x00, 0x00,
225 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
226 0x00, 0x08, 0x00, 0x00,
229 /* offset info for MAC + IPv4 + UDP dummy packet */
230 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
232 { ICE_ETYPE_OL, 12 },
233 { ICE_IPV4_OFOS, 14 },
234 { ICE_UDP_ILOS, 34 },
235 { ICE_PROTOCOL_LAST, 0 },
238 /* Dummy packet for MAC + IPv4 + UDP */
239 static const u8 dummy_udp_packet[] = {
240 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
241 0x00, 0x00, 0x00, 0x00,
242 0x00, 0x00, 0x00, 0x00,
244 0x08, 0x00, /* ICE_ETYPE_OL 12 */
246 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
247 0x00, 0x01, 0x00, 0x00,
248 0x00, 0x11, 0x00, 0x00,
249 0x00, 0x00, 0x00, 0x00,
250 0x00, 0x00, 0x00, 0x00,
252 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
253 0x00, 0x08, 0x00, 0x00,
255 0x00, 0x00, /* 2 bytes for 4 byte alignment */
258 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
259 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
261 { ICE_ETYPE_OL, 12 },
262 { ICE_VLAN_OFOS, 14 },
263 { ICE_IPV4_OFOS, 18 },
264 { ICE_UDP_ILOS, 38 },
265 { ICE_PROTOCOL_LAST, 0 },
268 /* C-tag (802.1Q), IPv4:UDP dummy packet */
269 static const u8 dummy_vlan_udp_packet[] = {
270 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
271 0x00, 0x00, 0x00, 0x00,
272 0x00, 0x00, 0x00, 0x00,
274 0x81, 0x00, /* ICE_ETYPE_OL 12 */
276 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
278 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
279 0x00, 0x01, 0x00, 0x00,
280 0x00, 0x11, 0x00, 0x00,
281 0x00, 0x00, 0x00, 0x00,
282 0x00, 0x00, 0x00, 0x00,
284 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
285 0x00, 0x08, 0x00, 0x00,
287 0x00, 0x00, /* 2 bytes for 4 byte alignment */
290 /* offset info for MAC + IPv4 + TCP dummy packet */
291 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
293 { ICE_ETYPE_OL, 12 },
294 { ICE_IPV4_OFOS, 14 },
296 { ICE_PROTOCOL_LAST, 0 },
299 /* Dummy packet for MAC + IPv4 + TCP */
300 static const u8 dummy_tcp_packet[] = {
301 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
302 0x00, 0x00, 0x00, 0x00,
303 0x00, 0x00, 0x00, 0x00,
305 0x08, 0x00, /* ICE_ETYPE_OL 12 */
307 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
308 0x00, 0x01, 0x00, 0x00,
309 0x00, 0x06, 0x00, 0x00,
310 0x00, 0x00, 0x00, 0x00,
311 0x00, 0x00, 0x00, 0x00,
313 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
314 0x00, 0x00, 0x00, 0x00,
315 0x00, 0x00, 0x00, 0x00,
316 0x50, 0x00, 0x00, 0x00,
317 0x00, 0x00, 0x00, 0x00,
319 0x00, 0x00, /* 2 bytes for 4 byte alignment */
322 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
323 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
325 { ICE_ETYPE_OL, 12 },
326 { ICE_VLAN_OFOS, 14 },
327 { ICE_IPV4_OFOS, 18 },
329 { ICE_PROTOCOL_LAST, 0 },
332 /* C-tag (802.1Q), IPv4:TCP dummy packet */
333 static const u8 dummy_vlan_tcp_packet[] = {
334 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
335 0x00, 0x00, 0x00, 0x00,
336 0x00, 0x00, 0x00, 0x00,
338 0x81, 0x00, /* ICE_ETYPE_OL 12 */
340 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
342 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
343 0x00, 0x01, 0x00, 0x00,
344 0x00, 0x06, 0x00, 0x00,
345 0x00, 0x00, 0x00, 0x00,
346 0x00, 0x00, 0x00, 0x00,
348 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
349 0x00, 0x00, 0x00, 0x00,
350 0x00, 0x00, 0x00, 0x00,
351 0x50, 0x00, 0x00, 0x00,
352 0x00, 0x00, 0x00, 0x00,
354 0x00, 0x00, /* 2 bytes for 4 byte alignment */
357 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
359 { ICE_ETYPE_OL, 12 },
360 { ICE_IPV6_OFOS, 14 },
362 { ICE_PROTOCOL_LAST, 0 },
365 static const u8 dummy_tcp_ipv6_packet[] = {
366 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
367 0x00, 0x00, 0x00, 0x00,
368 0x00, 0x00, 0x00, 0x00,
370 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
372 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
373 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00,
379 0x00, 0x00, 0x00, 0x00,
380 0x00, 0x00, 0x00, 0x00,
381 0x00, 0x00, 0x00, 0x00,
383 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
384 0x00, 0x00, 0x00, 0x00,
385 0x00, 0x00, 0x00, 0x00,
386 0x50, 0x00, 0x00, 0x00,
387 0x00, 0x00, 0x00, 0x00,
389 0x00, 0x00, /* 2 bytes for 4 byte alignment */
392 /* C-tag (802.1Q): IPv6 + TCP */
393 static const struct ice_dummy_pkt_offsets
394 dummy_vlan_tcp_ipv6_packet_offsets[] = {
396 { ICE_ETYPE_OL, 12 },
397 { ICE_VLAN_OFOS, 14 },
398 { ICE_IPV6_OFOS, 18 },
400 { ICE_PROTOCOL_LAST, 0 },
403 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
404 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
405 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
406 0x00, 0x00, 0x00, 0x00,
407 0x00, 0x00, 0x00, 0x00,
409 0x81, 0x00, /* ICE_ETYPE_OL 12 */
411 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
413 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
414 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
415 0x00, 0x00, 0x00, 0x00,
416 0x00, 0x00, 0x00, 0x00,
417 0x00, 0x00, 0x00, 0x00,
418 0x00, 0x00, 0x00, 0x00,
419 0x00, 0x00, 0x00, 0x00,
420 0x00, 0x00, 0x00, 0x00,
421 0x00, 0x00, 0x00, 0x00,
422 0x00, 0x00, 0x00, 0x00,
424 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
425 0x00, 0x00, 0x00, 0x00,
426 0x00, 0x00, 0x00, 0x00,
427 0x50, 0x00, 0x00, 0x00,
428 0x00, 0x00, 0x00, 0x00,
430 0x00, 0x00, /* 2 bytes for 4 byte alignment */
434 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
436 { ICE_ETYPE_OL, 12 },
437 { ICE_IPV6_OFOS, 14 },
438 { ICE_UDP_ILOS, 54 },
439 { ICE_PROTOCOL_LAST, 0 },
442 /* IPv6 + UDP dummy packet */
443 static const u8 dummy_udp_ipv6_packet[] = {
444 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
445 0x00, 0x00, 0x00, 0x00,
446 0x00, 0x00, 0x00, 0x00,
448 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
450 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
451 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
452 0x00, 0x00, 0x00, 0x00,
453 0x00, 0x00, 0x00, 0x00,
454 0x00, 0x00, 0x00, 0x00,
455 0x00, 0x00, 0x00, 0x00,
456 0x00, 0x00, 0x00, 0x00,
457 0x00, 0x00, 0x00, 0x00,
458 0x00, 0x00, 0x00, 0x00,
459 0x00, 0x00, 0x00, 0x00,
461 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
462 0x00, 0x10, 0x00, 0x00,
464 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
465 0x00, 0x00, 0x00, 0x00,
467 0x00, 0x00, /* 2 bytes for 4 byte alignment */
470 /* C-tag (802.1Q): IPv6 + UDP */
471 static const struct ice_dummy_pkt_offsets
472 dummy_vlan_udp_ipv6_packet_offsets[] = {
474 { ICE_ETYPE_OL, 12 },
475 { ICE_VLAN_OFOS, 14 },
476 { ICE_IPV6_OFOS, 18 },
477 { ICE_UDP_ILOS, 58 },
478 { ICE_PROTOCOL_LAST, 0 },
481 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
482 static const u8 dummy_vlan_udp_ipv6_packet[] = {
483 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
484 0x00, 0x00, 0x00, 0x00,
485 0x00, 0x00, 0x00, 0x00,
487 0x81, 0x00, /* ICE_ETYPE_OL 12 */
489 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
491 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
492 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
493 0x00, 0x00, 0x00, 0x00,
494 0x00, 0x00, 0x00, 0x00,
495 0x00, 0x00, 0x00, 0x00,
496 0x00, 0x00, 0x00, 0x00,
497 0x00, 0x00, 0x00, 0x00,
498 0x00, 0x00, 0x00, 0x00,
499 0x00, 0x00, 0x00, 0x00,
500 0x00, 0x00, 0x00, 0x00,
502 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
503 0x00, 0x08, 0x00, 0x00,
505 0x00, 0x00, /* 2 bytes for 4 byte alignment */
508 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
509 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_tcp_packet_offsets[] = {
511 { ICE_IPV4_OFOS, 14 },
516 { ICE_PROTOCOL_LAST, 0 },
519 static const u8 dummy_ipv4_gtpu_ipv4_tcp_packet[] = {
520 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
521 0x00, 0x00, 0x00, 0x00,
522 0x00, 0x00, 0x00, 0x00,
525 0x45, 0x00, 0x00, 0x58, /* IP 14 */
526 0x00, 0x00, 0x00, 0x00,
527 0x00, 0x11, 0x00, 0x00,
528 0x00, 0x00, 0x00, 0x00,
529 0x00, 0x00, 0x00, 0x00,
531 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
532 0x00, 0x44, 0x00, 0x00,
534 0x34, 0xff, 0x00, 0x34, /* GTP-U Header 42 */
535 0x00, 0x00, 0x00, 0x00,
536 0x00, 0x00, 0x00, 0x85,
538 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
539 0x00, 0x00, 0x00, 0x00,
541 0x45, 0x00, 0x00, 0x28, /* IP 62 */
542 0x00, 0x00, 0x00, 0x00,
543 0x00, 0x06, 0x00, 0x00,
544 0x00, 0x00, 0x00, 0x00,
545 0x00, 0x00, 0x00, 0x00,
547 0x00, 0x00, 0x00, 0x00, /* TCP 82 */
548 0x00, 0x00, 0x00, 0x00,
549 0x00, 0x00, 0x00, 0x00,
550 0x50, 0x00, 0x00, 0x00,
551 0x00, 0x00, 0x00, 0x00,
553 0x00, 0x00, /* 2 bytes for 4 byte alignment */
556 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
557 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_udp_packet_offsets[] = {
559 { ICE_IPV4_OFOS, 14 },
563 { ICE_UDP_ILOS, 82 },
564 { ICE_PROTOCOL_LAST, 0 },
567 static const u8 dummy_ipv4_gtpu_ipv4_udp_packet[] = {
568 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
569 0x00, 0x00, 0x00, 0x00,
570 0x00, 0x00, 0x00, 0x00,
573 0x45, 0x00, 0x00, 0x4c, /* IP 14 */
574 0x00, 0x00, 0x00, 0x00,
575 0x00, 0x11, 0x00, 0x00,
576 0x00, 0x00, 0x00, 0x00,
577 0x00, 0x00, 0x00, 0x00,
579 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
580 0x00, 0x38, 0x00, 0x00,
582 0x34, 0xff, 0x00, 0x28, /* GTP-U Header 42 */
583 0x00, 0x00, 0x00, 0x00,
584 0x00, 0x00, 0x00, 0x85,
586 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
587 0x00, 0x00, 0x00, 0x00,
589 0x45, 0x00, 0x00, 0x1c, /* IP 62 */
590 0x00, 0x00, 0x00, 0x00,
591 0x00, 0x11, 0x00, 0x00,
592 0x00, 0x00, 0x00, 0x00,
593 0x00, 0x00, 0x00, 0x00,
595 0x00, 0x00, 0x00, 0x00, /* UDP 82 */
596 0x00, 0x08, 0x00, 0x00,
598 0x00, 0x00, /* 2 bytes for 4 byte alignment */
601 /* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
602 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_tcp_packet_offsets[] = {
604 { ICE_IPV4_OFOS, 14 },
609 { ICE_PROTOCOL_LAST, 0 },
612 static const u8 dummy_ipv4_gtpu_ipv6_tcp_packet[] = {
613 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
614 0x00, 0x00, 0x00, 0x00,
615 0x00, 0x00, 0x00, 0x00,
618 0x45, 0x00, 0x00, 0x6c, /* IP 14 */
619 0x00, 0x00, 0x00, 0x00,
620 0x00, 0x11, 0x00, 0x00,
621 0x00, 0x00, 0x00, 0x00,
622 0x00, 0x00, 0x00, 0x00,
624 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
625 0x00, 0x58, 0x00, 0x00,
627 0x34, 0xff, 0x00, 0x48, /* GTP-U Header 42 */
628 0x00, 0x00, 0x00, 0x00,
629 0x00, 0x00, 0x00, 0x85,
631 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
632 0x00, 0x00, 0x00, 0x00,
634 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
635 0x00, 0x14, 0x06, 0x00,
636 0x00, 0x00, 0x00, 0x00,
637 0x00, 0x00, 0x00, 0x00,
638 0x00, 0x00, 0x00, 0x00,
639 0x00, 0x00, 0x00, 0x00,
640 0x00, 0x00, 0x00, 0x00,
641 0x00, 0x00, 0x00, 0x00,
642 0x00, 0x00, 0x00, 0x00,
643 0x00, 0x00, 0x00, 0x00,
645 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
646 0x00, 0x00, 0x00, 0x00,
647 0x00, 0x00, 0x00, 0x00,
648 0x50, 0x00, 0x00, 0x00,
649 0x00, 0x00, 0x00, 0x00,
651 0x00, 0x00, /* 2 bytes for 4 byte alignment */
654 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_udp_packet_offsets[] = {
656 { ICE_IPV4_OFOS, 14 },
660 { ICE_UDP_ILOS, 102 },
661 { ICE_PROTOCOL_LAST, 0 },
664 static const u8 dummy_ipv4_gtpu_ipv6_udp_packet[] = {
665 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
666 0x00, 0x00, 0x00, 0x00,
667 0x00, 0x00, 0x00, 0x00,
670 0x45, 0x00, 0x00, 0x60, /* IP 14 */
671 0x00, 0x00, 0x00, 0x00,
672 0x00, 0x11, 0x00, 0x00,
673 0x00, 0x00, 0x00, 0x00,
674 0x00, 0x00, 0x00, 0x00,
676 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
677 0x00, 0x4c, 0x00, 0x00,
679 0x34, 0xff, 0x00, 0x3c, /* GTP-U Header 42 */
680 0x00, 0x00, 0x00, 0x00,
681 0x00, 0x00, 0x00, 0x85,
683 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
684 0x00, 0x00, 0x00, 0x00,
686 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
687 0x00, 0x08, 0x11, 0x00,
688 0x00, 0x00, 0x00, 0x00,
689 0x00, 0x00, 0x00, 0x00,
690 0x00, 0x00, 0x00, 0x00,
691 0x00, 0x00, 0x00, 0x00,
692 0x00, 0x00, 0x00, 0x00,
693 0x00, 0x00, 0x00, 0x00,
694 0x00, 0x00, 0x00, 0x00,
695 0x00, 0x00, 0x00, 0x00,
697 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
698 0x00, 0x08, 0x00, 0x00,
700 0x00, 0x00, /* 2 bytes for 4 byte alignment */
703 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_tcp_packet_offsets[] = {
705 { ICE_IPV6_OFOS, 14 },
710 { ICE_PROTOCOL_LAST, 0 },
713 static const u8 dummy_ipv6_gtpu_ipv4_tcp_packet[] = {
714 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
715 0x00, 0x00, 0x00, 0x00,
716 0x00, 0x00, 0x00, 0x00,
719 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
720 0x00, 0x44, 0x11, 0x00,
721 0x00, 0x00, 0x00, 0x00,
722 0x00, 0x00, 0x00, 0x00,
723 0x00, 0x00, 0x00, 0x00,
724 0x00, 0x00, 0x00, 0x00,
725 0x00, 0x00, 0x00, 0x00,
726 0x00, 0x00, 0x00, 0x00,
727 0x00, 0x00, 0x00, 0x00,
728 0x00, 0x00, 0x00, 0x00,
730 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
731 0x00, 0x44, 0x00, 0x00,
733 0x34, 0xff, 0x00, 0x34, /* GTP-U Header 62 */
734 0x00, 0x00, 0x00, 0x00,
735 0x00, 0x00, 0x00, 0x85,
737 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
738 0x00, 0x00, 0x00, 0x00,
740 0x45, 0x00, 0x00, 0x28, /* IP 82 */
741 0x00, 0x00, 0x00, 0x00,
742 0x00, 0x06, 0x00, 0x00,
743 0x00, 0x00, 0x00, 0x00,
744 0x00, 0x00, 0x00, 0x00,
746 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
747 0x00, 0x00, 0x00, 0x00,
748 0x00, 0x00, 0x00, 0x00,
749 0x50, 0x00, 0x00, 0x00,
750 0x00, 0x00, 0x00, 0x00,
752 0x00, 0x00, /* 2 bytes for 4 byte alignment */
755 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_udp_packet_offsets[] = {
757 { ICE_IPV6_OFOS, 14 },
761 { ICE_UDP_ILOS, 102 },
762 { ICE_PROTOCOL_LAST, 0 },
765 static const u8 dummy_ipv6_gtpu_ipv4_udp_packet[] = {
766 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
767 0x00, 0x00, 0x00, 0x00,
768 0x00, 0x00, 0x00, 0x00,
771 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
772 0x00, 0x38, 0x11, 0x00,
773 0x00, 0x00, 0x00, 0x00,
774 0x00, 0x00, 0x00, 0x00,
775 0x00, 0x00, 0x00, 0x00,
776 0x00, 0x00, 0x00, 0x00,
777 0x00, 0x00, 0x00, 0x00,
778 0x00, 0x00, 0x00, 0x00,
779 0x00, 0x00, 0x00, 0x00,
780 0x00, 0x00, 0x00, 0x00,
782 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
783 0x00, 0x38, 0x00, 0x00,
785 0x34, 0xff, 0x00, 0x28, /* GTP-U Header 62 */
786 0x00, 0x00, 0x00, 0x00,
787 0x00, 0x00, 0x00, 0x85,
789 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
790 0x00, 0x00, 0x00, 0x00,
792 0x45, 0x00, 0x00, 0x1c, /* IP 82 */
793 0x00, 0x00, 0x00, 0x00,
794 0x00, 0x11, 0x00, 0x00,
795 0x00, 0x00, 0x00, 0x00,
796 0x00, 0x00, 0x00, 0x00,
798 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
799 0x00, 0x08, 0x00, 0x00,
801 0x00, 0x00, /* 2 bytes for 4 byte alignment */
804 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_tcp_packet_offsets[] = {
806 { ICE_IPV6_OFOS, 14 },
811 { ICE_PROTOCOL_LAST, 0 },
814 static const u8 dummy_ipv6_gtpu_ipv6_tcp_packet[] = {
815 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
816 0x00, 0x00, 0x00, 0x00,
817 0x00, 0x00, 0x00, 0x00,
820 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
821 0x00, 0x58, 0x11, 0x00,
822 0x00, 0x00, 0x00, 0x00,
823 0x00, 0x00, 0x00, 0x00,
824 0x00, 0x00, 0x00, 0x00,
825 0x00, 0x00, 0x00, 0x00,
826 0x00, 0x00, 0x00, 0x00,
827 0x00, 0x00, 0x00, 0x00,
828 0x00, 0x00, 0x00, 0x00,
829 0x00, 0x00, 0x00, 0x00,
831 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
832 0x00, 0x58, 0x00, 0x00,
834 0x34, 0xff, 0x00, 0x48, /* GTP-U Header 62 */
835 0x00, 0x00, 0x00, 0x00,
836 0x00, 0x00, 0x00, 0x85,
838 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
839 0x00, 0x00, 0x00, 0x00,
841 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
842 0x00, 0x14, 0x06, 0x00,
843 0x00, 0x00, 0x00, 0x00,
844 0x00, 0x00, 0x00, 0x00,
845 0x00, 0x00, 0x00, 0x00,
846 0x00, 0x00, 0x00, 0x00,
847 0x00, 0x00, 0x00, 0x00,
848 0x00, 0x00, 0x00, 0x00,
849 0x00, 0x00, 0x00, 0x00,
850 0x00, 0x00, 0x00, 0x00,
852 0x00, 0x00, 0x00, 0x00, /* TCP 122 */
853 0x00, 0x00, 0x00, 0x00,
854 0x00, 0x00, 0x00, 0x00,
855 0x50, 0x00, 0x00, 0x00,
856 0x00, 0x00, 0x00, 0x00,
858 0x00, 0x00, /* 2 bytes for 4 byte alignment */
861 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_udp_packet_offsets[] = {
863 { ICE_IPV6_OFOS, 14 },
867 { ICE_UDP_ILOS, 102 },
868 { ICE_PROTOCOL_LAST, 0 },
871 static const u8 dummy_ipv6_gtpu_ipv6_udp_packet[] = {
872 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
873 0x00, 0x00, 0x00, 0x00,
874 0x00, 0x00, 0x00, 0x00,
877 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
878 0x00, 0x4c, 0x11, 0x00,
879 0x00, 0x00, 0x00, 0x00,
880 0x00, 0x00, 0x00, 0x00,
881 0x00, 0x00, 0x00, 0x00,
882 0x00, 0x00, 0x00, 0x00,
883 0x00, 0x00, 0x00, 0x00,
884 0x00, 0x00, 0x00, 0x00,
885 0x00, 0x00, 0x00, 0x00,
886 0x00, 0x00, 0x00, 0x00,
888 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
889 0x00, 0x4c, 0x00, 0x00,
891 0x34, 0xff, 0x00, 0x3c, /* GTP-U Header 62 */
892 0x00, 0x00, 0x00, 0x00,
893 0x00, 0x00, 0x00, 0x85,
895 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
896 0x00, 0x00, 0x00, 0x00,
898 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
899 0x00, 0x08, 0x11, 0x00,
900 0x00, 0x00, 0x00, 0x00,
901 0x00, 0x00, 0x00, 0x00,
902 0x00, 0x00, 0x00, 0x00,
903 0x00, 0x00, 0x00, 0x00,
904 0x00, 0x00, 0x00, 0x00,
905 0x00, 0x00, 0x00, 0x00,
906 0x00, 0x00, 0x00, 0x00,
907 0x00, 0x00, 0x00, 0x00,
909 0x00, 0x00, 0x00, 0x00, /* UDP 122 */
910 0x00, 0x08, 0x00, 0x00,
912 0x00, 0x00, /* 2 bytes for 4 byte alignment */
915 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_packet_offsets[] = {
917 { ICE_IPV4_OFOS, 14 },
921 { ICE_PROTOCOL_LAST, 0 },
924 static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
925 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
926 0x00, 0x00, 0x00, 0x00,
927 0x00, 0x00, 0x00, 0x00,
930 0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
931 0x00, 0x00, 0x40, 0x00,
932 0x40, 0x11, 0x00, 0x00,
933 0x00, 0x00, 0x00, 0x00,
934 0x00, 0x00, 0x00, 0x00,
936 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
937 0x00, 0x00, 0x00, 0x00,
939 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
940 0x00, 0x00, 0x00, 0x00,
941 0x00, 0x00, 0x00, 0x85,
943 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
944 0x00, 0x00, 0x00, 0x00,
946 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
947 0x00, 0x00, 0x40, 0x00,
948 0x40, 0x00, 0x00, 0x00,
949 0x00, 0x00, 0x00, 0x00,
950 0x00, 0x00, 0x00, 0x00,
955 struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_packet_offsets[] = {
957 { ICE_IPV4_OFOS, 14 },
961 { ICE_PROTOCOL_LAST, 0 },
964 static const u8 dummy_ipv4_gtpu_ipv6_packet[] = {
965 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
966 0x00, 0x00, 0x00, 0x00,
967 0x00, 0x00, 0x00, 0x00,
970 0x45, 0x00, 0x00, 0x58, /* ICE_IPV4_OFOS 14 */
971 0x00, 0x00, 0x40, 0x00,
972 0x40, 0x11, 0x00, 0x00,
973 0x00, 0x00, 0x00, 0x00,
974 0x00, 0x00, 0x00, 0x00,
976 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
977 0x00, 0x00, 0x00, 0x00,
979 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
980 0x00, 0x00, 0x00, 0x00,
981 0x00, 0x00, 0x00, 0x85,
983 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
984 0x00, 0x00, 0x00, 0x00,
986 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 62 */
987 0x00, 0x00, 0x3b, 0x00,
988 0x00, 0x00, 0x00, 0x00,
989 0x00, 0x00, 0x00, 0x00,
990 0x00, 0x00, 0x00, 0x00,
991 0x00, 0x00, 0x00, 0x00,
992 0x00, 0x00, 0x00, 0x00,
993 0x00, 0x00, 0x00, 0x00,
994 0x00, 0x00, 0x00, 0x00,
995 0x00, 0x00, 0x00, 0x00,
1001 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_packet_offsets[] = {
1002 { ICE_MAC_OFOS, 0 },
1003 { ICE_IPV6_OFOS, 14 },
1006 { ICE_IPV4_IL, 82 },
1007 { ICE_PROTOCOL_LAST, 0 },
1010 static const u8 dummy_ipv6_gtpu_ipv4_packet[] = {
1011 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1012 0x00, 0x00, 0x00, 0x00,
1013 0x00, 0x00, 0x00, 0x00,
1016 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1017 0x00, 0x58, 0x11, 0x00, /* Next header UDP*/
1018 0x00, 0x00, 0x00, 0x00,
1019 0x00, 0x00, 0x00, 0x00,
1020 0x00, 0x00, 0x00, 0x00,
1021 0x00, 0x00, 0x00, 0x00,
1022 0x00, 0x00, 0x00, 0x00,
1023 0x00, 0x00, 0x00, 0x00,
1024 0x00, 0x00, 0x00, 0x00,
1025 0x00, 0x00, 0x00, 0x00,
1027 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1028 0x00, 0x00, 0x00, 0x00,
1030 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 62 */
1031 0x00, 0x00, 0x00, 0x00,
1032 0x00, 0x00, 0x00, 0x85,
1034 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1035 0x00, 0x00, 0x00, 0x00,
1037 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 82 */
1038 0x00, 0x00, 0x40, 0x00,
1039 0x40, 0x00, 0x00, 0x00,
1040 0x00, 0x00, 0x00, 0x00,
1041 0x00, 0x00, 0x00, 0x00,
1047 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_packet_offsets[] = {
1048 { ICE_MAC_OFOS, 0 },
1049 { ICE_IPV6_OFOS, 14 },
1052 { ICE_IPV6_IL, 82 },
1053 { ICE_PROTOCOL_LAST, 0 },
1056 static const u8 dummy_ipv6_gtpu_ipv6_packet[] = {
1057 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1058 0x00, 0x00, 0x00, 0x00,
1059 0x00, 0x00, 0x00, 0x00,
1062 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1063 0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
1064 0x00, 0x00, 0x00, 0x00,
1065 0x00, 0x00, 0x00, 0x00,
1066 0x00, 0x00, 0x00, 0x00,
1067 0x00, 0x00, 0x00, 0x00,
1068 0x00, 0x00, 0x00, 0x00,
1069 0x00, 0x00, 0x00, 0x00,
1070 0x00, 0x00, 0x00, 0x00,
1071 0x00, 0x00, 0x00, 0x00,
1073 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1074 0x00, 0x00, 0x00, 0x00,
1076 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 62 */
1077 0x00, 0x00, 0x00, 0x00,
1078 0x00, 0x00, 0x00, 0x85,
1080 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1081 0x00, 0x00, 0x00, 0x00,
1083 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 82 */
1084 0x00, 0x00, 0x3b, 0x00,
1085 0x00, 0x00, 0x00, 0x00,
1086 0x00, 0x00, 0x00, 0x00,
1087 0x00, 0x00, 0x00, 0x00,
1088 0x00, 0x00, 0x00, 0x00,
1089 0x00, 0x00, 0x00, 0x00,
1090 0x00, 0x00, 0x00, 0x00,
1091 0x00, 0x00, 0x00, 0x00,
1092 0x00, 0x00, 0x00, 0x00,
1097 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
1098 { ICE_MAC_OFOS, 0 },
1099 { ICE_IPV4_OFOS, 14 },
1102 { ICE_PROTOCOL_LAST, 0 },
1105 static const u8 dummy_udp_gtp_packet[] = {
1106 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1107 0x00, 0x00, 0x00, 0x00,
1108 0x00, 0x00, 0x00, 0x00,
1111 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
1112 0x00, 0x00, 0x00, 0x00,
1113 0x00, 0x11, 0x00, 0x00,
1114 0x00, 0x00, 0x00, 0x00,
1115 0x00, 0x00, 0x00, 0x00,
1117 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
1118 0x00, 0x1c, 0x00, 0x00,
1120 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
1121 0x00, 0x00, 0x00, 0x00,
1122 0x00, 0x00, 0x00, 0x85,
1124 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1125 0x00, 0x00, 0x00, 0x00,
1128 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
1129 { ICE_MAC_OFOS, 0 },
1130 { ICE_IPV4_OFOS, 14 },
1132 { ICE_GTP_NO_PAY, 42 },
1133 { ICE_PROTOCOL_LAST, 0 },
1137 struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
1138 { ICE_MAC_OFOS, 0 },
1139 { ICE_IPV6_OFOS, 14 },
1141 { ICE_GTP_NO_PAY, 62 },
1142 { ICE_PROTOCOL_LAST, 0 },
1145 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
1146 { ICE_MAC_OFOS, 0 },
1147 { ICE_ETYPE_OL, 12 },
1148 { ICE_VLAN_OFOS, 14},
1150 { ICE_PROTOCOL_LAST, 0 },
1153 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv4_offsets[] = {
1154 { ICE_MAC_OFOS, 0 },
1155 { ICE_ETYPE_OL, 12 },
1156 { ICE_VLAN_OFOS, 14},
1158 { ICE_IPV4_OFOS, 26 },
1159 { ICE_PROTOCOL_LAST, 0 },
1162 static const u8 dummy_pppoe_ipv4_packet[] = {
1163 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1164 0x00, 0x00, 0x00, 0x00,
1165 0x00, 0x00, 0x00, 0x00,
1167 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1169 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1171 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1174 0x00, 0x21, /* PPP Link Layer 24 */
1176 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_OFOS 26 */
1177 0x00, 0x00, 0x00, 0x00,
1178 0x00, 0x00, 0x00, 0x00,
1179 0x00, 0x00, 0x00, 0x00,
1180 0x00, 0x00, 0x00, 0x00,
1182 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1186 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_tcp_packet_offsets[] = {
1187 { ICE_MAC_OFOS, 0 },
1188 { ICE_ETYPE_OL, 12 },
1189 { ICE_VLAN_OFOS, 14},
1191 { ICE_IPV4_OFOS, 26 },
1193 { ICE_PROTOCOL_LAST, 0 },
1196 static const u8 dummy_pppoe_ipv4_tcp_packet[] = {
1197 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1198 0x00, 0x00, 0x00, 0x00,
1199 0x00, 0x00, 0x00, 0x00,
1201 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1203 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1205 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1208 0x00, 0x21, /* PPP Link Layer 24 */
1210 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 26 */
1211 0x00, 0x01, 0x00, 0x00,
1212 0x00, 0x06, 0x00, 0x00,
1213 0x00, 0x00, 0x00, 0x00,
1214 0x00, 0x00, 0x00, 0x00,
1216 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 46 */
1217 0x00, 0x00, 0x00, 0x00,
1218 0x00, 0x00, 0x00, 0x00,
1219 0x50, 0x00, 0x00, 0x00,
1220 0x00, 0x00, 0x00, 0x00,
1222 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1226 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_udp_packet_offsets[] = {
1227 { ICE_MAC_OFOS, 0 },
1228 { ICE_ETYPE_OL, 12 },
1229 { ICE_VLAN_OFOS, 14},
1231 { ICE_IPV4_OFOS, 26 },
1232 { ICE_UDP_ILOS, 46 },
1233 { ICE_PROTOCOL_LAST, 0 },
1236 static const u8 dummy_pppoe_ipv4_udp_packet[] = {
1237 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1238 0x00, 0x00, 0x00, 0x00,
1239 0x00, 0x00, 0x00, 0x00,
1241 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1243 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1245 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1248 0x00, 0x21, /* PPP Link Layer 24 */
1250 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 26 */
1251 0x00, 0x01, 0x00, 0x00,
1252 0x00, 0x11, 0x00, 0x00,
1253 0x00, 0x00, 0x00, 0x00,
1254 0x00, 0x00, 0x00, 0x00,
1256 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 46 */
1257 0x00, 0x08, 0x00, 0x00,
1259 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1262 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_offsets[] = {
1263 { ICE_MAC_OFOS, 0 },
1264 { ICE_ETYPE_OL, 12 },
1265 { ICE_VLAN_OFOS, 14},
1267 { ICE_IPV6_OFOS, 26 },
1268 { ICE_PROTOCOL_LAST, 0 },
1271 static const u8 dummy_pppoe_ipv6_packet[] = {
1272 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1273 0x00, 0x00, 0x00, 0x00,
1274 0x00, 0x00, 0x00, 0x00,
1276 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1278 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1280 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1283 0x00, 0x57, /* PPP Link Layer 24 */
1285 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
1286 0x00, 0x00, 0x3b, 0x00,
1287 0x00, 0x00, 0x00, 0x00,
1288 0x00, 0x00, 0x00, 0x00,
1289 0x00, 0x00, 0x00, 0x00,
1290 0x00, 0x00, 0x00, 0x00,
1291 0x00, 0x00, 0x00, 0x00,
1292 0x00, 0x00, 0x00, 0x00,
1293 0x00, 0x00, 0x00, 0x00,
1294 0x00, 0x00, 0x00, 0x00,
1296 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1300 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_tcp_offsets[] = {
1301 { ICE_MAC_OFOS, 0 },
1302 { ICE_ETYPE_OL, 12 },
1303 { ICE_VLAN_OFOS, 14},
1305 { ICE_IPV6_OFOS, 26 },
1307 { ICE_PROTOCOL_LAST, 0 },
1310 static const u8 dummy_pppoe_ipv6_tcp_packet[] = {
1311 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1312 0x00, 0x00, 0x00, 0x00,
1313 0x00, 0x00, 0x00, 0x00,
1315 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1317 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1319 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1322 0x00, 0x57, /* PPP Link Layer 24 */
1324 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
1325 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
1326 0x00, 0x00, 0x00, 0x00,
1327 0x00, 0x00, 0x00, 0x00,
1328 0x00, 0x00, 0x00, 0x00,
1329 0x00, 0x00, 0x00, 0x00,
1330 0x00, 0x00, 0x00, 0x00,
1331 0x00, 0x00, 0x00, 0x00,
1332 0x00, 0x00, 0x00, 0x00,
1333 0x00, 0x00, 0x00, 0x00,
1335 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 66 */
1336 0x00, 0x00, 0x00, 0x00,
1337 0x00, 0x00, 0x00, 0x00,
1338 0x50, 0x00, 0x00, 0x00,
1339 0x00, 0x00, 0x00, 0x00,
1341 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1345 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_udp_offsets[] = {
1346 { ICE_MAC_OFOS, 0 },
1347 { ICE_ETYPE_OL, 12 },
1348 { ICE_VLAN_OFOS, 14},
1350 { ICE_IPV6_OFOS, 26 },
1351 { ICE_UDP_ILOS, 66 },
1352 { ICE_PROTOCOL_LAST, 0 },
1355 static const u8 dummy_pppoe_ipv6_udp_packet[] = {
1356 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1357 0x00, 0x00, 0x00, 0x00,
1358 0x00, 0x00, 0x00, 0x00,
1360 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1362 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1364 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1367 0x00, 0x57, /* PPP Link Layer 24 */
1369 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
1370 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
1371 0x00, 0x00, 0x00, 0x00,
1372 0x00, 0x00, 0x00, 0x00,
1373 0x00, 0x00, 0x00, 0x00,
1374 0x00, 0x00, 0x00, 0x00,
1375 0x00, 0x00, 0x00, 0x00,
1376 0x00, 0x00, 0x00, 0x00,
1377 0x00, 0x00, 0x00, 0x00,
1378 0x00, 0x00, 0x00, 0x00,
1380 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 66 */
1381 0x00, 0x08, 0x00, 0x00,
1383 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1386 static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
1387 { ICE_MAC_OFOS, 0 },
1388 { ICE_IPV4_OFOS, 14 },
1390 { ICE_PROTOCOL_LAST, 0 },
1393 static const u8 dummy_ipv4_esp_pkt[] = {
1394 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1395 0x00, 0x00, 0x00, 0x00,
1396 0x00, 0x00, 0x00, 0x00,
1399 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 14 */
1400 0x00, 0x00, 0x40, 0x00,
1401 0x40, 0x32, 0x00, 0x00,
1402 0x00, 0x00, 0x00, 0x00,
1403 0x00, 0x00, 0x00, 0x00,
1405 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
1406 0x00, 0x00, 0x00, 0x00,
1407 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1410 static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
1411 { ICE_MAC_OFOS, 0 },
1412 { ICE_IPV6_OFOS, 14 },
1414 { ICE_PROTOCOL_LAST, 0 },
1417 static const u8 dummy_ipv6_esp_pkt[] = {
1418 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1419 0x00, 0x00, 0x00, 0x00,
1420 0x00, 0x00, 0x00, 0x00,
1423 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1424 0x00, 0x08, 0x32, 0x00, /* Next header ESP */
1425 0x00, 0x00, 0x00, 0x00,
1426 0x00, 0x00, 0x00, 0x00,
1427 0x00, 0x00, 0x00, 0x00,
1428 0x00, 0x00, 0x00, 0x00,
1429 0x00, 0x00, 0x00, 0x00,
1430 0x00, 0x00, 0x00, 0x00,
1431 0x00, 0x00, 0x00, 0x00,
1432 0x00, 0x00, 0x00, 0x00,
1434 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
1435 0x00, 0x00, 0x00, 0x00,
1436 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1439 static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
1440 { ICE_MAC_OFOS, 0 },
1441 { ICE_IPV4_OFOS, 14 },
1443 { ICE_PROTOCOL_LAST, 0 },
1446 static const u8 dummy_ipv4_ah_pkt[] = {
1447 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1448 0x00, 0x00, 0x00, 0x00,
1449 0x00, 0x00, 0x00, 0x00,
1452 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1453 0x00, 0x00, 0x40, 0x00,
1454 0x40, 0x33, 0x00, 0x00,
1455 0x00, 0x00, 0x00, 0x00,
1456 0x00, 0x00, 0x00, 0x00,
1458 0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
1459 0x00, 0x00, 0x00, 0x00,
1460 0x00, 0x00, 0x00, 0x00,
1461 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1464 static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
1465 { ICE_MAC_OFOS, 0 },
1466 { ICE_IPV6_OFOS, 14 },
1468 { ICE_PROTOCOL_LAST, 0 },
1471 static const u8 dummy_ipv6_ah_pkt[] = {
1472 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1473 0x00, 0x00, 0x00, 0x00,
1474 0x00, 0x00, 0x00, 0x00,
1477 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1478 0x00, 0x0c, 0x33, 0x00, /* Next header AH */
1479 0x00, 0x00, 0x00, 0x00,
1480 0x00, 0x00, 0x00, 0x00,
1481 0x00, 0x00, 0x00, 0x00,
1482 0x00, 0x00, 0x00, 0x00,
1483 0x00, 0x00, 0x00, 0x00,
1484 0x00, 0x00, 0x00, 0x00,
1485 0x00, 0x00, 0x00, 0x00,
1486 0x00, 0x00, 0x00, 0x00,
1488 0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
1489 0x00, 0x00, 0x00, 0x00,
1490 0x00, 0x00, 0x00, 0x00,
1491 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1494 static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
1495 { ICE_MAC_OFOS, 0 },
1496 { ICE_IPV4_OFOS, 14 },
1497 { ICE_UDP_ILOS, 34 },
1499 { ICE_PROTOCOL_LAST, 0 },
1502 static const u8 dummy_ipv4_nat_pkt[] = {
1503 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1504 0x00, 0x00, 0x00, 0x00,
1505 0x00, 0x00, 0x00, 0x00,
1508 0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_IL 14 */
1509 0x00, 0x00, 0x40, 0x00,
1510 0x40, 0x11, 0x00, 0x00,
1511 0x00, 0x00, 0x00, 0x00,
1512 0x00, 0x00, 0x00, 0x00,
1514 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34 */
1515 0x00, 0x00, 0x00, 0x00,
1517 0x00, 0x00, 0x00, 0x00,
1518 0x00, 0x00, 0x00, 0x00,
1519 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1522 static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
1523 { ICE_MAC_OFOS, 0 },
1524 { ICE_IPV6_OFOS, 14 },
1525 { ICE_UDP_ILOS, 54 },
1527 { ICE_PROTOCOL_LAST, 0 },
1530 static const u8 dummy_ipv6_nat_pkt[] = {
1531 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1532 0x00, 0x00, 0x00, 0x00,
1533 0x00, 0x00, 0x00, 0x00,
1536 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1537 0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */
1538 0x00, 0x00, 0x00, 0x00,
1539 0x00, 0x00, 0x00, 0x00,
1540 0x00, 0x00, 0x00, 0x00,
1541 0x00, 0x00, 0x00, 0x00,
1542 0x00, 0x00, 0x00, 0x00,
1543 0x00, 0x00, 0x00, 0x00,
1544 0x00, 0x00, 0x00, 0x00,
1545 0x00, 0x00, 0x00, 0x00,
1547 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54 */
1548 0x00, 0x00, 0x00, 0x00,
1550 0x00, 0x00, 0x00, 0x00,
1551 0x00, 0x00, 0x00, 0x00,
1552 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1556 static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
1557 { ICE_MAC_OFOS, 0 },
1558 { ICE_IPV4_OFOS, 14 },
1560 { ICE_PROTOCOL_LAST, 0 },
1563 static const u8 dummy_ipv4_l2tpv3_pkt[] = {
1564 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1565 0x00, 0x00, 0x00, 0x00,
1566 0x00, 0x00, 0x00, 0x00,
1569 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1570 0x00, 0x00, 0x40, 0x00,
1571 0x40, 0x73, 0x00, 0x00,
1572 0x00, 0x00, 0x00, 0x00,
1573 0x00, 0x00, 0x00, 0x00,
1575 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
1576 0x00, 0x00, 0x00, 0x00,
1577 0x00, 0x00, 0x00, 0x00,
1578 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1581 static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
1582 { ICE_MAC_OFOS, 0 },
1583 { ICE_IPV6_OFOS, 14 },
1585 { ICE_PROTOCOL_LAST, 0 },
1588 static const u8 dummy_ipv6_l2tpv3_pkt[] = {
1589 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1590 0x00, 0x00, 0x00, 0x00,
1591 0x00, 0x00, 0x00, 0x00,
1594 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
1595 0x00, 0x0c, 0x73, 0x40,
1596 0x00, 0x00, 0x00, 0x00,
1597 0x00, 0x00, 0x00, 0x00,
1598 0x00, 0x00, 0x00, 0x00,
1599 0x00, 0x00, 0x00, 0x00,
1600 0x00, 0x00, 0x00, 0x00,
1601 0x00, 0x00, 0x00, 0x00,
1602 0x00, 0x00, 0x00, 0x00,
1603 0x00, 0x00, 0x00, 0x00,
1605 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
1606 0x00, 0x00, 0x00, 0x00,
1607 0x00, 0x00, 0x00, 0x00,
1608 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1611 static const struct ice_dummy_pkt_offsets dummy_qinq_ipv4_packet_offsets[] = {
1612 { ICE_MAC_OFOS, 0 },
1613 { ICE_ETYPE_OL, 12 },
1614 { ICE_VLAN_EX, 14 },
1615 { ICE_VLAN_OFOS, 18 },
1616 { ICE_IPV4_OFOS, 22 },
1617 { ICE_PROTOCOL_LAST, 0 },
1620 static const u8 dummy_qinq_ipv4_pkt[] = {
1621 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1622 0x00, 0x00, 0x00, 0x00,
1623 0x00, 0x00, 0x00, 0x00,
1625 0x91, 0x00, /* ICE_ETYPE_OL 12 */
1627 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1628 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 18 */
1630 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
1631 0x00, 0x01, 0x00, 0x00,
1632 0x00, 0x11, 0x00, 0x00,
1633 0x00, 0x00, 0x00, 0x00,
1634 0x00, 0x00, 0x00, 0x00,
1636 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
1637 0x00, 0x08, 0x00, 0x00,
1639 0x00, 0x00, /* 2 bytes for 4 byte alignment */
1642 static const struct ice_dummy_pkt_offsets dummy_qinq_ipv6_packet_offsets[] = {
1643 { ICE_MAC_OFOS, 0 },
1644 { ICE_ETYPE_OL, 12 },
1645 { ICE_VLAN_EX, 14 },
1646 { ICE_VLAN_OFOS, 18 },
1647 { ICE_IPV6_OFOS, 22 },
1648 { ICE_PROTOCOL_LAST, 0 },
1651 static const u8 dummy_qinq_ipv6_pkt[] = {
1652 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1653 0x00, 0x00, 0x00, 0x00,
1654 0x00, 0x00, 0x00, 0x00,
1656 0x91, 0x00, /* ICE_ETYPE_OL 12 */
1658 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1659 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 18 */
1661 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
1662 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
1663 0x00, 0x00, 0x00, 0x00,
1664 0x00, 0x00, 0x00, 0x00,
1665 0x00, 0x00, 0x00, 0x00,
1666 0x00, 0x00, 0x00, 0x00,
1667 0x00, 0x00, 0x00, 0x00,
1668 0x00, 0x00, 0x00, 0x00,
1669 0x00, 0x00, 0x00, 0x00,
1670 0x00, 0x00, 0x00, 0x00,
1672 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
1673 0x00, 0x10, 0x00, 0x00,
1675 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
1676 0x00, 0x00, 0x00, 0x00,
1678 0x00, 0x00, /* 2 bytes for 4 byte alignment */
1681 static const struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_offsets[] = {
1682 { ICE_MAC_OFOS, 0 },
1683 { ICE_ETYPE_OL, 12 },
1684 { ICE_VLAN_EX, 14 },
1685 { ICE_VLAN_OFOS, 18 },
1687 { ICE_PROTOCOL_LAST, 0 },
1691 struct ice_dummy_pkt_offsets dummy_qinq_pppoe_ipv4_packet_offsets[] = {
1692 { ICE_MAC_OFOS, 0 },
1693 { ICE_ETYPE_OL, 12 },
1694 { ICE_VLAN_EX, 14 },
1695 { ICE_VLAN_OFOS, 18 },
1697 { ICE_IPV4_OFOS, 30 },
1698 { ICE_PROTOCOL_LAST, 0 },
1701 static const u8 dummy_qinq_pppoe_ipv4_pkt[] = {
1702 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1703 0x00, 0x00, 0x00, 0x00,
1704 0x00, 0x00, 0x00, 0x00,
1706 0x91, 0x00, /* ICE_ETYPE_OL 12 */
1708 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1709 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 18 */
1711 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
1714 0x00, 0x21, /* PPP Link Layer 28 */
1716 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 30 */
1717 0x00, 0x00, 0x00, 0x00,
1718 0x00, 0x00, 0x00, 0x00,
1719 0x00, 0x00, 0x00, 0x00,
1720 0x00, 0x00, 0x00, 0x00,
1722 0x00, 0x00, /* 2 bytes for 4 byte alignment */
1726 struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_ipv6_offsets[] = {
1727 { ICE_MAC_OFOS, 0 },
1728 { ICE_ETYPE_OL, 12 },
1730 { ICE_VLAN_OFOS, 18 },
1732 { ICE_IPV6_OFOS, 30 },
1733 { ICE_PROTOCOL_LAST, 0 },
1736 static const u8 dummy_qinq_pppoe_ipv6_packet[] = {
1737 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1738 0x00, 0x00, 0x00, 0x00,
1739 0x00, 0x00, 0x00, 0x00,
1741 0x91, 0x00, /* ICE_ETYPE_OL 12 */
1743 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1744 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 18 */
1746 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
1749 0x00, 0x57, /* PPP Link Layer 28*/
1751 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 30 */
1752 0x00, 0x00, 0x3b, 0x00,
1753 0x00, 0x00, 0x00, 0x00,
1754 0x00, 0x00, 0x00, 0x00,
1755 0x00, 0x00, 0x00, 0x00,
1756 0x00, 0x00, 0x00, 0x00,
1757 0x00, 0x00, 0x00, 0x00,
1758 0x00, 0x00, 0x00, 0x00,
1759 0x00, 0x00, 0x00, 0x00,
1760 0x00, 0x00, 0x00, 0x00,
1762 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1765 /* this is a recipe to profile association bitmap */
1766 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
1767 ICE_MAX_NUM_PROFILES);
1769 /* this is a profile to recipe association bitmap */
1770 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
1771 ICE_MAX_NUM_RECIPES);
1773 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
1776 * ice_collect_result_idx - copy result index values
1777 * @buf: buffer that contains the result index
1778 * @recp: the recipe struct to copy data into
1780 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
1781 struct ice_sw_recipe *recp)
1783 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1784 ice_set_bit(buf->content.result_indx &
1785 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
1789 * ice_get_tun_type_for_recipe - get tunnel type for the recipe
1790 * @rid: recipe ID that we are populating
1792 static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid, bool vlan)
1794 u8 vxlan_profile[12] = {10, 11, 12, 16, 17, 18, 22, 23, 24, 25, 26, 27};
1795 u8 gre_profile[12] = {13, 14, 15, 19, 20, 21, 28, 29, 30, 31, 32, 33};
1796 u8 pppoe_profile[7] = {34, 35, 36, 37, 38, 39, 40};
1797 u8 non_tun_profile[6] = {4, 5, 6, 7, 8, 9};
1798 enum ice_sw_tunnel_type tun_type;
1799 u16 i, j, profile_num = 0;
1800 bool non_tun_valid = false;
1801 bool pppoe_valid = false;
1802 bool vxlan_valid = false;
1803 bool gre_valid = false;
1804 bool gtp_valid = false;
1805 bool flag_valid = false;
1807 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1808 if (!ice_is_bit_set(recipe_to_profile[rid], j))
1813 for (i = 0; i < 12; i++) {
1814 if (gre_profile[i] == j)
1818 for (i = 0; i < 12; i++) {
1819 if (vxlan_profile[i] == j)
1823 for (i = 0; i < 7; i++) {
1824 if (pppoe_profile[i] == j)
1828 for (i = 0; i < 6; i++) {
1829 if (non_tun_profile[i] == j)
1830 non_tun_valid = true;
1833 if (j >= ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER &&
1834 j <= ICE_PROFID_IPV6_GTPU_IPV6_TCP)
1837 if ((j >= ICE_PROFID_IPV4_ESP &&
1838 j <= ICE_PROFID_IPV6_PFCP_SESSION) ||
1839 (j >= ICE_PROFID_IPV4_GTPC_TEID &&
1840 j <= ICE_PROFID_IPV6_GTPU_TEID))
1844 if (!non_tun_valid && vxlan_valid)
1845 tun_type = ICE_SW_TUN_VXLAN;
1846 else if (!non_tun_valid && gre_valid)
1847 tun_type = ICE_SW_TUN_NVGRE;
1848 else if (!non_tun_valid && pppoe_valid)
1849 tun_type = ICE_SW_TUN_PPPOE;
1850 else if (!non_tun_valid && gtp_valid)
1851 tun_type = ICE_SW_TUN_GTP;
1852 else if (non_tun_valid &&
1853 (vxlan_valid || gre_valid || gtp_valid || pppoe_valid))
1854 tun_type = ICE_SW_TUN_AND_NON_TUN;
1855 else if (non_tun_valid && !vxlan_valid && !gre_valid && !gtp_valid &&
1857 tun_type = ICE_NON_TUN;
1859 tun_type = ICE_NON_TUN;
1861 if (profile_num > 1 && tun_type == ICE_SW_TUN_PPPOE) {
1862 i = ice_is_bit_set(recipe_to_profile[rid],
1863 ICE_PROFID_PPPOE_IPV4_OTHER);
1864 j = ice_is_bit_set(recipe_to_profile[rid],
1865 ICE_PROFID_PPPOE_IPV6_OTHER);
1867 tun_type = ICE_SW_TUN_PPPOE_IPV4;
1869 tun_type = ICE_SW_TUN_PPPOE_IPV6;
1872 if (tun_type == ICE_SW_TUN_GTP) {
1873 if (ice_is_bit_set(recipe_to_profile[rid],
1874 ICE_PROFID_IPV4_GTPU_IPV4_OTHER))
1875 tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4;
1876 else if (ice_is_bit_set(recipe_to_profile[rid],
1877 ICE_PROFID_IPV4_GTPU_IPV6_OTHER))
1878 tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6;
1879 else if (ice_is_bit_set(recipe_to_profile[rid],
1880 ICE_PROFID_IPV6_GTPU_IPV4_OTHER))
1881 tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4;
1882 else if (ice_is_bit_set(recipe_to_profile[rid],
1883 ICE_PROFID_IPV6_GTPU_IPV6_OTHER))
1884 tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6;
1887 if (profile_num == 1 && (flag_valid || non_tun_valid || pppoe_valid)) {
1888 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1889 if (ice_is_bit_set(recipe_to_profile[rid], j)) {
1891 case ICE_PROFID_IPV4_TCP:
1892 tun_type = ICE_SW_IPV4_TCP;
1894 case ICE_PROFID_IPV4_UDP:
1895 tun_type = ICE_SW_IPV4_UDP;
1897 case ICE_PROFID_IPV6_TCP:
1898 tun_type = ICE_SW_IPV6_TCP;
1900 case ICE_PROFID_IPV6_UDP:
1901 tun_type = ICE_SW_IPV6_UDP;
1903 case ICE_PROFID_PPPOE_PAY:
1904 tun_type = ICE_SW_TUN_PPPOE_PAY;
1906 case ICE_PROFID_PPPOE_IPV4_TCP:
1907 tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1909 case ICE_PROFID_PPPOE_IPV4_UDP:
1910 tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1912 case ICE_PROFID_PPPOE_IPV4_OTHER:
1913 tun_type = ICE_SW_TUN_PPPOE_IPV4;
1915 case ICE_PROFID_PPPOE_IPV6_TCP:
1916 tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1918 case ICE_PROFID_PPPOE_IPV6_UDP:
1919 tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1921 case ICE_PROFID_PPPOE_IPV6_OTHER:
1922 tun_type = ICE_SW_TUN_PPPOE_IPV6;
1924 case ICE_PROFID_IPV4_ESP:
1925 tun_type = ICE_SW_TUN_IPV4_ESP;
1927 case ICE_PROFID_IPV6_ESP:
1928 tun_type = ICE_SW_TUN_IPV6_ESP;
1930 case ICE_PROFID_IPV4_AH:
1931 tun_type = ICE_SW_TUN_IPV4_AH;
1933 case ICE_PROFID_IPV6_AH:
1934 tun_type = ICE_SW_TUN_IPV6_AH;
1936 case ICE_PROFID_IPV4_NAT_T:
1937 tun_type = ICE_SW_TUN_IPV4_NAT_T;
1939 case ICE_PROFID_IPV6_NAT_T:
1940 tun_type = ICE_SW_TUN_IPV6_NAT_T;
1942 case ICE_PROFID_IPV4_PFCP_NODE:
1944 ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1946 case ICE_PROFID_IPV6_PFCP_NODE:
1948 ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1950 case ICE_PROFID_IPV4_PFCP_SESSION:
1952 ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1954 case ICE_PROFID_IPV6_PFCP_SESSION:
1956 ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1958 case ICE_PROFID_MAC_IPV4_L2TPV3:
1959 tun_type = ICE_SW_TUN_IPV4_L2TPV3;
1961 case ICE_PROFID_MAC_IPV6_L2TPV3:
1962 tun_type = ICE_SW_TUN_IPV6_L2TPV3;
1964 case ICE_PROFID_IPV4_GTPU_TEID:
1965 tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1967 case ICE_PROFID_IPV6_GTPU_TEID:
1968 tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
1979 if (vlan && tun_type == ICE_SW_TUN_PPPOE)
1980 tun_type = ICE_SW_TUN_PPPOE_QINQ;
1981 else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV6)
1982 tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
1983 else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV4)
1984 tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
1985 else if (vlan && tun_type == ICE_SW_TUN_PPPOE_PAY)
1986 tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
1987 else if (vlan && tun_type == ICE_SW_TUN_AND_NON_TUN)
1988 tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1989 else if (vlan && tun_type == ICE_NON_TUN)
1990 tun_type = ICE_NON_TUN_QINQ;
1996 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
1997 * @hw: pointer to hardware structure
1998 * @recps: struct that we need to populate
1999 * @rid: recipe ID that we are populating
2000 * @refresh_required: true if we should get recipe to profile mapping from FW
2002 * This function is used to populate all the necessary entries into our
2003 * bookkeeping so that we have a current list of all the recipes that are
2004 * programmed in the firmware.
2006 static enum ice_status
2007 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
2008 bool *refresh_required)
2010 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
2011 struct ice_aqc_recipe_data_elem *tmp;
2012 u16 num_recps = ICE_MAX_NUM_RECIPES;
2013 struct ice_prot_lkup_ext *lkup_exts;
2014 enum ice_status status;
2019 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
2021 /* we need a buffer big enough to accommodate all the recipes */
2022 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
2023 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
2025 return ICE_ERR_NO_MEMORY;
2027 tmp[0].recipe_indx = rid;
2028 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
2029 /* non-zero status meaning recipe doesn't exist */
2033 /* Get recipe to profile map so that we can get the fv from lkups that
2034 * we read for a recipe from FW. Since we want to minimize the number of
2035 * times we make this FW call, just make one call and cache the copy
2036 * until a new recipe is added. This operation is only required the
2037 * first time to get the changes from FW. Then to search existing
2038 * entries we don't need to update the cache again until another recipe
2041 if (*refresh_required) {
2042 ice_get_recp_to_prof_map(hw);
2043 *refresh_required = false;
2046 /* Start populating all the entries for recps[rid] based on lkups from
2047 * firmware. Note that we are only creating the root recipe in our
2050 lkup_exts = &recps[rid].lkup_exts;
2052 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
2053 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
2054 struct ice_recp_grp_entry *rg_entry;
2055 u8 i, prof, idx, prot = 0;
2059 rg_entry = (struct ice_recp_grp_entry *)
2060 ice_malloc(hw, sizeof(*rg_entry));
2062 status = ICE_ERR_NO_MEMORY;
2066 idx = root_bufs.recipe_indx;
2067 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
2069 /* Mark all result indices in this chain */
2070 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
2071 ice_set_bit(root_bufs.content.result_indx &
2072 ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
2074 /* get the first profile that is associated with rid */
2075 prof = ice_find_first_bit(recipe_to_profile[idx],
2076 ICE_MAX_NUM_PROFILES);
2077 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
2078 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
2080 rg_entry->fv_idx[i] = lkup_indx;
2081 rg_entry->fv_mask[i] =
2082 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
2084 /* If the recipe is a chained recipe then all its
2085 * child recipe's result will have a result index.
2086 * To fill fv_words we should not use those result
2087 * index, we only need the protocol ids and offsets.
2088 * We will skip all the fv_idx which stores result
2089 * index in them. We also need to skip any fv_idx which
2090 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
2091 * valid offset value.
2093 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
2094 rg_entry->fv_idx[i]) ||
2095 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
2096 rg_entry->fv_idx[i] == 0)
2099 ice_find_prot_off(hw, ICE_BLK_SW, prof,
2100 rg_entry->fv_idx[i], &prot, &off);
2101 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
2102 lkup_exts->fv_words[fv_word_idx].off = off;
2103 lkup_exts->field_mask[fv_word_idx] =
2104 rg_entry->fv_mask[i];
2105 if (prot == ICE_META_DATA_ID_HW &&
2106 off == ICE_TUN_FLAG_MDID_OFF)
2110 /* populate rg_list with the data from the child entry of this
2113 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
2115 /* Propagate some data to the recipe database */
2116 recps[idx].is_root = !!is_root;
2117 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2118 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
2119 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
2120 recps[idx].chain_idx = root_bufs.content.result_indx &
2121 ~ICE_AQ_RECIPE_RESULT_EN;
2122 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
2124 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
2130 /* Only do the following for root recipes entries */
2131 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
2132 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
2133 recps[idx].root_rid = root_bufs.content.rid &
2134 ~ICE_AQ_RECIPE_ID_IS_ROOT;
2135 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2138 /* Complete initialization of the root recipe entry */
2139 lkup_exts->n_val_words = fv_word_idx;
2140 recps[rid].big_recp = (num_recps > 1);
2141 recps[rid].n_grp_count = (u8)num_recps;
2142 recps[rid].tun_type = ice_get_tun_type_for_recipe(rid, vlan);
2143 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
2144 ice_memdup(hw, tmp, recps[rid].n_grp_count *
2145 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
2146 if (!recps[rid].root_buf)
2149 /* Copy result indexes */
2150 ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
2151 recps[rid].recp_created = true;
2159 * ice_get_recp_to_prof_map - updates recipe to profile mapping
2160 * @hw: pointer to hardware structure
2162 * This function is used to populate recipe_to_profile matrix where index to
2163 * this array is the recipe ID and the element is the mapping of which profiles
2164 * is this recipe mapped to.
2166 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
2168 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
2171 for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
2174 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
2175 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
2176 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
2178 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
2179 ICE_MAX_NUM_RECIPES);
2180 ice_for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
2181 ice_set_bit(i, recipe_to_profile[j]);
2186 * ice_init_def_sw_recp - initialize the recipe book keeping tables
2187 * @hw: pointer to the HW struct
2188 * @recp_list: pointer to sw recipe list
2190 * Allocate memory for the entire recipe table and initialize the structures/
2191 * entries corresponding to basic recipes.
2194 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
2196 struct ice_sw_recipe *recps;
2199 recps = (struct ice_sw_recipe *)
2200 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
2202 return ICE_ERR_NO_MEMORY;
2204 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
2205 recps[i].root_rid = i;
2206 INIT_LIST_HEAD(&recps[i].filt_rules);
2207 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
2208 INIT_LIST_HEAD(&recps[i].rg_list);
2209 ice_init_lock(&recps[i].filt_rule_lock);
2218 * ice_aq_get_sw_cfg - get switch configuration
2219 * @hw: pointer to the hardware structure
2220 * @buf: pointer to the result buffer
2221 * @buf_size: length of the buffer available for response
2222 * @req_desc: pointer to requested descriptor
2223 * @num_elems: pointer to number of elements
2224 * @cd: pointer to command details structure or NULL
2226 * Get switch configuration (0x0200) to be placed in buf.
2227 * This admin command returns information such as initial VSI/port number
2228 * and switch ID it belongs to.
2230 * NOTE: *req_desc is both an input/output parameter.
2231 * The caller of this function first calls this function with *request_desc set
2232 * to 0. If the response from f/w has *req_desc set to 0, all the switch
2233 * configuration information has been returned; if non-zero (meaning not all
2234 * the information was returned), the caller should call this function again
2235 * with *req_desc set to the previous value returned by f/w to get the
2236 * next block of switch configuration information.
2238 * *num_elems is output only parameter. This reflects the number of elements
2239 * in response buffer. The caller of this function to use *num_elems while
2240 * parsing the response buffer.
2242 static enum ice_status
2243 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
2244 u16 buf_size, u16 *req_desc, u16 *num_elems,
2245 struct ice_sq_cd *cd)
2247 struct ice_aqc_get_sw_cfg *cmd;
2248 struct ice_aq_desc desc;
2249 enum ice_status status;
2251 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
2252 cmd = &desc.params.get_sw_conf;
2253 cmd->element = CPU_TO_LE16(*req_desc);
2255 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2257 *req_desc = LE16_TO_CPU(cmd->element);
2258 *num_elems = LE16_TO_CPU(cmd->num_elems);
2265 * ice_alloc_rss_global_lut - allocate a RSS global LUT
2266 * @hw: pointer to the HW struct
2267 * @shared_res: true to allocate as a shared resource and false to allocate as a dedicated resource
2268 * @global_lut_id: output parameter for the RSS global LUT's ID
2270 enum ice_status ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id)
2272 struct ice_aqc_alloc_free_res_elem *sw_buf;
2273 enum ice_status status;
2276 buf_len = ice_struct_size(sw_buf, elem, 1);
2277 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2279 return ICE_ERR_NO_MEMORY;
2281 sw_buf->num_elems = CPU_TO_LE16(1);
2282 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH |
2283 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
2284 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
2286 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, ice_aqc_opc_alloc_res, NULL);
2288 ice_debug(hw, ICE_DBG_RES, "Failed to allocate %s RSS global LUT, status %d\n",
2289 shared_res ? "shared" : "dedicated", status);
2290 goto ice_alloc_global_lut_exit;
2293 *global_lut_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2295 ice_alloc_global_lut_exit:
2296 ice_free(hw, sw_buf);
2301 * ice_free_rss_global_lut - free a RSS global LUT
2302 * @hw: pointer to the HW struct
2303 * @global_lut_id: ID of the RSS global LUT to free
2305 enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id)
2307 struct ice_aqc_alloc_free_res_elem *sw_buf;
2308 u16 buf_len, num_elems = 1;
2309 enum ice_status status;
2311 buf_len = ice_struct_size(sw_buf, elem, num_elems);
2312 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2314 return ICE_ERR_NO_MEMORY;
2316 sw_buf->num_elems = CPU_TO_LE16(num_elems);
2317 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH);
2318 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(global_lut_id);
2320 status = ice_aq_alloc_free_res(hw, num_elems, sw_buf, buf_len, ice_aqc_opc_free_res, NULL);
2322 ice_debug(hw, ICE_DBG_RES, "Failed to free RSS global LUT %d, status %d\n",
2323 global_lut_id, status);
2325 ice_free(hw, sw_buf);
2330 * ice_alloc_sw - allocate resources specific to switch
2331 * @hw: pointer to the HW struct
2332 * @ena_stats: true to turn on VEB stats
2333 * @shared_res: true for shared resource, false for dedicated resource
2334 * @sw_id: switch ID returned
2335 * @counter_id: VEB counter ID returned
2337 * allocates switch resources (SWID and VEB counter) (0x0208)
2340 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
2343 struct ice_aqc_alloc_free_res_elem *sw_buf;
2344 struct ice_aqc_res_elem *sw_ele;
2345 enum ice_status status;
2348 buf_len = ice_struct_size(sw_buf, elem, 1);
2349 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2351 return ICE_ERR_NO_MEMORY;
2353 /* Prepare buffer for switch ID.
2354 * The number of resource entries in buffer is passed as 1 since only a
2355 * single switch/VEB instance is allocated, and hence a single sw_id
2358 sw_buf->num_elems = CPU_TO_LE16(1);
2360 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
2361 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
2362 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
2364 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2365 ice_aqc_opc_alloc_res, NULL);
2368 goto ice_alloc_sw_exit;
2370 sw_ele = &sw_buf->elem[0];
2371 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
2374 /* Prepare buffer for VEB Counter */
2375 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
2376 struct ice_aqc_alloc_free_res_elem *counter_buf;
2377 struct ice_aqc_res_elem *counter_ele;
2379 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
2380 ice_malloc(hw, buf_len);
2382 status = ICE_ERR_NO_MEMORY;
2383 goto ice_alloc_sw_exit;
2386 /* The number of resource entries in buffer is passed as 1 since
2387 * only a single switch/VEB instance is allocated, and hence a
2388 * single VEB counter is requested.
2390 counter_buf->num_elems = CPU_TO_LE16(1);
2391 counter_buf->res_type =
2392 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
2393 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
2394 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
2398 ice_free(hw, counter_buf);
2399 goto ice_alloc_sw_exit;
2401 counter_ele = &counter_buf->elem[0];
2402 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
2403 ice_free(hw, counter_buf);
2407 ice_free(hw, sw_buf);
2412 * ice_free_sw - free resources specific to switch
2413 * @hw: pointer to the HW struct
2414 * @sw_id: switch ID returned
2415 * @counter_id: VEB counter ID returned
2417 * free switch resources (SWID and VEB counter) (0x0209)
2419 * NOTE: This function frees multiple resources. It continues
2420 * releasing other resources even after it encounters error.
2421 * The error code returned is the last error it encountered.
2423 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
2425 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
2426 enum ice_status status, ret_status;
2429 buf_len = ice_struct_size(sw_buf, elem, 1);
2430 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2432 return ICE_ERR_NO_MEMORY;
2434 /* Prepare buffer to free for switch ID res.
2435 * The number of resource entries in buffer is passed as 1 since only a
2436 * single switch/VEB instance is freed, and hence a single sw_id
2439 sw_buf->num_elems = CPU_TO_LE16(1);
2440 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
2441 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
2443 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2444 ice_aqc_opc_free_res, NULL);
2447 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
2449 /* Prepare buffer to free for VEB Counter resource */
2450 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
2451 ice_malloc(hw, buf_len);
2453 ice_free(hw, sw_buf);
2454 return ICE_ERR_NO_MEMORY;
2457 /* The number of resource entries in buffer is passed as 1 since only a
2458 * single switch/VEB instance is freed, and hence a single VEB counter
2461 counter_buf->num_elems = CPU_TO_LE16(1);
2462 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
2463 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
2465 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
2466 ice_aqc_opc_free_res, NULL);
2468 ice_debug(hw, ICE_DBG_SW, "VEB counter resource could not be freed\n");
2469 ret_status = status;
2472 ice_free(hw, counter_buf);
2473 ice_free(hw, sw_buf);
2479 * @hw: pointer to the HW struct
2480 * @vsi_ctx: pointer to a VSI context struct
2481 * @cd: pointer to command details structure or NULL
2483 * Add a VSI context to the hardware (0x0210)
2486 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2487 struct ice_sq_cd *cd)
2489 struct ice_aqc_add_update_free_vsi_resp *res;
2490 struct ice_aqc_add_get_update_free_vsi *cmd;
2491 struct ice_aq_desc desc;
2492 enum ice_status status;
2494 cmd = &desc.params.vsi_cmd;
2495 res = &desc.params.add_update_free_vsi_res;
2497 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
2499 if (!vsi_ctx->alloc_from_pool)
2500 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
2501 ICE_AQ_VSI_IS_VALID);
2503 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
2505 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2507 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2508 sizeof(vsi_ctx->info), cd);
2511 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
2512 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
2513 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
2521 * @hw: pointer to the HW struct
2522 * @vsi_ctx: pointer to a VSI context struct
2523 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2524 * @cd: pointer to command details structure or NULL
2526 * Free VSI context info from hardware (0x0213)
2529 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2530 bool keep_vsi_alloc, struct ice_sq_cd *cd)
2532 struct ice_aqc_add_update_free_vsi_resp *resp;
2533 struct ice_aqc_add_get_update_free_vsi *cmd;
2534 struct ice_aq_desc desc;
2535 enum ice_status status;
2537 cmd = &desc.params.vsi_cmd;
2538 resp = &desc.params.add_update_free_vsi_res;
2540 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
2542 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2544 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
2546 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2548 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2549 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2557 * @hw: pointer to the HW struct
2558 * @vsi_ctx: pointer to a VSI context struct
2559 * @cd: pointer to command details structure or NULL
2561 * Update VSI context in the hardware (0x0211)
2564 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2565 struct ice_sq_cd *cd)
2567 struct ice_aqc_add_update_free_vsi_resp *resp;
2568 struct ice_aqc_add_get_update_free_vsi *cmd;
2569 struct ice_aq_desc desc;
2570 enum ice_status status;
2572 cmd = &desc.params.vsi_cmd;
2573 resp = &desc.params.add_update_free_vsi_res;
2575 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
2577 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2579 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2581 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2582 sizeof(vsi_ctx->info), cd);
2585 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2586 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2593 * ice_is_vsi_valid - check whether the VSI is valid or not
2594 * @hw: pointer to the HW struct
2595 * @vsi_handle: VSI handle
2597 * check whether the VSI is valid or not
2599 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
2601 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
2605 * ice_get_hw_vsi_num - return the HW VSI number
2606 * @hw: pointer to the HW struct
2607 * @vsi_handle: VSI handle
2609 * return the HW VSI number
2610 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
2612 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
2614 return hw->vsi_ctx[vsi_handle]->vsi_num;
2618 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
2619 * @hw: pointer to the HW struct
2620 * @vsi_handle: VSI handle
2622 * return the VSI context entry for a given VSI handle
2624 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2626 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
2630 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
2631 * @hw: pointer to the HW struct
2632 * @vsi_handle: VSI handle
2633 * @vsi: VSI context pointer
2635 * save the VSI context entry for a given VSI handle
2638 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
2640 hw->vsi_ctx[vsi_handle] = vsi;
2644 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
2645 * @hw: pointer to the HW struct
2646 * @vsi_handle: VSI handle
2648 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
2650 struct ice_vsi_ctx *vsi;
2653 vsi = ice_get_vsi_ctx(hw, vsi_handle);
2656 ice_for_each_traffic_class(i) {
2657 if (vsi->lan_q_ctx[i]) {
2658 ice_free(hw, vsi->lan_q_ctx[i]);
2659 vsi->lan_q_ctx[i] = NULL;
2665 * ice_clear_vsi_ctx - clear the VSI context entry
2666 * @hw: pointer to the HW struct
2667 * @vsi_handle: VSI handle
2669 * clear the VSI context entry
2671 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2673 struct ice_vsi_ctx *vsi;
2675 vsi = ice_get_vsi_ctx(hw, vsi_handle);
2677 ice_clear_vsi_q_ctx(hw, vsi_handle);
2679 hw->vsi_ctx[vsi_handle] = NULL;
2684 * ice_clear_all_vsi_ctx - clear all the VSI context entries
2685 * @hw: pointer to the HW struct
2687 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
2691 for (i = 0; i < ICE_MAX_VSI; i++)
2692 ice_clear_vsi_ctx(hw, i);
2696 * ice_add_vsi - add VSI context to the hardware and VSI handle list
2697 * @hw: pointer to the HW struct
2698 * @vsi_handle: unique VSI handle provided by drivers
2699 * @vsi_ctx: pointer to a VSI context struct
2700 * @cd: pointer to command details structure or NULL
2702 * Add a VSI context to the hardware also add it into the VSI handle list.
2703 * If this function gets called after reset for existing VSIs then update
2704 * with the new HW VSI number in the corresponding VSI handle list entry.
2707 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2708 struct ice_sq_cd *cd)
2710 struct ice_vsi_ctx *tmp_vsi_ctx;
2711 enum ice_status status;
2713 if (vsi_handle >= ICE_MAX_VSI)
2714 return ICE_ERR_PARAM;
2715 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
2718 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
2720 /* Create a new VSI context */
2721 tmp_vsi_ctx = (struct ice_vsi_ctx *)
2722 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
2724 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
2725 return ICE_ERR_NO_MEMORY;
2727 *tmp_vsi_ctx = *vsi_ctx;
2729 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
2731 /* update with new HW VSI num */
2732 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
2739 * ice_free_vsi- free VSI context from hardware and VSI handle list
2740 * @hw: pointer to the HW struct
2741 * @vsi_handle: unique VSI handle
2742 * @vsi_ctx: pointer to a VSI context struct
2743 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2744 * @cd: pointer to command details structure or NULL
2746 * Free VSI context info from hardware as well as from VSI handle list
2749 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2750 bool keep_vsi_alloc, struct ice_sq_cd *cd)
2752 enum ice_status status;
2754 if (!ice_is_vsi_valid(hw, vsi_handle))
2755 return ICE_ERR_PARAM;
2756 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2757 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
2759 ice_clear_vsi_ctx(hw, vsi_handle);
2765 * @hw: pointer to the HW struct
2766 * @vsi_handle: unique VSI handle
2767 * @vsi_ctx: pointer to a VSI context struct
2768 * @cd: pointer to command details structure or NULL
2770 * Update VSI context in the hardware
2773 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2774 struct ice_sq_cd *cd)
2776 if (!ice_is_vsi_valid(hw, vsi_handle))
2777 return ICE_ERR_PARAM;
2778 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2779 return ice_aq_update_vsi(hw, vsi_ctx, cd);
2783 * ice_aq_get_vsi_params
2784 * @hw: pointer to the HW struct
2785 * @vsi_ctx: pointer to a VSI context struct
2786 * @cd: pointer to command details structure or NULL
2788 * Get VSI context info from hardware (0x0212)
2791 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2792 struct ice_sq_cd *cd)
2794 struct ice_aqc_add_get_update_free_vsi *cmd;
2795 struct ice_aqc_get_vsi_resp *resp;
2796 struct ice_aq_desc desc;
2797 enum ice_status status;
2799 cmd = &desc.params.vsi_cmd;
2800 resp = &desc.params.get_vsi_resp;
2802 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
2804 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2806 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2807 sizeof(vsi_ctx->info), cd);
2809 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
2811 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2812 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2819 * ice_aq_add_update_mir_rule - add/update a mirror rule
2820 * @hw: pointer to the HW struct
2821 * @rule_type: Rule Type
2822 * @dest_vsi: VSI number to which packets will be mirrored
2823 * @count: length of the list
2824 * @mr_buf: buffer for list of mirrored VSI numbers
2825 * @cd: pointer to command details structure or NULL
2828 * Add/Update Mirror Rule (0x260).
2831 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
2832 u16 count, struct ice_mir_rule_buf *mr_buf,
2833 struct ice_sq_cd *cd, u16 *rule_id)
2835 struct ice_aqc_add_update_mir_rule *cmd;
2836 struct ice_aq_desc desc;
2837 enum ice_status status;
2838 __le16 *mr_list = NULL;
2841 switch (rule_type) {
2842 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
2843 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
2844 /* Make sure count and mr_buf are set for these rule_types */
2845 if (!(count && mr_buf))
2846 return ICE_ERR_PARAM;
2848 buf_size = count * sizeof(__le16);
2849 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
2851 return ICE_ERR_NO_MEMORY;
2853 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
2854 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
2855 /* Make sure count and mr_buf are not set for these
2858 if (count || mr_buf)
2859 return ICE_ERR_PARAM;
2862 ice_debug(hw, ICE_DBG_SW, "Error due to unsupported rule_type %u\n", rule_type);
2863 return ICE_ERR_OUT_OF_RANGE;
2866 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
2868 /* Pre-process 'mr_buf' items for add/update of virtual port
2869 * ingress/egress mirroring (but not physical port ingress/egress
2875 for (i = 0; i < count; i++) {
2878 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
2880 /* Validate specified VSI number, make sure it is less
2881 * than ICE_MAX_VSI, if not return with error.
2883 if (id >= ICE_MAX_VSI) {
2884 ice_debug(hw, ICE_DBG_SW, "Error VSI index (%u) out-of-range\n",
2886 ice_free(hw, mr_list);
2887 return ICE_ERR_OUT_OF_RANGE;
2890 /* add VSI to mirror rule */
2893 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
2894 else /* remove VSI from mirror rule */
2895 mr_list[i] = CPU_TO_LE16(id);
2899 cmd = &desc.params.add_update_rule;
2900 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
2901 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
2902 ICE_AQC_RULE_ID_VALID_M);
2903 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
2904 cmd->num_entries = CPU_TO_LE16(count);
2905 cmd->dest = CPU_TO_LE16(dest_vsi);
2907 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
2909 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
2911 ice_free(hw, mr_list);
2917 * ice_aq_delete_mir_rule - delete a mirror rule
2918 * @hw: pointer to the HW struct
2919 * @rule_id: Mirror rule ID (to be deleted)
2920 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
2921 * otherwise it is returned to the shared pool
2922 * @cd: pointer to command details structure or NULL
2924 * Delete Mirror Rule (0x261).
2927 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
2928 struct ice_sq_cd *cd)
2930 struct ice_aqc_delete_mir_rule *cmd;
2931 struct ice_aq_desc desc;
2933 /* rule_id should be in the range 0...63 */
2934 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
2935 return ICE_ERR_OUT_OF_RANGE;
2937 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
2939 cmd = &desc.params.del_rule;
2940 rule_id |= ICE_AQC_RULE_ID_VALID_M;
2941 cmd->rule_id = CPU_TO_LE16(rule_id);
2944 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
2946 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2950 * ice_aq_alloc_free_vsi_list
2951 * @hw: pointer to the HW struct
2952 * @vsi_list_id: VSI list ID returned or used for lookup
2953 * @lkup_type: switch rule filter lookup type
2954 * @opc: switch rules population command type - pass in the command opcode
2956 * allocates or free a VSI list resource
2958 static enum ice_status
2959 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
2960 enum ice_sw_lkup_type lkup_type,
2961 enum ice_adminq_opc opc)
2963 struct ice_aqc_alloc_free_res_elem *sw_buf;
2964 struct ice_aqc_res_elem *vsi_ele;
2965 enum ice_status status;
2968 buf_len = ice_struct_size(sw_buf, elem, 1);
2969 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2971 return ICE_ERR_NO_MEMORY;
2972 sw_buf->num_elems = CPU_TO_LE16(1);
2974 if (lkup_type == ICE_SW_LKUP_MAC ||
2975 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2976 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2977 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2978 lkup_type == ICE_SW_LKUP_PROMISC ||
2979 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2980 lkup_type == ICE_SW_LKUP_LAST) {
2981 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
2982 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
2984 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
2986 status = ICE_ERR_PARAM;
2987 goto ice_aq_alloc_free_vsi_list_exit;
2990 if (opc == ice_aqc_opc_free_res)
2991 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
2993 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
2995 goto ice_aq_alloc_free_vsi_list_exit;
2997 if (opc == ice_aqc_opc_alloc_res) {
2998 vsi_ele = &sw_buf->elem[0];
2999 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
3002 ice_aq_alloc_free_vsi_list_exit:
3003 ice_free(hw, sw_buf);
3008 * ice_aq_set_storm_ctrl - Sets storm control configuration
3009 * @hw: pointer to the HW struct
3010 * @bcast_thresh: represents the upper threshold for broadcast storm control
3011 * @mcast_thresh: represents the upper threshold for multicast storm control
3012 * @ctl_bitmask: storm control knobs
3014 * Sets the storm control configuration (0x0280)
3017 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
3020 struct ice_aqc_storm_cfg *cmd;
3021 struct ice_aq_desc desc;
3023 cmd = &desc.params.storm_conf;
3025 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
3027 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
3028 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
3029 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
3031 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3035 * ice_aq_get_storm_ctrl - gets storm control configuration
3036 * @hw: pointer to the HW struct
3037 * @bcast_thresh: represents the upper threshold for broadcast storm control
3038 * @mcast_thresh: represents the upper threshold for multicast storm control
3039 * @ctl_bitmask: storm control knobs
3041 * Gets the storm control configuration (0x0281)
3044 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
3047 enum ice_status status;
3048 struct ice_aq_desc desc;
3050 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
3052 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3054 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
3057 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
3060 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
3063 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
3070 * ice_aq_sw_rules - add/update/remove switch rules
3071 * @hw: pointer to the HW struct
3072 * @rule_list: pointer to switch rule population list
3073 * @rule_list_sz: total size of the rule list in bytes
3074 * @num_rules: number of switch rules in the rule_list
3075 * @opc: switch rules population command type - pass in the command opcode
3076 * @cd: pointer to command details structure or NULL
3078 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
3080 static enum ice_status
3081 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
3082 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
3084 struct ice_aq_desc desc;
3085 enum ice_status status;
3087 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3089 if (opc != ice_aqc_opc_add_sw_rules &&
3090 opc != ice_aqc_opc_update_sw_rules &&
3091 opc != ice_aqc_opc_remove_sw_rules)
3092 return ICE_ERR_PARAM;
3094 ice_fill_dflt_direct_cmd_desc(&desc, opc);
3096 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3097 desc.params.sw_rules.num_rules_fltr_entry_index =
3098 CPU_TO_LE16(num_rules);
3099 status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
3100 if (opc != ice_aqc_opc_add_sw_rules &&
3101 hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
3102 status = ICE_ERR_DOES_NOT_EXIST;
3108 * ice_aq_add_recipe - add switch recipe
3109 * @hw: pointer to the HW struct
3110 * @s_recipe_list: pointer to switch rule population list
3111 * @num_recipes: number of switch recipes in the list
3112 * @cd: pointer to command details structure or NULL
3117 ice_aq_add_recipe(struct ice_hw *hw,
3118 struct ice_aqc_recipe_data_elem *s_recipe_list,
3119 u16 num_recipes, struct ice_sq_cd *cd)
3121 struct ice_aqc_add_get_recipe *cmd;
3122 struct ice_aq_desc desc;
3125 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3126 cmd = &desc.params.add_get_recipe;
3127 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
3129 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
3130 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3132 buf_size = num_recipes * sizeof(*s_recipe_list);
3134 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
3138 * ice_aq_get_recipe - get switch recipe
3139 * @hw: pointer to the HW struct
3140 * @s_recipe_list: pointer to switch rule population list
3141 * @num_recipes: pointer to the number of recipes (input and output)
3142 * @recipe_root: root recipe number of recipe(s) to retrieve
3143 * @cd: pointer to command details structure or NULL
3147 * On input, *num_recipes should equal the number of entries in s_recipe_list.
3148 * On output, *num_recipes will equal the number of entries returned in
3151 * The caller must supply enough space in s_recipe_list to hold all possible
3152 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
3155 ice_aq_get_recipe(struct ice_hw *hw,
3156 struct ice_aqc_recipe_data_elem *s_recipe_list,
3157 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
3159 struct ice_aqc_add_get_recipe *cmd;
3160 struct ice_aq_desc desc;
3161 enum ice_status status;
3164 if (*num_recipes != ICE_MAX_NUM_RECIPES)
3165 return ICE_ERR_PARAM;
3167 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3168 cmd = &desc.params.add_get_recipe;
3169 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
3171 cmd->return_index = CPU_TO_LE16(recipe_root);
3172 cmd->num_sub_recipes = 0;
3174 buf_size = *num_recipes * sizeof(*s_recipe_list);
3176 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
3177 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
3183 * ice_update_recipe_lkup_idx - update a default recipe based on the lkup_idx
3184 * @hw: pointer to the HW struct
3185 * @params: parameters used to update the default recipe
3187 * This function only supports updating default recipes and it only supports
3188 * updating a single recipe based on the lkup_idx at a time.
3190 * This is done as a read-modify-write operation. First, get the current recipe
3191 * contents based on the recipe's ID. Then modify the field vector index and
3192 * mask if it's valid at the lkup_idx. Finally, use the add recipe AQ to update
3193 * the pre-existing recipe with the modifications.
3196 ice_update_recipe_lkup_idx(struct ice_hw *hw,
3197 struct ice_update_recipe_lkup_idx_params *params)
3199 struct ice_aqc_recipe_data_elem *rcp_list;
3200 u16 num_recps = ICE_MAX_NUM_RECIPES;
3201 enum ice_status status;
3203 rcp_list = (struct ice_aqc_recipe_data_elem *)ice_malloc(hw, num_recps * sizeof(*rcp_list));
3205 return ICE_ERR_NO_MEMORY;
3207 /* read current recipe list from firmware */
3208 rcp_list->recipe_indx = params->rid;
3209 status = ice_aq_get_recipe(hw, rcp_list, &num_recps, params->rid, NULL);
3211 ice_debug(hw, ICE_DBG_SW, "Failed to get recipe %d, status %d\n",
3212 params->rid, status);
3216 /* only modify existing recipe's lkup_idx and mask if valid, while
3217 * leaving all other fields the same, then update the recipe firmware
3219 rcp_list->content.lkup_indx[params->lkup_idx] = params->fv_idx;
3220 if (params->mask_valid)
3221 rcp_list->content.mask[params->lkup_idx] =
3222 CPU_TO_LE16(params->mask);
3224 if (params->ignore_valid)
3225 rcp_list->content.lkup_indx[params->lkup_idx] |=
3226 ICE_AQ_RECIPE_LKUP_IGNORE;
3228 status = ice_aq_add_recipe(hw, &rcp_list[0], 1, NULL);
3230 ice_debug(hw, ICE_DBG_SW, "Failed to update recipe %d lkup_idx %d fv_idx %d mask %d mask_valid %s, status %d\n",
3231 params->rid, params->lkup_idx, params->fv_idx,
3232 params->mask, params->mask_valid ? "true" : "false",
3236 ice_free(hw, rcp_list);
3241 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
3242 * @hw: pointer to the HW struct
3243 * @profile_id: package profile ID to associate the recipe with
3244 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
3245 * @cd: pointer to command details structure or NULL
3246 * Recipe to profile association (0x0291)
3249 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
3250 struct ice_sq_cd *cd)
3252 struct ice_aqc_recipe_to_profile *cmd;
3253 struct ice_aq_desc desc;
3255 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3256 cmd = &desc.params.recipe_to_profile;
3257 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
3258 cmd->profile_id = CPU_TO_LE16(profile_id);
3259 /* Set the recipe ID bit in the bitmask to let the device know which
3260 * profile we are associating the recipe to
3262 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
3263 ICE_NONDMA_TO_NONDMA);
3265 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3269 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
3270 * @hw: pointer to the HW struct
3271 * @profile_id: package profile ID to associate the recipe with
3272 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
3273 * @cd: pointer to command details structure or NULL
3274 * Associate profile ID with given recipe (0x0293)
3277 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
3278 struct ice_sq_cd *cd)
3280 struct ice_aqc_recipe_to_profile *cmd;
3281 struct ice_aq_desc desc;
3282 enum ice_status status;
3284 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3285 cmd = &desc.params.recipe_to_profile;
3286 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
3287 cmd->profile_id = CPU_TO_LE16(profile_id);
3289 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3291 ice_memcpy(r_bitmap, cmd->recipe_assoc,
3292 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
3298 * ice_alloc_recipe - add recipe resource
3299 * @hw: pointer to the hardware structure
3300 * @rid: recipe ID returned as response to AQ call
3302 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
3304 struct ice_aqc_alloc_free_res_elem *sw_buf;
3305 enum ice_status status;
3308 buf_len = ice_struct_size(sw_buf, elem, 1);
3309 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
3311 return ICE_ERR_NO_MEMORY;
3313 sw_buf->num_elems = CPU_TO_LE16(1);
3314 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
3315 ICE_AQC_RES_TYPE_S) |
3316 ICE_AQC_RES_TYPE_FLAG_SHARED);
3317 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
3318 ice_aqc_opc_alloc_res, NULL);
3320 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
3321 ice_free(hw, sw_buf);
3326 /* ice_init_port_info - Initialize port_info with switch configuration data
3327 * @pi: pointer to port_info
3328 * @vsi_port_num: VSI number or port number
3329 * @type: Type of switch element (port or VSI)
3330 * @swid: switch ID of the switch the element is attached to
3331 * @pf_vf_num: PF or VF number
3332 * @is_vf: true if the element is a VF, false otherwise
3335 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
3336 u16 swid, u16 pf_vf_num, bool is_vf)
3339 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
3340 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
3342 pi->pf_vf_num = pf_vf_num;
3344 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3345 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3348 ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
3353 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
3354 * @hw: pointer to the hardware structure
3356 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
3358 struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
3359 enum ice_status status;
3366 num_total_ports = 1;
3368 rbuf = (struct ice_aqc_get_sw_cfg_resp_elem *)
3369 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
3372 return ICE_ERR_NO_MEMORY;
3374 /* Multiple calls to ice_aq_get_sw_cfg may be required
3375 * to get all the switch configuration information. The need
3376 * for additional calls is indicated by ice_aq_get_sw_cfg
3377 * writing a non-zero value in req_desc
3380 struct ice_aqc_get_sw_cfg_resp_elem *ele;
3382 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
3383 &req_desc, &num_elems, NULL);
3388 for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
3389 u16 pf_vf_num, swid, vsi_port_num;
3393 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
3394 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
3396 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
3397 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
3399 swid = LE16_TO_CPU(ele->swid);
3401 if (LE16_TO_CPU(ele->pf_vf_num) &
3402 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
3405 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
3406 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
3409 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
3410 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
3411 if (j == num_total_ports) {
3412 ice_debug(hw, ICE_DBG_SW, "more ports than expected\n");
3413 status = ICE_ERR_CFG;
3416 ice_init_port_info(hw->port_info,
3417 vsi_port_num, res_type, swid,
3425 } while (req_desc && !status);
3433 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
3434 * @hw: pointer to the hardware structure
3435 * @fi: filter info structure to fill/update
3437 * This helper function populates the lb_en and lan_en elements of the provided
3438 * ice_fltr_info struct using the switch's type and characteristics of the
3439 * switch rule being configured.
3441 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
3443 if ((fi->flag & ICE_FLTR_RX) &&
3444 (fi->fltr_act == ICE_FWD_TO_VSI ||
3445 fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
3446 fi->lkup_type == ICE_SW_LKUP_LAST)
3450 if ((fi->flag & ICE_FLTR_TX) &&
3451 (fi->fltr_act == ICE_FWD_TO_VSI ||
3452 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
3453 fi->fltr_act == ICE_FWD_TO_Q ||
3454 fi->fltr_act == ICE_FWD_TO_QGRP)) {
3455 /* Setting LB for prune actions will result in replicated
3456 * packets to the internal switch that will be dropped.
3458 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
3461 /* Set lan_en to TRUE if
3462 * 1. The switch is a VEB AND
3464 * 2.1 The lookup is a directional lookup like ethertype,
3465 * promiscuous, ethertype-MAC, promiscuous-VLAN
3466 * and default-port OR
3467 * 2.2 The lookup is VLAN, OR
3468 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
3469 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
3473 * The switch is a VEPA.
3475 * In all other cases, the LAN enable has to be set to false.
3478 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3479 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
3480 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3481 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3482 fi->lkup_type == ICE_SW_LKUP_DFLT ||
3483 fi->lkup_type == ICE_SW_LKUP_VLAN ||
3484 (fi->lkup_type == ICE_SW_LKUP_MAC &&
3485 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
3486 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
3487 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
3496 * ice_fill_sw_rule - Helper function to fill switch rule structure
3497 * @hw: pointer to the hardware structure
3498 * @f_info: entry containing packet forwarding information
3499 * @s_rule: switch rule structure to be filled in based on mac_entry
3500 * @opc: switch rules population command type - pass in the command opcode
/* Build (or clear) the lookup Tx/Rx portion of a switch rule element from
 * the given filter info.  For a remove opcode only the rule index is
 * required; otherwise the dummy Ethernet header is populated and the
 * action/recipe/src fields are derived from f_info.
 * NOTE(review): several physical lines (local declarations of act/daddr/
 * eth_hdr/off/q_rgn, case 'break's and closing braces) appear elided in
 * this extract; comments below describe only the visible code.
 */
3503 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
3504 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
/* Sentinel: any value above ICE_MAX_VLAN_ID means "no VLAN to program"
 * (tested before the VLAN TCI is written near the end).
 */
3506 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
/* Default TPID is 802.1Q; may be overridden per filter below */
3507 u16 vlan_tpid = ICE_ETH_P_8021Q;
/* Removal needs only the rule index; action and hdr_len are zeroed */
3515 if (opc == ice_aqc_opc_remove_sw_rules) {
3516 s_rule->pdata.lkup_tx_rx.act = 0;
3517 s_rule->pdata.lkup_tx_rx.index =
3518 CPU_TO_LE16(f_info->fltr_rule_id);
3519 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
/* Start from the shared dummy L2 header and patch fields in below */
3523 eth_hdr_sz = sizeof(dummy_eth_header);
3524 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
3526 /* initialize the ether header with a dummy header */
3527 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
3528 ice_fill_sw_info(hw, f_info);
/* Translate the forwarding action into single-action bit fields */
3530 switch (f_info->fltr_act) {
3531 case ICE_FWD_TO_VSI:
3532 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
3533 ICE_SINGLE_ACT_VSI_ID_M;
/* VLAN lookups do not set the forwarding/valid bits here */
3534 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
3535 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3536 ICE_SINGLE_ACT_VALID_BIT;
3538 case ICE_FWD_TO_VSI_LIST:
3539 act |= ICE_SINGLE_ACT_VSI_LIST;
3540 act |= (f_info->fwd_id.vsi_list_id <<
3541 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
3542 ICE_SINGLE_ACT_VSI_LIST_ID_M;
3543 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
3544 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3545 ICE_SINGLE_ACT_VALID_BIT;
/* forward-to-queue: program the absolute queue index */
3548 act |= ICE_SINGLE_ACT_TO_Q;
3549 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3550 ICE_SINGLE_ACT_Q_INDEX_M;
3552 case ICE_DROP_PACKET:
3553 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
3554 ICE_SINGLE_ACT_VALID_BIT;
/* queue group: base queue index plus log2(group size) as region */
3556 case ICE_FWD_TO_QGRP:
3557 q_rgn = f_info->qgrp_size > 0 ?
3558 (u8)ice_ilog2(f_info->qgrp_size) : 0;
3559 act |= ICE_SINGLE_ACT_TO_Q;
3560 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3561 ICE_SINGLE_ACT_Q_INDEX_M;
3562 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
3563 ICE_SINGLE_ACT_Q_REGION_M;
/* NOTE(review): the conditions guarding LB/LAN enable are elided in
 * this extract -- presumably driven by f_info->lb_en / lan_en; confirm
 * against the full source.
 */
3570 act |= ICE_SINGLE_ACT_LB_ENABLE;
3572 act |= ICE_SINGLE_ACT_LAN_ENABLE;
/* Per-lookup-type data: destination MAC, VLAN ID/TPID, ethertype */
3574 switch (f_info->lkup_type) {
3575 case ICE_SW_LKUP_MAC:
3576 daddr = f_info->l_data.mac.mac_addr;
3578 case ICE_SW_LKUP_VLAN:
3579 vlan_id = f_info->l_data.vlan.vlan_id;
3580 if (f_info->l_data.vlan.tpid_valid)
3581 vlan_tpid = f_info->l_data.vlan.tpid;
/* VSI-directed VLAN rules double as egress/ingress prune entries */
3582 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
3583 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
3584 act |= ICE_SINGLE_ACT_PRUNE;
3585 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
3588 case ICE_SW_LKUP_ETHERTYPE_MAC:
3589 daddr = f_info->l_data.ethertype_mac.mac_addr;
/* fallthrough ('break' elided here): ETHERTYPE(+MAC) both write the
 * ethertype at offset 12 of the dummy header
 */
3591 case ICE_SW_LKUP_ETHERTYPE:
3592 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
3593 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
3595 case ICE_SW_LKUP_MAC_VLAN:
3596 daddr = f_info->l_data.mac_vlan.mac_addr;
3597 vlan_id = f_info->l_data.mac_vlan.vlan_id;
3599 case ICE_SW_LKUP_PROMISC_VLAN:
3600 vlan_id = f_info->l_data.mac_vlan.vlan_id;
3602 case ICE_SW_LKUP_PROMISC:
3603 daddr = f_info->l_data.mac_vlan.mac_addr;
/* Direction flag selects the Rx vs Tx lookup rule type */
3609 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
3610 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
3611 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
3613 /* Recipe set depending on lookup type */
3614 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
3615 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
3616 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* Patch the destination MAC captured above into the dummy header */
3619 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
3620 ICE_NONDMA_TO_NONDMA);
/* A valid VLAN ID was captured above: program both TCI and TPID */
3622 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
3623 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
3624 *off = CPU_TO_BE16(vlan_id);
3625 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
3626 *off = CPU_TO_BE16(vlan_tpid);
3629 /* Create the switch rule with the final dummy Ethernet header */
3630 if (opc != ice_aqc_opc_update_sw_rules)
3631 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
3635 * ice_add_marker_act
3636 * @hw: pointer to the hardware structure
3637 * @m_ent: the management entry for which sw marker needs to be added
3638 * @sw_marker: sw marker to tag the Rx descriptor with
3639 * @l_id: large action resource ID
3641 * Create a large action to hold software marker and update the switch rule
3642 * entry pointed by m_ent with newly created large action
/* Attach a software-marker large action to an existing MAC rule.
 * Builds one large-action element plus an updated lookup Tx/Rx element in
 * a single buffer and submits both with one update_sw_rules AQ call.
 * NOTE(review): some lines (declarations of act/id/lg_act_size/rules_size,
 * null check after ice_malloc, final return) are elided in this extract.
 */
3644 static enum ice_status
3645 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3646 u16 sw_marker, u16 l_id)
3648 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
3649 /* For software marker we need 3 large actions
3650 * 1. FWD action: FWD TO VSI or VSI LIST
3651 * 2. GENERIC VALUE action to hold the profile ID
3652 * 3. GENERIC VALUE action to hold the software marker ID
3654 const u16 num_lg_acts = 3;
3655 enum ice_status status;
/* Markers are only supported on plain MAC lookup rules */
3661 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3662 return ICE_ERR_PARAM;
3664 /* Create two back-to-back switch rules and submit them to the HW using
3665 * one memory buffer:
3669 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
3670 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3671 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3673 return ICE_ERR_NO_MEMORY;
/* The lookup rule element lives right after the large action element */
3675 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3677 /* Fill in the first switch rule i.e. large action */
3678 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3679 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3680 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
3682 /* First action VSI forwarding or VSI list forwarding depending on how
/* vsi_count > 1 implies the rule already forwards to a VSI list */
3685 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
3686 m_ent->fltr_info.fwd_id.hw_vsi_id;
3688 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3689 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
3690 if (m_ent->vsi_count > 1)
3691 act |= ICE_LG_ACT_VSI_LIST;
3692 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3694 /* Second action descriptor type */
3695 act = ICE_LG_ACT_GENERIC;
3697 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
3698 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
/* Place the marker at the Rx-descriptor profile-index offset */
3700 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
3701 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
3703 /* Third action Marker value */
3704 act |= ICE_LG_ACT_GENERIC;
3705 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
3706 ICE_LG_ACT_GENERIC_VALUE_M;
3708 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
3710 /* call the fill switch rule to fill the lookup Tx Rx structure */
3711 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3712 ice_aqc_opc_update_sw_rules);
3714 /* Update the action to point to the large action ID */
3715 rx_tx->pdata.lkup_tx_rx.act =
3716 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
3717 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
3718 ICE_SINGLE_ACT_PTR_VAL_M));
3720 /* Use the filter rule ID of the previously created rule with single
3721 * act. Once the update happens, hardware will treat this as large
3724 rx_tx->pdata.lkup_tx_rx.index =
3725 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
/* Two rules in one buffer: large action + updated lookup rule */
3727 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3728 ice_aqc_opc_update_sw_rules, NULL);
/* Record the marker/large-action bookkeeping on success path */
3730 m_ent->lg_act_idx = l_id;
3731 m_ent->sw_marker_id = sw_marker;
3734 ice_free(hw, lg_act);
3739 * ice_add_counter_act - add/update filter rule with counter action
3740 * @hw: pointer to the hardware structure
3741 * @m_ent: the management entry for which counter needs to be added
3742 * @counter_id: VLAN counter ID returned as part of allocate resource
3743 * @l_id: large action resource ID
/* Attach a statistics-counter large action to an existing MAC rule.
 * Mirrors ice_add_marker_act but with two large actions (FWD + counter).
 * NOTE(review): declarations of act/id/f_rule_id/lg_act_size/rules_size,
 * the malloc null check and final return are elided in this extract.
 */
3745 static enum ice_status
3746 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3747 u16 counter_id, u16 l_id)
3749 struct ice_aqc_sw_rules_elem *lg_act;
3750 struct ice_aqc_sw_rules_elem *rx_tx;
3751 enum ice_status status;
3752 /* 2 actions will be added while adding a large action counter */
3753 const int num_acts = 2;
/* Counters are only supported on plain MAC lookup rules */
3760 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3761 return ICE_ERR_PARAM;
3763 /* Create two back-to-back switch rules and submit them to the HW using
3764 * one memory buffer:
3768 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
3769 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3770 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3772 return ICE_ERR_NO_MEMORY;
/* The lookup rule element lives right after the large action element */
3774 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3776 /* Fill in the first switch rule i.e. large action */
3777 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3778 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3779 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
3781 /* First action VSI forwarding or VSI list forwarding depending on how
/* vsi_count > 1 implies the rule already forwards to a VSI list */
3784 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
3785 m_ent->fltr_info.fwd_id.hw_vsi_id;
3787 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3788 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
3789 ICE_LG_ACT_VSI_LIST_ID_M;
3790 if (m_ent->vsi_count > 1)
3791 act |= ICE_LG_ACT_VSI_LIST;
3792 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3794 /* Second action counter ID */
3795 act = ICE_LG_ACT_STAT_COUNT;
3796 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
3797 ICE_LG_ACT_STAT_COUNT_M;
3798 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3800 /* call the fill switch rule to fill the lookup Tx Rx structure */
3801 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3802 ice_aqc_opc_update_sw_rules);
/* Point the lookup rule at the newly built large action */
3804 act = ICE_SINGLE_ACT_PTR;
3805 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
3806 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
3808 /* Use the filter rule ID of the previously created rule with single
3809 * act. Once the update happens, hardware will treat this as large
3812 f_rule_id = m_ent->fltr_info.fltr_rule_id;
3813 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
/* Two rules in one buffer: large action + updated lookup rule */
3815 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3816 ice_aqc_opc_update_sw_rules, NULL);
/* Record the counter/large-action bookkeeping on success path */
3818 m_ent->lg_act_idx = l_id;
3819 m_ent->counter_index = counter_id;
3822 ice_free(hw, lg_act);
3827 * ice_create_vsi_list_map
3828 * @hw: pointer to the hardware structure
3829 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
3830 * @num_vsi: number of VSI handles in the array
3831 * @vsi_list_id: VSI list ID generated as part of allocate resource
3833 * Helper function to create a new entry of VSI list ID to VSI mapping
3834 * using the given VSI list ID
/* Allocate a VSI-list-ID -> VSI-handle-bitmap tracking entry and link it
 * into the switch's vsi_list_map_head list.
 * NOTE(review): the vsi_list_id parameter line, the malloc null check and
 * the return statement are elided in this extract.
 */
3836 static struct ice_vsi_list_map_info *
3837 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3840 struct ice_switch_info *sw = hw->switch_info;
3841 struct ice_vsi_list_map_info *v_map;
3844 v_map = (struct ice_vsi_list_map_info *)ice_malloc(hw, sizeof(*v_map));
3848 v_map->vsi_list_id = vsi_list_id;
/* Record each member VSI handle in the map's bitmap */
3850 for (i = 0; i < num_vsi; i++)
3851 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
3853 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
3858 * ice_update_vsi_list_rule
3859 * @hw: pointer to the hardware structure
3860 * @vsi_handle_arr: array of VSI handles to form a VSI list
3861 * @num_vsi: number of VSI handles in the array
3862 * @vsi_list_id: VSI list ID generated as part of allocate resource
3863 * @remove: Boolean value to indicate if this is a remove action
3864 * @opc: switch rules population command type - pass in the command opcode
3865 * @lkup_type: lookup type of the filter
3867 * Call AQ command to add a new switch rule or update existing switch rule
3868 * using the given VSI list ID
/* Program (set/clear) the membership of an existing VSI list or prune
 * list via a single sw_rules AQ call.  VLAN lookups use the prune-list
 * rule types; all other supported lookups use the vsi-list rule types.
 * NOTE(review): the num_vsi==0 guard, malloc null check, 'goto exit' on
 * invalid handle, and the final return are elided in this extract.
 */
3870 static enum ice_status
3871 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3872 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
3873 enum ice_sw_lkup_type lkup_type)
3875 struct ice_aqc_sw_rules_elem *s_rule;
3876 enum ice_status status;
3882 return ICE_ERR_PARAM;
/* Map lookup type to the AQ rule type; 'remove' selects CLEAR vs SET */
3884 if (lkup_type == ICE_SW_LKUP_MAC ||
3885 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
3886 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3887 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3888 lkup_type == ICE_SW_LKUP_PROMISC ||
3889 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3890 lkup_type == ICE_SW_LKUP_LAST)
3891 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
3892 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
3893 else if (lkup_type == ICE_SW_LKUP_VLAN)
3894 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
3895 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
3897 return ICE_ERR_PARAM;
/* Element size scales with the number of VSIs in the list */
3899 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
3900 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3902 return ICE_ERR_NO_MEMORY;
3903 for (i = 0; i < num_vsi; i++) {
3904 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
3905 status = ICE_ERR_PARAM;
3908 /* AQ call requires hw_vsi_id(s) */
3909 s_rule->pdata.vsi_list.vsi[i] =
3910 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
3913 s_rule->type = CPU_TO_LE16(rule_type);
3914 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
3915 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
3917 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
3920 ice_free(hw, s_rule);
3925 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
3926 * @hw: pointer to the HW struct
3927 * @vsi_handle_arr: array of VSI handles to form a VSI list
3928 * @num_vsi: number of VSI handles in the array
3929 * @vsi_list_id: stores the ID of the VSI list to be created
3930 * @lkup_type: switch rule filter's lookup type
3932 static enum ice_status
3933 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3934 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
3936 enum ice_status status;
3938 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
3939 ice_aqc_opc_alloc_res);
3943 /* Update the newly created VSI list to include the specified VSIs */
3944 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
3945 *vsi_list_id, false,
3946 ice_aqc_opc_add_sw_rules, lkup_type);
3950 * ice_create_pkt_fwd_rule
3951 * @hw: pointer to the hardware structure
3952 * @recp_list: corresponding filter management list
3953 * @f_entry: entry containing packet forwarding information
3955 * Create switch rule with given filter information and add an entry
3956 * to the corresponding filter management list to track this switch rule
/* Create a new packet-forwarding switch rule in HW and a matching
 * management (bookkeeping) entry on the recipe's filter list.
 * NOTE(review): the malloc null checks, the AQ status check before the
 * free of fm_entry, and the final return are elided in this extract.
 */
3959 static enum ice_status
3960 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3961 struct ice_fltr_list_entry *f_entry)
3963 struct ice_fltr_mgmt_list_entry *fm_entry;
3964 struct ice_aqc_sw_rules_elem *s_rule;
3965 enum ice_status status;
3967 s_rule = (struct ice_aqc_sw_rules_elem *)
3968 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
3970 return ICE_ERR_NO_MEMORY;
3971 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3972 ice_malloc(hw, sizeof(*fm_entry))p;
3974 status = ICE_ERR_NO_MEMORY;
3975 goto ice_create_pkt_fwd_rule_exit;
/* Copy caller's filter info into the management entry */
3978 fm_entry->fltr_info = f_entry->fltr_info;
3980 /* Initialize all the fields for the management entry */
3981 fm_entry->vsi_count = 1;
3982 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
3983 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
3984 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
3986 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
3987 ice_aqc_opc_add_sw_rules);
3989 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3990 ice_aqc_opc_add_sw_rules, NULL);
/* On AQ failure the management entry is discarded */
3992 ice_free(hw, fm_entry);
3993 goto ice_create_pkt_fwd_rule_exit;
/* HW returned the rule index; record it in both caller's entry and
 * the bookkeeping entry
 */
3996 f_entry->fltr_info.fltr_rule_id =
3997 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3998 fm_entry->fltr_info.fltr_rule_id =
3999 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4001 /* The book keeping entries will get removed when base driver
4002 * calls remove filter AQ command
4004 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
4006 ice_create_pkt_fwd_rule_exit:
4007 ice_free(hw, s_rule);
4012 * ice_update_pkt_fwd_rule
4013 * @hw: pointer to the hardware structure
4014 * @f_info: filter information for switch rule
4016 * Call AQ command to update a previously created switch rule with a
4019 static enum ice_status
4020 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
4022 struct ice_aqc_sw_rules_elem *s_rule;
4023 enum ice_status status;
4025 s_rule = (struct ice_aqc_sw_rules_elem *)
4026 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
4028 return ICE_ERR_NO_MEMORY;
4030 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
4032 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
4034 /* Update switch rule with new rule set to forward VSI list */
4035 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
4036 ice_aqc_opc_update_sw_rules, NULL);
4038 ice_free(hw, s_rule);
4043 * ice_update_sw_rule_bridge_mode
4044 * @hw: pointer to the HW struct
4046 * Updates unicast switch filter rules based on VEB/VEPA mode
/* Re-program every unicast Tx MAC forwarding rule so it reflects the
 * current bridge (VEB/VEPA) mode; holds the MAC recipe's rule lock while
 * walking the list.
 * NOTE(review): the loop's 'list_entry' macro argument, the early-break
 * on status, closing braces and final return are elided in this extract.
 */
4048 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
4050 struct ice_switch_info *sw = hw->switch_info;
4051 struct ice_fltr_mgmt_list_entry *fm_entry;
4052 enum ice_status status = ICE_SUCCESS;
4053 struct LIST_HEAD_TYPE *rule_head;
4054 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
/* Only the plain MAC lookup recipe is affected by bridge mode */
4056 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4057 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
4059 ice_acquire_lock(rule_lock);
4060 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
4062 struct ice_fltr_info *fi = &fm_entry->fltr_info;
4063 u8 *addr = fi->l_data.mac.mac_addr;
4065 /* Update unicast Tx rules to reflect the selected
/* Only Tx-direction unicast rules with a forwarding action need the
 * in-place HW update
 */
4068 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
4069 (fi->fltr_act == ICE_FWD_TO_VSI ||
4070 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
4071 fi->fltr_act == ICE_FWD_TO_Q ||
4072 fi->fltr_act == ICE_FWD_TO_QGRP)) {
4073 status = ice_update_pkt_fwd_rule(hw, fi);
4079 ice_release_lock(rule_lock);
4085 * ice_add_update_vsi_list
4086 * @hw: pointer to the hardware structure
4087 * @m_entry: pointer to current filter management list entry
4088 * @cur_fltr: filter information from the book keeping entry
4089 * @new_fltr: filter information with the new VSI to be added
4091 * Call AQ command to add or update previously created VSI list with new VSI.
4093 * Helper function to do book keeping associated with adding filter information
4094 * The algorithm to do the book keeping is described below :
4095 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
4096 * if only one VSI has been added till now
4097 * Allocate a new VSI list and add two VSIs
4098 * to this list using switch rule command
4099 * Update the previously created switch rule with the
4100 * newly created VSI list ID
4101 * if a VSI list was previously created
4102 * Add the new VSI to the previously created VSI list set
4103 * using the update switch rule command
/* Subscribe an additional VSI to an existing filter: either convert a
 * single-VSI rule into a VSI-list rule (first branch) or append the VSI
 * to the already-existing list (second branch).
 * NOTE(review): several status checks, 'else' keywords, closing braces
 * and the final return are elided in this extract.
 */
4105 static enum ice_status
4106 ice_add_update_vsi_list(struct ice_hw *hw,
4107 struct ice_fltr_mgmt_list_entry *m_entry,
4108 struct ice_fltr_info *cur_fltr,
4109 struct ice_fltr_info *new_fltr)
4111 enum ice_status status = ICE_SUCCESS;
4112 u16 vsi_list_id = 0;
/* Queue/queue-group destinations cannot be aggregated into VSI lists */
4114 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
4115 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
4116 return ICE_ERR_NOT_IMPL;
4118 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
4119 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
4120 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
4121 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
4122 return ICE_ERR_NOT_IMPL;
4124 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
4125 /* Only one entry existed in the mapping and it was not already
4126 * a part of a VSI list. So, create a VSI list with the old and
4129 struct ice_fltr_info tmp_fltr;
4130 u16 vsi_handle_arr[2];
4132 /* A rule already exists with the new VSI being added */
4133 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
4134 return ICE_ERR_ALREADY_EXISTS;
4136 vsi_handle_arr[0] = cur_fltr->vsi_handle;
4137 vsi_handle_arr[1] = new_fltr->vsi_handle;
4138 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
4140 new_fltr->lkup_type);
/* Re-point the existing HW rule at the new VSI list */
4144 tmp_fltr = *new_fltr;
4145 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
4146 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4147 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
4148 /* Update the previous switch rule of "MAC forward to VSI" to
4149 * "MAC fwd to VSI list"
4151 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Mirror the conversion in the bookkeeping entry */
4155 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
4156 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
4157 m_entry->vsi_list_info =
4158 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4161 if (!m_entry->vsi_list_info)
4162 return ICE_ERR_NO_MEMORY;
4164 /* If this entry was large action then the large action needs
4165 * to be updated to point to FWD to VSI list
4167 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
4169 ice_add_marker_act(hw, m_entry,
4170 m_entry->sw_marker_id,
4171 m_entry->lg_act_idx);
/* Existing VSI list: just add the new VSI to it */
4173 u16 vsi_handle = new_fltr->vsi_handle;
4174 enum ice_adminq_opc opcode;
4176 if (!m_entry->vsi_list_info)
4179 /* A rule already exists with the new VSI being added */
4180 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
4183 /* Update the previously created VSI list set with
4184 * the new VSI ID passed in
4186 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
4187 opcode = ice_aqc_opc_update_sw_rules;
4189 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
4190 vsi_list_id, false, opcode,
4191 new_fltr->lkup_type);
4192 /* update VSI list mapping info with new VSI ID */
4194 ice_set_bit(vsi_handle,
4195 m_entry->vsi_list_info->vsi_map);
/* Count the newly subscribed VSI on success path */
4198 m_entry->vsi_count++;
4203 * ice_find_rule_entry - Search a rule entry
4204 * @list_head: head of rule list
4205 * @f_info: rule information
4207 * Helper function to search for a given rule entry
4208 * Returns pointer to entry storing the rule if found
4210 static struct ice_fltr_mgmt_list_entry *
4211 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
4212 struct ice_fltr_info *f_info)
4214 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
4216 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4218 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4219 sizeof(f_info->l_data)) &&
4220 f_info->flag == list_itr->fltr_info.flag) {
4229 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
4230 * @recp_list: VSI lists needs to be searched
4231 * @vsi_handle: VSI handle to be found in VSI list
4232 * @vsi_list_id: VSI list ID found containing vsi_handle
4234 * Helper function to search a VSI list with single entry containing given VSI
4235 * handle element. This can be extended further to search VSI list with more
4236 * than 1 vsi_count. Returns pointer to VSI list entry if found.
/* Locate a VSI list map that contains the given VSI handle, searching
 * either the advanced-rule list or the basic filter list depending on the
 * recipe.  On a hit, the list ID is returned through vsi_list_id.
 * NOTE(review): the vsi_list_id parameter line, loop macro arguments,
 * 'goto'/'break' statements, the 'else' and the return are elided in
 * this extract.
 */
4238 static struct ice_vsi_list_map_info *
4239 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
4242 struct ice_vsi_list_map_info *map_info = NULL;
4243 struct LIST_HEAD_TYPE *list_head;
4245 list_head = &recp_list->filt_rules;
/* Advanced recipes keep their entries in adv-fltr list nodes */
4246 if (recp_list->adv_rule) {
4247 struct ice_adv_fltr_mgmt_list_entry *list_itr;
4249 LIST_FOR_EACH_ENTRY(list_itr, list_head,
4250 ice_adv_fltr_mgmt_list_entry,
4252 if (list_itr->vsi_list_info) {
4253 map_info = list_itr->vsi_list_info;
4254 if (ice_is_bit_set(map_info->vsi_map,
4256 *vsi_list_id = map_info->vsi_list_id;
/* Basic recipes: only consider maps referenced by single-VSI rules */
4262 struct ice_fltr_mgmt_list_entry *list_itr;
4264 LIST_FOR_EACH_ENTRY(list_itr, list_head,
4265 ice_fltr_mgmt_list_entry,
4267 if (list_itr->vsi_count == 1 &&
4268 list_itr->vsi_list_info) {
4269 map_info = list_itr->vsi_list_info;
4270 if (ice_is_bit_set(map_info->vsi_map,
4272 *vsi_list_id = map_info->vsi_list_id;
4282 * ice_add_rule_internal - add rule for a given lookup type
4283 * @hw: pointer to the hardware structure
4284 * @recp_list: recipe list for which rule has to be added
4285 * @lport: logic port number on which function add rule
4286 * @f_entry: structure containing MAC forwarding information
4288 * Adds or updates the rule lists for a given recipe
/* Add a filter rule for the given recipe: create a brand-new HW rule when
 * no matching entry exists, otherwise fold the new VSI into the existing
 * rule's VSI list.  The recipe's rule lock is held across the list lookup
 * and update.
 * NOTE(review): the '!m_entry' check before creating a new rule, the Tx
 * 'src' assignment target, and the final return are elided in this
 * extract.
 */
4290 static enum ice_status
4291 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4292 u8 lport, struct ice_fltr_list_entry *f_entry)
4294 struct ice_fltr_info *new_fltr, *cur_fltr;
4295 struct ice_fltr_mgmt_list_entry *m_entry;
4296 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4297 enum ice_status status = ICE_SUCCESS;
4299 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4300 return ICE_ERR_PARAM;
4302 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
4303 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
4304 f_entry->fltr_info.fwd_id.hw_vsi_id =
4305 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4307 rule_lock = &recp_list->filt_rule_lock;
4309 ice_acquire_lock(rule_lock);
4310 new_fltr = &f_entry->fltr_info;
/* Rx rules are sourced from the port; Tx rules from the HW VSI number */
4311 if (new_fltr->flag & ICE_FLTR_RX)
4312 new_fltr->src = lport;
4313 else if (new_fltr->flag & ICE_FLTR_TX)
4315 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4317 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
/* No existing rule: program a fresh packet-forwarding rule */
4319 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
4320 goto exit_add_rule_internal;
/* Existing rule: merge the new VSI into its VSI list */
4323 cur_fltr = &m_entry->fltr_info;
4324 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
4326 exit_add_rule_internal:
4327 ice_release_lock(rule_lock);
4332 * ice_remove_vsi_list_rule
4333 * @hw: pointer to the hardware structure
4334 * @vsi_list_id: VSI list ID generated as part of allocate resource
4335 * @lkup_type: switch rule filter lookup type
4337 * The VSI list should be emptied before this function is called to remove the
4340 static enum ice_status
4341 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
4342 enum ice_sw_lkup_type lkup_type)
4344 /* Free the vsi_list resource that we allocated. It is assumed that the
4345 * list is empty at this point.
4347 return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
4348 ice_aqc_opc_free_res);
4352 * ice_rem_update_vsi_list
4353 * @hw: pointer to the hardware structure
4354 * @vsi_handle: VSI handle of the VSI to remove
4355 * @fm_list: filter management entry for which the VSI list management needs to
/* Unsubscribe one VSI from a VSI-list filter: clear it from the HW list,
 * and when the list shrinks to a single member (non-VLAN) convert the
 * rule back to plain FWD_TO_VSI, freeing the list resource.
 * NOTE(review): declarations of vsi_list_id/rem_vsi_handle, several
 * status checks, closing braces and the final return are elided in this
 * extract.
 */
4358 static enum ice_status
4359 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
4360 struct ice_fltr_mgmt_list_entry *fm_list)
4362 enum ice_sw_lkup_type lkup_type;
4363 enum ice_status status = ICE_SUCCESS;
4366 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
4367 fm_list->vsi_count == 0)
4368 return ICE_ERR_PARAM;
4370 /* A rule with the VSI being removed does not exist */
4371 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
4372 return ICE_ERR_DOES_NOT_EXIST;
4374 lkup_type = fm_list->fltr_info.lkup_type;
4375 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
/* Clear this VSI from the HW list (remove=true) */
4376 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
4377 ice_aqc_opc_update_sw_rules,
/* Keep the bookkeeping bitmap/count in sync with HW */
4382 fm_list->vsi_count--;
4383 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
/* One member left (non-VLAN): collapse back to a direct-VSI rule */
4385 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
4386 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
4387 struct ice_vsi_list_map_info *vsi_list_info =
4388 fm_list->vsi_list_info;
/* The sole remaining member is the first set bit in the map */
4391 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
4393 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
4394 return ICE_ERR_OUT_OF_RANGE;
4396 /* Make sure VSI list is empty before removing it below */
4397 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
4399 ice_aqc_opc_update_sw_rules,
/* Re-point the HW rule at the remaining VSI directly */
4404 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
4405 tmp_fltr_info.fwd_id.hw_vsi_id =
4406 ice_get_hw_vsi_num(hw, rem_vsi_handle);
4407 tmp_fltr_info.vsi_handle = rem_vsi_handle;
4408 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
4410 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
4411 tmp_fltr_info.fwd_id.hw_vsi_id, status);
4415 fm_list->fltr_info = tmp_fltr_info;
/* List no longer needed: one member (non-VLAN) or empty (VLAN) */
4418 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
4419 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
4420 struct ice_vsi_list_map_info *vsi_list_info =
4421 fm_list->vsi_list_info;
4423 /* Remove the VSI list since it is no longer used */
4424 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
4426 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
4427 vsi_list_id, status);
/* Drop and free the tracking map entry */
4431 LIST_DEL(&vsi_list_info->list_entry);
4432 ice_free(hw, vsi_list_info);
4433 fm_list->vsi_list_info = NULL;
4440 * ice_remove_rule_internal - Remove a filter rule of a given type
4442 * @hw: pointer to the hardware structure
4443 * @recp_list: recipe list for which the rule needs to removed
4444 * @f_entry: rule entry containing filter information
/* Remove a filter rule: update/shrink its VSI list first (if it has one),
 * and delete the HW lookup rule plus the bookkeeping entry once no VSI
 * references remain.  The recipe's rule lock is held throughout.
 * NOTE(review): 'remove_rule = true' assignments, several goto targets,
 * status checks, the exit label and final return are elided in this
 * extract.
 */
4446 static enum ice_status
4447 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4448 struct ice_fltr_list_entry *f_entry)
4450 struct ice_fltr_mgmt_list_entry *list_elem;
4451 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4452 enum ice_status status = ICE_SUCCESS;
4453 bool remove_rule = false;
4456 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4457 return ICE_ERR_PARAM;
4458 f_entry->fltr_info.fwd_id.hw_vsi_id =
4459 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4461 rule_lock = &recp_list->filt_rule_lock;
4462 ice_acquire_lock(rule_lock);
4463 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
4464 &f_entry->fltr_info);
4466 status = ICE_ERR_DOES_NOT_EXIST;
/* Non-list rule: can be removed outright */
4470 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
4472 } else if (!list_elem->vsi_list_info) {
4473 status = ICE_ERR_DOES_NOT_EXIST;
4475 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
4476 /* a ref_cnt > 1 indicates that the vsi_list is being
4477 * shared by multiple rules. Decrement the ref_cnt and
4478 * remove this rule, but do not modify the list, as it
4479 * is in-use by other rules.
4481 list_elem->vsi_list_info->ref_cnt--;
4484 /* a ref_cnt of 1 indicates the vsi_list is only used
4485 * by one rule. However, the original removal request is only
4486 * for a single VSI. Update the vsi_list first, and only
4487 * remove the rule if there are no further VSIs in this list.
4489 vsi_handle = f_entry->fltr_info.vsi_handle;
4490 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
4493 /* if VSI count goes to zero after updating the VSI list */
4494 if (list_elem->vsi_count == 0)
4499 /* Remove the lookup rule */
4500 struct ice_aqc_sw_rules_elem *s_rule;
/* No-header size is enough: removal only carries the rule index */
4502 s_rule = (struct ice_aqc_sw_rules_elem *)
4503 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
4505 status = ICE_ERR_NO_MEMORY;
4509 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
4510 ice_aqc_opc_remove_sw_rules);
4512 status = ice_aq_sw_rules(hw, s_rule,
4513 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
4514 ice_aqc_opc_remove_sw_rules, NULL);
4516 /* Remove a book keeping from the list */
4517 ice_free(hw, s_rule);
/* Unlink and free the management entry once HW removal is issued */
4522 LIST_DEL(&list_elem->list_entry);
4523 ice_free(hw, list_elem);
4526 ice_release_lock(rule_lock);
4531 * ice_aq_get_res_alloc - get allocated resources
4532 * @hw: pointer to the HW struct
4533 * @num_entries: pointer to u16 to store the number of resource entries returned
4534 * @buf: pointer to buffer
4535 * @buf_size: size of buf
4536 * @cd: pointer to command details structure or NULL
4538 * The caller-supplied buffer must be large enough to store the resource
4539 * information for all resource types. Each resource type is an
4540 * ice_aqc_get_res_resp_elem structure.
4543 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
4544 struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
4545 struct ice_sq_cd *cd)
4547 struct ice_aqc_get_res_alloc *resp;
4548 enum ice_status status;
4549 struct ice_aq_desc desc;
4552 return ICE_ERR_BAD_PTR;
4554 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
4555 return ICE_ERR_INVAL_SIZE;
4557 resp = &desc.params.get_res;
4559 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
4560 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4562 if (!status && num_entries)
4563 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
4569 * ice_aq_get_res_descs - get allocated resource descriptors
4570 * @hw: pointer to the hardware structure
4571 * @num_entries: number of resource entries in buffer
4572 * @buf: structure to hold response data buffer
4573 * @buf_size: size of buffer
4574 * @res_type: resource type
4575 * @res_shared: is resource shared
4576 * @desc_id: input - first desc ID to start; output - next desc ID
4577 * @cd: pointer to command details structure or NULL
/* Fetch descriptors of already-allocated resources of one type from
 * firmware (AQ opcode get_allocd_res_desc), starting at *desc_id and
 * advancing it to the next descriptor for pagination.
 * NOTE(review): the return-type line, the '!buf' guard condition and the
 * final return of status are elided in this extract.
 */
4580 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
4581 struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
4582 bool res_shared, u16 *desc_id, struct ice_sq_cd *cd)
4584 struct ice_aqc_get_allocd_res_desc *cmd;
4585 struct ice_aq_desc desc;
4586 enum ice_status status;
4588 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4590 cmd = &desc.params.get_res_desc;
4593 return ICE_ERR_PARAM;
/* Buffer must be exactly num_entries elements */
4595 if (buf_size != (num_entries * sizeof(*buf)))
4596 return ICE_ERR_PARAM;
4598 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
/* Encode resource type plus the shared/dedicated flag */
4600 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
4601 ICE_AQC_RES_TYPE_M) | (res_shared ?
4602 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
4603 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
4605 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* Hand back firmware's continuation cursor */
4607 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
4613 * ice_add_mac_rule - Add a MAC address based filter rule
4614 * @hw: pointer to the hardware structure
4615 * @m_list: list of MAC addresses and forwarding information
4616 * @sw: pointer to switch info struct for which function add rule
4617 * @lport: logic port number on which function add rule
4619 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
4620 * multiple unicast addresses, the function assumes that all the
4621 * addresses are unique in a given add_mac call. It doesn't
4622 * check for duplicates in this case, removing duplicates from a given
4623 * list should be taken care of in the caller of this function.
4625 static enum ice_status
4626 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4627 struct ice_switch_info *sw, u8 lport)
4629 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
4630 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
4631 struct ice_fltr_list_entry *m_list_itr;
4632 struct LIST_HEAD_TYPE *rule_head;
4633 u16 total_elem_left, s_rule_size;
4634 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4635 enum ice_status status = ICE_SUCCESS;
4636 u16 num_unicast = 0;
4640 rule_lock = &recp_list->filt_rule_lock;
4641 rule_head = &recp_list->filt_rules;
/* Pass 1: validate every entry; multicast (and shared unicast) entries
 * are added one-by-one here, exclusive unicast entries are only counted
 * and then programmed in bulk below.
 */
4643 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4645 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
4649 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
4650 vsi_handle = m_list_itr->fltr_info.vsi_handle;
4651 if (!ice_is_vsi_valid(hw, vsi_handle))
4652 return ICE_ERR_PARAM;
4653 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4654 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
4655 /* update the src in case it is VSI num */
4656 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
4657 return ICE_ERR_PARAM;
4658 m_list_itr->fltr_info.src = hw_vsi_id;
4659 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
4660 IS_ZERO_ETHER_ADDR(add))
4661 return ICE_ERR_PARAM;
4662 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4663 /* Don't overwrite the unicast address */
4664 ice_acquire_lock(rule_lock);
4665 if (ice_find_rule_entry(rule_head,
4666 &m_list_itr->fltr_info)) {
4667 ice_release_lock(rule_lock);
4668 return ICE_ERR_ALREADY_EXISTS;
4670 ice_release_lock(rule_lock);
4672 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
4673 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
4674 m_list_itr->status =
4675 ice_add_rule_internal(hw, recp_list, lport,
4677 if (m_list_itr->status)
4678 return m_list_itr->status;
/* rule_lock is held from here until ice_add_mac_exit */
4682 ice_acquire_lock(rule_lock);
4683 /* Exit if no suitable entries were found for adding bulk switch rule */
4685 status = ICE_SUCCESS;
4686 goto ice_add_mac_exit;
4689 /* Allocate switch rule buffer for the bulk update for unicast */
4690 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
4691 s_rule = (struct ice_aqc_sw_rules_elem *)
4692 ice_calloc(hw, num_unicast, s_rule_size);
4694 status = ICE_ERR_NO_MEMORY;
4695 goto ice_add_mac_exit;
/* Pass 2: serialize one switch rule per unicast entry into the buffer */
4699 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4701 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4702 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4704 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4705 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
4706 ice_aqc_opc_add_sw_rules);
4707 r_iter = (struct ice_aqc_sw_rules_elem *)
4708 ((u8 *)r_iter + s_rule_size);
4712 /* Call AQ bulk switch rule update for all unicast addresses */
4714 /* Call AQ switch rule in AQ_MAX chunk */
4715 for (total_elem_left = num_unicast; total_elem_left > 0;
4716 total_elem_left -= elem_sent) {
4717 struct ice_aqc_sw_rules_elem *entry = r_iter;
/* cap each AQ call at what fits in ICE_AQ_MAX_BUF_LEN */
4719 elem_sent = MIN_T(u8, total_elem_left,
4720 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
4721 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
4722 elem_sent, ice_aqc_opc_add_sw_rules,
4725 goto ice_add_mac_exit;
4726 r_iter = (struct ice_aqc_sw_rules_elem *)
4727 ((u8 *)r_iter + (elem_sent * s_rule_size));
4730 /* Fill up rule ID based on the value returned from FW */
4732 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4734 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4735 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4736 struct ice_fltr_mgmt_list_entry *fm_entry;
4738 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4739 f_info->fltr_rule_id =
4740 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
4741 f_info->fltr_act = ICE_FWD_TO_VSI;
4742 /* Create an entry to track this MAC address */
4743 fm_entry = (struct ice_fltr_mgmt_list_entry *)
4744 ice_malloc(hw, sizeof(*fm_entry));
4746 status = ICE_ERR_NO_MEMORY;
4747 goto ice_add_mac_exit;
4749 fm_entry->fltr_info = *f_info;
4750 fm_entry->vsi_count = 1;
4751 /* The book keeping entries will get removed when
4752 * base driver calls remove filter AQ command
4755 LIST_ADD(&fm_entry->list_entry, rule_head);
4756 r_iter = (struct ice_aqc_sw_rules_elem *)
4757 ((u8 *)r_iter + s_rule_size);
4762 ice_release_lock(rule_lock);
4764 ice_free(hw, s_rule);
4769 * ice_add_mac - Add a MAC address based filter rule
4770 * @hw: pointer to the hardware structure
4771 * @m_list: list of MAC addresses and forwarding information
4773 * Function add MAC rule for logical port from HW struct
4775 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
/* NOTE(review): the guard condition for this early return (presumably
 * !m_list || !hw) is not visible in this extract - confirm upstream.
 */
4778 return ICE_ERR_PARAM;
/* thin wrapper: resolve switch info and logical port from the HW struct */
4780 return ice_add_mac_rule(hw, m_list, hw->switch_info,
4781 hw->port_info->lport);
4785 * ice_add_vlan_internal - Add one VLAN based filter rule
4786 * @hw: pointer to the hardware structure
4787 * @recp_list: recipe list for which rule has to be added
4788 * @f_entry: filter entry containing one VLAN information
4790 static enum ice_status
4791 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4792 struct ice_fltr_list_entry *f_entry)
4794 struct ice_fltr_mgmt_list_entry *v_list_itr;
4795 struct ice_fltr_info *new_fltr, *cur_fltr;
4796 enum ice_sw_lkup_type lkup_type;
4797 u16 vsi_list_id = 0, vsi_handle;
4798 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4799 enum ice_status status = ICE_SUCCESS;
4801 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4802 return ICE_ERR_PARAM;
4804 f_entry->fltr_info.fwd_id.hw_vsi_id =
4805 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4806 new_fltr = &f_entry->fltr_info;
4808 /* VLAN ID should only be 12 bits */
4809 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
4810 return ICE_ERR_PARAM;
4812 if (new_fltr->src_id != ICE_SRC_ID_VSI)
4813 return ICE_ERR_PARAM;
4815 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
4816 lkup_type = new_fltr->lkup_type;
4817 vsi_handle = new_fltr->vsi_handle;
4818 rule_lock = &recp_list->filt_rule_lock;
4819 ice_acquire_lock(rule_lock);
/* three cases follow: no rule yet for this VLAN, an existing rule with a
 * singly-referenced VSI list, or an existing rule whose VSI list is shared
 */
4820 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
4822 struct ice_vsi_list_map_info *map_info = NULL;
4824 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
4825 /* All VLAN pruning rules use a VSI list. Check if
4826 * there is already a VSI list containing VSI that we
4827 * want to add. If found, use the same vsi_list_id for
4828 * this new VLAN rule or else create a new list.
4830 map_info = ice_find_vsi_list_entry(recp_list,
4834 status = ice_create_vsi_list_rule(hw,
4842 /* Convert the action to forwarding to a VSI list. */
4843 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
4844 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
4847 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
4849 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
4852 status = ICE_ERR_DOES_NOT_EXIST;
4855 /* reuse VSI list for new rule and increment ref_cnt */
4857 v_list_itr->vsi_list_info = map_info;
4858 map_info->ref_cnt++;
4860 v_list_itr->vsi_list_info =
4861 ice_create_vsi_list_map(hw, &vsi_handle,
4865 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
4866 /* Update existing VSI list to add new VSI ID only if it used
4869 cur_fltr = &v_list_itr->fltr_info;
4870 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
4873 /* If VLAN rule exists and VSI list being used by this rule is
4874 * referenced by more than 1 VLAN rule. Then create a new VSI
4875 * list appending previous VSI with new VSI and update existing
4876 * VLAN rule to point to new VSI list ID
4878 struct ice_fltr_info tmp_fltr;
4879 u16 vsi_handle_arr[2];
4882 /* Current implementation only supports reusing VSI list with
4883 * one VSI count. We should never hit below condition
4885 if (v_list_itr->vsi_count > 1 &&
4886 v_list_itr->vsi_list_info->ref_cnt > 1) {
4887 ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
4888 status = ICE_ERR_CFG;
/* recover the single VSI currently on the shared list */
4893 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
4896 /* A rule already exists with the new VSI being added */
4897 if (cur_handle == vsi_handle) {
4898 status = ICE_ERR_ALREADY_EXISTS;
4902 vsi_handle_arr[0] = cur_handle;
4903 vsi_handle_arr[1] = vsi_handle;
4904 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
4905 &vsi_list_id, lkup_type);
4909 tmp_fltr = v_list_itr->fltr_info;
4910 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
4911 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
4912 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4913 /* Update the previous switch rule to a new VSI list which
4914 * includes current VSI that is requested
4916 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
4920 /* before overriding VSI list map info. decrement ref_cnt of
4923 v_list_itr->vsi_list_info->ref_cnt--;
4925 /* now update to newly created list */
4926 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
4927 v_list_itr->vsi_list_info =
4928 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4930 v_list_itr->vsi_count++;
4934 ice_release_lock(rule_lock);
4939 * ice_add_vlan_rule - Add VLAN based filter rule
4940 * @hw: pointer to the hardware structure
4941 * @v_list: list of VLAN entries and forwarding information
4942 * @sw: pointer to switch info struct for which function add rule
4944 static enum ice_status
4945 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4946 struct ice_switch_info *sw)
4948 struct ice_fltr_list_entry *v_list_itr;
4949 struct ice_sw_recipe *recp_list;
4951 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
/* add each VLAN entry individually; stop on the first failure, leaving
 * per-entry results in each entry's status field
 */
4952 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
4954 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
4955 return ICE_ERR_PARAM;
4956 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
4957 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
4959 if (v_list_itr->status)
4960 return v_list_itr->status;
4966 * ice_add_vlan - Add a VLAN based filter rule
4967 * @hw: pointer to the hardware structure
4968 * @v_list: list of VLAN and forwarding information
4970 * Function add VLAN rule for logical port from HW struct
4972 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
/* NOTE(review): the guard condition for this early return (presumably
 * !v_list || !hw) is not visible in this extract - confirm upstream.
 */
4975 return ICE_ERR_PARAM;
4977 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
4981 * ice_add_mac_vlan_rule - Add MAC and VLAN pair based filter rule
4982 * @hw: pointer to the hardware structure
4983 * @mv_list: list of MAC and VLAN filters
4984 * @sw: pointer to switch info struct for which function add rule
4985 * @lport: logic port number on which function add rule
4987 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
4988 * pruning bits enabled, then it is the responsibility of the caller to make
4989 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
4990 * VLAN won't be received on that VSI otherwise.
4992 static enum ice_status
4993 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
4994 struct ice_switch_info *sw, u8 lport)
4996 struct ice_fltr_list_entry *mv_list_itr;
4997 struct ice_sw_recipe *recp_list;
4999 if (!mv_list || !hw)
5000 return ICE_ERR_PARAM;
5002 recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
/* add entries one at a time; abort on first failure, per-entry status
 * is recorded in each list entry
 */
5003 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
5005 enum ice_sw_lkup_type l_type =
5006 mv_list_itr->fltr_info.lkup_type;
5008 if (l_type != ICE_SW_LKUP_MAC_VLAN)
5009 return ICE_ERR_PARAM;
5010 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
5011 mv_list_itr->status =
5012 ice_add_rule_internal(hw, recp_list, lport,
5014 if (mv_list_itr->status)
5015 return mv_list_itr->status;
5021 * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
5022 * @hw: pointer to the hardware structure
5023 * @mv_list: list of MAC VLAN addresses and forwarding information
5025 * Function add MAC VLAN rule for logical port from HW struct
5028 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
5030 if (!mv_list || !hw)
5031 return ICE_ERR_PARAM;
/* thin wrapper: resolve switch info and logical port from the HW struct */
5033 return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
5034 hw->port_info->lport);
5038 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
5039 * @hw: pointer to the hardware structure
5040 * @em_list: list of ether type MAC filter, MAC is optional
5041 * @sw: pointer to switch info struct for which function add rule
5042 * @lport: logic port number on which function add rule
5044 * This function requires the caller to populate the entries in
5045 * the filter list with the necessary fields (including flags to
5046 * indicate Tx or Rx rules).
5048 static enum ice_status
5049 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
5050 struct ice_switch_info *sw, u8 lport)
5052 struct ice_fltr_list_entry *em_list_itr;
5054 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
5056 struct ice_sw_recipe *recp_list;
5057 enum ice_sw_lkup_type l_type;
/* the lookup type doubles as the recipe-list index here */
5059 l_type = em_list_itr->fltr_info.lkup_type;
5060 recp_list = &sw->recp_list[l_type];
5062 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
5063 l_type != ICE_SW_LKUP_ETHERTYPE)
5064 return ICE_ERR_PARAM;
5066 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
5069 if (em_list_itr->status)
5070 return em_list_itr->status;
5076 * ice_add_eth_mac - Add a ethertype based filter rule
5077 * @hw: pointer to the hardware structure
5078 * @em_list: list of ethertype and forwarding information
5080 * Function add ethertype rule for logical port from HW struct
5083 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
5085 if (!em_list || !hw)
5086 return ICE_ERR_PARAM;
/* thin wrapper: resolve switch info and logical port from the HW struct */
5088 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
5089 hw->port_info->lport);
5093 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
5094 * @hw: pointer to the hardware structure
5095 * @em_list: list of ethertype or ethertype MAC entries
5096 * @sw: pointer to switch info struct for which function add rule
5098 static enum ice_status
5099 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
5100 struct ice_switch_info *sw)
5102 struct ice_fltr_list_entry *em_list_itr, *tmp;
/* _SAFE iteration: removal may unlink entries while walking the list */
5104 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
5106 struct ice_sw_recipe *recp_list;
5107 enum ice_sw_lkup_type l_type;
5109 l_type = em_list_itr->fltr_info.lkup_type;
5111 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
5112 l_type != ICE_SW_LKUP_ETHERTYPE)
5113 return ICE_ERR_PARAM;
/* the lookup type doubles as the recipe-list index here */
5115 recp_list = &sw->recp_list[l_type];
5116 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
5118 if (em_list_itr->status)
5119 return em_list_itr->status;
5125 * ice_remove_eth_mac - remove a ethertype based filter rule
5126 * @hw: pointer to the hardware structure
5127 * @em_list: list of ethertype and forwarding information
5131 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
5133 if (!em_list || !hw)
5134 return ICE_ERR_PARAM;
/* thin wrapper around the rule-level remove using the HW switch info */
5136 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
5140 * ice_rem_sw_rule_info
5141 * @hw: pointer to the hardware structure
5142 * @rule_head: pointer to the switch list structure that we want to delete
5145 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
5147 if (!LIST_EMPTY(rule_head)) {
5148 struct ice_fltr_mgmt_list_entry *entry;
5149 struct ice_fltr_mgmt_list_entry *tmp;
/* _SAFE iteration: each entry is unlinked and freed while walking */
5151 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
5152 ice_fltr_mgmt_list_entry, list_entry) {
5153 LIST_DEL(&entry->list_entry);
5154 ice_free(hw, entry);
5160 * ice_rem_adv_rule_info
5161 * @hw: pointer to the hardware structure
5162 * @rule_head: pointer to the switch list structure that we want to delete
5165 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
5167 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
5168 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
5170 if (LIST_EMPTY(rule_head))
5173 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
5174 ice_adv_fltr_mgmt_list_entry, list_entry) {
5175 LIST_DEL(&lst_itr->list_entry);
/* advanced entries own a separately-allocated lookups array too */
5176 ice_free(hw, lst_itr->lkups);
5177 ice_free(hw, lst_itr);
5182 * ice_rem_all_sw_rules_info
5183 * @hw: pointer to the hardware structure
5185 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
5187 struct ice_switch_info *sw = hw->switch_info;
/* walk every recipe and drop its bookkeeping: plain filter lists for
 * basic recipes, advanced lists for advanced recipes
 */
5190 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5191 struct LIST_HEAD_TYPE *rule_head;
5193 rule_head = &sw->recp_list[i].filt_rules;
5194 if (!sw->recp_list[i].adv_rule)
5195 ice_rem_sw_rule_info(hw, rule_head);
5197 ice_rem_adv_rule_info(hw, rule_head);
/* once an advanced recipe has no rules left, clear its adv flag */
5198 if (sw->recp_list[i].adv_rule &&
5199 LIST_EMPTY(&sw->recp_list[i].filt_rules))
5200 sw->recp_list[i].adv_rule = false;
5205 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
5206 * @pi: pointer to the port_info structure
5207 * @vsi_handle: VSI handle to set as default
5208 * @set: true to add the above mentioned switch rule, false to remove it
5209 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
5211 * add filter rule to set/unset given VSI as default VSI for the switch
5212 * (represented by swid)
5215 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
5218 struct ice_aqc_sw_rules_elem *s_rule;
5219 struct ice_fltr_info f_info;
5220 struct ice_hw *hw = pi->hw;
5221 enum ice_adminq_opc opcode;
5222 enum ice_status status;
5226 if (!ice_is_vsi_valid(hw, vsi_handle))
5227 return ICE_ERR_PARAM;
5228 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
/* adds carry a dummy packet header; removes are header-less */
5230 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
5231 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
5233 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
5235 return ICE_ERR_NO_MEMORY;
5237 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
5239 f_info.lkup_type = ICE_SW_LKUP_DFLT;
5240 f_info.flag = direction;
5241 f_info.fltr_act = ICE_FWD_TO_VSI;
5242 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
/* Rx default rules source from the port; Tx ones source from the VSI */
5244 if (f_info.flag & ICE_FLTR_RX) {
5245 f_info.src = pi->lport;
5246 f_info.src_id = ICE_SRC_ID_LPORT;
5248 f_info.fltr_rule_id =
5249 pi->dflt_rx_vsi_rule_id;
5250 } else if (f_info.flag & ICE_FLTR_TX) {
5251 f_info.src_id = ICE_SRC_ID_VSI;
5252 f_info.src = hw_vsi_id;
5254 f_info.fltr_rule_id =
5255 pi->dflt_tx_vsi_rule_id;
5259 opcode = ice_aqc_opc_add_sw_rules;
5261 opcode = ice_aqc_opc_remove_sw_rules;
5263 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
5265 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
5266 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
/* on success, cache the FW-assigned rule index in the port_info so the
 * rule can be removed later; on clear, reset the cached state instead
 */
5269 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
5271 if (f_info.flag & ICE_FLTR_TX) {
5272 pi->dflt_tx_vsi_num = hw_vsi_id;
5273 pi->dflt_tx_vsi_rule_id = index;
5274 } else if (f_info.flag & ICE_FLTR_RX) {
5275 pi->dflt_rx_vsi_num = hw_vsi_id;
5276 pi->dflt_rx_vsi_rule_id = index;
5279 if (f_info.flag & ICE_FLTR_TX) {
5280 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
5281 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
5282 } else if (f_info.flag & ICE_FLTR_RX) {
5283 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
5284 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
5289 ice_free(hw, s_rule);
5294 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
5295 * @list_head: head of rule list
5296 * @f_info: rule information
5298 * Helper function to search for a unicast rule entry - this is to be used
5299 * to remove unicast MAC filter that is not shared with other VSIs on the
5302 * Returns pointer to entry storing the rule if found
5304 static struct ice_fltr_mgmt_list_entry *
5305 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
5306 struct ice_fltr_info *f_info)
5308 struct ice_fltr_mgmt_list_entry *list_itr;
/* match on lookup data, the forwarding HW VSI, and the Rx/Tx flag */
5310 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
5312 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
5313 sizeof(f_info->l_data)) &&
5314 f_info->fwd_id.hw_vsi_id ==
5315 list_itr->fltr_info.fwd_id.hw_vsi_id &&
5316 f_info->flag == list_itr->fltr_info.flag)
5323 * ice_remove_mac_rule - remove a MAC based filter rule
5324 * @hw: pointer to the hardware structure
5325 * @m_list: list of MAC addresses and forwarding information
5326 * @recp_list: list from which function remove MAC address
5328 * This function removes either a MAC filter rule or a specific VSI from a
5329 * VSI list for a multicast MAC address.
5331 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
5332 * ice_add_mac. Caller should be aware that this call will only work if all
5333 * the entries passed into m_list were added previously. It will not attempt to
5334 * do a partial remove of entries that were found.
5336 static enum ice_status
5337 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
5338 struct ice_sw_recipe *recp_list)
5340 struct ice_fltr_list_entry *list_itr, *tmp;
5341 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5344 return ICE_ERR_PARAM;
5346 rule_lock = &recp_list->filt_rule_lock;
/* _SAFE iteration: removal may unlink entries while walking the list */
5347 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
5349 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
5350 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
5353 if (l_type != ICE_SW_LKUP_MAC)
5354 return ICE_ERR_PARAM;
5356 vsi_handle = list_itr->fltr_info.vsi_handle;
5357 if (!ice_is_vsi_valid(hw, vsi_handle))
5358 return ICE_ERR_PARAM;
5360 list_itr->fltr_info.fwd_id.hw_vsi_id =
5361 ice_get_hw_vsi_num(hw, vsi_handle);
5362 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
5363 /* Don't remove the unicast address that belongs to
5364 * another VSI on the switch, since it is not being
5367 ice_acquire_lock(rule_lock);
5368 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
5369 &list_itr->fltr_info)) {
5370 ice_release_lock(rule_lock);
5371 return ICE_ERR_DOES_NOT_EXIST;
5373 ice_release_lock(rule_lock);
5375 list_itr->status = ice_remove_rule_internal(hw, recp_list,
5377 if (list_itr->status)
5378 return list_itr->status;
5384 * ice_remove_mac - remove a MAC address based filter rule
5385 * @hw: pointer to the hardware structure
5386 * @m_list: list of MAC addresses and forwarding information
5389 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
5391 struct ice_sw_recipe *recp_list;
/* thin wrapper: select the MAC recipe list from the HW switch info */
5393 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5394 return ice_remove_mac_rule(hw, m_list, recp_list);
5398 * ice_remove_vlan_rule - Remove VLAN based filter rule
5399 * @hw: pointer to the hardware structure
5400 * @v_list: list of VLAN entries and forwarding information
5401 * @recp_list: list from which function remove VLAN
5403 static enum ice_status
5404 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5405 struct ice_sw_recipe *recp_list)
5407 struct ice_fltr_list_entry *v_list_itr, *tmp;
/* _SAFE iteration: removal may unlink entries while walking the list */
5409 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5411 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
5413 if (l_type != ICE_SW_LKUP_VLAN)
5414 return ICE_ERR_PARAM;
5415 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
5417 if (v_list_itr->status)
5418 return v_list_itr->status;
5424 * ice_remove_vlan - remove a VLAN address based filter rule
5425 * @hw: pointer to the hardware structure
5426 * @v_list: list of VLAN and forwarding information
5430 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
5432 struct ice_sw_recipe *recp_list;
/* NOTE(review): the guard condition for this early return (presumably
 * !v_list || !hw) is not visible in this extract - confirm upstream.
 */
5435 return ICE_ERR_PARAM;
5437 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
5438 return ice_remove_vlan_rule(hw, v_list, recp_list);
5442 * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
5443 * @hw: pointer to the hardware structure
5444 * @v_list: list of MAC VLAN entries and forwarding information
5445 * @recp_list: list from which function remove MAC VLAN
5447 static enum ice_status
5448 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5449 struct ice_sw_recipe *recp_list)
5451 struct ice_fltr_list_entry *v_list_itr, *tmp;
/* NOTE(review): the recp_list parameter is immediately overwritten here,
 * making the parameter dead - verify against upstream whether this
 * reassignment is intentional or an artifact.
 */
5453 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
/* _SAFE iteration: removal may unlink entries while walking the list */
5454 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5456 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
5458 if (l_type != ICE_SW_LKUP_MAC_VLAN)
5459 return ICE_ERR_PARAM;
5460 v_list_itr->status =
5461 ice_remove_rule_internal(hw, recp_list,
5463 if (v_list_itr->status)
5464 return v_list_itr->status;
5470 * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
5471 * @hw: pointer to the hardware structure
5472 * @mv_list: list of MAC VLAN and forwarding information
5475 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
5477 struct ice_sw_recipe *recp_list;
5479 if (!mv_list || !hw)
5480 return ICE_ERR_PARAM;
/* thin wrapper: select the MAC-VLAN recipe list from the HW switch info */
5482 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5483 return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
5487 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
5488 * @fm_entry: filter entry to inspect
5489 * @vsi_handle: VSI handle to compare with filter info
5492 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
/* true when the filter forwards directly to this VSI, or forwards to a
 * VSI list whose bitmap contains this VSI handle
 */
5494 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
5495 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
5496 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
5497 fm_entry->vsi_list_info &&
5498 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
5503 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
5504 * @hw: pointer to the hardware structure
5505 * @vsi_handle: VSI handle to remove filters from
5506 * @vsi_list_head: pointer to the list to add entry to
5507 * @fi: pointer to fltr_info of filter entry to copy & add
5509 * Helper function, used when creating a list of filters to remove from
5510 * a specific VSI. The entry added to vsi_list_head is a COPY of the
5511 * original filter entry, with the exception of fltr_info.fltr_act and
5512 * fltr_info.fwd_id fields. These are set such that later logic can
5513 * extract which VSI to remove the fltr from, and pass on that information.
5515 static enum ice_status
5516 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5517 struct LIST_HEAD_TYPE *vsi_list_head,
5518 struct ice_fltr_info *fi)
5520 struct ice_fltr_list_entry *tmp;
5522 /* this memory is freed up in the caller function
5523 * once filters for this VSI are removed
5525 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
5527 return ICE_ERR_NO_MEMORY;
/* shallow copy of the original filter info */
5529 tmp->fltr_info = *fi;
5531 /* Overwrite these fields to indicate which VSI to remove filter from,
5532 * so find and remove logic can extract the information from the
5533 * list entries. Note that original entries will still have proper
5536 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
5537 tmp->fltr_info.vsi_handle = vsi_handle;
5538 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5540 LIST_ADD(&tmp->list_entry, vsi_list_head);
5546 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
5547 * @hw: pointer to the hardware structure
5548 * @vsi_handle: VSI handle to remove filters from
5549 * @lkup_list_head: pointer to the list that has certain lookup type filters
5550 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
5552 * Locates all filters in lkup_list_head that are used by the given VSI,
5553 * and adds COPIES of those entries to vsi_list_head (intended to be used
5554 * to remove the listed filters).
5555 * Note that this means all entries in vsi_list_head must be explicitly
5556 * deallocated by the caller when done with list.
5558 static enum ice_status
5559 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5560 struct LIST_HEAD_TYPE *lkup_list_head,
5561 struct LIST_HEAD_TYPE *vsi_list_head)
5563 struct ice_fltr_mgmt_list_entry *fm_entry;
5564 enum ice_status status = ICE_SUCCESS;
5566 /* check to make sure VSI ID is valid and within boundary */
5567 if (!ice_is_vsi_valid(hw, vsi_handle))
5568 return ICE_ERR_PARAM;
5570 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
5571 ice_fltr_mgmt_list_entry, list_entry) {
/* skip filters that don't touch this VSI */
5572 if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
5575 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5577 &fm_entry->fltr_info);
5585 * ice_determine_promisc_mask
5586 * @fi: filter info to parse
5588 * Helper function to determine which ICE_PROMISC_ mask corresponds
5589 * to given filter into.
5591 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
5593 u16 vid = fi->l_data.mac_vlan.vlan_id;
5594 u8 *macaddr = fi->l_data.mac.mac_addr;
5595 bool is_tx_fltr = false;
5596 u8 promisc_mask = 0;
5598 if (fi->flag == ICE_FLTR_TX)
/* classify by address class, then pick the Tx or Rx promisc bit */
5601 if (IS_BROADCAST_ETHER_ADDR(macaddr))
5602 promisc_mask |= is_tx_fltr ?
5603 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
5604 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
5605 promisc_mask |= is_tx_fltr ?
5606 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
5607 else if (IS_UNICAST_ETHER_ADDR(macaddr))
5608 promisc_mask |= is_tx_fltr ?
5609 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
/* NOTE(review): the VLAN condition guarding this branch (likely a vid
 * check) is not fully visible in this extract - confirm upstream.
 */
5611 promisc_mask |= is_tx_fltr ?
5612 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
5614 return promisc_mask;
5618 * _ice_get_vsi_promisc - get promiscuous mode of given VSI
5619 * @hw: pointer to the hardware structure
5620 * @vsi_handle: VSI handle to retrieve info from
5621 * @promisc_mask: pointer to mask to be filled in
5622 * @vid: VLAN ID of promisc VLAN VSI
5623 * @sw: pointer to switch info struct for which function add rule
5625 static enum ice_status
5626 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5627 u16 *vid, struct ice_switch_info *sw)
5629 struct ice_fltr_mgmt_list_entry *itr;
5630 struct LIST_HEAD_TYPE *rule_head;
5631 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5633 if (!ice_is_vsi_valid(hw, vsi_handle))
5634 return ICE_ERR_PARAM;
/* scan the PROMISC recipe's rules under its lock, accumulating the
 * promisc bits of every filter that applies to this VSI
 */
5638 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
5639 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
5641 ice_acquire_lock(rule_lock);
5642 LIST_FOR_EACH_ENTRY(itr, rule_head,
5643 ice_fltr_mgmt_list_entry, list_entry) {
5644 /* Continue if this filter doesn't apply to this VSI or the
5645 * VSI ID is not in the VSI map for this filter
5647 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5650 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5652 ice_release_lock(rule_lock);
5658 * ice_get_vsi_promisc - get promiscuous mode of given VSI
5659 * @hw: pointer to the hardware structure
5660 * @vsi_handle: VSI handle to retrieve info from
5661 * @promisc_mask: pointer to mask to be filled in
5662 * @vid: VLAN ID of promisc VLAN VSI
5665 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
/* thin wrapper: delegate using the HW struct's switch info */
5668 return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
5669 vid, hw->switch_info);
5673 * _ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5674 * @hw: pointer to the hardware structure
5675 * @vsi_handle: VSI handle to retrieve info from
5676 * @promisc_mask: pointer to mask to be filled in
5677 * @vid: VLAN ID of promisc VLAN VSI
5678 * @sw: pointer to switch info struct for which function add rule
5680 static enum ice_status
5681 _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5682 u16 *vid, struct ice_switch_info *sw)
5684 struct ice_fltr_mgmt_list_entry *itr;
5685 struct LIST_HEAD_TYPE *rule_head;
5686 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5688 if (!ice_is_vsi_valid(hw, vsi_handle))
5689 return ICE_ERR_PARAM;
/* same scan as _ice_get_vsi_promisc, but over the PROMISC_VLAN recipe */
5693 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
5694 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
5696 ice_acquire_lock(rule_lock);
5697 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
5699 /* Continue if this filter doesn't apply to this VSI or the
5700 * VSI ID is not in the VSI map for this filter
5702 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5705 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5707 ice_release_lock(rule_lock);
5713 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5714 * @hw: pointer to the hardware structure
5715 * @vsi_handle: VSI handle to retrieve info from
5716 * @promisc_mask: pointer to mask to be filled in
5717 * @vid: VLAN ID of promisc VLAN VSI
5720 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
/* thin wrapper: delegate using the HW struct's switch info */
5723 return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
5724 vid, hw->switch_info);
5728 * ice_remove_promisc - Remove promisc based filter rules
5729 * @hw: pointer to the hardware structure
5730 * @recp_id: recipe ID for which the rule needs to removed
5731 * @v_list: list of promisc entries
 *
 * Removes each entry of @v_list through ice_remove_rule_internal() against
 * the recipe @recp_id, storing the per-entry result in v_list_itr->status.
 * Returns the first non-zero status encountered; the success return is
 * outside this excerpt.
5733 static enum ice_status
5734 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
5735 struct LIST_HEAD_TYPE *v_list)
5737 struct ice_fltr_list_entry *v_list_itr, *tmp;
5738 struct ice_sw_recipe *recp_list;
5740 recp_list = &hw->switch_info->recp_list[recp_id];
5741 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5743 v_list_itr->status =
5744 ice_remove_rule_internal(hw, recp_list, v_list_itr);
5745 if (v_list_itr->status)
5746 return v_list_itr->status;
5752 * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
5753 * @hw: pointer to the hardware structure
5754 * @vsi_handle: VSI handle to clear mode
5755 * @promisc_mask: mask of promiscuous config bits to clear
5756 * @vid: VLAN ID to clear VLAN promiscuous
5757 * @sw: pointer to switch info struct for which function add rule
 *
 * Selects the PROMISC_VLAN recipe when the mask contains VLAN RX/TX bits
 * (plain PROMISC otherwise), collects all matching rules for @vsi_handle
 * into a local remove list under the rule lock, removes them via
 * ice_remove_promisc(), and frees the temporary list entries.
5759 static enum ice_status
5760 _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5761 u16 vid, struct ice_switch_info *sw)
5763 struct ice_fltr_list_entry *fm_entry, *tmp;
5764 struct LIST_HEAD_TYPE remove_list_head;
5765 struct ice_fltr_mgmt_list_entry *itr;
5766 struct LIST_HEAD_TYPE *rule_head;
5767 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5768 enum ice_status status = ICE_SUCCESS;
5771 if (!ice_is_vsi_valid(hw, vsi_handle))
5772 return ICE_ERR_PARAM;
5774 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
5775 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
5777 recipe_id = ICE_SW_LKUP_PROMISC;
5779 rule_head = &sw->recp_list[recipe_id].filt_rules;
5780 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
5782 INIT_LIST_HEAD(&remove_list_head);
5784 ice_acquire_lock(rule_lock);
5785 LIST_FOR_EACH_ENTRY(itr, rule_head,
5786 ice_fltr_mgmt_list_entry, list_entry) {
5787 struct ice_fltr_info *fltr_info;
5788 u8 fltr_promisc_mask = 0;
5790 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5792 fltr_info = &itr->fltr_info;
/* For the VLAN recipe only rules matching the requested VLAN ID qualify */
5794 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
5795 vid != fltr_info->l_data.mac_vlan.vlan_id)
5798 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
5800 /* Skip if filter is not completely specified by given mask */
5801 if (fltr_promisc_mask & ~promisc_mask)
5804 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
/* On list-build failure: drop the lock and free whatever was queued */
5808 ice_release_lock(rule_lock);
5809 goto free_fltr_list;
5812 ice_release_lock(rule_lock);
5814 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
5817 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
5818 ice_fltr_list_entry, list_entry) {
5819 LIST_DEL(&fm_entry->list_entry);
5820 ice_free(hw, fm_entry);
5827 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
5828 * @hw: pointer to the hardware structure
5829 * @vsi_handle: VSI handle to clear mode
5830 * @promisc_mask: mask of promiscuous config bits to clear
5831 * @vid: VLAN ID to clear VLAN promiscuous
 *
 * Public wrapper: delegates to _ice_clear_vsi_promisc() with
 * hw->switch_info.
5834 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
5835 u8 promisc_mask, u16 vid)
5837 return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
5838 vid, hw->switch_info);
5842 * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5843 * @hw: pointer to the hardware structure
5844 * @vsi_handle: VSI handle to configure
5845 * @promisc_mask: mask of promiscuous config bits
5846 * @vid: VLAN ID to set VLAN promiscuous
5847 * @lport: logical port number to configure promisc mode
5848 * @sw: pointer to switch info struct for which function add rule
 *
 * Builds one forwarding rule per direction/packet-type bit present in
 * @promisc_mask (unicast/multicast/broadcast, RX and TX, plus the VLAN
 * flags) and installs each via ice_add_rule_internal() against the
 * PROMISC or PROMISC_VLAN recipe. Stops on the first failure.
5850 static enum ice_status
5851 _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5852 u16 vid, u8 lport, struct ice_switch_info *sw)
5854 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
5855 struct ice_fltr_list_entry f_list_entry;
5856 struct ice_fltr_info new_fltr;
5857 enum ice_status status = ICE_SUCCESS;
5863 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
5865 if (!ice_is_vsi_valid(hw, vsi_handle))
5866 return ICE_ERR_PARAM;
5867 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5869 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
/* VLAN RX/TX bits select the PROMISC_VLAN recipe and carry the VLAN ID */
5871 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
5872 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
5873 new_fltr.l_data.mac_vlan.vlan_id = vid;
5874 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
5876 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
5877 recipe_id = ICE_SW_LKUP_PROMISC;
5880 /* Separate filters must be set for each direction/packet type
5881 * combination, so we will loop over the mask value, store the
5882 * individual type, and clear it out in the input mask as it
5885 while (promisc_mask) {
5886 struct ice_sw_recipe *recp_list;
5892 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
5893 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
5894 pkt_type = UCAST_FLTR;
5895 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
5896 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
5897 pkt_type = UCAST_FLTR;
5899 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
5900 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
5901 pkt_type = MCAST_FLTR;
5902 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
5903 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
5904 pkt_type = MCAST_FLTR;
5906 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
5907 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
5908 pkt_type = BCAST_FLTR;
5909 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
5910 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
5911 pkt_type = BCAST_FLTR;
5915 /* Check for VLAN promiscuous flag */
5916 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
5917 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
5918 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
5919 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
5923 /* Set filter DA based on packet type */
5924 mac_addr = new_fltr.l_data.mac.mac_addr;
5925 if (pkt_type == BCAST_FLTR) {
5926 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
5927 } else if (pkt_type == MCAST_FLTR ||
5928 pkt_type == UCAST_FLTR) {
5929 /* Use the dummy ether header DA */
5930 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
5931 ICE_NONDMA_TO_NONDMA);
5932 if (pkt_type == MCAST_FLTR)
5933 mac_addr[0] |= 0x1; /* Set multicast bit */
5936 /* Need to reset this to zero for all iterations */
/* TX rules use the HW VSI as source; RX rules use the logical port */
5939 new_fltr.flag |= ICE_FLTR_TX;
5940 new_fltr.src = hw_vsi_id;
5942 new_fltr.flag |= ICE_FLTR_RX;
5943 new_fltr.src = lport;
5946 new_fltr.fltr_act = ICE_FWD_TO_VSI;
5947 new_fltr.vsi_handle = vsi_handle;
5948 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
5949 f_list_entry.fltr_info = new_fltr;
5950 recp_list = &sw->recp_list[recipe_id];
5952 status = ice_add_rule_internal(hw, recp_list, lport,
5954 if (status != ICE_SUCCESS)
5955 goto set_promisc_exit;
5963 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5964 * @hw: pointer to the hardware structure
5965 * @vsi_handle: VSI handle to configure
5966 * @promisc_mask: mask of promiscuous config bits
5967 * @vid: VLAN ID to set VLAN promiscuous
 *
 * Public wrapper: delegates to _ice_set_vsi_promisc() with the port's
 * lport and the default switch info.
5970 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5973 return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
5974 hw->port_info->lport,
5979 * _ice_set_vlan_vsi_promisc
5980 * @hw: pointer to the hardware structure
5981 * @vsi_handle: VSI handle to configure
5982 * @promisc_mask: mask of promiscuous config bits
5983 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5984 * @lport: logical port number to configure promisc mode
5985 * @sw: pointer to switch info struct for which function add rule
5987 * Configure VSI with all associated VLANs to given promiscuous mode(s)
 *
 * Snapshots the VSI's VLAN filter rules under the VLAN rule lock, then for
 * each VLAN either clears (_ice_clear_vsi_promisc) or sets
 * (_ice_set_vsi_promisc) the requested promiscuous bits, and finally frees
 * the temporary list.
5989 static enum ice_status
5990 _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5991 bool rm_vlan_promisc, u8 lport,
5992 struct ice_switch_info *sw)
5994 struct ice_fltr_list_entry *list_itr, *tmp;
5995 struct LIST_HEAD_TYPE vsi_list_head;
5996 struct LIST_HEAD_TYPE *vlan_head;
5997 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
5998 enum ice_status status;
6001 INIT_LIST_HEAD(&vsi_list_head);
6002 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
6003 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
6004 ice_acquire_lock(vlan_lock);
6005 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
6007 ice_release_lock(vlan_lock);
6009 goto free_fltr_list;
6011 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
6013 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
6014 if (rm_vlan_promisc)
6015 status = _ice_clear_vsi_promisc(hw, vsi_handle,
6019 status = _ice_set_vsi_promisc(hw, vsi_handle,
6020 promisc_mask, vlan_id,
6027 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
6028 ice_fltr_list_entry, list_entry) {
6029 LIST_DEL(&list_itr->list_entry);
6030 ice_free(hw, list_itr);
6036 * ice_set_vlan_vsi_promisc
6037 * @hw: pointer to the hardware structure
6038 * @vsi_handle: VSI handle to configure
6039 * @promisc_mask: mask of promiscuous config bits
6040 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
6042 * Configure VSI with all associated VLANs to given promiscuous mode(s)
 *
 * Public wrapper: delegates to _ice_set_vlan_vsi_promisc() with the
 * port's lport and the default switch info.
6045 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
6046 bool rm_vlan_promisc)
6048 return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
6049 rm_vlan_promisc, hw->port_info->lport,
6054 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
6055 * @hw: pointer to the hardware structure
6056 * @vsi_handle: VSI handle to remove filters from
6057 * @recp_list: recipe list from which function remove fltr
6058 * @lkup: switch rule filter lookup type
 *
 * Collects all @lkup-type filters used by @vsi_handle into a local list
 * under the rule lock, dispatches to the matching remove helper per lookup
 * type, then frees the temporary list entries.
6061 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
6062 struct ice_sw_recipe *recp_list,
6063 enum ice_sw_lkup_type lkup)
6065 struct ice_fltr_list_entry *fm_entry;
6066 struct LIST_HEAD_TYPE remove_list_head;
6067 struct LIST_HEAD_TYPE *rule_head;
6068 struct ice_fltr_list_entry *tmp;
6069 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6070 enum ice_status status;
6072 INIT_LIST_HEAD(&remove_list_head);
6073 rule_lock = &recp_list[lkup].filt_rule_lock;
6074 rule_head = &recp_list[lkup].filt_rules;
6075 ice_acquire_lock(rule_lock);
6076 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
6078 ice_release_lock(rule_lock);
6080 goto free_fltr_list;
6083 case ICE_SW_LKUP_MAC:
6084 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
6086 case ICE_SW_LKUP_VLAN:
6087 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
6089 case ICE_SW_LKUP_PROMISC:
6090 case ICE_SW_LKUP_PROMISC_VLAN:
/* lkup value doubles as the recipe ID for the promisc remove path */
6091 ice_remove_promisc(hw, lkup, &remove_list_head);
6093 case ICE_SW_LKUP_MAC_VLAN:
6094 ice_remove_mac_vlan(hw, &remove_list_head);
6096 case ICE_SW_LKUP_ETHERTYPE:
6097 case ICE_SW_LKUP_ETHERTYPE_MAC:
6098 ice_remove_eth_mac(hw, &remove_list_head);
6100 case ICE_SW_LKUP_DFLT:
6101 ice_debug(hw, ICE_DBG_SW, "Remove filters for this lookup type hasn't been implemented yet\n");
6103 case ICE_SW_LKUP_LAST:
6104 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
6109 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
6110 ice_fltr_list_entry, list_entry) {
6111 LIST_DEL(&fm_entry->list_entry);
6112 ice_free(hw, fm_entry);
6117 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
6118 * @hw: pointer to the hardware structure
6119 * @vsi_handle: VSI handle to remove filters from
6120 * @sw: pointer to switch info struct
 *
 * Removes the VSI's filters for every supported lookup type by calling
 * ice_remove_vsi_lkup_fltr() once per type.
6123 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
6124 struct ice_switch_info *sw)
6126 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
6128 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6129 sw->recp_list, ICE_SW_LKUP_MAC);
6130 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6131 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
6132 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6133 sw->recp_list, ICE_SW_LKUP_PROMISC);
6134 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6135 sw->recp_list, ICE_SW_LKUP_VLAN);
6136 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6137 sw->recp_list, ICE_SW_LKUP_DFLT);
6138 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6139 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
6140 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6141 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
6142 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6143 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
6147 * ice_remove_vsi_fltr - Remove all filters for a VSI
6148 * @hw: pointer to the hardware structure
6149 * @vsi_handle: VSI handle to remove filters from
 *
 * Public wrapper: delegates to ice_remove_vsi_fltr_rule() with
 * hw->switch_info.
6151 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
6153 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
6157 * ice_alloc_res_cntr - allocating resource counter
6158 * @hw: pointer to the hardware structure
6159 * @type: type of resource
6160 * @alloc_shared: if set it is shared else dedicated
6161 * @num_items: number of entries requested for FD resource type
6162 * @counter_id: counter index returned by AQ call
 *
 * Builds an alloc/free-resource AQ buffer for one element, issues the
 * ice_aqc_opc_alloc_res command, and on success returns the allocated
 * resource index through *@counter_id. (Buffer free path is outside this
 * excerpt.)
6165 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
6168 struct ice_aqc_alloc_free_res_elem *buf;
6169 enum ice_status status;
6172 /* Allocate resource */
6173 buf_len = ice_struct_size(buf, elem, 1);
6174 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6176 return ICE_ERR_NO_MEMORY;
6178 buf->num_elems = CPU_TO_LE16(num_items);
6179 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
6180 ICE_AQC_RES_TYPE_M) | alloc_shared);
6182 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
6183 ice_aqc_opc_alloc_res, NULL);
6187 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
6195 * ice_free_res_cntr - free resource counter
6196 * @hw: pointer to the hardware structure
6197 * @type: type of resource
6198 * @alloc_shared: if set it is shared else dedicated
6199 * @num_items: number of entries to be freed for FD resource type
6200 * @counter_id: counter ID resource which needs to be freed
 *
 * Mirror of ice_alloc_res_cntr(): builds a single-element AQ buffer with
 * the resource index in sw_resp and issues ice_aqc_opc_free_res. Logs a
 * debug message on failure.
6203 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
6206 struct ice_aqc_alloc_free_res_elem *buf;
6207 enum ice_status status;
6211 buf_len = ice_struct_size(buf, elem, 1);
6212 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6214 return ICE_ERR_NO_MEMORY;
6216 buf->num_elems = CPU_TO_LE16(num_items);
6217 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
6218 ICE_AQC_RES_TYPE_M) | alloc_shared);
6219 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
6221 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
6222 ice_aqc_opc_free_res, NULL);
6224 ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
6231 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
6232 * @hw: pointer to the hardware structure
6233 * @counter_id: returns counter index
 *
 * Convenience wrapper: allocates one dedicated VLAN counter via
 * ice_alloc_res_cntr().
6235 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
6237 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
6238 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
6243 * ice_free_vlan_res_counter - Free counter resource for VLAN type
6244 * @hw: pointer to the hardware structure
6245 * @counter_id: counter index to be freed
 *
 * Convenience wrapper: frees one dedicated VLAN counter via
 * ice_free_res_cntr().
6247 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
6249 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
6250 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
6255 * ice_alloc_res_lg_act - add large action resource
6256 * @hw: pointer to the hardware structure
6257 * @l_id: large action ID to fill it in
6258 * @num_acts: number of actions to hold with a large action entry
 *
 * Allocates one wide-table entry sized for @num_acts actions through the
 * alloc-resource AQ command and returns its index via *@l_id.
6260 static enum ice_status
6261 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
6263 struct ice_aqc_alloc_free_res_elem *sw_buf;
6264 enum ice_status status;
6267 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
6268 return ICE_ERR_PARAM;
6270 /* Allocate resource for large action */
6271 buf_len = ice_struct_size(sw_buf, elem, 1)
6272 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6274 return ICE_ERR_NO_MEMORY;
6276 sw_buf->num_elems = CPU_TO_LE16(1);
6278 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
6279 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
6280 * If num_acts is greater than 2, then use
6281 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
6282 * The num_acts cannot exceed 4. This was ensured at the
6283 * beginning of the function.
6286 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
6287 else if (num_acts == 2)
6288 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
6290 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
6292 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
6293 ice_aqc_opc_alloc_res, NULL);
6295 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
6297 ice_free(hw, sw_buf);
6302 * ice_add_mac_with_sw_marker - add filter with sw marker
6303 * @hw: pointer to the hardware structure
6304 * @f_info: filter info structure containing the MAC filter information
6305 * @sw_marker: sw marker to tag the Rx descriptor with
 *
 * Validates the request (must be a MAC lookup forwarding to a valid VSI
 * with a valid marker), ensures the base MAC rule exists, then — under the
 * MAC filter-rule lock — allocates a 3-action large-action entry and
 * attaches the marker via ice_add_marker_act(). A rule added purely for
 * this call is removed again if a later step fails.
6308 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
6311 struct ice_fltr_mgmt_list_entry *m_entry;
6312 struct ice_fltr_list_entry fl_info;
6313 struct ice_sw_recipe *recp_list;
6314 struct LIST_HEAD_TYPE l_head;
6315 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6316 enum ice_status ret;
6320 if (f_info->fltr_act != ICE_FWD_TO_VSI)
6321 return ICE_ERR_PARAM;
6323 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
6324 return ICE_ERR_PARAM;
6326 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
6327 return ICE_ERR_PARAM;
6329 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
6330 return ICE_ERR_PARAM;
6331 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
6333 /* Add filter if it doesn't exist so then the adding of large
6334 * action always results in update
6337 INIT_LIST_HEAD(&l_head);
6338 fl_info.fltr_info = *f_info;
6339 LIST_ADD(&fl_info.list_entry, &l_head);
6341 entry_exists = false;
6342 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
6343 hw->port_info->lport);
6344 if (ret == ICE_ERR_ALREADY_EXISTS)
6345 entry_exists = true;
6349 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
6350 rule_lock = &recp_list->filt_rule_lock;
6351 ice_acquire_lock(rule_lock);
6352 /* Get the book keeping entry for the filter */
6353 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
6357 /* If counter action was enabled for this rule then don't enable
6358 * sw marker large action
6360 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
6361 ret = ICE_ERR_PARAM;
6365 /* if same marker was added before */
6366 if (m_entry->sw_marker_id == sw_marker) {
6367 ret = ICE_ERR_ALREADY_EXISTS;
6371 /* Allocate a hardware table entry to hold large act. Three actions
6372 * for marker based large action
6374 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
6378 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
6381 /* Update the switch rule to add the marker action */
6382 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
6384 ice_release_lock(rule_lock);
6389 ice_release_lock(rule_lock);
6390 /* only remove entry if it did not exist previously */
6392 ret = ice_remove_mac(hw, &l_head);
6398 * ice_add_mac_with_counter - add filter with counter enabled
6399 * @hw: pointer to the hardware structure
6400 * @f_info: pointer to filter info structure containing the MAC filter
 *
 * Counterpart of ice_add_mac_with_sw_marker(): validates the MAC forward
 * request, ensures the base rule exists, then under the MAC rule lock
 * allocates a VLAN counter and a 2-action large-action entry and attaches
 * the counter via ice_add_counter_act(). A rule added purely for this call
 * is removed again if a later step fails.
6404 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
6406 struct ice_fltr_mgmt_list_entry *m_entry;
6407 struct ice_fltr_list_entry fl_info;
6408 struct ice_sw_recipe *recp_list;
6409 struct LIST_HEAD_TYPE l_head;
6410 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6411 enum ice_status ret;
6416 if (f_info->fltr_act != ICE_FWD_TO_VSI)
6417 return ICE_ERR_PARAM;
6419 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
6420 return ICE_ERR_PARAM;
6422 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
6423 return ICE_ERR_PARAM;
6424 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
6425 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
6427 entry_exist = false;
6429 rule_lock = &recp_list->filt_rule_lock;
6431 /* Add filter if it doesn't exist so then the adding of large
6432 * action always results in update
6434 INIT_LIST_HEAD(&l_head);
6436 fl_info.fltr_info = *f_info;
6437 LIST_ADD(&fl_info.list_entry, &l_head);
6439 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
6440 hw->port_info->lport);
6441 if (ret == ICE_ERR_ALREADY_EXISTS)
6446 ice_acquire_lock(rule_lock);
6447 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
6449 ret = ICE_ERR_BAD_PTR;
6453 /* Don't enable counter for a filter for which sw marker was enabled */
6454 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
6455 ret = ICE_ERR_PARAM;
6459 /* If a counter was already enabled then don't need to add again */
6460 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
6461 ret = ICE_ERR_ALREADY_EXISTS;
6465 /* Allocate a hardware table entry to VLAN counter */
6466 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
6470 /* Allocate a hardware table entry to hold large act. Two actions for
6471 * counter based large action
6473 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
6477 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
6480 /* Update the switch rule to add the counter action */
6481 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
6483 ice_release_lock(rule_lock);
6488 ice_release_lock(rule_lock);
6489 /* only remove entry if it did not exist previously */
6491 ret = ice_remove_mac(hw, &l_head);
6496 /* This is mapping table entry that maps every word within a given protocol
6497 * structure to the real byte offset as per the specification of that
6499 * for example dst address is 3 words in ethertype header and corresponding
6500 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
6501 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
6502 * matching entry describing its field. This needs to be updated if new
6503 * structure is added to that union.
 *
 * Each entry lists the byte offset of every 16-bit word of that protocol
 * header; ice_fill_valid_words() indexes offs[] by the word position of a
 * set mask bit.
6505 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
6506 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
6507 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
6508 { ICE_ETYPE_OL, { 0 } },
6509 { ICE_VLAN_OFOS, { 0, 2 } },
6510 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
6511 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
6512 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
6513 26, 28, 30, 32, 34, 36, 38 } },
6514 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
6515 26, 28, 30, 32, 34, 36, 38 } },
6516 { ICE_TCP_IL, { 0, 2 } },
6517 { ICE_UDP_OF, { 0, 2 } },
6518 { ICE_UDP_ILOS, { 0, 2 } },
6519 { ICE_SCTP_IL, { 0, 2 } },
6520 { ICE_VXLAN, { 8, 10, 12, 14 } },
6521 { ICE_GENEVE, { 8, 10, 12, 14 } },
6522 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
6523 { ICE_NVGRE, { 0, 2, 4, 6 } },
6524 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
6525 { ICE_PPPOE, { 0, 2, 4, 6 } },
6526 { ICE_PFCP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
6527 { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
6528 { ICE_ESP, { 0, 2, 4, 6 } },
6529 { ICE_AH, { 0, 2, 4, 6, 8, 10 } },
6530 { ICE_NAT_T, { 8, 10, 12, 14 } },
6531 { ICE_GTP_NO_PAY, { 8, 10, 12, 14 } },
6532 { ICE_VLAN_EX, { 0, 2 } },
6535 /* The following table describes preferred grouping of recipes.
6536 * If a recipe that needs to be programmed is a superset or matches one of the
6537 * following combinations, then the recipe needs to be chained as per the
 *
 * NOTE(review): the paragraph above describes recipe grouping, but the
 * table below is a SW protocol-type -> HW protocol-ID mapping (tunnel
 * types map onto their carrier protocol's HW ID, e.g. VXLAN/GENEVE/GTP ->
 * UDP_OF). The comment looks misplaced — confirm against the full source.
6541 static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
6542 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
6543 { ICE_MAC_IL, ICE_MAC_IL_HW },
6544 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
6545 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
6546 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
6547 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
6548 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
6549 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
6550 { ICE_TCP_IL, ICE_TCP_IL_HW },
6551 { ICE_UDP_OF, ICE_UDP_OF_HW },
6552 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
6553 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
6554 { ICE_VXLAN, ICE_UDP_OF_HW },
6555 { ICE_GENEVE, ICE_UDP_OF_HW },
6556 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
6557 { ICE_NVGRE, ICE_GRE_OF_HW },
6558 { ICE_GTP, ICE_UDP_OF_HW },
6559 { ICE_PPPOE, ICE_PPPOE_HW },
6560 { ICE_PFCP, ICE_UDP_ILOS_HW },
6561 { ICE_L2TPV3, ICE_L2TPV3_HW },
6562 { ICE_ESP, ICE_ESP_HW },
6563 { ICE_AH, ICE_AH_HW },
6564 { ICE_NAT_T, ICE_UDP_ILOS_HW },
6565 { ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
6566 { ICE_VLAN_EX, ICE_VLAN_OF_HW },
6570 * ice_find_recp - find a recipe
6571 * @hw: pointer to the hardware structure
6572 * @lkup_exts: extension sequence to match
 * @tun_type: tunnel type the matching recipe must also have
6574 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
 *
 * A recipe matches when it has the same number of valid words, every
 * lookup word (protocol ID + offset, and — presumably — field mask; the
 * comparison lines are partly outside this excerpt) appears among the
 * recipe's words, and the tunnel types agree.
6576 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
6577 enum ice_sw_tunnel_type tun_type)
6579 bool refresh_required = true;
6580 struct ice_sw_recipe *recp;
6583 /* Walk through existing recipes to find a match */
6584 recp = hw->switch_info->recp_list;
6585 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6586 /* If recipe was not created for this ID, in SW bookkeeping,
6587 * check if FW has an entry for this recipe. If the FW has an
6588 * entry update it in our SW bookkeeping and continue with the
6591 if (!recp[i].recp_created)
6592 if (ice_get_recp_frm_fw(hw,
6593 hw->switch_info->recp_list, i,
6597 /* Skip inverse action recipes */
6598 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
6599 ICE_AQ_RECIPE_ACT_INV_ACT)
6602 /* if number of words we are looking for match */
6603 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
6604 struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
6605 struct ice_fv_word *be = lkup_exts->fv_words;
6606 u16 *cr = recp[i].lkup_exts.field_mask;
6607 u16 *de = lkup_exts->field_mask;
6611 /* ar, cr, and qr are related to the recipe words, while
6612 * be, de, and pe are related to the lookup words
6614 for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
6615 for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
6617 if (ar[qr].off == be[pe].off &&
6618 ar[qr].prot_id == be[pe].prot_id &&
6620 /* Found the "pe"th word in the
6625 /* After walking through all the words in the
6626 * "i"th recipe if "p"th word was not found then
6627 * this recipe is not what we are looking for.
6628 * So break out from this loop and try the next
6631 if (qr >= recp[i].lkup_exts.n_val_words) {
6636 /* If for "i"th recipe the found was never set to false
6637 * then it means we found our match
6639 if (tun_type == recp[i].tun_type && found)
6640 return i; /* Return the recipe ID */
6643 return ICE_MAX_NUM_RECIPES;
6647 * ice_change_proto_id_to_dvm - change proto id in prot_id_tbl
6649 * As protocol id for outer vlan is different in dvm and svm, if dvm is
6650 * supported protocol array record for outer vlan has to be modified to
6651 * reflect the value proper for DVM.
 *
 * Rewrites the ICE_VLAN_OFOS entry of ice_prot_id_tbl[] in place to
 * ICE_VLAN_OF_HW (idempotent: skips entries already set).
6653 void ice_change_proto_id_to_dvm(void)
6657 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
6658 if (ice_prot_id_tbl[i].type == ICE_VLAN_OFOS &&
6659 ice_prot_id_tbl[i].protocol_id != ICE_VLAN_OF_HW)
6660 ice_prot_id_tbl[i].protocol_id = ICE_VLAN_OF_HW;
6664 * ice_prot_type_to_id - get protocol ID from protocol type
6665 * @type: protocol type
6666 * @id: pointer to variable that will receive the ID
6668 * Returns true if found, false otherwise
 *
 * Linear search of ice_prot_id_tbl[]; on a hit *@id is set to the HW
 * protocol ID (return statements fall outside this excerpt).
6670 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
6674 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
6675 if (ice_prot_id_tbl[i].type == type) {
6676 *id = ice_prot_id_tbl[i].protocol_id;
6683 * ice_fill_valid_words - count valid words
6684 * @rule: advanced rule with lookup information
6685 * @lkup_exts: byte offset extractions of the words that are valid
6687 * calculate valid words in a lookup rule using mask value
 *
 * For every 16-bit word of the rule's mask that is non-zero, appends a
 * (protocol ID, byte offset, field mask) triple to @lkup_exts, capped at
 * ICE_MAX_CHAIN_WORDS. Returns the number of words newly added by this
 * call (word - previous n_val_words).
6690 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
6691 struct ice_prot_lkup_ext *lkup_exts)
6693 u8 j, word, prot_id, ret_val;
6695 if (!ice_prot_type_to_id(rule->type, &prot_id))
6698 word = lkup_exts->n_val_words;
6700 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
6701 if (((u16 *)&rule->m_u)[j] &&
6702 (size_t)rule->type < ARRAY_SIZE(ice_prot_ext)) {
6703 /* No more space to accommodate */
6704 if (word >= ICE_MAX_CHAIN_WORDS)
6706 lkup_exts->fv_words[word].off =
6707 ice_prot_ext[rule->type].offs[j];
6708 lkup_exts->fv_words[word].prot_id =
6709 ice_prot_id_tbl[rule->type].protocol_id;
6710 lkup_exts->field_mask[word] =
6711 BE16_TO_CPU(((_FORCE_ __be16 *)&rule->m_u)[j]);
6715 ret_val = word - lkup_exts->n_val_words;
6716 lkup_exts->n_val_words = word;
6722 * ice_create_first_fit_recp_def - Create a recipe grouping
6723 * @hw: pointer to the hardware structure
6724 * @lkup_exts: an array of protocol header extractions
6725 * @rg_list: pointer to a list that stores new recipe groups
6726 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
6728 * Using first fit algorithm, take all the words that are still not done
6729 * and start grouping them in 4-word groups. Each group makes up one
 *
 * A new group entry is ice_malloc()'d whenever no current group exists or
 * the current one is full (ICE_NUM_WORDS_RECIPE pairs); each not-yet-done
 * word's (prot_id, off, mask) is appended to the current group. Returns
 * ICE_ERR_NO_MEMORY if an allocation fails.
6732 static enum ice_status
6733 ice_create_first_fit_recp_def(struct ice_hw *hw,
6734 struct ice_prot_lkup_ext *lkup_exts,
6735 struct LIST_HEAD_TYPE *rg_list,
6738 struct ice_pref_recipe_group *grp = NULL;
/* No valid words at all: still emit one empty group */
6743 if (!lkup_exts->n_val_words) {
6744 struct ice_recp_grp_entry *entry;
6746 entry = (struct ice_recp_grp_entry *)
6747 ice_malloc(hw, sizeof(*entry));
6749 return ICE_ERR_NO_MEMORY;
6750 LIST_ADD(&entry->l_entry, rg_list);
6751 grp = &entry->r_group;
6753 grp->n_val_pairs = 0;
6756 /* Walk through every word in the rule to check if it is not done. If so
6757 * then this word needs to be part of a new recipe.
6759 for (j = 0; j < lkup_exts->n_val_words; j++)
6760 if (!ice_is_bit_set(lkup_exts->done, j)) {
6762 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
6763 struct ice_recp_grp_entry *entry;
6765 entry = (struct ice_recp_grp_entry *)
6766 ice_malloc(hw, sizeof(*entry));
6768 return ICE_ERR_NO_MEMORY;
6769 LIST_ADD(&entry->l_entry, rg_list);
6770 grp = &entry->r_group;
6774 grp->pairs[grp->n_val_pairs].prot_id =
6775 lkup_exts->fv_words[j].prot_id;
6776 grp->pairs[grp->n_val_pairs].off =
6777 lkup_exts->fv_words[j].off;
6778 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
6786 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
6787 * @hw: pointer to the hardware structure
6788 * @fv_list: field vector with the extraction sequence information
6789 * @rg_list: recipe groupings with protocol-offset pairs
6791 * Helper function to fill in the field vector indices for protocol-offset
6792 * pairs. These indexes are then ultimately programmed into a recipe.
 *
 * Uses only the FIRST field vector on @fv_list: for every pair of every
 * group it scans that FV's extraction words (es.fvw of them) for a
 * matching (prot_id, off) and records the word index plus the group mask
 * in the entry. Returns ICE_ERR_PARAM when a pair has no matching word.
6794 static enum ice_status
6795 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
6796 struct LIST_HEAD_TYPE *rg_list)
6798 struct ice_sw_fv_list_entry *fv;
6799 struct ice_recp_grp_entry *rg;
6800 struct ice_fv_word *fv_ext;
6802 if (LIST_EMPTY(fv_list))
6805 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
6806 fv_ext = fv->fv_ptr->ew;
6808 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
6811 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
6812 struct ice_fv_word *pr;
6817 pr = &rg->r_group.pairs[i];
6818 mask = rg->r_group.mask[i];
6820 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
6821 if (fv_ext[j].prot_id == pr->prot_id &&
6822 fv_ext[j].off == pr->off) {
6825 /* Store index of field vector */
6827 rg->fv_mask[i] = mask;
6831 /* Protocol/offset could not be found, caller gave an
6835 return ICE_ERR_PARAM;
/* NOTE(review): excerpt is missing lines (embedded numbering skips values);
 * code below is untouched, comments only.
 */
6843 * ice_find_free_recp_res_idx - find free result indexes for recipe
6844 * @hw: pointer to hardware structure
6845 * @profiles: bitmap of profiles that will be associated with the new recipe
6846 * @free_idx: pointer to variable to receive the free index bitmap
6848 * The algorithm used here is:
6849 * 1. When creating a new recipe, create a set P which contains all
6850 * Profiles that will be associated with our new recipe
6852 * 2. For each Profile p in set P:
6853 * a. Add all recipes associated with Profile p into set R
6854 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
6855 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
6856 * i. Or just assume they all have the same possible indexes:
6858 * i.e., PossibleIndexes = 0x0000F00000000000
6860 * 3. For each Recipe r in set R:
6861 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
6862 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
6864 * FreeIndexes will contain the bits indicating the indexes free for use,
6865 * then the code needs to update the recipe[r].used_result_idx_bits to
6866 * indicate which indexes were selected for use by this recipe.
6869 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
6870 ice_bitmap_t *free_idx)
6872 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6873 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6874 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
6877 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6878 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6879 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
6880 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
/* Start from "all indexes possible"; it is narrowed by AND below. */
6882 ice_bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
6884 /* For each profile we are going to associate the recipe with, add the
6885 * recipes that are associated with that profile. This will give us
6886 * the set of recipes that our recipe may collide with. Also, determine
6887 * what possible result indexes are usable given this set of profiles.
6889 ice_for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
6890 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
6891 ICE_MAX_NUM_RECIPES);
6892 ice_and_bitmap(possible_idx, possible_idx,
6893 hw->switch_info->prof_res_bm[bit],
6897 /* For each recipe that our new recipe may collide with, determine
6898 * which indexes have been used.
6900 ice_for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
6901 ice_or_bitmap(used_idx, used_idx,
6902 hw->switch_info->recp_list[bit].res_idxs,
/* free = used XOR possible; equivalent to possible & ~used only when
 * used is a subset of possible — NOTE(review): assumed, verify.
 */
6905 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
6907 /* return number of free indexes */
6908 return (u16)ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS);
/* NOTE(review): this function is order-sensitive AQ programming and the
 * excerpt is missing many lines (declarations, goto labels, cleanup path,
 * closing braces). Code left byte-identical; comments only.
 */
6912 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
6913 * @hw: pointer to hardware structure
6914 * @rm: recipe management list entry
6915 * @profiles: bitmap of profiles that will be associated.
6917 static enum ice_status
6918 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
6919 ice_bitmap_t *profiles)
6921 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6922 struct ice_aqc_recipe_data_elem *tmp;
6923 struct ice_aqc_recipe_data_elem *buf;
6924 struct ice_recp_grp_entry *entry;
6925 enum ice_status status;
6931 /* When more than one recipe are required, another recipe is needed to
6932 * chain them together. Matching a tunnel metadata ID takes up one of
6933 * the match fields in the chaining recipe reducing the number of
6934 * chained recipes by one.
6936 /* check number of free result indices */
6937 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6938 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
6940 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
6941 free_res_idx, rm->n_grp_count);
/* Chained (multi-group) recipes need one free result index per group. */
6943 if (rm->n_grp_count > 1) {
6944 if (rm->n_grp_count > free_res_idx)
6945 return ICE_ERR_MAX_LIMIT;
6950 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
6951 return ICE_ERR_MAX_LIMIT;
/* tmp holds the recipes read back from firmware; buf is the batch to add. */
6953 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
6954 ICE_MAX_NUM_RECIPES,
6957 return ICE_ERR_NO_MEMORY;
6959 buf = (struct ice_aqc_recipe_data_elem *)
6960 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
6962 status = ICE_ERR_NO_MEMORY;
6966 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
6967 recipe_count = ICE_MAX_NUM_RECIPES;
6968 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
6970 if (status || recipe_count == 0)
6973 /* Allocate the recipe resources, and configure them according to the
6974 * match fields from protocol headers and extracted field vectors.
6976 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
6977 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
6980 status = ice_alloc_recipe(hw, &entry->rid);
6984 /* Clear the result index of the located recipe, as this will be
6985 * updated, if needed, later in the recipe creation process.
6987 tmp[0].content.result_indx = 0;
6989 buf[recps] = tmp[0];
6990 buf[recps].recipe_indx = (u8)entry->rid;
6991 /* if the recipe is a non-root recipe RID should be programmed
6992 * as 0 for the rules to be applied correctly.
6994 buf[recps].content.rid = 0;
6995 ice_memset(&buf[recps].content.lkup_indx, 0,
6996 sizeof(buf[recps].content.lkup_indx),
6999 /* All recipes use look-up index 0 to match switch ID. */
7000 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
7001 buf[recps].content.mask[0] =
7002 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
7003 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
7006 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
7007 buf[recps].content.lkup_indx[i] = 0x80;
7008 buf[recps].content.mask[i] = 0;
/* Program the actual match words: fv_idx/fv_mask were resolved
 * earlier by ice_fill_fv_word_index(); slot 0 is the switch ID.
 */
7011 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
7012 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
7013 buf[recps].content.mask[i + 1] =
7014 CPU_TO_LE16(entry->fv_mask[i]);
/* Multi-recipe chain: reserve a free result index so this recipe's
 * outcome can be consumed as a match word by the chaining recipe.
 */
7017 if (rm->n_grp_count > 1) {
7018 /* Checks to see if there really is a valid result index
7021 if (chain_idx >= ICE_MAX_FV_WORDS) {
7022 ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
7023 status = ICE_ERR_MAX_LIMIT;
7027 entry->chain_idx = chain_idx;
7028 buf[recps].content.result_indx =
7029 ICE_AQ_RECIPE_RESULT_EN |
7030 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
7031 ICE_AQ_RECIPE_RESULT_DATA_M);
7032 ice_clear_bit(chain_idx, result_idx_bm);
7033 chain_idx = ice_find_first_bit(result_idx_bm,
7037 /* fill recipe dependencies */
7038 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
7039 ICE_MAX_NUM_RECIPES);
7040 ice_set_bit(buf[recps].recipe_indx,
7041 (ice_bitmap_t *)buf[recps].recipe_bitmap);
7042 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
/* Single-group case: the lone recipe is itself the root recipe. */
7046 if (rm->n_grp_count == 1) {
7047 rm->root_rid = buf[0].recipe_indx;
7048 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
7049 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
7050 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
7051 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
7052 sizeof(buf[0].recipe_bitmap),
7053 ICE_NONDMA_TO_NONDMA);
7055 status = ICE_ERR_BAD_PTR;
7058 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
7059 * the recipe which is getting created if specified
7060 * by user. Usually any advanced switch filter, which results
7061 * into new extraction sequence, ended up creating a new recipe
7062 * of type ROOT and usually recipes are associated with profiles
7063 * Switch rule referreing newly created recipe, needs to have
7064 * either/or 'fwd' or 'join' priority, otherwise switch rule
7065 * evaluation will not happen correctly. In other words, if
7066 * switch rule to be evaluated on priority basis, then recipe
7067 * needs to have priority, otherwise it will be evaluated last.
7069 buf[0].content.act_ctrl_fwd_priority = rm->priority;
7071 struct ice_recp_grp_entry *last_chain_entry;
7074 /* Allocate the last recipe that will chain the outcomes of the
7075 * other recipes together
7077 status = ice_alloc_recipe(hw, &rid);
7081 buf[recps].recipe_indx = (u8)rid;
7082 buf[recps].content.rid = (u8)rid;
7083 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
7084 /* the new entry created should also be part of rg_list to
7085 * make sure we have complete recipe
7087 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
7088 sizeof(*last_chain_entry));
7089 if (!last_chain_entry) {
7090 status = ICE_ERR_NO_MEMORY;
7093 last_chain_entry->rid = rid;
7094 ice_memset(&buf[recps].content.lkup_indx, 0,
7095 sizeof(buf[recps].content.lkup_indx),
7097 /* All recipes use look-up index 0 to match switch ID. */
7098 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
7099 buf[recps].content.mask[0] =
7100 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
7101 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
7102 buf[recps].content.lkup_indx[i] =
7103 ICE_AQ_RECIPE_LKUP_IGNORE;
7104 buf[recps].content.mask[i] = 0;
7108 /* update r_bitmap with the recp that is used for chaining */
7109 ice_set_bit(rid, rm->r_bitmap);
7110 /* this is the recipe that chains all the other recipes so it
7111 * should not have a chaining ID to indicate the same
7113 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
/* The chaining recipe matches each sub-recipe's result word
 * (full 0xFFFF mask) at its reserved chain_idx.
 */
7114 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
7116 last_chain_entry->fv_idx[i] = entry->chain_idx;
7117 buf[recps].content.lkup_indx[i] = entry->chain_idx;
7118 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
7119 ice_set_bit(entry->rid, rm->r_bitmap);
7121 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
7122 if (sizeof(buf[recps].recipe_bitmap) >=
7123 sizeof(rm->r_bitmap)) {
7124 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
7125 sizeof(buf[recps].recipe_bitmap),
7126 ICE_NONDMA_TO_NONDMA);
7128 status = ICE_ERR_BAD_PTR;
7131 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
7134 rm->root_rid = (u8)rid;
/* Firmware programming is done under the change lock. */
7136 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7140 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
7141 ice_release_change_lock(hw);
7145 /* Every recipe that just got created add it to the recipe
7148 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
7149 struct ice_switch_info *sw = hw->switch_info;
7150 bool is_root, idx_found = false;
7151 struct ice_sw_recipe *recp;
7152 u16 idx, buf_idx = 0;
7154 /* find buffer index for copying some data */
7155 for (idx = 0; idx < rm->n_grp_count; idx++)
7156 if (buf[idx].recipe_indx == entry->rid) {
7162 status = ICE_ERR_OUT_OF_RANGE;
/* Mirror what was programmed into the software recp_list cache. */
7166 recp = &sw->recp_list[entry->rid];
7167 is_root = (rm->root_rid == entry->rid);
7168 recp->is_root = is_root;
7170 recp->root_rid = entry->rid;
7171 recp->big_recp = (is_root && rm->n_grp_count > 1);
7173 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
7174 entry->r_group.n_val_pairs *
7175 sizeof(struct ice_fv_word),
7176 ICE_NONDMA_TO_NONDMA);
7178 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
7179 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
7181 /* Copy non-result fv index values and masks to recipe. This
7182 * call will also update the result recipe bitmask.
7184 ice_collect_result_idx(&buf[buf_idx], recp);
7186 /* for non-root recipes, also copy to the root, this allows
7187 * easier matching of a complete chained recipe
7190 ice_collect_result_idx(&buf[buf_idx],
7191 &sw->recp_list[rm->root_rid]);
7193 recp->n_ext_words = entry->r_group.n_val_pairs;
7194 recp->chain_idx = entry->chain_idx;
7195 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
7196 recp->n_grp_count = rm->n_grp_count;
7197 recp->tun_type = rm->tun_type;
7198 recp->recp_created = true;
/* NOTE(review): excerpt is missing lines (embedded numbering skips values);
 * code below is untouched, comments only.
 */
7212 * ice_create_recipe_group - creates recipe group
7213 * @hw: pointer to hardware structure
7214 * @rm: recipe management list entry
7215 * @lkup_exts: lookup elements
7217 static enum ice_status
7218 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
7219 struct ice_prot_lkup_ext *lkup_exts)
7221 enum ice_status status;
7224 rm->n_grp_count = 0;
7226 /* Create recipes for words that are marked not done by packing them
7229 status = ice_create_first_fit_recp_def(hw, lkup_exts,
7230 &rm->rg_list, &recp_count);
/* On success, cache the extraction words and masks on the recipe
 * management entry for later matching/programming.
 */
7232 rm->n_grp_count += recp_count;
7233 rm->n_ext_words = lkup_exts->n_val_words;
7234 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
7235 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
7236 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
7237 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
/* NOTE(review): excerpt is missing lines (the goto label used for cleanup
 * and some declarations are not visible). Code untouched; comments only.
 */
7244 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
7245 * @hw: pointer to hardware structure
7246 * @lkups: lookup elements or match criteria for the advanced recipe, one
7247 * structure per protocol header
7248 * @lkups_cnt: number of protocols
7249 * @bm: bitmap of field vectors to consider
7250 * @fv_list: pointer to a list that holds the returned field vectors
7252 static enum ice_status
7253 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7254 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
7256 enum ice_status status;
/* Temporary array of HW protocol IDs, one per lookup element; freed
 * before returning (see ice_free below).
 */
7263 prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
7265 return ICE_ERR_NO_MEMORY;
7267 for (i = 0; i < lkups_cnt; i++)
7268 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
7269 status = ICE_ERR_CFG;
7273 /* Find field vectors that include all specified protocol types */
7274 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
7277 ice_free(hw, prot_ids);
/* NOTE(review): excerpt is missing lines (the switch statement opener,
 * return statements and default case are not visible). Code untouched.
 */
7282 * ice_tun_type_match_word - determine if tun type needs a match mask
7283 * @tun_type: tunnel type
7284 * @mask: mask to be used for the tunnel
7286 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
/* These tunnel/QinQ types match the full tunnel-flag metadata word. */
7289 case ICE_SW_TUN_VXLAN_GPE:
7290 case ICE_SW_TUN_GENEVE:
7291 case ICE_SW_TUN_VXLAN:
7292 case ICE_SW_TUN_NVGRE:
7293 case ICE_SW_TUN_UDP:
7294 case ICE_ALL_TUNNELS:
7295 case ICE_SW_TUN_AND_NON_TUN_QINQ:
7296 case ICE_NON_TUN_QINQ:
7297 case ICE_SW_TUN_PPPOE_QINQ:
7298 case ICE_SW_TUN_PPPOE_PAY_QINQ:
7299 case ICE_SW_TUN_PPPOE_IPV4_QINQ:
7300 case ICE_SW_TUN_PPPOE_IPV6_QINQ:
7301 *mask = ICE_TUN_FLAG_MASK;
/* VLAN-tagged tunnel variants exclude the VLAN bit from the flag mask. */
7304 case ICE_SW_TUN_GENEVE_VLAN:
7305 case ICE_SW_TUN_VXLAN_VLAN:
7306 *mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
/* NOTE(review): excerpt is missing lines (else branch structure and final
 * return are not visible). Code untouched; comments only.
 */
7316 * ice_add_special_words - Add words that are not protocols, such as metadata
7317 * @rinfo: other information regarding the rule e.g. priority and action info
7318 * @lkup_exts: lookup word structure
7320 static enum ice_status
7321 ice_add_special_words(struct ice_adv_rule_info *rinfo,
7322 struct ice_prot_lkup_ext *lkup_exts)
7326 /* If this is a tunneled packet, then add recipe index to match the
7327 * tunnel bit in the packet metadata flags.
7329 if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
/* The metadata word consumes one of the ICE_MAX_CHAIN_WORDS lookup
 * slots; fail with ICE_ERR_MAX_LIMIT when none is left.
 */
7330 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
7331 u8 word = lkup_exts->n_val_words++;
7333 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
7334 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
7335 lkup_exts->field_mask[word] = mask;
7337 return ICE_ERR_MAX_LIMIT;
/* NOTE(review): excerpt is missing lines (break statements and the return
 * after each case group are not visible). Code untouched; comments only.
 */
7344 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
7345 * @hw: pointer to hardware structure
7346 * @rinfo: other information regarding the rule e.g. priority and action info
7347 * @bm: pointer to memory for returning the bitmap of field vectors
7350 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
7353 enum ice_prof_type prof_type;
7355 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
7357 switch (rinfo->tun_type) {
/* Broad categories select a profile TYPE; ice_get_sw_fv_bitmap() at the
 * end expands it into the bitmap. Specific cases below instead set
 * explicit ICE_PROFID_* bits directly.
 */
7359 case ICE_NON_TUN_QINQ:
7360 prof_type = ICE_PROF_NON_TUN;
7362 case ICE_ALL_TUNNELS:
7363 prof_type = ICE_PROF_TUN_ALL;
7365 case ICE_SW_TUN_VXLAN_GPE:
7366 case ICE_SW_TUN_GENEVE:
7367 case ICE_SW_TUN_GENEVE_VLAN:
7368 case ICE_SW_TUN_VXLAN:
7369 case ICE_SW_TUN_VXLAN_VLAN:
7370 case ICE_SW_TUN_UDP:
7371 case ICE_SW_TUN_GTP:
7372 prof_type = ICE_PROF_TUN_UDP;
7374 case ICE_SW_TUN_NVGRE:
7375 prof_type = ICE_PROF_TUN_GRE;
7377 case ICE_SW_TUN_PPPOE:
7378 case ICE_SW_TUN_PPPOE_QINQ:
7379 prof_type = ICE_PROF_TUN_PPPOE;
7381 case ICE_SW_TUN_PPPOE_PAY:
7382 case ICE_SW_TUN_PPPOE_PAY_QINQ:
7383 ice_set_bit(ICE_PROFID_PPPOE_PAY, bm);
7385 case ICE_SW_TUN_PPPOE_IPV4:
7386 case ICE_SW_TUN_PPPOE_IPV4_QINQ:
7387 ice_set_bit(ICE_PROFID_PPPOE_IPV4_OTHER, bm);
7388 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
7389 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
7391 case ICE_SW_TUN_PPPOE_IPV4_TCP:
7392 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
7394 case ICE_SW_TUN_PPPOE_IPV4_UDP:
7395 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
7397 case ICE_SW_TUN_PPPOE_IPV6:
7398 case ICE_SW_TUN_PPPOE_IPV6_QINQ:
7399 ice_set_bit(ICE_PROFID_PPPOE_IPV6_OTHER, bm);
7400 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
7401 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
7403 case ICE_SW_TUN_PPPOE_IPV6_TCP:
7404 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
7406 case ICE_SW_TUN_PPPOE_IPV6_UDP:
7407 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
7409 case ICE_SW_TUN_PROFID_IPV6_ESP:
7410 case ICE_SW_TUN_IPV6_ESP:
7411 ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
7413 case ICE_SW_TUN_PROFID_IPV6_AH:
7414 case ICE_SW_TUN_IPV6_AH:
7415 ice_set_bit(ICE_PROFID_IPV6_AH, bm);
7417 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
7418 case ICE_SW_TUN_IPV6_L2TPV3:
7419 ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
7421 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
7422 case ICE_SW_TUN_IPV6_NAT_T:
7423 ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
7425 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
7426 ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
7428 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
7429 ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
7431 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
7432 ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
7434 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
7435 ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
7437 case ICE_SW_TUN_IPV4_NAT_T:
7438 ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
7440 case ICE_SW_TUN_IPV4_L2TPV3:
7441 ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
7443 case ICE_SW_TUN_IPV4_ESP:
7444 ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
7446 case ICE_SW_TUN_IPV4_AH:
7447 ice_set_bit(ICE_PROFID_IPV4_AH, bm);
7449 case ICE_SW_IPV4_TCP:
7450 ice_set_bit(ICE_PROFID_IPV4_TCP, bm);
7452 case ICE_SW_IPV4_UDP:
7453 ice_set_bit(ICE_PROFID_IPV4_UDP, bm);
7455 case ICE_SW_IPV6_TCP:
7456 ice_set_bit(ICE_PROFID_IPV6_TCP, bm);
7458 case ICE_SW_IPV6_UDP:
7459 ice_set_bit(ICE_PROFID_IPV6_UDP, bm);
/* GTP-U over IPv4/IPv6 with inner L3: select both EH and non-EH
 * variants for OTHER/UDP/TCP inner payloads.
 */
7461 case ICE_SW_TUN_IPV4_GTPU_IPV4:
7462 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, bm);
7463 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_OTHER, bm);
7464 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
7465 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
7466 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
7467 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
7469 case ICE_SW_TUN_IPV6_GTPU_IPV4:
7470 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, bm);
7471 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_OTHER, bm);
7472 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
7473 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
7474 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
7475 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
7477 case ICE_SW_TUN_IPV4_GTPU_IPV6:
7478 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, bm);
7479 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_OTHER, bm);
7480 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
7481 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
7482 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
7483 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
7485 case ICE_SW_TUN_IPV6_GTPU_IPV6:
7486 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, bm);
7487 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_OTHER, bm);
7488 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
7489 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
7490 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
7491 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
7493 case ICE_SW_TUN_AND_NON_TUN:
7494 case ICE_SW_TUN_AND_NON_TUN_QINQ:
7496 prof_type = ICE_PROF_ALL;
7500 ice_get_sw_fv_bitmap(hw, prof_type, bm);
/* NOTE(review): excerpt is missing lines (switch opener and the true/false
 * returns are not visible). Code untouched; comments only.
 */
7504 * ice_is_prof_rule - determine if rule type is a profile rule
7505 * @type: the rule type
7507 * if the rule type is a profile rule, that means that there no field value
7508 * match required, in this case just a profile hit is required.
7510 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
/* This list mirrors the ICE_SW_TUN_PROFID_* cases handled in
 * ice_get_compat_fv_bitmap(), which set explicit profile-ID bits.
 */
7513 case ICE_SW_TUN_PROFID_IPV6_ESP:
7514 case ICE_SW_TUN_PROFID_IPV6_AH:
7515 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
7516 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
7517 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
7518 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
7519 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
7520 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
/* NOTE(review): excerpt is missing many lines (error-path labels, some if
 * conditions, closing braces). Code left byte-identical; comments only.
 */
7530 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
7531 * @hw: pointer to hardware structure
7532 * @lkups: lookup elements or match criteria for the advanced recipe, one
7533 * structure per protocol header
7534 * @lkups_cnt: number of protocols
7535 * @rinfo: other information regarding the rule e.g. priority and action info
7536 * @rid: return the recipe ID of the recipe created
7538 static enum ice_status
7539 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7540 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
7542 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
7543 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7544 struct ice_prot_lkup_ext *lkup_exts;
7545 struct ice_recp_grp_entry *r_entry;
7546 struct ice_sw_fv_list_entry *fvit;
7547 struct ice_recp_grp_entry *r_tmp;
7548 struct ice_sw_fv_list_entry *tmp;
7549 enum ice_status status = ICE_SUCCESS;
7550 struct ice_sw_recipe *rm;
/* Profile-only rules need no lookup elements; everything else does. */
7553 if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
7554 return ICE_ERR_PARAM;
7556 lkup_exts = (struct ice_prot_lkup_ext *)
7557 ice_malloc(hw, sizeof(*lkup_exts));
7559 return ICE_ERR_NO_MEMORY;
7561 /* Determine the number of words to be matched and if it exceeds a
7562 * recipe's restrictions
7564 for (i = 0; i < lkups_cnt; i++) {
7567 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
7568 status = ICE_ERR_CFG;
7569 goto err_free_lkup_exts;
7572 count = ice_fill_valid_words(&lkups[i], lkup_exts);
7574 status = ICE_ERR_CFG;
7575 goto err_free_lkup_exts;
7579 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
7581 status = ICE_ERR_NO_MEMORY;
7582 goto err_free_lkup_exts;
7585 /* Get field vectors that contain fields extracted from all the protocol
7586 * headers being programmed.
7588 INIT_LIST_HEAD(&rm->fv_list);
7589 INIT_LIST_HEAD(&rm->rg_list);
7591 /* Get bitmap of field vectors (profiles) that are compatible with the
7592 * rule request; only these will be searched in the subsequent call to
7595 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
7597 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
7601 /* Create any special protocol/offset pairs, such as looking at tunnel
7602 * bits by extracting metadata
7604 status = ice_add_special_words(rinfo, lkup_exts);
7606 goto err_free_lkup_exts;
7608 /* Group match words into recipes using preferred recipe grouping
7611 status = ice_create_recipe_group(hw, rm, lkup_exts);
7615 /* set the recipe priority if specified */
7616 rm->priority = (u8)rinfo->priority;
7618 /* Find offsets from the field vector. Pick the first one for all the
7621 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
7625 /* An empty FV list means to use all the profiles returned in the
7628 if (LIST_EMPTY(&rm->fv_list)) {
7631 ice_for_each_set_bit(j, fv_bitmap, ICE_MAX_NUM_PROFILES) {
7632 struct ice_sw_fv_list_entry *fvl;
7634 fvl = (struct ice_sw_fv_list_entry *)
7635 ice_malloc(hw, sizeof(*fvl));
7639 fvl->profile_id = j;
7640 LIST_ADD(&fvl->list_entry, &rm->fv_list);
7644 /* get bitmap of all profiles the recipe will be associated with */
7645 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7646 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7648 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
7649 ice_set_bit((u16)fvit->profile_id, profiles);
7652 /* Look for a recipe which matches our requested fv / mask list */
7653 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
7654 if (*rid < ICE_MAX_NUM_RECIPES)
7655 /* Success if found a recipe that match the existing criteria */
7658 rm->tun_type = rinfo->tun_type;
7659 /* Recipe we need does not exist, add a recipe */
7660 status = ice_add_sw_recipe(hw, rm, profiles);
7664 /* Associate all the recipes created with all the profiles in the
7665 * common field vector.
7667 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7669 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Read-modify-write of the FW recipe-to-profile association,
 * done under the change lock.
 */
7672 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
7673 (u8 *)r_bitmap, NULL);
7677 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
7678 ICE_MAX_NUM_RECIPES);
7679 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7683 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
7686 ice_release_change_lock(hw);
7691 /* Update profile to recipe bitmap array */
7692 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
7693 ICE_MAX_NUM_RECIPES);
7695 /* Update recipe to profile bitmap array */
7696 ice_for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
7697 ice_set_bit((u16)fvit->profile_id,
7698 recipe_to_profile[j]);
7701 *rid = rm->root_rid;
7702 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
7703 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
/* Cleanup: free the temporary recipe-group and FV list entries. */
7705 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
7706 ice_recp_grp_entry, l_entry) {
7707 LIST_DEL(&r_entry->l_entry);
7708 ice_free(hw, r_entry);
7711 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
7713 LIST_DEL(&fvit->list_entry);
7718 ice_free(hw, rm->root_buf);
7723 ice_free(hw, lkup_exts);
7729 * ice_find_dummy_packet - find dummy packet by tunnel type
7731 * @lkups: lookup elements or match criteria for the advanced recipe, one
7732 * structure per protocol header
7733 * @lkups_cnt: number of protocols
7734 * @tun_type: tunnel type from the match criteria
7735 * @pkt: dummy packet to fill according to filter match criteria
7736 * @pkt_len: packet length of dummy packet
7737 * @offsets: pointer to receive the pointer to the offsets for the packet
7740 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7741 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
7743 const struct ice_dummy_pkt_offsets **offsets)
7745 bool tcp = false, udp = false, ipv6 = false, vlan = false;
7749 for (i = 0; i < lkups_cnt; i++) {
7750 if (lkups[i].type == ICE_UDP_ILOS)
7752 else if (lkups[i].type == ICE_TCP_IL)
7754 else if (lkups[i].type == ICE_IPV6_OFOS)
7756 else if (lkups[i].type == ICE_VLAN_OFOS)
7758 else if (lkups[i].type == ICE_IPV4_OFOS &&
7759 lkups[i].h_u.ipv4_hdr.protocol ==
7760 ICE_IPV4_NVGRE_PROTO_ID &&
7761 lkups[i].m_u.ipv4_hdr.protocol ==
7764 else if (lkups[i].type == ICE_PPPOE &&
7765 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
7766 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
7767 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
7770 else if (lkups[i].type == ICE_ETYPE_OL &&
7771 lkups[i].h_u.ethertype.ethtype_id ==
7772 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
7773 lkups[i].m_u.ethertype.ethtype_id ==
7776 else if (lkups[i].type == ICE_IPV4_IL &&
7777 lkups[i].h_u.ipv4_hdr.protocol ==
7779 lkups[i].m_u.ipv4_hdr.protocol ==
7784 if ((tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7785 tun_type == ICE_NON_TUN_QINQ) && ipv6) {
7786 *pkt = dummy_qinq_ipv6_pkt;
7787 *pkt_len = sizeof(dummy_qinq_ipv6_pkt);
7788 *offsets = dummy_qinq_ipv6_packet_offsets;
7790 } else if (tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7791 tun_type == ICE_NON_TUN_QINQ) {
7792 *pkt = dummy_qinq_ipv4_pkt;
7793 *pkt_len = sizeof(dummy_qinq_ipv4_pkt);
7794 *offsets = dummy_qinq_ipv4_packet_offsets;
7798 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_QINQ) {
7799 *pkt = dummy_qinq_pppoe_ipv6_packet;
7800 *pkt_len = sizeof(dummy_qinq_pppoe_ipv6_packet);
7801 *offsets = dummy_qinq_pppoe_packet_ipv6_offsets;
7803 } else if (tun_type == ICE_SW_TUN_PPPOE_IPV4_QINQ) {
7804 *pkt = dummy_qinq_pppoe_ipv4_pkt;
7805 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7806 *offsets = dummy_qinq_pppoe_ipv4_packet_offsets;
7808 } else if (tun_type == ICE_SW_TUN_PPPOE_QINQ ||
7809 tun_type == ICE_SW_TUN_PPPOE_PAY_QINQ) {
7810 *pkt = dummy_qinq_pppoe_ipv4_pkt;
7811 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7812 *offsets = dummy_qinq_pppoe_packet_offsets;
7816 if (tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY) {
7817 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7818 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7819 *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
7821 } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
7822 *pkt = dummy_ipv6_gtpu_ipv6_packet;
7823 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
7824 *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
7828 if (tun_type == ICE_SW_TUN_IPV4_ESP) {
7829 *pkt = dummy_ipv4_esp_pkt;
7830 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
7831 *offsets = dummy_ipv4_esp_packet_offsets;
7835 if (tun_type == ICE_SW_TUN_IPV6_ESP) {
7836 *pkt = dummy_ipv6_esp_pkt;
7837 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
7838 *offsets = dummy_ipv6_esp_packet_offsets;
7842 if (tun_type == ICE_SW_TUN_IPV4_AH) {
7843 *pkt = dummy_ipv4_ah_pkt;
7844 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
7845 *offsets = dummy_ipv4_ah_packet_offsets;
7849 if (tun_type == ICE_SW_TUN_IPV6_AH) {
7850 *pkt = dummy_ipv6_ah_pkt;
7851 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
7852 *offsets = dummy_ipv6_ah_packet_offsets;
7856 if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
7857 *pkt = dummy_ipv4_nat_pkt;
7858 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
7859 *offsets = dummy_ipv4_nat_packet_offsets;
7863 if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
7864 *pkt = dummy_ipv6_nat_pkt;
7865 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
7866 *offsets = dummy_ipv6_nat_packet_offsets;
7870 if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
7871 *pkt = dummy_ipv4_l2tpv3_pkt;
7872 *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
7873 *offsets = dummy_ipv4_l2tpv3_packet_offsets;
7877 if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
7878 *pkt = dummy_ipv6_l2tpv3_pkt;
7879 *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
7880 *offsets = dummy_ipv6_l2tpv3_packet_offsets;
7884 if (tun_type == ICE_SW_TUN_GTP) {
7885 *pkt = dummy_udp_gtp_packet;
7886 *pkt_len = sizeof(dummy_udp_gtp_packet);
7887 *offsets = dummy_udp_gtp_packet_offsets;
7891 if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
7892 *pkt = dummy_pppoe_ipv6_packet;
7893 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
7894 *offsets = dummy_pppoe_packet_offsets;
7896 } else if (tun_type == ICE_SW_TUN_PPPOE ||
7897 tun_type == ICE_SW_TUN_PPPOE_PAY) {
7898 *pkt = dummy_pppoe_ipv4_packet;
7899 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
7900 *offsets = dummy_pppoe_packet_offsets;
7904 if (tun_type == ICE_SW_TUN_PPPOE_IPV4) {
7905 *pkt = dummy_pppoe_ipv4_packet;
7906 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
7907 *offsets = dummy_pppoe_packet_ipv4_offsets;
7911 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_TCP) {
7912 *pkt = dummy_pppoe_ipv4_tcp_packet;
7913 *pkt_len = sizeof(dummy_pppoe_ipv4_tcp_packet);
7914 *offsets = dummy_pppoe_ipv4_tcp_packet_offsets;
7918 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_UDP) {
7919 *pkt = dummy_pppoe_ipv4_udp_packet;
7920 *pkt_len = sizeof(dummy_pppoe_ipv4_udp_packet);
7921 *offsets = dummy_pppoe_ipv4_udp_packet_offsets;
7925 if (tun_type == ICE_SW_TUN_PPPOE_IPV6) {
7926 *pkt = dummy_pppoe_ipv6_packet;
7927 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
7928 *offsets = dummy_pppoe_packet_ipv6_offsets;
7932 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_TCP) {
7933 *pkt = dummy_pppoe_ipv6_tcp_packet;
7934 *pkt_len = sizeof(dummy_pppoe_ipv6_tcp_packet);
7935 *offsets = dummy_pppoe_packet_ipv6_tcp_offsets;
7939 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_UDP) {
7940 *pkt = dummy_pppoe_ipv6_udp_packet;
7941 *pkt_len = sizeof(dummy_pppoe_ipv6_udp_packet);
7942 *offsets = dummy_pppoe_packet_ipv6_udp_offsets;
7946 if (tun_type == ICE_SW_IPV4_TCP) {
7947 *pkt = dummy_tcp_packet;
7948 *pkt_len = sizeof(dummy_tcp_packet);
7949 *offsets = dummy_tcp_packet_offsets;
7953 if (tun_type == ICE_SW_IPV4_UDP) {
7954 *pkt = dummy_udp_packet;
7955 *pkt_len = sizeof(dummy_udp_packet);
7956 *offsets = dummy_udp_packet_offsets;
7960 if (tun_type == ICE_SW_IPV6_TCP) {
7961 *pkt = dummy_tcp_ipv6_packet;
7962 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
7963 *offsets = dummy_tcp_ipv6_packet_offsets;
7967 if (tun_type == ICE_SW_IPV6_UDP) {
7968 *pkt = dummy_udp_ipv6_packet;
7969 *pkt_len = sizeof(dummy_udp_ipv6_packet);
7970 *offsets = dummy_udp_ipv6_packet_offsets;
7974 /* Support GTP tunnel + L3 */
7975 if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4) {
7976 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7977 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7978 *offsets = dummy_ipv4_gtpu_ipv4_packet_offsets;
7981 if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6) {
7982 *pkt = dummy_ipv4_gtpu_ipv6_packet;
7983 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_packet);
7984 *offsets = dummy_ipv4_gtpu_ipv6_packet_offsets;
7987 if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4) {
7988 *pkt = dummy_ipv6_gtpu_ipv4_packet;
7989 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_packet);
7990 *offsets = dummy_ipv6_gtpu_ipv4_packet_offsets;
7993 if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6) {
7994 *pkt = dummy_ipv6_gtpu_ipv6_packet;
7995 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
7996 *offsets = dummy_ipv6_gtpu_ipv6_packet_offsets;
8000 if (tun_type == ICE_ALL_TUNNELS) {
8001 *pkt = dummy_gre_udp_packet;
8002 *pkt_len = sizeof(dummy_gre_udp_packet);
8003 *offsets = dummy_gre_udp_packet_offsets;
8007 if (tun_type == ICE_SW_TUN_NVGRE || gre) {
8009 *pkt = dummy_gre_tcp_packet;
8010 *pkt_len = sizeof(dummy_gre_tcp_packet);
8011 *offsets = dummy_gre_tcp_packet_offsets;
8015 *pkt = dummy_gre_udp_packet;
8016 *pkt_len = sizeof(dummy_gre_udp_packet);
8017 *offsets = dummy_gre_udp_packet_offsets;
8021 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
8022 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
8023 tun_type == ICE_SW_TUN_GENEVE_VLAN ||
8024 tun_type == ICE_SW_TUN_VXLAN_VLAN) {
8026 *pkt = dummy_udp_tun_tcp_packet;
8027 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
8028 *offsets = dummy_udp_tun_tcp_packet_offsets;
8032 *pkt = dummy_udp_tun_udp_packet;
8033 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
8034 *offsets = dummy_udp_tun_udp_packet_offsets;
8040 *pkt = dummy_vlan_udp_packet;
8041 *pkt_len = sizeof(dummy_vlan_udp_packet);
8042 *offsets = dummy_vlan_udp_packet_offsets;
8045 *pkt = dummy_udp_packet;
8046 *pkt_len = sizeof(dummy_udp_packet);
8047 *offsets = dummy_udp_packet_offsets;
8049 } else if (udp && ipv6) {
8051 *pkt = dummy_vlan_udp_ipv6_packet;
8052 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
8053 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
8056 *pkt = dummy_udp_ipv6_packet;
8057 *pkt_len = sizeof(dummy_udp_ipv6_packet);
8058 *offsets = dummy_udp_ipv6_packet_offsets;
8060 } else if ((tcp && ipv6) || ipv6) {
8062 *pkt = dummy_vlan_tcp_ipv6_packet;
8063 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
8064 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
8067 *pkt = dummy_tcp_ipv6_packet;
8068 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
8069 *offsets = dummy_tcp_ipv6_packet_offsets;
8074 *pkt = dummy_vlan_tcp_packet;
8075 *pkt_len = sizeof(dummy_vlan_tcp_packet);
8076 *offsets = dummy_vlan_tcp_packet_offsets;
8078 *pkt = dummy_tcp_packet;
8079 *pkt_len = sizeof(dummy_tcp_packet);
8080 *offsets = dummy_tcp_packet_offsets;
/* NOTE(review): fragment of an elided numbered listing — the embedded line
 * numbers are discontinuous (e.g. 8099->8104, 8122->8127), so braces, breaks
 * and case labels are missing here. Code kept byte-identical; comments only.
 * Purpose: copy the dummy packet into the rule buffer, then overlay each
 * caller-supplied lookup's header words under its mask.
 */
8085  * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
8087  * @lkups: lookup elements or match criteria for the advanced recipe, one
8088  * structure per protocol header
8089  * @lkups_cnt: number of protocols
8090  * @s_rule: stores rule information from the match criteria
8091  * @dummy_pkt: dummy packet to fill according to filter match criteria
8092  * @pkt_len: packet length of dummy packet
8093  * @offsets: offset info for the dummy packet
8095 static enum ice_status
8096 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
8097 struct ice_aqc_sw_rules_elem *s_rule,
8098 const u8 *dummy_pkt, u16 pkt_len,
8099 const struct ice_dummy_pkt_offsets *offsets)
8104 /* Start with a packet with a pre-defined/dummy content. Then, fill
8105 * in the header values to be looked up or matched.
8107 pkt = s_rule->pdata.lkup_tx_rx.hdr;
8109 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
8111 for (i = 0; i < lkups_cnt; i++) {
8112 enum ice_protocol_type type;
8113 u16 offset = 0, len = 0, j;
8116 /* find the start of this layer; it should be found since this
8117 * was already checked when search for the dummy packet
8119 type = lkups[i].type;
8120 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
8121 if (type == offsets[j].type) {
8122 offset = offsets[j].offset;
8127 /* this should never happen in a correct calling sequence */
8129 return ICE_ERR_PARAM;
/* Per-protocol header length selection; the case labels for most of the
 * assignments below were elided from this listing (number gaps). */
8131 switch (lkups[i].type) {
8134 len = sizeof(struct ice_ether_hdr);
8137 len = sizeof(struct ice_ethtype_hdr);
8141 len = sizeof(struct ice_vlan_hdr);
8145 len = sizeof(struct ice_ipv4_hdr);
8149 len = sizeof(struct ice_ipv6_hdr);
8154 len = sizeof(struct ice_l4_hdr);
8157 len = sizeof(struct ice_sctp_hdr);
8160 len = sizeof(struct ice_nvgre);
8165 len = sizeof(struct ice_udp_tnl_hdr);
8169 case ICE_GTP_NO_PAY:
8170 len = sizeof(struct ice_udp_gtp_hdr);
8173 len = sizeof(struct ice_pppoe_hdr);
8176 len = sizeof(struct ice_esp_hdr);
8179 len = sizeof(struct ice_nat_t_hdr);
8182 len = sizeof(struct ice_ah_hdr);
8185 len = sizeof(struct ice_l2tpv3_sess_hdr);
8188 return ICE_ERR_PARAM;
8191 /* the length should be a word multiple */
8192 if (len % ICE_BYTES_PER_WORD)
8195 /* We have the offset to the header start, the length, the
8196 * caller's header values and mask. Use this information to
8197 * copy the data into the dummy packet appropriately based on
8198 * the mask. Note that we need to only write the bits as
8199 * indicated by the mask to make sure we don't improperly write
8200 * over any significant packet data.
8202 for (j = 0; j < len / sizeof(u16); j++)
8203 if (((u16 *)&lkups[i].m_u)[j])
8204 ((u16 *)(pkt + offset))[j] =
8205 (((u16 *)(pkt + offset))[j] &
8206 ~((u16 *)&lkups[i].m_u)[j]) |
8207 (((u16 *)&lkups[i].h_u)[j] &
8208 ((u16 *)&lkups[i].m_u)[j]);
8211 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
/* NOTE(review): elided numbered listing fragment; switch scaffolding
 * (switch statement head, breaks, default) missing per the number gaps.
 * Code kept byte-identical; comments only.
 * Purpose: look up the open VXLAN/GENEVE tunnel UDP port and patch it into
 * the outer UDP header (ICE_UDP_OF) of the dummy packet.
 */
8217  * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
8218  * @hw: pointer to the hardware structure
8219  * @tun_type: tunnel type
8220  * @pkt: dummy packet to fill in
8221  * @offsets: offset info for the dummy packet
8223 static enum ice_status
8224 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
8225 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
8230 case ICE_SW_TUN_AND_NON_TUN:
8231 case ICE_SW_TUN_VXLAN_GPE:
8232 case ICE_SW_TUN_VXLAN:
8233 case ICE_SW_TUN_VXLAN_VLAN:
8234 case ICE_SW_TUN_UDP:
8235 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
8239 case ICE_SW_TUN_GENEVE:
8240 case ICE_SW_TUN_GENEVE_VLAN:
8241 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
8246 /* Nothing needs to be done for this tunnel type */
8250 /* Find the outer UDP protocol header and insert the port number */
8251 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
8252 if (offsets[i].type == ICE_UDP_OF) {
8253 struct ice_l4_hdr *hdr;
8256 offset = offsets[i].offset;
8257 hdr = (struct ice_l4_hdr *)&pkt[offset];
8258 hdr->dst_port = CPU_TO_BE16(open_port);
/* NOTE(review): elided numbered listing fragment (returns/closing braces
 * missing per number gaps). Code kept byte-identical; comments only.
 * Purpose: linear search of recp_list[recp_id].filt_rules for an entry whose
 * lookups, sw_act flag and tunnel type match the caller's criteria.
 */
8268  * ice_find_adv_rule_entry - Search a rule entry
8269  * @hw: pointer to the hardware structure
8270  * @lkups: lookup elements or match criteria for the advanced recipe, one
8271  * structure per protocol header
8272  * @lkups_cnt: number of protocols
8273  * @recp_id: recipe ID for which we are finding the rule
8274  * @rinfo: other information regarding the rule e.g. priority and action info
8276  * Helper function to search for a given advance rule entry
8277  * Returns pointer to entry storing the rule if found
8279 static struct ice_adv_fltr_mgmt_list_entry *
8280 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8281 u16 lkups_cnt, u16 recp_id,
8282 struct ice_adv_rule_info *rinfo)
8284 struct ice_adv_fltr_mgmt_list_entry *list_itr;
8285 struct ice_switch_info *sw = hw->switch_info;
8288 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
8289 ice_adv_fltr_mgmt_list_entry, list_entry) {
8290 bool lkups_matched = true;
8292 if (lkups_cnt != list_itr->lkups_cnt)
8294 for (i = 0; i < list_itr->lkups_cnt; i++)
8295 if (memcmp(&list_itr->lkups[i], &lkups[i],
8297 lkups_matched = false;
8300 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
8301 rinfo->tun_type == list_itr->rule_info.tun_type &&
/* NOTE(review): elided numbered listing fragment; error-check lines after
 * several calls are missing per the embedded number gaps. Code kept
 * byte-identical; comments only.
 */
8309  * ice_adv_add_update_vsi_list
8310  * @hw: pointer to the hardware structure
8311  * @m_entry: pointer to current adv filter management list entry
8312  * @cur_fltr: filter information from the book keeping entry
8313  * @new_fltr: filter information with the new VSI to be added
8315  * Call AQ command to add or update previously created VSI list with new VSI.
8317  * Helper function to do book keeping associated with adding filter information
8318  * The algorithm to do the booking keeping is described below :
8319  * When a VSI needs to subscribe to a given advanced filter
8320  * if only one VSI has been added till now
8321  * Allocate a new VSI list and add two VSIs
8322  * to this list using switch rule command
8323  * Update the previously created switch rule with the
8324  * newly created VSI list ID
8325  * if a VSI list was previously created
8326  * Add the new VSI to the previously created VSI list set
8327  * using the update switch rule command
8329 static enum ice_status
8330 ice_adv_add_update_vsi_list(struct ice_hw *hw,
8331 struct ice_adv_fltr_mgmt_list_entry *m_entry,
8332 struct ice_adv_rule_info *cur_fltr,
8333 struct ice_adv_rule_info *new_fltr)
8335 enum ice_status status;
8336 u16 vsi_list_id = 0;
/* Queue/queue-group/drop actions cannot be multiplexed onto a VSI list */
8338 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
8339 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
8340 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
8341 return ICE_ERR_NOT_IMPL;
8343 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
8344 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
8345 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
8346 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
8347 return ICE_ERR_NOT_IMPL;
8349 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
8350 /* Only one entry existed in the mapping and it was not already
8351 * a part of a VSI list. So, create a VSI list with the old and
8354 struct ice_fltr_info tmp_fltr;
8355 u16 vsi_handle_arr[2];
8357 /* A rule already exists with the new VSI being added */
8358 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
8359 new_fltr->sw_act.fwd_id.hw_vsi_id)
8360 return ICE_ERR_ALREADY_EXISTS;
8362 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
8363 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
8364 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
8370 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
8371 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
8372 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
8373 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
8374 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
8375 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
8377 /* Update the previous switch rule of "forward to VSI" to
8380 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
8384 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
8385 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
8386 m_entry->vsi_list_info =
8387 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
8390 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
8392 if (!m_entry->vsi_list_info)
8395 /* A rule already exists with the new VSI being added */
8396 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
8399 /* Update the previously created VSI list set with
8400 * the new VSI ID passed in
8402 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
8404 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
8406 ice_aqc_opc_update_sw_rules,
8408 /* update VSI list mapping info with new VSI ID */
8410 ice_set_bit(vsi_handle,
8411 m_entry->vsi_list_info->vsi_map);
8414 m_entry->vsi_count++;
/* NOTE(review): elided numbered listing fragment — many lines (declarations,
 * `break;`s, error checks, closing braces) are missing per the embedded
 * number gaps. Code kept byte-identical; comments only.
 * High-level flow visible here: validate inputs -> find/reuse recipe ->
 * either join an existing rule's VSI list or build, fill and send a new
 * lookup TX/RX switch rule via AQ, then record it in the bookkeeping list.
 */
8419  * ice_add_adv_rule - helper function to create an advanced switch rule
8420  * @hw: pointer to the hardware structure
8421  * @lkups: information on the words that needs to be looked up. All words
8422  * together makes one recipe
8423  * @lkups_cnt: num of entries in the lkups array
8424  * @rinfo: other information related to the rule that needs to be programmed
8425  * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
8426  * ignored is case of error.
8428  * This function can program only 1 rule at a time. The lkups is used to
8429  * describe the all the words that forms the "lookup" portion of the recipe.
8430  * These words can span multiple protocols. Callers to this function need to
8431  * pass in a list of protocol headers with lookup information along and mask
8432  * that determines which words are valid from the given protocol header.
8433  * rinfo describes other information related to this rule such as forwarding
8434  * IDs, priority of this rule, etc.
8437 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8438 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
8439 struct ice_rule_query_data *added_entry)
8441 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
8442 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
8443 const struct ice_dummy_pkt_offsets *pkt_offsets;
8444 struct ice_aqc_sw_rules_elem *s_rule = NULL;
8445 struct LIST_HEAD_TYPE *rule_head;
8446 struct ice_switch_info *sw;
8447 enum ice_status status;
8448 const u8 *pkt = NULL;
8454 /* Initialize profile to result index bitmap */
8455 if (!hw->switch_info->prof_res_bm_init) {
8456 hw->switch_info->prof_res_bm_init = 1;
8457 ice_init_prof_result_bm(hw);
8460 prof_rule = ice_is_prof_rule(rinfo->tun_type);
8461 if (!prof_rule && !lkups_cnt)
8462 return ICE_ERR_PARAM;
8464 /* get # of words we need to match */
8466 for (i = 0; i < lkups_cnt; i++) {
8469 ptr = (u16 *)&lkups[i].m_u;
8470 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
8476 if (word_cnt > ICE_MAX_CHAIN_WORDS)
8477 return ICE_ERR_PARAM;
8479 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
8480 return ICE_ERR_PARAM;
8483 /* make sure that we can locate a dummy packet */
8484 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
8487 status = ICE_ERR_PARAM;
8488 goto err_ice_add_adv_rule;
8491 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
8492 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
8493 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
8494 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
8497 vsi_handle = rinfo->sw_act.vsi_handle;
8498 if (!ice_is_vsi_valid(hw, vsi_handle))
8499 return ICE_ERR_PARAM;
8501 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
8502 rinfo->sw_act.fwd_id.hw_vsi_id =
8503 ice_get_hw_vsi_num(hw, vsi_handle);
8504 if (rinfo->sw_act.flag & ICE_FLTR_TX)
8505 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
8507 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
8510 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
8512 /* we have to add VSI to VSI_LIST and increment vsi_count.
8513 * Also Update VSI list so that we can change forwarding rule
8514 * if the rule already exists, we will check if it exists with
8515 * same vsi_id, if not then add it to the VSI list if it already
8516 * exists if not then create a VSI list and add the existing VSI
8517 * ID and the new VSI ID to the list
8518 * We will add that VSI to the list
8520 status = ice_adv_add_update_vsi_list(hw, m_entry,
8521 &m_entry->rule_info,
8524 added_entry->rid = rid;
8525 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
8526 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
8530 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
8531 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
8533 return ICE_ERR_NO_MEMORY;
8534 act |= ICE_SINGLE_ACT_LAN_ENABLE;
/* Encode the caller's action into the single-action word */
8535 switch (rinfo->sw_act.fltr_act) {
8536 case ICE_FWD_TO_VSI:
8537 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
8538 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
8539 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
8542 act |= ICE_SINGLE_ACT_TO_Q;
8543 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
8544 ICE_SINGLE_ACT_Q_INDEX_M;
8546 case ICE_FWD_TO_QGRP:
8547 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
8548 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
8549 act |= ICE_SINGLE_ACT_TO_Q;
8550 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
8551 ICE_SINGLE_ACT_Q_INDEX_M;
8552 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
8553 ICE_SINGLE_ACT_Q_REGION_M;
8555 case ICE_DROP_PACKET:
8556 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
8557 ICE_SINGLE_ACT_VALID_BIT;
8560 status = ICE_ERR_CFG;
8561 goto err_ice_add_adv_rule;
8564 /* set the rule LOOKUP type based on caller specified 'RX'
8565 * instead of hardcoding it to be either LOOKUP_TX/RX
8567 * for 'RX' set the source to be the port number
8568 * for 'TX' set the source to be the source HW VSI number (determined
8572 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
8573 s_rule->pdata.lkup_tx_rx.src =
8574 CPU_TO_LE16(hw->port_info->lport);
8576 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
8577 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
8580 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
8581 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
8583 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
8584 pkt_len, pkt_offsets);
8586 goto err_ice_add_adv_rule;
8588 if (rinfo->tun_type != ICE_NON_TUN &&
8589 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
8590 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
8591 s_rule->pdata.lkup_tx_rx.hdr,
8594 goto err_ice_add_adv_rule;
8597 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
8598 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
8601 goto err_ice_add_adv_rule;
8602 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
8603 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
8605 status = ICE_ERR_NO_MEMORY;
8606 goto err_ice_add_adv_rule;
8609 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
8610 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
8611 ICE_NONDMA_TO_NONDMA);
8612 if (!adv_fltr->lkups && !prof_rule) {
8613 status = ICE_ERR_NO_MEMORY;
8614 goto err_ice_add_adv_rule;
8617 adv_fltr->lkups_cnt = lkups_cnt;
8618 adv_fltr->rule_info = *rinfo;
8619 adv_fltr->rule_info.fltr_rule_id =
8620 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
8621 sw = hw->switch_info;
8622 sw->recp_list[rid].adv_rule = true;
8623 rule_head = &sw->recp_list[rid].filt_rules;
8625 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
8626 adv_fltr->vsi_count = 1;
8628 /* Add rule entry to book keeping list */
8629 LIST_ADD(&adv_fltr->list_entry, rule_head);
8631 added_entry->rid = rid;
8632 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
8633 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
8635 err_ice_add_adv_rule:
/* goto-cleanup: on failure free the partially-built bookkeeping entry */
8636 if (status && adv_fltr) {
8637 ice_free(hw, adv_fltr->lkups);
8638 ice_free(hw, adv_fltr);
8641 ice_free(hw, s_rule);
/* NOTE(review): elided numbered listing fragment (several error checks and
 * braces missing per number gaps). Code kept byte-identical; comments only.
 * Purpose: drop @vsi_handle from a rule's VSI list; when only one VSI
 * remains, convert the rule back to plain FWD_TO_VSI and delete the list.
 */
8647  * ice_adv_rem_update_vsi_list
8648  * @hw: pointer to the hardware structure
8649  * @vsi_handle: VSI handle of the VSI to remove
8650  * @fm_list: filter management entry for which the VSI list management needs to
8653 static enum ice_status
8654 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
8655 struct ice_adv_fltr_mgmt_list_entry *fm_list)
8657 struct ice_vsi_list_map_info *vsi_list_info;
8658 enum ice_sw_lkup_type lkup_type;
8659 enum ice_status status;
8662 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
8663 fm_list->vsi_count == 0)
8664 return ICE_ERR_PARAM;
8666 /* A rule with the VSI being removed does not exist */
8667 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
8668 return ICE_ERR_DOES_NOT_EXIST;
8670 lkup_type = ICE_SW_LKUP_LAST;
8671 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
8672 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
8673 ice_aqc_opc_update_sw_rules,
8678 fm_list->vsi_count--;
8679 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
8680 vsi_list_info = fm_list->vsi_list_info;
8681 if (fm_list->vsi_count == 1) {
8682 struct ice_fltr_info tmp_fltr;
8685 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
8687 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
8688 return ICE_ERR_OUT_OF_RANGE;
8690 /* Make sure VSI list is empty before removing it below */
8691 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
8693 ice_aqc_opc_update_sw_rules,
8698 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
8699 tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
8700 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
8701 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
8702 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
8703 tmp_fltr.fwd_id.hw_vsi_id =
8704 ice_get_hw_vsi_num(hw, rem_vsi_handle);
8705 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
8706 ice_get_hw_vsi_num(hw, rem_vsi_handle);
8707 fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
8709 /* Update the previous switch rule of "MAC forward to VSI" to
8710 * "MAC fwd to VSI list"
8712 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
8714 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
8715 tmp_fltr.fwd_id.hw_vsi_id, status);
8718 fm_list->vsi_list_info->ref_cnt--;
8720 /* Remove the VSI list since it is no longer used */
8721 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
8723 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
8724 vsi_list_id, status);
8728 LIST_DEL(&vsi_list_info->list_entry);
8729 ice_free(hw, vsi_list_info);
8730 fm_list->vsi_list_info = NULL;
/* NOTE(review): elided numbered listing fragment (braces/checks missing per
 * number gaps). Code kept byte-identical; comments only.
 * Purpose: locate the recipe and bookkeeping entry matching the caller's
 * lookups, detach the VSI (possibly via VSI-list update) and, when the rule
 * has no users left, remove the switch rule via AQ and free the entry.
 */
8737  * ice_rem_adv_rule - removes existing advanced switch rule
8738  * @hw: pointer to the hardware structure
8739  * @lkups: information on the words that needs to be looked up. All words
8740  * together makes one recipe
8741  * @lkups_cnt: num of entries in the lkups array
8742  * @rinfo: Its the pointer to the rule information for the rule
8744  * This function can be used to remove 1 rule at a time. The lkups is
8745  * used to describe all the words that forms the "lookup" portion of the
8746  * rule. These words can span multiple protocols. Callers to this function
8747  * need to pass in a list of protocol headers with lookup information along
8748  * and mask that determines which words are valid from the given protocol
8749  * header. rinfo describes other information related to this rule such as
8750  * forwarding IDs, priority of this rule, etc.
8753 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8754 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
8756 struct ice_adv_fltr_mgmt_list_entry *list_elem;
8757 struct ice_prot_lkup_ext lkup_exts;
8758 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
8759 enum ice_status status = ICE_SUCCESS;
8760 bool remove_rule = false;
8761 u16 i, rid, vsi_handle;
8763 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
8764 for (i = 0; i < lkups_cnt; i++) {
8767 if (lkups[i].type >= ICE_PROTOCOL_LAST)
8770 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
8775 /* Create any special protocol/offset pairs, such as looking at tunnel
8776 * bits by extracting metadata
8778 status = ice_add_special_words(rinfo, &lkup_exts);
8782 rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
8783 /* If did not find a recipe that match the existing criteria */
8784 if (rid == ICE_MAX_NUM_RECIPES)
8785 return ICE_ERR_PARAM;
8787 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
8788 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
8789 /* the rule is already removed */
8792 ice_acquire_lock(rule_lock);
8793 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
8795 } else if (list_elem->vsi_count > 1) {
8796 remove_rule = false;
8797 vsi_handle = rinfo->sw_act.vsi_handle;
8798 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
8800 vsi_handle = rinfo->sw_act.vsi_handle;
8801 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
8803 ice_release_lock(rule_lock);
8806 if (list_elem->vsi_count == 0)
8809 ice_release_lock(rule_lock);
8811 struct ice_aqc_sw_rules_elem *s_rule;
8814 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
8815 s_rule = (struct ice_aqc_sw_rules_elem *)
8816 ice_malloc(hw, rule_buf_sz);
8818 return ICE_ERR_NO_MEMORY;
8819 s_rule->pdata.lkup_tx_rx.act = 0;
8820 s_rule->pdata.lkup_tx_rx.index =
8821 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
8822 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
8823 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
8825 ice_aqc_opc_remove_sw_rules, NULL);
/* ICE_ERR_DOES_NOT_EXIST is tolerated: HW may already have dropped it */
8826 if (status == ICE_SUCCESS || status == ICE_ERR_DOES_NOT_EXIST) {
8827 struct ice_switch_info *sw = hw->switch_info;
8829 ice_acquire_lock(rule_lock);
8830 LIST_DEL(&list_elem->list_entry);
8831 ice_free(hw, list_elem->lkups);
8832 ice_free(hw, list_elem);
8833 ice_release_lock(rule_lock);
8834 if (LIST_EMPTY(&sw->recp_list[rid].filt_rules))
8835 sw->recp_list[rid].adv_rule = false;
8837 ice_free(hw, s_rule);
/* NOTE(review): elided numbered listing fragment; kept byte-identical,
 * comments only. Purpose: resolve @remove_entry's rule_id to a bookkeeping
 * entry within its recipe's filter list, then delegate to ice_rem_adv_rule.
 */
8843  * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
8844  * @hw: pointer to the hardware structure
8845  * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
8847  * This function is used to remove 1 rule at a time. The removal is based on
8848  * the remove_entry parameter. This function will remove rule for a given
8849  * vsi_handle with a given rule_id which is passed as parameter in remove_entry
8852 ice_rem_adv_rule_by_id(struct ice_hw *hw,
8853 struct ice_rule_query_data *remove_entry)
8855 struct ice_adv_fltr_mgmt_list_entry *list_itr;
8856 struct LIST_HEAD_TYPE *list_head;
8857 struct ice_adv_rule_info rinfo;
8858 struct ice_switch_info *sw;
8860 sw = hw->switch_info;
8861 if (!sw->recp_list[remove_entry->rid].recp_created)
8862 return ICE_ERR_PARAM;
8863 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
8864 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
8866 if (list_itr->rule_info.fltr_rule_id ==
8867 remove_entry->rule_id) {
8868 rinfo = list_itr->rule_info;
8869 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
8870 return ice_rem_adv_rule(hw, list_itr->lkups,
8871 list_itr->lkups_cnt, &rinfo);
8874 /* either list is empty or unable to find rule */
8875 return ICE_ERR_DOES_NOT_EXIST;
/* NOTE(review): elided numbered listing fragment; kept byte-identical,
 * comments only. Purpose: walk every created recipe carrying advanced rules
 * and remove each rule that targets @vsi_handle (directly or via VSI list).
 */
8879  * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
8881  * @hw: pointer to the hardware structure
8882  * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
8884  * This function is used to remove all the rules for a given VSI and as soon
8885  * as removing a rule fails, it will return immediately with the error code,
8886  * else it will return ICE_SUCCESS
8888 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
8890 struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
8891 struct ice_vsi_list_map_info *map_info;
8892 struct LIST_HEAD_TYPE *list_head;
8893 struct ice_adv_rule_info rinfo;
8894 struct ice_switch_info *sw;
8895 enum ice_status status;
8898 sw = hw->switch_info;
8899 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
8900 if (!sw->recp_list[rid].recp_created)
8902 if (!sw->recp_list[rid].adv_rule)
8905 list_head = &sw->recp_list[rid].filt_rules;
/* _SAFE iteration: ice_rem_adv_rule below may unlink the current entry */
8906 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp_entry, list_head,
8907 ice_adv_fltr_mgmt_list_entry,
8909 rinfo = list_itr->rule_info;
8911 if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
8912 map_info = list_itr->vsi_list_info;
8916 if (!ice_is_bit_set(map_info->vsi_map,
8919 } else if (rinfo.sw_act.vsi_handle != vsi_handle) {
8923 rinfo.sw_act.vsi_handle = vsi_handle;
8924 status = ice_rem_adv_rule(hw, list_itr->lkups,
8925 list_itr->lkups_cnt, &rinfo);
/* NOTE(review): elided numbered listing fragment; kept byte-identical,
 * comments only. Purpose: move a recipe's stored filters to a temporary
 * list and re-add each one (per-VSI for VSI-list entries), then free the
 * temporary bookkeeping.
 */
8935  * ice_replay_fltr - Replay all the filters stored by a specific list head
8936  * @hw: pointer to the hardware structure
8937  * @list_head: list for which filters needs to be replayed
8938  * @recp_id: Recipe ID for which rules need to be replayed
8940 static enum ice_status
8941 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
8943 struct ice_fltr_mgmt_list_entry *itr;
8944 enum ice_status status = ICE_SUCCESS;
8945 struct ice_sw_recipe *recp_list;
8946 u8 lport = hw->port_info->lport;
8947 struct LIST_HEAD_TYPE l_head;
8949 if (LIST_EMPTY(list_head))
8952 recp_list = &hw->switch_info->recp_list[recp_id];
8953 /* Move entries from the given list_head to a temporary l_head so that
8954 * they can be replayed. Otherwise when trying to re-add the same
8955 * filter, the function will return already exists
8957 LIST_REPLACE_INIT(list_head, &l_head);
8959 /* Mark the given list_head empty by reinitializing it so filters
8960 * could be added again by *handler
8962 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
8964 struct ice_fltr_list_entry f_entry;
8967 f_entry.fltr_info = itr->fltr_info;
8968 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
8969 status = ice_add_rule_internal(hw, recp_list, lport,
8971 if (status != ICE_SUCCESS)
8976 /* Add a filter per VSI separately */
8977 ice_for_each_set_bit(vsi_handle, itr->vsi_list_info->vsi_map,
8979 if (!ice_is_vsi_valid(hw, vsi_handle))
8982 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
8983 f_entry.fltr_info.vsi_handle = vsi_handle;
8984 f_entry.fltr_info.fwd_id.hw_vsi_id =
8985 ice_get_hw_vsi_num(hw, vsi_handle);
8986 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
8987 if (recp_id == ICE_SW_LKUP_VLAN)
8988 status = ice_add_vlan_internal(hw, recp_list,
8991 status = ice_add_rule_internal(hw, recp_list,
8994 if (status != ICE_SUCCESS)
8999 /* Clear the filter management list */
9000 ice_rem_sw_rule_info(hw, &l_head);
/* NOTE(review): elided numbered listing fragment; kept byte-identical,
 * comments only. Purpose: replay every recipe's stored filter list,
 * stopping at the first failure.
 */
9005  * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
9006  * @hw: pointer to the hardware structure
9008  * NOTE: This function does not clean up partially added filters on error.
9009  * It is up to caller of the function to issue a reset or fail early.
9011 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
9013 struct ice_switch_info *sw = hw->switch_info;
9014 enum ice_status status = ICE_SUCCESS;
9017 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9018 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
9020 status = ice_replay_fltr(hw, i, head);
9021 if (status != ICE_SUCCESS)
/* NOTE(review): elided numbered listing fragment; kept byte-identical,
 * comments only. Purpose: re-add a single recipe's filters for one VSI,
 * clearing the VSI's bit in each VSI-list map first so the add path can
 * re-register it.
 */
9028  * ice_replay_vsi_fltr - Replay filters for requested VSI
9029  * @hw: pointer to the hardware structure
9030  * @pi: pointer to port information structure
9031  * @sw: pointer to switch info struct for which function replays filters
9032  * @vsi_handle: driver VSI handle
9033  * @recp_id: Recipe ID for which rules need to be replayed
9034  * @list_head: list for which filters need to be replayed
9036  * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
9037  * It is required to pass valid VSI handle.
9039 static enum ice_status
9040 ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
9041 struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
9042 struct LIST_HEAD_TYPE *list_head)
9044 struct ice_fltr_mgmt_list_entry *itr;
9045 enum ice_status status = ICE_SUCCESS;
9046 struct ice_sw_recipe *recp_list;
9049 if (LIST_EMPTY(list_head))
9051 recp_list = &sw->recp_list[recp_id];
9052 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
9054 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
9056 struct ice_fltr_list_entry f_entry;
9058 f_entry.fltr_info = itr->fltr_info;
9059 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
9060 itr->fltr_info.vsi_handle == vsi_handle) {
9061 /* update the src in case it is VSI num */
9062 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
9063 f_entry.fltr_info.src = hw_vsi_id;
9064 status = ice_add_rule_internal(hw, recp_list,
9067 if (status != ICE_SUCCESS)
9071 if (!itr->vsi_list_info ||
9072 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
9074 /* Clearing it so that the logic can add it back */
9075 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
9076 f_entry.fltr_info.vsi_handle = vsi_handle;
9077 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
9078 /* update the src in case it is VSI num */
9079 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
9080 f_entry.fltr_info.src = hw_vsi_id;
9081 if (recp_id == ICE_SW_LKUP_VLAN)
9082 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
9084 status = ice_add_rule_internal(hw, recp_list,
9087 if (status != ICE_SUCCESS)
9095 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
9096 * @hw: pointer to the hardware structure
9097 * @vsi_handle: driver VSI handle
9098 * @list_head: list for which filters need to be replayed
9100 * Replay the advanced rule for the given VSI.
9102 static enum ice_status
9103 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
9104 struct LIST_HEAD_TYPE *list_head)
9106 struct ice_rule_query_data added_entry = { 0 };
9107 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
9108 enum ice_status status = ICE_SUCCESS;
/* No advanced rules recorded for this recipe - nothing to replay */
9110 if (LIST_EMPTY(list_head))
9112 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
9114 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
9115 u16 lk_cnt = adv_fltr->lkups_cnt;
/* Skip entries whose switch action targets a different VSI */
9117 if (vsi_handle != rinfo->sw_act.vsi_handle)
/* Re-program the rule with its saved lookups and rule info;
 * added_entry receives the new rule's identifiers.
 */
9119 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
9128 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
9129 * @hw: pointer to the hardware structure
9130 * @pi: pointer to port information structure
9131 * @vsi_handle: driver VSI handle
9133 * Replays filters for requested VSI via vsi_handle.
ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
9139 struct ice_switch_info *sw = hw->switch_info;
9140 enum ice_status status;
9143 /* Update the recipes that were created */
9144 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9145 struct LIST_HEAD_TYPE *head;
9147 head = &sw->recp_list[i].filt_replay_rules;
/* Advanced-rule recipes use the advanced replay path; everything
 * else goes through the legacy per-recipe filter replay.
 */
9148 if (!sw->recp_list[i].adv_rule)
9149 status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
9152 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
/* Stop at the first recipe that fails to replay */
9153 if (status != ICE_SUCCESS)
9161 * ice_rm_sw_replay_rule_info - helper function to delete filter replay rules
9162 * @hw: pointer to the HW struct
9163 * @sw: pointer to switch info struct for which function removes filters
9165 * Deletes the filter replay rules for given switch
9167 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
9174 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9175 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
9176 struct LIST_HEAD_TYPE *l_head;
9178 l_head = &sw->recp_list[i].filt_replay_rules;
/* Advanced-rule recipes keep different bookkeeping, so they
 * are torn down by the advanced removal helper instead.
 */
9179 if (!sw->recp_list[i].adv_rule)
9180 ice_rem_sw_rule_info(hw, l_head);
9182 ice_rem_adv_rule_info(hw, l_head);
9188 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
9189 * @hw: pointer to the HW struct
9191 * Deletes the filter replay rules.
9193 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
/* Thin wrapper: delete replay rules for the device's primary switch info */
9195 ice_rm_sw_replay_rule_info(hw, hw->switch_info);