1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2021 Intel Corporation
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
12 #define ICE_MAX_VLAN_ID 0xFFF
13 #define ICE_IPV4_NVGRE_PROTO_ID 0x002F
14 #define ICE_PPP_IPV6_PROTO_ID 0x0057
15 #define ICE_IPV6_ETHER_ID 0x86DD
16 #define ICE_TCP_PROTO_ID 0x06
17 #define ICE_ETH_P_8021Q 0x8100
19 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
20 * struct to configure any switch filter rules.
21 * {DA (6 bytes), SA(6 bytes),
22 * Ether type (2 bytes for header without VLAN tag) OR
23 * VLAN tag (4 bytes for header with VLAN tag) }
25 * Word on Hardcoded values
26 * byte 0 = 0x2: to identify it as locally administered DA MAC
27 * byte 6 = 0x2: to identify it as locally administered SA MAC
28 * byte 12 = 0x81 & byte 13 = 0x00:
29 * In case of VLAN filter first two bytes defines ether type (0x8100)
30 * and remaining two bytes are placeholder for programming a given VLAN ID
31 * In case of Ether type filter it is treated as header without VLAN tag
32 * and byte 12 and 13 is used to program a given Ether type instead
34 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* Pairs a protocol header type with its byte offset inside one of the
 * dummy packet templates below; each offsets table is terminated by an
 * { ICE_PROTOCOL_LAST, 0 } sentinel entry.
 */
38 struct ice_dummy_pkt_offsets {
39 enum ice_protocol_type type;
40 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
43 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
46 { ICE_IPV4_OFOS, 14 },
51 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet template: MAC + IPv4 + NVGRE + inner MAC + inner IPv4 + TCP.
 * Companion to dummy_gre_tcp_packet_offsets above. Zeroed fields
 * (addresses, checksums) are presumably filled in when the switch rule
 * is built from the match criteria -- the code doing so is not visible
 * in this chunk; confirm against the rule-construction path.
 */
54 static const u8 dummy_gre_tcp_packet[] = {
55 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
56 0x00, 0x00, 0x00, 0x00,
57 0x00, 0x00, 0x00, 0x00,
59 0x08, 0x00, /* ICE_ETYPE_OL 12 */
61 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
62 0x00, 0x00, 0x00, 0x00,
63 0x00, 0x2F, 0x00, 0x00, /* protocol 0x2F = GRE (ICE_IPV4_NVGRE_PROTO_ID) */
64 0x00, 0x00, 0x00, 0x00,
65 0x00, 0x00, 0x00, 0x00,
67 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
68 0x00, 0x00, 0x00, 0x00,
70 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
71 0x00, 0x00, 0x00, 0x00,
72 0x00, 0x00, 0x00, 0x00,
75 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
76 0x00, 0x00, 0x00, 0x00,
77 0x00, 0x06, 0x00, 0x00, /* protocol 0x06 = TCP (ICE_TCP_PROTO_ID) */
78 0x00, 0x00, 0x00, 0x00,
79 0x00, 0x00, 0x00, 0x00,
81 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
82 0x00, 0x00, 0x00, 0x00,
83 0x00, 0x00, 0x00, 0x00,
84 0x50, 0x02, 0x20, 0x00,
85 0x00, 0x00, 0x00, 0x00
88 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
91 { ICE_IPV4_OFOS, 14 },
96 { ICE_PROTOCOL_LAST, 0 },
99 static const u8 dummy_gre_udp_packet[] = {
100 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
101 0x00, 0x00, 0x00, 0x00,
102 0x00, 0x00, 0x00, 0x00,
104 0x08, 0x00, /* ICE_ETYPE_OL 12 */
106 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
107 0x00, 0x00, 0x00, 0x00,
108 0x00, 0x2F, 0x00, 0x00,
109 0x00, 0x00, 0x00, 0x00,
110 0x00, 0x00, 0x00, 0x00,
112 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
113 0x00, 0x00, 0x00, 0x00,
115 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
116 0x00, 0x00, 0x00, 0x00,
117 0x00, 0x00, 0x00, 0x00,
120 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
121 0x00, 0x00, 0x00, 0x00,
122 0x00, 0x11, 0x00, 0x00,
123 0x00, 0x00, 0x00, 0x00,
124 0x00, 0x00, 0x00, 0x00,
126 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
127 0x00, 0x08, 0x00, 0x00,
130 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
132 { ICE_ETYPE_OL, 12 },
133 { ICE_IPV4_OFOS, 14 },
137 { ICE_VXLAN_GPE, 42 },
141 { ICE_PROTOCOL_LAST, 0 },
144 static const u8 dummy_udp_tun_tcp_packet[] = {
145 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
146 0x00, 0x00, 0x00, 0x00,
147 0x00, 0x00, 0x00, 0x00,
149 0x08, 0x00, /* ICE_ETYPE_OL 12 */
151 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
152 0x00, 0x01, 0x00, 0x00,
153 0x40, 0x11, 0x00, 0x00,
154 0x00, 0x00, 0x00, 0x00,
155 0x00, 0x00, 0x00, 0x00,
157 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
158 0x00, 0x46, 0x00, 0x00,
160 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
161 0x00, 0x00, 0x00, 0x00,
163 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
164 0x00, 0x00, 0x00, 0x00,
165 0x00, 0x00, 0x00, 0x00,
168 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
169 0x00, 0x01, 0x00, 0x00,
170 0x40, 0x06, 0x00, 0x00,
171 0x00, 0x00, 0x00, 0x00,
172 0x00, 0x00, 0x00, 0x00,
174 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
175 0x00, 0x00, 0x00, 0x00,
176 0x00, 0x00, 0x00, 0x00,
177 0x50, 0x02, 0x20, 0x00,
178 0x00, 0x00, 0x00, 0x00
181 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
183 { ICE_ETYPE_OL, 12 },
184 { ICE_IPV4_OFOS, 14 },
188 { ICE_VXLAN_GPE, 42 },
191 { ICE_UDP_ILOS, 84 },
192 { ICE_PROTOCOL_LAST, 0 },
195 static const u8 dummy_udp_tun_udp_packet[] = {
196 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
197 0x00, 0x00, 0x00, 0x00,
198 0x00, 0x00, 0x00, 0x00,
200 0x08, 0x00, /* ICE_ETYPE_OL 12 */
202 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
203 0x00, 0x01, 0x00, 0x00,
204 0x00, 0x11, 0x00, 0x00,
205 0x00, 0x00, 0x00, 0x00,
206 0x00, 0x00, 0x00, 0x00,
208 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
209 0x00, 0x3a, 0x00, 0x00,
211 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
212 0x00, 0x00, 0x00, 0x00,
214 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
215 0x00, 0x00, 0x00, 0x00,
216 0x00, 0x00, 0x00, 0x00,
219 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
220 0x00, 0x01, 0x00, 0x00,
221 0x00, 0x11, 0x00, 0x00,
222 0x00, 0x00, 0x00, 0x00,
223 0x00, 0x00, 0x00, 0x00,
225 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
226 0x00, 0x08, 0x00, 0x00,
229 /* offset info for MAC + IPv4 + UDP dummy packet */
230 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
232 { ICE_ETYPE_OL, 12 },
233 { ICE_IPV4_OFOS, 14 },
234 { ICE_UDP_ILOS, 34 },
235 { ICE_PROTOCOL_LAST, 0 },
238 /* Dummy packet for MAC + IPv4 + UDP */
239 static const u8 dummy_udp_packet[] = {
240 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
241 0x00, 0x00, 0x00, 0x00,
242 0x00, 0x00, 0x00, 0x00,
244 0x08, 0x00, /* ICE_ETYPE_OL 12 */
246 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
247 0x00, 0x01, 0x00, 0x00,
248 0x00, 0x11, 0x00, 0x00,
249 0x00, 0x00, 0x00, 0x00,
250 0x00, 0x00, 0x00, 0x00,
252 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
253 0x00, 0x08, 0x00, 0x00,
255 0x00, 0x00, /* 2 bytes for 4 byte alignment */
258 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
259 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
261 { ICE_ETYPE_OL, 12 },
262 { ICE_VLAN_OFOS, 14 },
263 { ICE_IPV4_OFOS, 18 },
264 { ICE_UDP_ILOS, 38 },
265 { ICE_PROTOCOL_LAST, 0 },
268 /* C-tag (802.1Q), IPv4:UDP dummy packet */
269 static const u8 dummy_vlan_udp_packet[] = {
270 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
271 0x00, 0x00, 0x00, 0x00,
272 0x00, 0x00, 0x00, 0x00,
274 0x81, 0x00, /* ICE_ETYPE_OL 12 */
276 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
278 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
279 0x00, 0x01, 0x00, 0x00,
280 0x00, 0x11, 0x00, 0x00,
281 0x00, 0x00, 0x00, 0x00,
282 0x00, 0x00, 0x00, 0x00,
284 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
285 0x00, 0x08, 0x00, 0x00,
287 0x00, 0x00, /* 2 bytes for 4 byte alignment */
290 /* offset info for MAC + IPv4 + TCP dummy packet */
291 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
293 { ICE_ETYPE_OL, 12 },
294 { ICE_IPV4_OFOS, 14 },
296 { ICE_PROTOCOL_LAST, 0 },
299 /* Dummy packet for MAC + IPv4 + TCP */
300 static const u8 dummy_tcp_packet[] = {
301 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
302 0x00, 0x00, 0x00, 0x00,
303 0x00, 0x00, 0x00, 0x00,
305 0x08, 0x00, /* ICE_ETYPE_OL 12 */
307 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
308 0x00, 0x01, 0x00, 0x00,
309 0x00, 0x06, 0x00, 0x00,
310 0x00, 0x00, 0x00, 0x00,
311 0x00, 0x00, 0x00, 0x00,
313 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
314 0x00, 0x00, 0x00, 0x00,
315 0x00, 0x00, 0x00, 0x00,
316 0x50, 0x00, 0x00, 0x00,
317 0x00, 0x00, 0x00, 0x00,
319 0x00, 0x00, /* 2 bytes for 4 byte alignment */
322 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
323 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
325 { ICE_ETYPE_OL, 12 },
326 { ICE_VLAN_OFOS, 14 },
327 { ICE_IPV4_OFOS, 18 },
329 { ICE_PROTOCOL_LAST, 0 },
332 /* C-tag (802.1Q), IPv4:TCP dummy packet */
333 static const u8 dummy_vlan_tcp_packet[] = {
334 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
335 0x00, 0x00, 0x00, 0x00,
336 0x00, 0x00, 0x00, 0x00,
338 0x81, 0x00, /* ICE_ETYPE_OL 12 */
340 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
342 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
343 0x00, 0x01, 0x00, 0x00,
344 0x00, 0x06, 0x00, 0x00,
345 0x00, 0x00, 0x00, 0x00,
346 0x00, 0x00, 0x00, 0x00,
348 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
349 0x00, 0x00, 0x00, 0x00,
350 0x00, 0x00, 0x00, 0x00,
351 0x50, 0x00, 0x00, 0x00,
352 0x00, 0x00, 0x00, 0x00,
354 0x00, 0x00, /* 2 bytes for 4 byte alignment */
357 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
359 { ICE_ETYPE_OL, 12 },
360 { ICE_IPV6_OFOS, 14 },
362 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet template: MAC + IPv6 + TCP; companion to
 * dummy_tcp_ipv6_packet_offsets above.
 */
365 static const u8 dummy_tcp_ipv6_packet[] = {
366 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
367 0x00, 0x00, 0x00, 0x00,
368 0x00, 0x00, 0x00, 0x00,
370 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
372 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
373 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00,
379 0x00, 0x00, 0x00, 0x00,
380 0x00, 0x00, 0x00, 0x00,
381 0x00, 0x00, 0x00, 0x00,
383 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
384 0x00, 0x00, 0x00, 0x00,
385 0x00, 0x00, 0x00, 0x00,
386 0x50, 0x00, 0x00, 0x00,
387 0x00, 0x00, 0x00, 0x00,
389 0x00, 0x00, /* 2 bytes for 4 byte alignment */
392 /* C-tag (802.1Q): IPv6 + TCP */
393 static const struct ice_dummy_pkt_offsets
394 dummy_vlan_tcp_ipv6_packet_offsets[] = {
396 { ICE_ETYPE_OL, 12 },
397 { ICE_VLAN_OFOS, 14 },
398 { ICE_IPV6_OFOS, 18 },
400 { ICE_PROTOCOL_LAST, 0 },
403 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
404 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
405 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
406 0x00, 0x00, 0x00, 0x00,
407 0x00, 0x00, 0x00, 0x00,
409 0x81, 0x00, /* ICE_ETYPE_OL 12 */
411 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
413 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
414 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
415 0x00, 0x00, 0x00, 0x00,
416 0x00, 0x00, 0x00, 0x00,
417 0x00, 0x00, 0x00, 0x00,
418 0x00, 0x00, 0x00, 0x00,
419 0x00, 0x00, 0x00, 0x00,
420 0x00, 0x00, 0x00, 0x00,
421 0x00, 0x00, 0x00, 0x00,
422 0x00, 0x00, 0x00, 0x00,
424 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
425 0x00, 0x00, 0x00, 0x00,
426 0x00, 0x00, 0x00, 0x00,
427 0x50, 0x00, 0x00, 0x00,
428 0x00, 0x00, 0x00, 0x00,
430 0x00, 0x00, /* 2 bytes for 4 byte alignment */
434 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
436 { ICE_ETYPE_OL, 12 },
437 { ICE_IPV6_OFOS, 14 },
438 { ICE_UDP_ILOS, 54 },
439 { ICE_PROTOCOL_LAST, 0 },
442 /* IPv6 + UDP dummy packet */
/* Dummy packet template: MAC + IPv6 + UDP; companion to
 * dummy_udp_ipv6_packet_offsets above.
 */
443 static const u8 dummy_udp_ipv6_packet[] = {
444 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
445 0x00, 0x00, 0x00, 0x00,
446 0x00, 0x00, 0x00, 0x00,
448 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
450 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
451 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
452 0x00, 0x00, 0x00, 0x00,
453 0x00, 0x00, 0x00, 0x00,
454 0x00, 0x00, 0x00, 0x00,
455 0x00, 0x00, 0x00, 0x00,
456 0x00, 0x00, 0x00, 0x00,
457 0x00, 0x00, 0x00, 0x00,
458 0x00, 0x00, 0x00, 0x00,
459 0x00, 0x00, 0x00, 0x00,
461 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
462 0x00, 0x10, 0x00, 0x00,
464 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
465 0x00, 0x00, 0x00, 0x00,
467 0x00, 0x00, /* 2 bytes for 4 byte alignment */
470 /* C-tag (802.1Q): IPv6 + UDP */
471 static const struct ice_dummy_pkt_offsets
472 dummy_vlan_udp_ipv6_packet_offsets[] = {
474 { ICE_ETYPE_OL, 12 },
475 { ICE_VLAN_OFOS, 14 },
476 { ICE_IPV6_OFOS, 18 },
477 { ICE_UDP_ILOS, 58 },
478 { ICE_PROTOCOL_LAST, 0 },
481 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
482 static const u8 dummy_vlan_udp_ipv6_packet[] = {
483 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
484 0x00, 0x00, 0x00, 0x00,
485 0x00, 0x00, 0x00, 0x00,
487 0x81, 0x00, /* ICE_ETYPE_OL 12 */
489 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
491 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
492 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
493 0x00, 0x00, 0x00, 0x00,
494 0x00, 0x00, 0x00, 0x00,
495 0x00, 0x00, 0x00, 0x00,
496 0x00, 0x00, 0x00, 0x00,
497 0x00, 0x00, 0x00, 0x00,
498 0x00, 0x00, 0x00, 0x00,
499 0x00, 0x00, 0x00, 0x00,
500 0x00, 0x00, 0x00, 0x00,
502 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
503 0x00, 0x08, 0x00, 0x00,
505 0x00, 0x00, /* 2 bytes for 4 byte alignment */
508 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
509 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_tcp_packet_offsets[] = {
511 { ICE_IPV4_OFOS, 14 },
516 { ICE_PROTOCOL_LAST, 0 },
519 static const u8 dummy_ipv4_gtpu_ipv4_tcp_packet[] = {
520 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
521 0x00, 0x00, 0x00, 0x00,
522 0x00, 0x00, 0x00, 0x00,
525 0x45, 0x00, 0x00, 0x58, /* IP 14 */
526 0x00, 0x00, 0x00, 0x00,
527 0x00, 0x11, 0x00, 0x00,
528 0x00, 0x00, 0x00, 0x00,
529 0x00, 0x00, 0x00, 0x00,
531 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
532 0x00, 0x44, 0x00, 0x00,
534 0x34, 0xff, 0x00, 0x34, /* GTP-U Header 42 */
535 0x00, 0x00, 0x00, 0x00,
536 0x00, 0x00, 0x00, 0x85,
538 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
539 0x00, 0x00, 0x00, 0x00,
541 0x45, 0x00, 0x00, 0x28, /* IP 62 */
542 0x00, 0x00, 0x00, 0x00,
543 0x00, 0x06, 0x00, 0x00,
544 0x00, 0x00, 0x00, 0x00,
545 0x00, 0x00, 0x00, 0x00,
547 0x00, 0x00, 0x00, 0x00, /* TCP 82 */
548 0x00, 0x00, 0x00, 0x00,
549 0x00, 0x00, 0x00, 0x00,
550 0x50, 0x00, 0x00, 0x00,
551 0x00, 0x00, 0x00, 0x00,
553 0x00, 0x00, /* 2 bytes for 4 byte alignment */
556 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
557 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_udp_packet_offsets[] = {
559 { ICE_IPV4_OFOS, 14 },
563 { ICE_UDP_ILOS, 82 },
564 { ICE_PROTOCOL_LAST, 0 },
567 static const u8 dummy_ipv4_gtpu_ipv4_udp_packet[] = {
568 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
569 0x00, 0x00, 0x00, 0x00,
570 0x00, 0x00, 0x00, 0x00,
573 0x45, 0x00, 0x00, 0x4c, /* IP 14 */
574 0x00, 0x00, 0x00, 0x00,
575 0x00, 0x11, 0x00, 0x00,
576 0x00, 0x00, 0x00, 0x00,
577 0x00, 0x00, 0x00, 0x00,
579 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
580 0x00, 0x38, 0x00, 0x00,
582 0x34, 0xff, 0x00, 0x28, /* GTP-U Header 42 */
583 0x00, 0x00, 0x00, 0x00,
584 0x00, 0x00, 0x00, 0x85,
586 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
587 0x00, 0x00, 0x00, 0x00,
589 0x45, 0x00, 0x00, 0x1c, /* IP 62 */
590 0x00, 0x00, 0x00, 0x00,
591 0x00, 0x11, 0x00, 0x00,
592 0x00, 0x00, 0x00, 0x00,
593 0x00, 0x00, 0x00, 0x00,
595 0x00, 0x00, 0x00, 0x00, /* UDP 82 */
596 0x00, 0x08, 0x00, 0x00,
598 0x00, 0x00, /* 2 bytes for 4 byte alignment */
601 /* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
602 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_tcp_packet_offsets[] = {
604 { ICE_IPV4_OFOS, 14 },
609 { ICE_PROTOCOL_LAST, 0 },
612 static const u8 dummy_ipv4_gtpu_ipv6_tcp_packet[] = {
613 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
614 0x00, 0x00, 0x00, 0x00,
615 0x00, 0x00, 0x00, 0x00,
618 0x45, 0x00, 0x00, 0x6c, /* IP 14 */
619 0x00, 0x00, 0x00, 0x00,
620 0x00, 0x11, 0x00, 0x00,
621 0x00, 0x00, 0x00, 0x00,
622 0x00, 0x00, 0x00, 0x00,
624 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
625 0x00, 0x58, 0x00, 0x00,
627 0x34, 0xff, 0x00, 0x48, /* GTP-U Header 42 */
628 0x00, 0x00, 0x00, 0x00,
629 0x00, 0x00, 0x00, 0x85,
631 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
632 0x00, 0x00, 0x00, 0x00,
634 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
635 0x00, 0x14, 0x06, 0x00,
636 0x00, 0x00, 0x00, 0x00,
637 0x00, 0x00, 0x00, 0x00,
638 0x00, 0x00, 0x00, 0x00,
639 0x00, 0x00, 0x00, 0x00,
640 0x00, 0x00, 0x00, 0x00,
641 0x00, 0x00, 0x00, 0x00,
642 0x00, 0x00, 0x00, 0x00,
643 0x00, 0x00, 0x00, 0x00,
645 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
646 0x00, 0x00, 0x00, 0x00,
647 0x00, 0x00, 0x00, 0x00,
648 0x50, 0x00, 0x00, 0x00,
649 0x00, 0x00, 0x00, 0x00,
651 0x00, 0x00, /* 2 bytes for 4 byte alignment */
654 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_udp_packet_offsets[] = {
656 { ICE_IPV4_OFOS, 14 },
660 { ICE_UDP_ILOS, 102 },
661 { ICE_PROTOCOL_LAST, 0 },
664 static const u8 dummy_ipv4_gtpu_ipv6_udp_packet[] = {
665 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
666 0x00, 0x00, 0x00, 0x00,
667 0x00, 0x00, 0x00, 0x00,
670 0x45, 0x00, 0x00, 0x60, /* IP 14 */
671 0x00, 0x00, 0x00, 0x00,
672 0x00, 0x11, 0x00, 0x00,
673 0x00, 0x00, 0x00, 0x00,
674 0x00, 0x00, 0x00, 0x00,
676 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
677 0x00, 0x4c, 0x00, 0x00,
679 0x34, 0xff, 0x00, 0x3c, /* GTP-U Header 42 */
680 0x00, 0x00, 0x00, 0x00,
681 0x00, 0x00, 0x00, 0x85,
683 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
684 0x00, 0x00, 0x00, 0x00,
686 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
687 0x00, 0x08, 0x11, 0x00,
688 0x00, 0x00, 0x00, 0x00,
689 0x00, 0x00, 0x00, 0x00,
690 0x00, 0x00, 0x00, 0x00,
691 0x00, 0x00, 0x00, 0x00,
692 0x00, 0x00, 0x00, 0x00,
693 0x00, 0x00, 0x00, 0x00,
694 0x00, 0x00, 0x00, 0x00,
695 0x00, 0x00, 0x00, 0x00,
697 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
698 0x00, 0x08, 0x00, 0x00,
700 0x00, 0x00, /* 2 bytes for 4 byte alignment */
703 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_tcp_packet_offsets[] = {
705 { ICE_IPV6_OFOS, 14 },
710 { ICE_PROTOCOL_LAST, 0 },
713 static const u8 dummy_ipv6_gtpu_ipv4_tcp_packet[] = {
714 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
715 0x00, 0x00, 0x00, 0x00,
716 0x00, 0x00, 0x00, 0x00,
719 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
720 0x00, 0x44, 0x11, 0x00,
721 0x00, 0x00, 0x00, 0x00,
722 0x00, 0x00, 0x00, 0x00,
723 0x00, 0x00, 0x00, 0x00,
724 0x00, 0x00, 0x00, 0x00,
725 0x00, 0x00, 0x00, 0x00,
726 0x00, 0x00, 0x00, 0x00,
727 0x00, 0x00, 0x00, 0x00,
728 0x00, 0x00, 0x00, 0x00,
730 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
731 0x00, 0x44, 0x00, 0x00,
733 0x34, 0xff, 0x00, 0x34, /* GTP-U Header 62 */
734 0x00, 0x00, 0x00, 0x00,
735 0x00, 0x00, 0x00, 0x85,
737 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
738 0x00, 0x00, 0x00, 0x00,
740 0x45, 0x00, 0x00, 0x28, /* IP 82 */
741 0x00, 0x00, 0x00, 0x00,
742 0x00, 0x06, 0x00, 0x00,
743 0x00, 0x00, 0x00, 0x00,
744 0x00, 0x00, 0x00, 0x00,
746 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
747 0x00, 0x00, 0x00, 0x00,
748 0x00, 0x00, 0x00, 0x00,
749 0x50, 0x00, 0x00, 0x00,
750 0x00, 0x00, 0x00, 0x00,
752 0x00, 0x00, /* 2 bytes for 4 byte alignment */
755 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_udp_packet_offsets[] = {
757 { ICE_IPV6_OFOS, 14 },
761 { ICE_UDP_ILOS, 102 },
762 { ICE_PROTOCOL_LAST, 0 },
765 static const u8 dummy_ipv6_gtpu_ipv4_udp_packet[] = {
766 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
767 0x00, 0x00, 0x00, 0x00,
768 0x00, 0x00, 0x00, 0x00,
771 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
772 0x00, 0x38, 0x11, 0x00,
773 0x00, 0x00, 0x00, 0x00,
774 0x00, 0x00, 0x00, 0x00,
775 0x00, 0x00, 0x00, 0x00,
776 0x00, 0x00, 0x00, 0x00,
777 0x00, 0x00, 0x00, 0x00,
778 0x00, 0x00, 0x00, 0x00,
779 0x00, 0x00, 0x00, 0x00,
780 0x00, 0x00, 0x00, 0x00,
782 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
783 0x00, 0x38, 0x00, 0x00,
785 0x34, 0xff, 0x00, 0x28, /* GTP-U Header 62 */
786 0x00, 0x00, 0x00, 0x00,
787 0x00, 0x00, 0x00, 0x85,
789 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
790 0x00, 0x00, 0x00, 0x00,
792 0x45, 0x00, 0x00, 0x1c, /* IP 82 */
793 0x00, 0x00, 0x00, 0x00,
794 0x00, 0x11, 0x00, 0x00,
795 0x00, 0x00, 0x00, 0x00,
796 0x00, 0x00, 0x00, 0x00,
798 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
799 0x00, 0x08, 0x00, 0x00,
801 0x00, 0x00, /* 2 bytes for 4 byte alignment */
804 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_tcp_packet_offsets[] = {
806 { ICE_IPV6_OFOS, 14 },
811 { ICE_PROTOCOL_LAST, 0 },
814 static const u8 dummy_ipv6_gtpu_ipv6_tcp_packet[] = {
815 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
816 0x00, 0x00, 0x00, 0x00,
817 0x00, 0x00, 0x00, 0x00,
820 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
821 0x00, 0x58, 0x11, 0x00,
822 0x00, 0x00, 0x00, 0x00,
823 0x00, 0x00, 0x00, 0x00,
824 0x00, 0x00, 0x00, 0x00,
825 0x00, 0x00, 0x00, 0x00,
826 0x00, 0x00, 0x00, 0x00,
827 0x00, 0x00, 0x00, 0x00,
828 0x00, 0x00, 0x00, 0x00,
829 0x00, 0x00, 0x00, 0x00,
831 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
832 0x00, 0x58, 0x00, 0x00,
834 0x34, 0xff, 0x00, 0x48, /* GTP-U Header 62 */
835 0x00, 0x00, 0x00, 0x00,
836 0x00, 0x00, 0x00, 0x85,
838 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
839 0x00, 0x00, 0x00, 0x00,
841 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
842 0x00, 0x14, 0x06, 0x00,
843 0x00, 0x00, 0x00, 0x00,
844 0x00, 0x00, 0x00, 0x00,
845 0x00, 0x00, 0x00, 0x00,
846 0x00, 0x00, 0x00, 0x00,
847 0x00, 0x00, 0x00, 0x00,
848 0x00, 0x00, 0x00, 0x00,
849 0x00, 0x00, 0x00, 0x00,
850 0x00, 0x00, 0x00, 0x00,
852 0x00, 0x00, 0x00, 0x00, /* TCP 122 */
853 0x00, 0x00, 0x00, 0x00,
854 0x00, 0x00, 0x00, 0x00,
855 0x50, 0x00, 0x00, 0x00,
856 0x00, 0x00, 0x00, 0x00,
858 0x00, 0x00, /* 2 bytes for 4 byte alignment */
861 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_udp_packet_offsets[] = {
863 { ICE_IPV6_OFOS, 14 },
867 { ICE_UDP_ILOS, 102 },
868 { ICE_PROTOCOL_LAST, 0 },
871 static const u8 dummy_ipv6_gtpu_ipv6_udp_packet[] = {
872 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
873 0x00, 0x00, 0x00, 0x00,
874 0x00, 0x00, 0x00, 0x00,
877 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
878 0x00, 0x4c, 0x11, 0x00,
879 0x00, 0x00, 0x00, 0x00,
880 0x00, 0x00, 0x00, 0x00,
881 0x00, 0x00, 0x00, 0x00,
882 0x00, 0x00, 0x00, 0x00,
883 0x00, 0x00, 0x00, 0x00,
884 0x00, 0x00, 0x00, 0x00,
885 0x00, 0x00, 0x00, 0x00,
886 0x00, 0x00, 0x00, 0x00,
888 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
889 0x00, 0x4c, 0x00, 0x00,
891 0x34, 0xff, 0x00, 0x3c, /* GTP-U Header 62 */
892 0x00, 0x00, 0x00, 0x00,
893 0x00, 0x00, 0x00, 0x85,
895 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
896 0x00, 0x00, 0x00, 0x00,
898 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
899 0x00, 0x08, 0x11, 0x00,
900 0x00, 0x00, 0x00, 0x00,
901 0x00, 0x00, 0x00, 0x00,
902 0x00, 0x00, 0x00, 0x00,
903 0x00, 0x00, 0x00, 0x00,
904 0x00, 0x00, 0x00, 0x00,
905 0x00, 0x00, 0x00, 0x00,
906 0x00, 0x00, 0x00, 0x00,
907 0x00, 0x00, 0x00, 0x00,
909 0x00, 0x00, 0x00, 0x00, /* UDP 122 */
910 0x00, 0x08, 0x00, 0x00,
912 0x00, 0x00, /* 2 bytes for 4 byte alignment */
915 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
917 { ICE_IPV4_OFOS, 14 },
920 { ICE_PROTOCOL_LAST, 0 },
923 static const u8 dummy_udp_gtp_packet[] = {
924 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
925 0x00, 0x00, 0x00, 0x00,
926 0x00, 0x00, 0x00, 0x00,
929 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
930 0x00, 0x00, 0x00, 0x00,
931 0x00, 0x11, 0x00, 0x00,
932 0x00, 0x00, 0x00, 0x00,
933 0x00, 0x00, 0x00, 0x00,
935 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
936 0x00, 0x1c, 0x00, 0x00,
938 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
939 0x00, 0x00, 0x00, 0x00,
940 0x00, 0x00, 0x00, 0x85,
942 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
943 0x00, 0x00, 0x00, 0x00,
947 struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_packet_offsets[] = {
949 { ICE_IPV4_OFOS, 14 },
953 { ICE_PROTOCOL_LAST, 0 },
956 static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
957 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
958 0x00, 0x00, 0x00, 0x00,
959 0x00, 0x00, 0x00, 0x00,
962 0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
963 0x00, 0x00, 0x40, 0x00,
964 0x40, 0x11, 0x00, 0x00,
965 0x00, 0x00, 0x00, 0x00,
966 0x00, 0x00, 0x00, 0x00,
968 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
969 0x00, 0x00, 0x00, 0x00,
971 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
972 0x00, 0x00, 0x00, 0x00,
973 0x00, 0x00, 0x00, 0x85,
975 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
976 0x00, 0x00, 0x00, 0x00,
978 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
979 0x00, 0x00, 0x40, 0x00,
980 0x40, 0x00, 0x00, 0x00,
981 0x00, 0x00, 0x00, 0x00,
982 0x00, 0x00, 0x00, 0x00,
987 struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_packet_offsets[] = {
989 { ICE_IPV4_OFOS, 14 },
993 { ICE_PROTOCOL_LAST, 0 },
996 static const u8 dummy_ipv4_gtpu_ipv6_packet[] = {
997 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
998 0x00, 0x00, 0x00, 0x00,
999 0x00, 0x00, 0x00, 0x00,
1002 0x45, 0x00, 0x00, 0x58, /* ICE_IPV4_OFOS 14 */
1003 0x00, 0x00, 0x40, 0x00,
1004 0x40, 0x11, 0x00, 0x00,
1005 0x00, 0x00, 0x00, 0x00,
1006 0x00, 0x00, 0x00, 0x00,
1008 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
1009 0x00, 0x00, 0x00, 0x00,
1011 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
1012 0x00, 0x00, 0x00, 0x00,
1013 0x00, 0x00, 0x00, 0x85,
1015 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1016 0x00, 0x00, 0x00, 0x00,
1018 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 62 */
1019 0x00, 0x00, 0x3b, 0x00,
1020 0x00, 0x00, 0x00, 0x00,
1021 0x00, 0x00, 0x00, 0x00,
1022 0x00, 0x00, 0x00, 0x00,
1023 0x00, 0x00, 0x00, 0x00,
1024 0x00, 0x00, 0x00, 0x00,
1025 0x00, 0x00, 0x00, 0x00,
1026 0x00, 0x00, 0x00, 0x00,
1027 0x00, 0x00, 0x00, 0x00,
/* Offsets for the outer-IPv6 GTP-U, inner-IPv4 dummy packet below.
 * NOTE(review): unlike most offset tables in this file this one is not
 * declared static const -- presumably an oversight; confirm there are no
 * external users before adding the qualifiers.
 */
1033 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_packet_offsets[] = {
1034 { ICE_MAC_OFOS, 0 },
1035 { ICE_IPV6_OFOS, 14 },
1038 { ICE_IPV4_IL, 82 },
1039 { ICE_PROTOCOL_LAST, 0 },
1042 static const u8 dummy_ipv6_gtpu_ipv4_packet[] = {
1043 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1044 0x00, 0x00, 0x00, 0x00,
1045 0x00, 0x00, 0x00, 0x00,
1048 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1049 0x00, 0x58, 0x11, 0x00, /* Next header UDP*/
1050 0x00, 0x00, 0x00, 0x00,
1051 0x00, 0x00, 0x00, 0x00,
1052 0x00, 0x00, 0x00, 0x00,
1053 0x00, 0x00, 0x00, 0x00,
1054 0x00, 0x00, 0x00, 0x00,
1055 0x00, 0x00, 0x00, 0x00,
1056 0x00, 0x00, 0x00, 0x00,
1057 0x00, 0x00, 0x00, 0x00,
1059 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1060 0x00, 0x00, 0x00, 0x00,
1062 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 62 */
1063 0x00, 0x00, 0x00, 0x00,
1064 0x00, 0x00, 0x00, 0x85,
1066 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1067 0x00, 0x00, 0x00, 0x00,
1069 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 82 */
1070 0x00, 0x00, 0x40, 0x00,
1071 0x40, 0x00, 0x00, 0x00,
1072 0x00, 0x00, 0x00, 0x00,
1073 0x00, 0x00, 0x00, 0x00,
/* Offsets for the outer-IPv6 GTP-U, inner-IPv6 dummy packet below.
 * NOTE(review): not declared static const, unlike most offset tables in
 * this file -- presumably an oversight; confirm there are no external
 * users before adding the qualifiers.
 */
1079 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_packet_offsets[] = {
1080 { ICE_MAC_OFOS, 0 },
1081 { ICE_IPV6_OFOS, 14 },
1084 { ICE_IPV6_IL, 82 },
1085 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet template: MAC + outer IPv6 + UDP + GTP-U (with PDU session
 * extension) + inner IPv6, no payload.
 */
1088 static const u8 dummy_ipv6_gtpu_ipv6_packet[] = {
1089 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1090 0x00, 0x00, 0x00, 0x00,
1091 0x00, 0x00, 0x00, 0x00,
1094 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1095 0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
1096 0x00, 0x00, 0x00, 0x00,
1097 0x00, 0x00, 0x00, 0x00,
1098 0x00, 0x00, 0x00, 0x00,
1099 0x00, 0x00, 0x00, 0x00,
1100 0x00, 0x00, 0x00, 0x00,
1101 0x00, 0x00, 0x00, 0x00,
1102 0x00, 0x00, 0x00, 0x00,
1103 0x00, 0x00, 0x00, 0x00,
1105 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1106 0x00, 0x00, 0x00, 0x00,
1108 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 62 */
1109 0x00, 0x00, 0x00, 0x00,
1110 0x00, 0x00, 0x00, 0x85,
1112 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1113 0x00, 0x00, 0x00, 0x00,
1115 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 82 */
1116 0x00, 0x00, 0x3b, 0x00,
1117 0x00, 0x00, 0x00, 0x00,
1118 0x00, 0x00, 0x00, 0x00,
1119 0x00, 0x00, 0x00, 0x00,
1120 0x00, 0x00, 0x00, 0x00,
1121 0x00, 0x00, 0x00, 0x00,
1122 0x00, 0x00, 0x00, 0x00,
1123 0x00, 0x00, 0x00, 0x00,
1124 0x00, 0x00, 0x00, 0x00,
1130 struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
1131 { ICE_MAC_OFOS, 0 },
1132 { ICE_IPV4_OFOS, 14 },
1134 { ICE_GTP_NO_PAY, 42 },
1135 { ICE_PROTOCOL_LAST, 0 },
1139 struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
1140 { ICE_MAC_OFOS, 0 },
1141 { ICE_IPV6_OFOS, 14 },
1143 { ICE_GTP_NO_PAY, 62 },
1144 { ICE_PROTOCOL_LAST, 0 },
1147 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
1148 { ICE_MAC_OFOS, 0 },
1149 { ICE_ETYPE_OL, 12 },
1150 { ICE_VLAN_OFOS, 14},
1152 { ICE_PROTOCOL_LAST, 0 },
1155 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv4_offsets[] = {
1156 { ICE_MAC_OFOS, 0 },
1157 { ICE_ETYPE_OL, 12 },
1158 { ICE_VLAN_OFOS, 14},
1160 { ICE_IPV4_OFOS, 26 },
1161 { ICE_PROTOCOL_LAST, 0 },
1164 static const u8 dummy_pppoe_ipv4_packet[] = {
1165 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1166 0x00, 0x00, 0x00, 0x00,
1167 0x00, 0x00, 0x00, 0x00,
1169 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1171 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1173 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1176 0x00, 0x21, /* PPP Link Layer 24 */
1178 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
1179 0x00, 0x00, 0x00, 0x00,
1180 0x00, 0x00, 0x00, 0x00,
1181 0x00, 0x00, 0x00, 0x00,
1182 0x00, 0x00, 0x00, 0x00,
1184 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1188 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_tcp_packet_offsets[] = {
1189 { ICE_MAC_OFOS, 0 },
1190 { ICE_ETYPE_OL, 12 },
1191 { ICE_VLAN_OFOS, 14},
1193 { ICE_IPV4_OFOS, 26 },
1195 { ICE_PROTOCOL_LAST, 0 },
1198 static const u8 dummy_pppoe_ipv4_tcp_packet[] = {
1199 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1200 0x00, 0x00, 0x00, 0x00,
1201 0x00, 0x00, 0x00, 0x00,
1203 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1205 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1207 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1210 0x00, 0x21, /* PPP Link Layer 24 */
1212 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 26 */
1213 0x00, 0x01, 0x00, 0x00,
1214 0x00, 0x06, 0x00, 0x00,
1215 0x00, 0x00, 0x00, 0x00,
1216 0x00, 0x00, 0x00, 0x00,
1218 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 46 */
1219 0x00, 0x00, 0x00, 0x00,
1220 0x00, 0x00, 0x00, 0x00,
1221 0x50, 0x00, 0x00, 0x00,
1222 0x00, 0x00, 0x00, 0x00,
1224 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1228 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_udp_packet_offsets[] = {
1229 { ICE_MAC_OFOS, 0 },
1230 { ICE_ETYPE_OL, 12 },
1231 { ICE_VLAN_OFOS, 14},
1233 { ICE_IPV4_OFOS, 26 },
1234 { ICE_UDP_ILOS, 46 },
1235 { ICE_PROTOCOL_LAST, 0 },
1238 static const u8 dummy_pppoe_ipv4_udp_packet[] = {
1239 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1240 0x00, 0x00, 0x00, 0x00,
1241 0x00, 0x00, 0x00, 0x00,
1243 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1245 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1247 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1250 0x00, 0x21, /* PPP Link Layer 24 */
1252 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 26 */
1253 0x00, 0x01, 0x00, 0x00,
1254 0x00, 0x11, 0x00, 0x00,
1255 0x00, 0x00, 0x00, 0x00,
1256 0x00, 0x00, 0x00, 0x00,
1258 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 46 */
1259 0x00, 0x08, 0x00, 0x00,
1261 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1264 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_offsets[] = {
1265 { ICE_MAC_OFOS, 0 },
1266 { ICE_ETYPE_OL, 12 },
1267 { ICE_VLAN_OFOS, 14},
1269 { ICE_IPV6_OFOS, 26 },
1270 { ICE_PROTOCOL_LAST, 0 },
1273 static const u8 dummy_pppoe_ipv6_packet[] = {
1274 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1275 0x00, 0x00, 0x00, 0x00,
1276 0x00, 0x00, 0x00, 0x00,
1278 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1280 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1282 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1285 0x00, 0x57, /* PPP Link Layer 24 */
1287 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
1288 0x00, 0x00, 0x3b, 0x00,
1289 0x00, 0x00, 0x00, 0x00,
1290 0x00, 0x00, 0x00, 0x00,
1291 0x00, 0x00, 0x00, 0x00,
1292 0x00, 0x00, 0x00, 0x00,
1293 0x00, 0x00, 0x00, 0x00,
1294 0x00, 0x00, 0x00, 0x00,
1295 0x00, 0x00, 0x00, 0x00,
1296 0x00, 0x00, 0x00, 0x00,
1298 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Protocol offsets and dummy packet for PPPoE carrying IPv6/TCP.
 * IPv6 next header is 0x06 (TCP); TCP header starts at byte 66.
 */
1302 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_tcp_offsets[] = {
1303 { ICE_MAC_OFOS, 0 },
1304 { ICE_ETYPE_OL, 12 },
1305 { ICE_VLAN_OFOS, 14},
1307 { ICE_IPV6_OFOS, 26 },
1309 { ICE_PROTOCOL_LAST, 0 },

1312 static const u8 dummy_pppoe_ipv6_tcp_packet[] = {
1313 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1314 0x00, 0x00, 0x00, 0x00,
1315 0x00, 0x00, 0x00, 0x00,
1317 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1319 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1321 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1324 0x00, 0x57, /* PPP Link Layer 24 */
1326 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
1327 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
1328 0x00, 0x00, 0x00, 0x00,
1329 0x00, 0x00, 0x00, 0x00,
1330 0x00, 0x00, 0x00, 0x00,
1331 0x00, 0x00, 0x00, 0x00,
1332 0x00, 0x00, 0x00, 0x00,
1333 0x00, 0x00, 0x00, 0x00,
1334 0x00, 0x00, 0x00, 0x00,
1335 0x00, 0x00, 0x00, 0x00,
1337 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 66 */
1338 0x00, 0x00, 0x00, 0x00,
1339 0x00, 0x00, 0x00, 0x00,
1340 0x50, 0x00, 0x00, 0x00, /* data offset = 5 words, no TCP options */
1341 0x00, 0x00, 0x00, 0x00,
1343 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Protocol offsets and dummy packet for PPPoE carrying IPv6/UDP.
 * IPv6 next header is 0x11 (UDP); UDP header starts at byte 66.
 */
1347 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_udp_offsets[] = {
1348 { ICE_MAC_OFOS, 0 },
1349 { ICE_ETYPE_OL, 12 },
1350 { ICE_VLAN_OFOS, 14},
1352 { ICE_IPV6_OFOS, 26 },
1353 { ICE_UDP_ILOS, 66 },
1354 { ICE_PROTOCOL_LAST, 0 },

1357 static const u8 dummy_pppoe_ipv6_udp_packet[] = {
1358 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1359 0x00, 0x00, 0x00, 0x00,
1360 0x00, 0x00, 0x00, 0x00,
1362 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1364 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1366 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1369 0x00, 0x57, /* PPP Link Layer 24 */
1371 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
1372 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
1373 0x00, 0x00, 0x00, 0x00,
1374 0x00, 0x00, 0x00, 0x00,
1375 0x00, 0x00, 0x00, 0x00,
1376 0x00, 0x00, 0x00, 0x00,
1377 0x00, 0x00, 0x00, 0x00,
1378 0x00, 0x00, 0x00, 0x00,
1379 0x00, 0x00, 0x00, 0x00,
1380 0x00, 0x00, 0x00, 0x00,
1382 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 66 */
1383 0x00, 0x08, 0x00, 0x00, /* UDP length = 8 (header only) */
1385 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Offsets and dummy packet for IPv4/ESP (IP protocol 0x32 = 50, ESP). */
1388 static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
1389 { ICE_MAC_OFOS, 0 },
1390 { ICE_IPV4_OFOS, 14 },
1392 { ICE_PROTOCOL_LAST, 0 },

1395 static const u8 dummy_ipv4_esp_pkt[] = {
1396 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1397 0x00, 0x00, 0x00, 0x00,
1398 0x00, 0x00, 0x00, 0x00,
1401 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 14 */
1402 0x00, 0x00, 0x40, 0x00,
1403 0x40, 0x32, 0x00, 0x00, /* protocol 0x32 = ESP */
1404 0x00, 0x00, 0x00, 0x00,
1405 0x00, 0x00, 0x00, 0x00,
1407 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
1408 0x00, 0x00, 0x00, 0x00,
1409 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Offsets and dummy packet for IPv6/ESP (next header 0x32 = 50, ESP). */
1412 static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
1413 { ICE_MAC_OFOS, 0 },
1414 { ICE_IPV6_OFOS, 14 },
1416 { ICE_PROTOCOL_LAST, 0 },

1419 static const u8 dummy_ipv6_esp_pkt[] = {
1420 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1421 0x00, 0x00, 0x00, 0x00,
1422 0x00, 0x00, 0x00, 0x00,
1425 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1426 0x00, 0x08, 0x32, 0x00, /* Next header ESP */
1427 0x00, 0x00, 0x00, 0x00,
1428 0x00, 0x00, 0x00, 0x00,
1429 0x00, 0x00, 0x00, 0x00,
1430 0x00, 0x00, 0x00, 0x00,
1431 0x00, 0x00, 0x00, 0x00,
1432 0x00, 0x00, 0x00, 0x00,
1433 0x00, 0x00, 0x00, 0x00,
1434 0x00, 0x00, 0x00, 0x00,
1436 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
1437 0x00, 0x00, 0x00, 0x00,
1438 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Offsets and dummy packet for IPv4/AH (IP protocol 0x33 = 51, AH). */
1441 static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
1442 { ICE_MAC_OFOS, 0 },
1443 { ICE_IPV4_OFOS, 14 },
1445 { ICE_PROTOCOL_LAST, 0 },

1448 static const u8 dummy_ipv4_ah_pkt[] = {
1449 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1450 0x00, 0x00, 0x00, 0x00,
1451 0x00, 0x00, 0x00, 0x00,
1454 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1455 0x00, 0x00, 0x40, 0x00,
1456 0x40, 0x33, 0x00, 0x00, /* protocol 0x33 = AH */
1457 0x00, 0x00, 0x00, 0x00,
1458 0x00, 0x00, 0x00, 0x00,
1460 0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
1461 0x00, 0x00, 0x00, 0x00,
1462 0x00, 0x00, 0x00, 0x00,
1463 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Offsets and dummy packet for IPv6/AH (next header 0x33 = 51, AH). */
1466 static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
1467 { ICE_MAC_OFOS, 0 },
1468 { ICE_IPV6_OFOS, 14 },
1470 { ICE_PROTOCOL_LAST, 0 },

1473 static const u8 dummy_ipv6_ah_pkt[] = {
1474 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1475 0x00, 0x00, 0x00, 0x00,
1476 0x00, 0x00, 0x00, 0x00,
1479 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1480 0x00, 0x0c, 0x33, 0x00, /* Next header AH */
1481 0x00, 0x00, 0x00, 0x00,
1482 0x00, 0x00, 0x00, 0x00,
1483 0x00, 0x00, 0x00, 0x00,
1484 0x00, 0x00, 0x00, 0x00,
1485 0x00, 0x00, 0x00, 0x00,
1486 0x00, 0x00, 0x00, 0x00,
1487 0x00, 0x00, 0x00, 0x00,
1488 0x00, 0x00, 0x00, 0x00,
1490 0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
1491 0x00, 0x00, 0x00, 0x00,
1492 0x00, 0x00, 0x00, 0x00,
1493 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Offsets and dummy packet for IPv4 NAT-T (UDP destination port
 * 0x1194 = 4500, NAT traversal encapsulation of ESP).
 */
1496 static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
1497 { ICE_MAC_OFOS, 0 },
1498 { ICE_IPV4_OFOS, 14 },
1499 { ICE_UDP_ILOS, 34 },
1501 { ICE_PROTOCOL_LAST, 0 },

1504 static const u8 dummy_ipv4_nat_pkt[] = {
1505 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1506 0x00, 0x00, 0x00, 0x00,
1507 0x00, 0x00, 0x00, 0x00,
1510 0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_IL 14 */
1511 0x00, 0x00, 0x40, 0x00,
1512 0x40, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
1513 0x00, 0x00, 0x00, 0x00,
1514 0x00, 0x00, 0x00, 0x00,
1516 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34 */
1517 0x00, 0x00, 0x00, 0x00,
1519 0x00, 0x00, 0x00, 0x00,
1520 0x00, 0x00, 0x00, 0x00,
1521 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Offsets and dummy packet for IPv6 NAT-T (UDP destination port
 * 0x1194 = 4500, NAT traversal encapsulation of ESP).
 */
1524 static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
1525 { ICE_MAC_OFOS, 0 },
1526 { ICE_IPV6_OFOS, 14 },
1527 { ICE_UDP_ILOS, 54 },
1529 { ICE_PROTOCOL_LAST, 0 },

1532 static const u8 dummy_ipv6_nat_pkt[] = {
1533 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1534 0x00, 0x00, 0x00, 0x00,
1535 0x00, 0x00, 0x00, 0x00,
1538 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1539 0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */
1540 0x00, 0x00, 0x00, 0x00,
1541 0x00, 0x00, 0x00, 0x00,
1542 0x00, 0x00, 0x00, 0x00,
1543 0x00, 0x00, 0x00, 0x00,
1544 0x00, 0x00, 0x00, 0x00,
1545 0x00, 0x00, 0x00, 0x00,
1546 0x00, 0x00, 0x00, 0x00,
1547 0x00, 0x00, 0x00, 0x00,
1549 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54 */
1550 0x00, 0x00, 0x00, 0x00,
1552 0x00, 0x00, 0x00, 0x00,
1553 0x00, 0x00, 0x00, 0x00,
1554 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Offsets and dummy packet for IPv4/L2TPv3 (IP protocol 0x73 = 115). */
1558 static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
1559 { ICE_MAC_OFOS, 0 },
1560 { ICE_IPV4_OFOS, 14 },
1562 { ICE_PROTOCOL_LAST, 0 },

1565 static const u8 dummy_ipv4_l2tpv3_pkt[] = {
1566 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1567 0x00, 0x00, 0x00, 0x00,
1568 0x00, 0x00, 0x00, 0x00,
1571 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1572 0x00, 0x00, 0x40, 0x00,
1573 0x40, 0x73, 0x00, 0x00, /* protocol 0x73 = L2TPv3 */
1574 0x00, 0x00, 0x00, 0x00,
1575 0x00, 0x00, 0x00, 0x00,
1577 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
1578 0x00, 0x00, 0x00, 0x00,
1579 0x00, 0x00, 0x00, 0x00,
1580 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Offsets and dummy packet for IPv6/L2TPv3 (next header 0x73 = 115). */
1583 static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
1584 { ICE_MAC_OFOS, 0 },
1585 { ICE_IPV6_OFOS, 14 },
1587 { ICE_PROTOCOL_LAST, 0 },

1590 static const u8 dummy_ipv6_l2tpv3_pkt[] = {
1591 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1592 0x00, 0x00, 0x00, 0x00,
1593 0x00, 0x00, 0x00, 0x00,
1596 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
1597 0x00, 0x0c, 0x73, 0x40, /* next header 0x73, hop limit 0x40 */
1598 0x00, 0x00, 0x00, 0x00,
1599 0x00, 0x00, 0x00, 0x00,
1600 0x00, 0x00, 0x00, 0x00,
1601 0x00, 0x00, 0x00, 0x00,
1602 0x00, 0x00, 0x00, 0x00,
1603 0x00, 0x00, 0x00, 0x00,
1604 0x00, 0x00, 0x00, 0x00,
1605 0x00, 0x00, 0x00, 0x00,
1607 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
1608 0x00, 0x00, 0x00, 0x00,
1609 0x00, 0x00, 0x00, 0x00,
1610 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Offsets and dummy packet for double-tagged (QinQ, outer TPID 0x9100)
 * IPv4/UDP. Outer and inner VLAN tags sit at bytes 14 and 18.
 */
1613 static const struct ice_dummy_pkt_offsets dummy_qinq_ipv4_packet_offsets[] = {
1614 { ICE_MAC_OFOS, 0 },
1615 { ICE_ETYPE_OL, 12 },
1616 { ICE_VLAN_EX, 14 },
1617 { ICE_VLAN_OFOS, 18 },
1618 { ICE_IPV4_OFOS, 22 },
1619 { ICE_PROTOCOL_LAST, 0 },

1622 static const u8 dummy_qinq_ipv4_pkt[] = {
1623 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1624 0x00, 0x00, 0x00, 0x00,
1625 0x00, 0x00, 0x00, 0x00,
1627 0x91, 0x00, /* ICE_ETYPE_OL 12 */
1629 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1630 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 18 */
1632 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
1633 0x00, 0x01, 0x00, 0x00,
1634 0x00, 0x11, 0x00, 0x00,
1635 0x00, 0x00, 0x00, 0x00,
1636 0x00, 0x00, 0x00, 0x00,
1638 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
1639 0x00, 0x08, 0x00, 0x00,
1641 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Offsets and dummy packet for double-tagged (QinQ) IPv6/UDP. */
1644 static const struct ice_dummy_pkt_offsets dummy_qinq_ipv6_packet_offsets[] = {
1645 { ICE_MAC_OFOS, 0 },
1646 { ICE_ETYPE_OL, 12 },
1647 { ICE_VLAN_EX, 14 },
1648 { ICE_VLAN_OFOS, 18 },
1649 { ICE_IPV6_OFOS, 22 },
1650 { ICE_PROTOCOL_LAST, 0 },

1653 static const u8 dummy_qinq_ipv6_pkt[] = {
1654 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1655 0x00, 0x00, 0x00, 0x00,
1656 0x00, 0x00, 0x00, 0x00,
1658 0x91, 0x00, /* ICE_ETYPE_OL 12 */
1660 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1661 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 18 */
1663 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
1664 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
1665 0x00, 0x00, 0x00, 0x00,
1666 0x00, 0x00, 0x00, 0x00,
1667 0x00, 0x00, 0x00, 0x00,
1668 0x00, 0x00, 0x00, 0x00,
1669 0x00, 0x00, 0x00, 0x00,
1670 0x00, 0x00, 0x00, 0x00,
1671 0x00, 0x00, 0x00, 0x00,
1672 0x00, 0x00, 0x00, 0x00,
1674 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
1675 0x00, 0x10, 0x00, 0x00,
1677 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
1678 0x00, 0x00, 0x00, 0x00,
1680 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Offsets for a double-tagged (QinQ) PPPoE session header with no inner
 * L3 match, followed by the offsets/packet for QinQ PPPoE carrying IPv4.
 */
1683 static const struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_offsets[] = {
1684 { ICE_MAC_OFOS, 0 },
1685 { ICE_ETYPE_OL, 12 },
1686 { ICE_VLAN_EX, 14 },
1687 { ICE_VLAN_OFOS, 18 },
1689 { ICE_PROTOCOL_LAST, 0 },

1693 struct ice_dummy_pkt_offsets dummy_qinq_pppoe_ipv4_packet_offsets[] = {
1694 { ICE_MAC_OFOS, 0 },
1695 { ICE_ETYPE_OL, 12 },
1696 { ICE_VLAN_EX, 14 },
1697 { ICE_VLAN_OFOS, 18 },
1699 { ICE_IPV4_OFOS, 30 },
1700 { ICE_PROTOCOL_LAST, 0 },

1703 static const u8 dummy_qinq_pppoe_ipv4_pkt[] = {
1704 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1705 0x00, 0x00, 0x00, 0x00,
1706 0x00, 0x00, 0x00, 0x00,
1708 0x91, 0x00, /* ICE_ETYPE_OL 12 */
1710 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1711 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 18 */
1713 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
1716 0x00, 0x21, /* PPP Link Layer 28 */
1718 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 30 */
1719 0x00, 0x00, 0x00, 0x00,
1720 0x00, 0x00, 0x00, 0x00,
1721 0x00, 0x00, 0x00, 0x00,
1722 0x00, 0x00, 0x00, 0x00,
1724 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Offsets and dummy packet for QinQ PPPoE carrying plain IPv6
 * (next header 0x3b = No Next Header).
 */
1728 struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_ipv6_offsets[] = {
1729 { ICE_MAC_OFOS, 0 },
1730 { ICE_ETYPE_OL, 12 },
1732 { ICE_VLAN_OFOS, 18 },
1734 { ICE_IPV6_OFOS, 30 },
1735 { ICE_PROTOCOL_LAST, 0 },

1738 static const u8 dummy_qinq_pppoe_ipv6_packet[] = {
1739 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1740 0x00, 0x00, 0x00, 0x00,
1741 0x00, 0x00, 0x00, 0x00,
1743 0x91, 0x00, /* ICE_ETYPE_OL 12 */
1745 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1746 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 18 */
1748 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
1751 0x00, 0x57, /* PPP Link Layer 28*/
1753 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 30 */
1754 0x00, 0x00, 0x3b, 0x00,
1755 0x00, 0x00, 0x00, 0x00,
1756 0x00, 0x00, 0x00, 0x00,
1757 0x00, 0x00, 0x00, 0x00,
1758 0x00, 0x00, 0x00, 0x00,
1759 0x00, 0x00, 0x00, 0x00,
1760 0x00, 0x00, 0x00, 0x00,
1761 0x00, 0x00, 0x00, 0x00,
1762 0x00, 0x00, 0x00, 0x00,
1764 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Module-global caches of the recipe<->profile association matrices read
 * from firmware; refreshed by ice_get_recp_to_prof_map().
 * NOTE(review): no locking is visible at this scope — presumably callers
 * serialize access; confirm against the rule-lock usage elsewhere.
 */
1767 /* this is a recipe to profile association bitmap */
1768 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
1769 ICE_MAX_NUM_PROFILES);

1771 /* this is a profile to recipe association bitmap */
1772 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
1773 ICE_MAX_NUM_RECIPES);

1775 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
1778 * ice_collect_result_idx - copy result index values
1779 * @buf: buffer that contains the result index
1780 * @recp: the recipe struct to copy data into
1782 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
1783 struct ice_sw_recipe *recp)
	/* Only record the index when the RESULT_EN flag marks it valid; the
	 * flag bit is masked off so only the raw index lands in res_idxs.
	 */
1785 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1786 ice_set_bit(buf->content.result_indx &
1787 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
1791 * ice_get_tun_type_for_recipe - get tunnel type for the recipe
1792 * @rid: recipe ID that we are populating
 *
 * Reconstructs the software tunnel type of a firmware recipe from the
 * recipe_to_profile bitmap: classify each associated profile ID into
 * VXLAN/GRE/PPPoE/non-tunnel/GTP groups, derive a coarse tunnel type, then
 * refine it for single-profile recipes and apply QinQ variants when @vlan
 * is set. The hard-coded profile ID tables below are hardware profile
 * numbers — assumed to match the ICE_PROFID_* enumeration; TODO confirm.
1794 static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid, bool vlan)
1796 u8 vxlan_profile[12] = {10, 11, 12, 16, 17, 18, 22, 23, 24, 25, 26, 27};
1797 u8 gre_profile[12] = {13, 14, 15, 19, 20, 21, 28, 29, 30, 31, 32, 33};
1798 u8 pppoe_profile[7] = {34, 35, 36, 37, 38, 39, 40};
1799 u8 non_tun_profile[6] = {4, 5, 6, 7, 8, 9};
1800 enum ice_sw_tunnel_type tun_type;
1801 u16 i, j, profile_num = 0;
1802 bool non_tun_valid = false;
1803 bool pppoe_valid = false;
1804 bool vxlan_valid = false;
1805 bool gre_valid = false;
1806 bool gtp_valid = false;
1807 bool flag_valid = false;

	/* Pass 1: classify every profile associated with this recipe. */
1809 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1810 if (!ice_is_bit_set(recipe_to_profile[rid], j))

1815 for (i = 0; i < 12; i++) {
1816 if (gre_profile[i] == j)

1820 for (i = 0; i < 12; i++) {
1821 if (vxlan_profile[i] == j)

1825 for (i = 0; i < 7; i++) {
1826 if (pppoe_profile[i] == j)

1830 for (i = 0; i < 6; i++) {
1831 if (non_tun_profile[i] == j)
1832 non_tun_valid = true;

1835 if (j >= ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER &&
1836 j <= ICE_PROFID_IPV6_GTPU_IPV6_TCP)

1839 if ((j >= ICE_PROFID_IPV4_ESP &&
1840 j <= ICE_PROFID_IPV6_PFCP_SESSION) ||
1841 (j >= ICE_PROFID_IPV4_GTPC_TEID &&
1842 j <= ICE_PROFID_IPV6_GTPU_TEID))

	/* Coarse decision from the group flags gathered above. */
1846 if (!non_tun_valid && vxlan_valid)
1847 tun_type = ICE_SW_TUN_VXLAN;
1848 else if (!non_tun_valid && gre_valid)
1849 tun_type = ICE_SW_TUN_NVGRE;
1850 else if (!non_tun_valid && pppoe_valid)
1851 tun_type = ICE_SW_TUN_PPPOE;
1852 else if (!non_tun_valid && gtp_valid)
1853 tun_type = ICE_SW_TUN_GTP;
1854 else if (non_tun_valid &&
1855 (vxlan_valid || gre_valid || gtp_valid || pppoe_valid))
1856 tun_type = ICE_SW_TUN_AND_NON_TUN;
1857 else if (non_tun_valid && !vxlan_valid && !gre_valid && !gtp_valid &&
1859 tun_type = ICE_NON_TUN;
1861 tun_type = ICE_NON_TUN;

	/* Multi-profile PPPoE recipes: narrow to IPv4 or IPv6 flavor. */
1863 if (profile_num > 1 && tun_type == ICE_SW_TUN_PPPOE) {
1864 i = ice_is_bit_set(recipe_to_profile[rid],
1865 ICE_PROFID_PPPOE_IPV4_OTHER);
1866 j = ice_is_bit_set(recipe_to_profile[rid],
1867 ICE_PROFID_PPPOE_IPV6_OTHER);
1869 tun_type = ICE_SW_TUN_PPPOE_IPV4;
1871 tun_type = ICE_SW_TUN_PPPOE_IPV6;

	/* GTP recipes: pick inner-IP specific variant when identifiable. */
1874 if (tun_type == ICE_SW_TUN_GTP) {
1875 if (ice_is_bit_set(recipe_to_profile[rid],
1876 ICE_PROFID_IPV4_GTPU_IPV4_OTHER))
1877 tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4;
1878 else if (ice_is_bit_set(recipe_to_profile[rid],
1879 ICE_PROFID_IPV4_GTPU_IPV6_OTHER))
1880 tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6;
1881 else if (ice_is_bit_set(recipe_to_profile[rid],
1882 ICE_PROFID_IPV6_GTPU_IPV4_OTHER))
1883 tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4;
1884 else if (ice_is_bit_set(recipe_to_profile[rid],
1885 ICE_PROFID_IPV6_GTPU_IPV6_OTHER))
1886 tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6;

	/* Exactly one profile: map the profile ID directly to its type. */
1889 if (profile_num == 1 && (flag_valid || non_tun_valid || pppoe_valid)) {
1890 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1891 if (ice_is_bit_set(recipe_to_profile[rid], j)) {
1893 case ICE_PROFID_IPV4_TCP:
1894 tun_type = ICE_SW_IPV4_TCP;
1896 case ICE_PROFID_IPV4_UDP:
1897 tun_type = ICE_SW_IPV4_UDP;
1899 case ICE_PROFID_IPV6_TCP:
1900 tun_type = ICE_SW_IPV6_TCP;
1902 case ICE_PROFID_IPV6_UDP:
1903 tun_type = ICE_SW_IPV6_UDP;
1905 case ICE_PROFID_PPPOE_PAY:
1906 tun_type = ICE_SW_TUN_PPPOE_PAY;
1908 case ICE_PROFID_PPPOE_IPV4_TCP:
1909 tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1911 case ICE_PROFID_PPPOE_IPV4_UDP:
1912 tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1914 case ICE_PROFID_PPPOE_IPV4_OTHER:
1915 tun_type = ICE_SW_TUN_PPPOE_IPV4;
1917 case ICE_PROFID_PPPOE_IPV6_TCP:
1918 tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1920 case ICE_PROFID_PPPOE_IPV6_UDP:
1921 tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1923 case ICE_PROFID_PPPOE_IPV6_OTHER:
1924 tun_type = ICE_SW_TUN_PPPOE_IPV6;
1926 case ICE_PROFID_IPV4_ESP:
1927 tun_type = ICE_SW_TUN_IPV4_ESP;
1929 case ICE_PROFID_IPV6_ESP:
1930 tun_type = ICE_SW_TUN_IPV6_ESP;
1932 case ICE_PROFID_IPV4_AH:
1933 tun_type = ICE_SW_TUN_IPV4_AH;
1935 case ICE_PROFID_IPV6_AH:
1936 tun_type = ICE_SW_TUN_IPV6_AH;
1938 case ICE_PROFID_IPV4_NAT_T:
1939 tun_type = ICE_SW_TUN_IPV4_NAT_T;
1941 case ICE_PROFID_IPV6_NAT_T:
1942 tun_type = ICE_SW_TUN_IPV6_NAT_T;
1944 case ICE_PROFID_IPV4_PFCP_NODE:
1946 ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1948 case ICE_PROFID_IPV6_PFCP_NODE:
1950 ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1952 case ICE_PROFID_IPV4_PFCP_SESSION:
1954 ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1956 case ICE_PROFID_IPV6_PFCP_SESSION:
1958 ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1960 case ICE_PROFID_MAC_IPV4_L2TPV3:
1961 tun_type = ICE_SW_TUN_IPV4_L2TPV3;
1963 case ICE_PROFID_MAC_IPV6_L2TPV3:
1964 tun_type = ICE_SW_TUN_IPV6_L2TPV3;
1966 case ICE_PROFID_IPV4_GTPU_TEID:
1967 tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1969 case ICE_PROFID_IPV6_GTPU_TEID:
1970 tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;

	/* Finally, promote to the QinQ (double-VLAN) variant if requested. */
1981 if (vlan && tun_type == ICE_SW_TUN_PPPOE)
1982 tun_type = ICE_SW_TUN_PPPOE_QINQ;
1983 else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV6)
1984 tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
1985 else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV4)
1986 tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
1987 else if (vlan && tun_type == ICE_SW_TUN_PPPOE_PAY)
1988 tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
1989 else if (vlan && tun_type == ICE_SW_TUN_AND_NON_TUN)
1990 tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1991 else if (vlan && tun_type == ICE_NON_TUN)
1992 tun_type = ICE_NON_TUN_QINQ;
1998 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
1999 * @hw: pointer to hardware structure
2000 * @recps: struct that we need to populate
2001 * @rid: recipe ID that we are populating
2002 * @refresh_required: true if we should get recipe to profile mapping from FW
2004 * This function is used to populate all the necessary entries into our
2005 * bookkeeping so that we have a current list of all the recipes that are
2006 * programmed in the firmware.
 *
 * Returns ICE_SUCCESS on success, ICE_ERR_NO_MEMORY on allocation failure,
 * or the status from the get-recipe admin queue command.
2008 static enum ice_status
2009 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
2010 bool *refresh_required)
2012 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
2013 struct ice_aqc_recipe_data_elem *tmp;
2014 u16 num_recps = ICE_MAX_NUM_RECIPES;
2015 struct ice_prot_lkup_ext *lkup_exts;
2016 enum ice_status status;

2021 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);

2023 /* we need a buffer big enough to accommodate all the recipes */
2024 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
2025 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
2027 return ICE_ERR_NO_MEMORY;

2029 tmp[0].recipe_indx = rid;
2030 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
2031 /* non-zero status meaning recipe doesn't exist */

2035 /* Get recipe to profile map so that we can get the fv from lkups that
2036 * we read for a recipe from FW. Since we want to minimize the number of
2037 * times we make this FW call, just make one call and cache the copy
2038 * until a new recipe is added. This operation is only required the
2039 * first time to get the changes from FW. Then to search existing
2040 * entries we don't need to update the cache again until another recipe
2043 if (*refresh_required) {
2044 ice_get_recp_to_prof_map(hw);
2045 *refresh_required = false;

2048 /* Start populating all the entries for recps[rid] based on lkups from
2049 * firmware. Note that we are only creating the root recipe in our
2052 lkup_exts = &recps[rid].lkup_exts;

2054 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
2055 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
2056 struct ice_recp_grp_entry *rg_entry;
2057 u8 i, prof, idx, prot = 0;

2061 rg_entry = (struct ice_recp_grp_entry *)
2062 ice_malloc(hw, sizeof(*rg_entry));
2064 status = ICE_ERR_NO_MEMORY;

2068 idx = root_bufs.recipe_indx;
2069 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;

2071 /* Mark all result indices in this chain */
2072 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
2073 ice_set_bit(root_bufs.content.result_indx &
2074 ~ICE_AQ_RECIPE_RESULT_EN, result_bm);

2076 /* get the first profile that is associated with rid */
2077 prof = ice_find_first_bit(recipe_to_profile[idx],
2078 ICE_MAX_NUM_PROFILES);
2079 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
2080 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];

2082 rg_entry->fv_idx[i] = lkup_indx;
2083 rg_entry->fv_mask[i] =
2084 LE16_TO_CPU(root_bufs.content.mask[i + 1]);

2086 /* If the recipe is a chained recipe then all its
2087 * child recipe's result will have a result index.
2088 * To fill fv_words we should not use those result
2089 * index, we only need the protocol ids and offsets.
2090 * We will skip all the fv_idx which stores result
2091 * index in them. We also need to skip any fv_idx which
2092 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
2093 * valid offset value.
2095 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
2096 rg_entry->fv_idx[i]) ||
2097 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
2098 rg_entry->fv_idx[i] == 0)

2101 ice_find_prot_off(hw, ICE_BLK_SW, prof,
2102 rg_entry->fv_idx[i], &prot, &off);
2103 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
2104 lkup_exts->fv_words[fv_word_idx].off = off;
2105 lkup_exts->field_mask[fv_word_idx] =
2106 rg_entry->fv_mask[i];
		/* a metadata word at the tunnel-flag offset means this
		 * recipe matched on the tunnel flag
		 */
2107 if (prot == ICE_META_DATA_ID_HW &&
2108 off == ICE_TUN_FLAG_MDID_OFF)

2112 /* populate rg_list with the data from the child entry of this
2115 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);

2117 /* Propagate some data to the recipe database */
2118 recps[idx].is_root = !!is_root;
2119 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2120 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
2121 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
2122 recps[idx].chain_idx = root_bufs.content.result_indx &
2123 ~ICE_AQ_RECIPE_RESULT_EN;
2124 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
2126 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;

2132 /* Only do the following for root recipes entries */
2133 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
2134 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
2135 recps[idx].root_rid = root_bufs.content.rid &
2136 ~ICE_AQ_RECIPE_ID_IS_ROOT;
2137 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;

2140 /* Complete initialization of the root recipe entry */
2141 lkup_exts->n_val_words = fv_word_idx;
2142 recps[rid].big_recp = (num_recps > 1);
2143 recps[rid].n_grp_count = (u8)num_recps;
2144 recps[rid].tun_type = ice_get_tun_type_for_recipe(rid, vlan);
2145 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
2146 ice_memdup(hw, tmp, recps[rid].n_grp_count *
2147 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
2148 if (!recps[rid].root_buf)

2151 /* Copy result indexes */
2152 ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
2153 recps[rid].recp_created = true;
2161 * ice_get_recp_to_prof_map - updates recipe to profile mapping
2162 * @hw: pointer to hardware structure
2164 * This function is used to populate recipe_to_profile matrix where index to
2165 * this array is the recipe ID and the element is the mapping of which profiles
2166 * is this recipe mapped to.
 *
 * Also rebuilds the inverse profile_to_recipe map; profiles for which the
 * admin queue query fails are left zeroed.
2168 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
2170 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);

2173 for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {

2176 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
2177 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
	/* skip this profile on AQ failure; its maps stay cleared */
2178 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))

2180 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
2181 ICE_MAX_NUM_RECIPES);
	/* set the inverse mapping for every recipe this profile uses */
2182 ice_for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
2183 ice_set_bit(i, recipe_to_profile[j]);
2188 * ice_init_def_sw_recp - initialize the recipe book keeping tables
2189 * @hw: pointer to the HW struct
2190 * @recp_list: pointer to sw recipe list
2192 * Allocate memory for the entire recipe table and initialize the structures/
2193 * entries corresponding to basic recipes.
 *
 * Returns ICE_SUCCESS, or ICE_ERR_NO_MEMORY if the table cannot be
 * allocated. Ownership of the allocated table transfers to the caller
 * through @recp_list.
2196 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
2198 struct ice_sw_recipe *recps;

2201 recps = (struct ice_sw_recipe *)
2202 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
2204 return ICE_ERR_NO_MEMORY;

	/* default each entry's root recipe to itself and set up its lists */
2206 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
2207 recps[i].root_rid = i;
2208 INIT_LIST_HEAD(&recps[i].filt_rules);
2209 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
2210 INIT_LIST_HEAD(&recps[i].rg_list);
2211 ice_init_lock(&recps[i].filt_rule_lock);
2220 * ice_aq_get_sw_cfg - get switch configuration
2221 * @hw: pointer to the hardware structure
2222 * @buf: pointer to the result buffer
2223 * @buf_size: length of the buffer available for response
2224 * @req_desc: pointer to requested descriptor
2225 * @num_elems: pointer to number of elements
2226 * @cd: pointer to command details structure or NULL
2228 * Get switch configuration (0x0200) to be placed in buf.
2229 * This admin command returns information such as initial VSI/port number
2230 * and switch ID it belongs to.
2232 * NOTE: *req_desc is both an input/output parameter.
2233 * The caller of this function first calls this function with *req_desc set
2234 * to 0. If the response from f/w has *req_desc set to 0, all the switch
2235 * configuration information has been returned; if non-zero (meaning not all
2236 * the information was returned), the caller should call this function again
2237 * with *req_desc set to the previous value returned by f/w to get the
2238 * next block of switch configuration information.
2240 * *num_elems is output only parameter. This reflects the number of elements
2241 * in response buffer. The caller of this function to use *num_elems while
2242 * parsing the response buffer.
2244 static enum ice_status
2245 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
2246 u16 buf_size, u16 *req_desc, u16 *num_elems,
2247 struct ice_sq_cd *cd)
2249 struct ice_aqc_get_sw_cfg *cmd;
2250 struct ice_aq_desc desc;
2251 enum ice_status status;

2253 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
2254 cmd = &desc.params.get_sw_conf;
2255 cmd->element = CPU_TO_LE16(*req_desc);

2257 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	/* propagate the continuation cookie and element count back */
2259 *req_desc = LE16_TO_CPU(cmd->element);
2260 *num_elems = LE16_TO_CPU(cmd->num_elems);
2267 * ice_alloc_rss_global_lut - allocate a RSS global LUT
2268 * @hw: pointer to the HW struct
2269 * @shared_res: true to allocate as a shared resource and false to allocate as a dedicated resource
2270 * @global_lut_id: output parameter for the RSS global LUT's ID
 *
 * Returns ICE_SUCCESS and writes the new LUT ID to @global_lut_id, or
 * ICE_ERR_NO_MEMORY / the alloc-resource AQ status on failure.
2272 enum ice_status ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id)
2274 struct ice_aqc_alloc_free_res_elem *sw_buf;
2275 enum ice_status status;

2278 buf_len = ice_struct_size(sw_buf, elem, 1);
2279 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2281 return ICE_ERR_NO_MEMORY;

2283 sw_buf->num_elems = CPU_TO_LE16(1);
2284 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH |
2285 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
2286 ICE_AQC_RES_TYPE_FLAG_DEDICATED));

2288 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, ice_aqc_opc_alloc_res, NULL);
2290 ice_debug(hw, ICE_DBG_RES, "Failed to allocate %s RSS global LUT, status %d\n",
2291 shared_res ? "shared" : "dedicated", status);
2292 goto ice_alloc_global_lut_exit;

2295 *global_lut_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);

2297 ice_alloc_global_lut_exit:
2298 ice_free(hw, sw_buf);
2303 * ice_free_rss_global_lut - free a RSS global LUT
2304 * @hw: pointer to the HW struct
2305 * @global_lut_id: ID of the RSS global LUT to free
 *
 * Returns ICE_SUCCESS, ICE_ERR_NO_MEMORY, or the free-resource AQ status.
2307 enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id)
2309 struct ice_aqc_alloc_free_res_elem *sw_buf;
2310 u16 buf_len, num_elems = 1;
2311 enum ice_status status;

2313 buf_len = ice_struct_size(sw_buf, elem, num_elems);
2314 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2316 return ICE_ERR_NO_MEMORY;

2318 sw_buf->num_elems = CPU_TO_LE16(num_elems);
2319 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH);
2320 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(global_lut_id);

2322 status = ice_aq_alloc_free_res(hw, num_elems, sw_buf, buf_len, ice_aqc_opc_free_res, NULL);
2324 ice_debug(hw, ICE_DBG_RES, "Failed to free RSS global LUT %d, status %d\n",
2325 global_lut_id, status);

2327 ice_free(hw, sw_buf);
2332 * ice_alloc_sw - allocate resources specific to switch
2333 * @hw: pointer to the HW struct
2334 * @ena_stats: true to turn on VEB stats
2335 * @shared_res: true for shared resource, false for dedicated resource
2336 * @sw_id: switch ID returned
2337 * @counter_id: VEB counter ID returned
2339 * allocates switch resources (SWID and VEB counter) (0x0208)
 *
 * The VEB counter is only allocated when @ena_stats is requested; on any
 * failure after the SWID allocation the SWID is NOT rolled back here.
2342 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
2345 struct ice_aqc_alloc_free_res_elem *sw_buf;
2346 struct ice_aqc_res_elem *sw_ele;
2347 enum ice_status status;

2350 buf_len = ice_struct_size(sw_buf, elem, 1);
2351 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2353 return ICE_ERR_NO_MEMORY;

2355 /* Prepare buffer for switch ID.
2356 * The number of resource entries in buffer is passed as 1 since only a
2357 * single switch/VEB instance is allocated, and hence a single sw_id
2360 sw_buf->num_elems = CPU_TO_LE16(1);
2362 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
2363 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
2364 ICE_AQC_RES_TYPE_FLAG_DEDICATED));

2366 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2367 ice_aqc_opc_alloc_res, NULL);

2370 goto ice_alloc_sw_exit;

2372 sw_ele = &sw_buf->elem[0];
2373 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);

2376 /* Prepare buffer for VEB Counter */
2377 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
2378 struct ice_aqc_alloc_free_res_elem *counter_buf;
2379 struct ice_aqc_res_elem *counter_ele;

2381 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
2382 ice_malloc(hw, buf_len);
2384 status = ICE_ERR_NO_MEMORY;
2385 goto ice_alloc_sw_exit;

2388 /* The number of resource entries in buffer is passed as 1 since
2389 * only a single switch/VEB instance is allocated, and hence a
2390 * single VEB counter is requested.
2392 counter_buf->num_elems = CPU_TO_LE16(1);
2393 counter_buf->res_type =
2394 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
2395 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
2396 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,

2400 ice_free(hw, counter_buf);
2401 goto ice_alloc_sw_exit;

2403 counter_ele = &counter_buf->elem[0];
2404 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
2405 ice_free(hw, counter_buf);

2409 ice_free(hw, sw_buf);
2414 * ice_free_sw - free resources specific to switch
2415 * @hw: pointer to the HW struct
2416 * @sw_id: switch ID returned
2417 * @counter_id: VEB counter ID returned
2419 * free switch resources (SWID and VEB counter) (0x0209)
2421 * NOTE: This function frees multiple resources. It continues
2422 * releasing other resources even after it encounters error.
2423 * The error code returned is the last error it encountered.
2425 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
2427 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
2428 enum ice_status status, ret_status;

2431 buf_len = ice_struct_size(sw_buf, elem, 1);
2432 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2434 return ICE_ERR_NO_MEMORY;

2436 /* Prepare buffer to free for switch ID res.
2437 * The number of resource entries in buffer is passed as 1 since only a
2438 * single switch/VEB instance is freed, and hence a single sw_id
2441 sw_buf->num_elems = CPU_TO_LE16(1);
2442 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
2443 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);

2445 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2446 ice_aqc_opc_free_res, NULL);

2449 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");

2451 /* Prepare buffer to free for VEB Counter resource */
2452 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
2453 ice_malloc(hw, buf_len);
2455 ice_free(hw, sw_buf);
2456 return ICE_ERR_NO_MEMORY;

2459 /* The number of resource entries in buffer is passed as 1 since only a
2460 * single switch/VEB instance is freed, and hence a single VEB counter
2463 counter_buf->num_elems = CPU_TO_LE16(1);
2464 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
2465 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);

2467 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
2468 ice_aqc_opc_free_res, NULL);
	/* keep going but remember the failure as the returned status */
2470 ice_debug(hw, ICE_DBG_SW, "VEB counter resource could not be freed\n");
2471 ret_status = status;

2474 ice_free(hw, counter_buf);
2475 ice_free(hw, sw_buf);
2481 * @hw: pointer to the HW struct
2482 * @vsi_ctx: pointer to a VSI context struct
2483 * @cd: pointer to command details structure or NULL
2485 * Add a VSI context to the hardware (0x0210)
2488 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2489 struct ice_sq_cd *cd)
2491 struct ice_aqc_add_update_free_vsi_resp *res;
2492 struct ice_aqc_add_get_update_free_vsi *cmd;
2493 struct ice_aq_desc desc;
2494 enum ice_status status;
/* cmd and res alias the same descriptor: cmd is the request view,
 * res the response view filled in by firmware after the send
 */
2496 cmd = &desc.params.vsi_cmd;
2497 res = &desc.params.add_update_free_vsi_res;
2499 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
/* Caller-supplied VSI number is only sent when not allocating from pool */
2501 if (!vsi_ctx->alloc_from_pool)
2502 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
2503 ICE_AQ_VSI_IS_VALID);
2505 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
2507 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2509 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2510 sizeof(vsi_ctx->info), cd);
/* Copy back firmware-assigned VSI number and pool accounting
 * NOTE(review): the success guard around these lines appears elided here
 */
2513 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
2514 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
2515 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
2523 * @hw: pointer to the HW struct
2524 * @vsi_ctx: pointer to a VSI context struct
2525 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2526 * @cd: pointer to command details structure or NULL
2528 * Free VSI context info from hardware (0x0213)
2531 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2532 bool keep_vsi_alloc, struct ice_sq_cd *cd)
2534 struct ice_aqc_add_update_free_vsi_resp *resp;
2535 struct ice_aqc_add_get_update_free_vsi *cmd;
2536 struct ice_aq_desc desc;
2537 enum ice_status status;
2539 cmd = &desc.params.vsi_cmd;
2540 resp = &desc.params.add_update_free_vsi_res;
2542 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
2544 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* KEEP_ALLOC flag retains the allocation with this PF
 * NOTE(review): the "if (keep_vsi_alloc)" guard appears elided here
 */
2546 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
2548 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/* Refresh pool accounting from the firmware response */
2550 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2551 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2559 * @hw: pointer to the HW struct
2560 * @vsi_ctx: pointer to a VSI context struct
2561 * @cd: pointer to command details structure or NULL
2563 * Update VSI context in the hardware (0x0211)
2566 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2567 struct ice_sq_cd *cd)
2569 struct ice_aqc_add_update_free_vsi_resp *resp;
2570 struct ice_aqc_add_get_update_free_vsi *cmd;
2571 struct ice_aq_desc desc;
2572 enum ice_status status;
2574 cmd = &desc.params.vsi_cmd;
2575 resp = &desc.params.add_update_free_vsi_res;
2577 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
2579 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* RD flag: the command carries a write buffer (the VSI info section) */
2581 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2583 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2584 sizeof(vsi_ctx->info), cd);
/* Refresh pool accounting from the firmware response */
2587 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2588 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2595 * ice_is_vsi_valid - check whether the VSI is valid or not
2596 * @hw: pointer to the HW struct
2597 * @vsi_handle: VSI handle
2599 * check whether the VSI is valid or not
2601 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
/* Valid iff the handle is in range AND a context was saved for it */
2603 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
2607 * ice_get_hw_vsi_num - return the HW VSI number
2608 * @hw: pointer to the HW struct
2609 * @vsi_handle: VSI handle
2611 * return the HW VSI number
2612 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
2614 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
/* No bounds/NULL check here by design — see the caution note above */
2616 return hw->vsi_ctx[vsi_handle]->vsi_num;
2620 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
2621 * @hw: pointer to the HW struct
2622 * @vsi_handle: VSI handle
2624 * return the VSI context entry for a given VSI handle
2626 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
/* Out-of-range handles yield NULL; in-range handles may also be NULL
 * if no context was ever saved
 */
2628 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
2632 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
2633 * @hw: pointer to the HW struct
2634 * @vsi_handle: VSI handle
2635 * @vsi: VSI context pointer
2637 * save the VSI context entry for a given VSI handle
2640 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
/* Stores the pointer only — ownership of *vsi transfers to the table
 * and is released later by ice_clear_vsi_ctx
 */
2642 hw->vsi_ctx[vsi_handle] = vsi;
2646 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
2647 * @hw: pointer to the HW struct
2648 * @vsi_handle: VSI handle
2650 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
2652 struct ice_vsi_ctx *vsi;
2655 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* NOTE(review): the NULL check on vsi appears elided in this chunk */
2658 ice_for_each_traffic_class(i) {
/* Free the per-TC LAN queue context and clear the dangling pointer */
2659 if (vsi->lan_q_ctx[i]) {
2660 ice_free(hw, vsi->lan_q_ctx[i]);
2661 vsi->lan_q_ctx[i] = NULL;
2667 * ice_clear_vsi_ctx - clear the VSI context entry
2668 * @hw: pointer to the HW struct
2669 * @vsi_handle: VSI handle
2671 * clear the VSI context entry
2673 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2675 struct ice_vsi_ctx *vsi;
2677 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* Release queue contexts first, then the entry itself; finally drop
 * the table slot so ice_is_vsi_valid() returns false for this handle
 */
2679 ice_clear_vsi_q_ctx(hw, vsi_handle);
2681 hw->vsi_ctx[vsi_handle] = NULL;
2686 * ice_clear_all_vsi_ctx - clear all the VSI context entries
2687 * @hw: pointer to the HW struct
2689 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
/* Walk every possible handle; ice_clear_vsi_ctx tolerates empty slots */
2693 for (i = 0; i < ICE_MAX_VSI; i++)
2694 ice_clear_vsi_ctx(hw, i);
2698 * ice_add_vsi - add VSI context to the hardware and VSI handle list
2699 * @hw: pointer to the HW struct
2700 * @vsi_handle: unique VSI handle provided by drivers
2701 * @vsi_ctx: pointer to a VSI context struct
2702 * @cd: pointer to command details structure or NULL
2704 * Add a VSI context to the hardware also add it into the VSI handle list.
2705 * If this function gets called after reset for existing VSIs then update
2706 * with the new HW VSI number in the corresponding VSI handle list entry.
2709 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2710 struct ice_sq_cd *cd)
2712 struct ice_vsi_ctx *tmp_vsi_ctx;
2713 enum ice_status status;
2715 if (vsi_handle >= ICE_MAX_VSI)
2716 return ICE_ERR_PARAM;
2717 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
2720 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
2722 /* Create a new VSI context */
2723 tmp_vsi_ctx = (struct ice_vsi_ctx *)
2724 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
/* Allocation failed: undo the HW-side add to avoid leaking the VSI */
2726 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
2727 return ICE_ERR_NO_MEMORY;
/* Save a private copy of the context against the handle */
2729 *tmp_vsi_ctx = *vsi_ctx;
2731 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
2733 /* update with new HW VSI num */
2734 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
2741 * ice_free_vsi- free VSI context from hardware and VSI handle list
2742 * @hw: pointer to the HW struct
2743 * @vsi_handle: unique VSI handle
2744 * @vsi_ctx: pointer to a VSI context struct
2745 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2746 * @cd: pointer to command details structure or NULL
2748 * Free VSI context info from hardware as well as from VSI handle list
2751 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2752 bool keep_vsi_alloc, struct ice_sq_cd *cd)
2754 enum ice_status status;
2756 if (!ice_is_vsi_valid(hw, vsi_handle))
2757 return ICE_ERR_PARAM;
/* Translate driver handle to the firmware VSI number before the AQ call */
2758 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2759 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
/* Drop the local context entry once the HW free succeeds */
2761 ice_clear_vsi_ctx(hw, vsi_handle);
2767 * @hw: pointer to the HW struct
2768 * @vsi_handle: unique VSI handle
2769 * @vsi_ctx: pointer to a VSI context struct
2770 * @cd: pointer to command details structure or NULL
2772 * Update VSI context in the hardware
2775 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2776 struct ice_sq_cd *cd)
2778 if (!ice_is_vsi_valid(hw, vsi_handle))
2779 return ICE_ERR_PARAM;
/* Translate driver handle to the firmware VSI number, then forward */
2780 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2781 return ice_aq_update_vsi(hw, vsi_ctx, cd);
2785 * ice_aq_get_vsi_params
2786 * @hw: pointer to the HW struct
2787 * @vsi_ctx: pointer to a VSI context struct
2788 * @cd: pointer to command details structure or NULL
2790 * Get VSI context info from hardware (0x0212)
2793 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2794 struct ice_sq_cd *cd)
2796 struct ice_aqc_add_get_update_free_vsi *cmd;
2797 struct ice_aqc_get_vsi_resp *resp;
2798 struct ice_aq_desc desc;
2799 enum ice_status status;
2801 cmd = &desc.params.vsi_cmd;
2802 resp = &desc.params.get_vsi_resp;
2804 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
2806 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* Firmware writes the VSI info section directly into vsi_ctx->info */
2808 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2809 sizeof(vsi_ctx->info), cd);
/* Copy back VSI number and pool accounting from the response
 * NOTE(review): the success guard around these lines appears elided here
 */
2811 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
2813 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2814 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2821 * ice_aq_add_update_mir_rule - add/update a mirror rule
2822 * @hw: pointer to the HW struct
2823 * @rule_type: Rule Type
2824 * @dest_vsi: VSI number to which packets will be mirrored
2825 * @count: length of the list
2826 * @mr_buf: buffer for list of mirrored VSI numbers
2827 * @cd: pointer to command details structure or NULL
2830 * Add/Update Mirror Rule (0x260).
2833 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
2834 u16 count, struct ice_mir_rule_buf *mr_buf,
2835 struct ice_sq_cd *cd, u16 *rule_id)
2837 struct ice_aqc_add_update_mir_rule *cmd;
2838 struct ice_aq_desc desc;
2839 enum ice_status status;
2840 __le16 *mr_list = NULL;
/* Validate parameter combinations per rule type before touching HW */
2843 switch (rule_type) {
2844 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
2845 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
2846 /* Make sure count and mr_buf are set for these rule_types */
2847 if (!(count && mr_buf))
2848 return ICE_ERR_PARAM;
2850 buf_size = count * sizeof(__le16);
2851 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
2853 return ICE_ERR_NO_MEMORY;
2855 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
2856 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
2857 /* Make sure count and mr_buf are not set for these
2860 if (count || mr_buf)
2861 return ICE_ERR_PARAM;
2864 ice_debug(hw, ICE_DBG_SW, "Error due to unsupported rule_type %u\n", rule_type);
2865 return ICE_ERR_OUT_OF_RANGE;
2868 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
2870 /* Pre-process 'mr_buf' items for add/update of virtual port
2871 * ingress/egress mirroring (but not physical port ingress/egress
2877 for (i = 0; i < count; i++) {
2880 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
2882 /* Validate specified VSI number, make sure it is less
2883 * than ICE_MAX_VSI, if not return with error.
2885 if (id >= ICE_MAX_VSI) {
2886 ice_debug(hw, ICE_DBG_SW, "Error VSI index (%u) out-of-range\n",
/* Abort: free the partially built list before returning */
2888 ice_free(hw, mr_list);
2889 return ICE_ERR_OUT_OF_RANGE;
2892 /* add VSI to mirror rule */
2895 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
2896 else /* remove VSI from mirror rule */
2897 mr_list[i] = CPU_TO_LE16(id);
2901 cmd = &desc.params.add_update_rule;
/* A valid incoming *rule_id means "update"; otherwise firmware
 * allocates a new rule and returns its ID in the response
 */
2902 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
2903 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
2904 ICE_AQC_RULE_ID_VALID_M);
2905 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
2906 cmd->num_entries = CPU_TO_LE16(count);
2907 cmd->dest = CPU_TO_LE16(dest_vsi);
2909 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
2911 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
2913 ice_free(hw, mr_list);
2919 * ice_aq_delete_mir_rule - delete a mirror rule
2920 * @hw: pointer to the HW struct
2921 * @rule_id: Mirror rule ID (to be deleted)
2922 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
2923 * otherwise it is returned to the shared pool
2924 * @cd: pointer to command details structure or NULL
2926 * Delete Mirror Rule (0x261).
2929 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
2930 struct ice_sq_cd *cd)
2932 struct ice_aqc_delete_mir_rule *cmd;
2933 struct ice_aq_desc desc;
2935 /* rule_id should be in the range 0...63 */
2936 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
2937 return ICE_ERR_OUT_OF_RANGE;
2939 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
2941 cmd = &desc.params.del_rule;
/* Firmware requires the VALID bit to be set alongside the rule ID */
2942 rule_id |= ICE_AQC_RULE_ID_VALID_M;
2943 cmd->rule_id = CPU_TO_LE16(rule_id);
/* NOTE(review): the "if (keep_allocd)" guard appears elided here */
2946 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
2948 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2952 * ice_aq_alloc_free_vsi_list
2953 * @hw: pointer to the HW struct
2954 * @vsi_list_id: VSI list ID returned or used for lookup
2955 * @lkup_type: switch rule filter lookup type
2956 * @opc: switch rules population command type - pass in the command opcode
2958 * allocates or free a VSI list resource
2960 static enum ice_status
2961 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
2962 enum ice_sw_lkup_type lkup_type,
2963 enum ice_adminq_opc opc)
2965 struct ice_aqc_alloc_free_res_elem *sw_buf;
2966 struct ice_aqc_res_elem *vsi_ele;
2967 enum ice_status status;
2970 buf_len = ice_struct_size(sw_buf, elem, 1);
2971 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2973 return ICE_ERR_NO_MEMORY;
2974 sw_buf->num_elems = CPU_TO_LE16(1);
/* Lookup type selects the list resource flavor: replication lists for
 * MAC/ethertype/promisc rules, prune lists only for VLAN
 */
2976 if (lkup_type == ICE_SW_LKUP_MAC ||
2977 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2978 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2979 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2980 lkup_type == ICE_SW_LKUP_PROMISC ||
2981 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2982 lkup_type == ICE_SW_LKUP_LAST) {
2983 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
2984 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
2986 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
2988 status = ICE_ERR_PARAM;
2989 goto ice_aq_alloc_free_vsi_list_exit;
/* On free, the caller-supplied list ID identifies what to release */
2992 if (opc == ice_aqc_opc_free_res)
2993 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
2995 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
2997 goto ice_aq_alloc_free_vsi_list_exit;
/* On alloc, report the firmware-assigned list ID back to the caller */
2999 if (opc == ice_aqc_opc_alloc_res) {
3000 vsi_ele = &sw_buf->elem[0];
3001 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
3004 ice_aq_alloc_free_vsi_list_exit:
3005 ice_free(hw, sw_buf);
3010 * ice_aq_set_storm_ctrl - Sets storm control configuration
3011 * @hw: pointer to the HW struct
3012 * @bcast_thresh: represents the upper threshold for broadcast storm control
3013 * @mcast_thresh: represents the upper threshold for multicast storm control
3014 * @ctl_bitmask: storm control knobs
3016 * Sets the storm control configuration (0x0280)
3019 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
3022 struct ice_aqc_storm_cfg *cmd;
3023 struct ice_aq_desc desc;
3025 cmd = &desc.params.storm_conf;
3027 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
/* Thresholds are masked to the valid field width before sending */
3029 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
3030 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
3031 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
3033 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3037 * ice_aq_get_storm_ctrl - gets storm control configuration
3038 * @hw: pointer to the HW struct
3039 * @bcast_thresh: represents the upper threshold for broadcast storm control
3040 * @mcast_thresh: represents the upper threshold for multicast storm control
3041 * @ctl_bitmask: storm control knobs
3043 * Gets the storm control configuration (0x0281)
3046 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
3049 enum ice_status status;
3050 struct ice_aq_desc desc;
3052 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
3054 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
/* On success, unpack each requested field from the response descriptor;
 * output pointers look optional (each assignment appears to be guarded)
 */
3056 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
3059 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
3062 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
3065 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
3072 * ice_aq_sw_rules - add/update/remove switch rules
3073 * @hw: pointer to the HW struct
3074 * @rule_list: pointer to switch rule population list
3075 * @rule_list_sz: total size of the rule list in bytes
3076 * @num_rules: number of switch rules in the rule_list
3077 * @opc: switch rules population command type - pass in the command opcode
3078 * @cd: pointer to command details structure or NULL
3080 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
3082 static enum ice_status
3083 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
3084 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
3086 struct ice_aq_desc desc;
3087 enum ice_status status;
3089 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
/* Only the three switch-rule opcodes are accepted by this helper */
3091 if (opc != ice_aqc_opc_add_sw_rules &&
3092 opc != ice_aqc_opc_update_sw_rules &&
3093 opc != ice_aqc_opc_remove_sw_rules)
3094 return ICE_ERR_PARAM;
3096 ice_fill_dflt_direct_cmd_desc(&desc, opc);
3098 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3099 desc.params.sw_rules.num_rules_fltr_entry_index =
3100 CPU_TO_LE16(num_rules);
3101 status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
/* Map firmware ENOENT on update/remove to a distinct driver error so
 * callers can tell "rule does not exist" from other AQ failures
 */
3102 if (opc != ice_aqc_opc_add_sw_rules &&
3103 hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
3104 status = ICE_ERR_DOES_NOT_EXIST;
3110 * ice_aq_add_recipe - add switch recipe
3111 * @hw: pointer to the HW struct
3112 * @s_recipe_list: pointer to switch rule population list
3113 * @num_recipes: number of switch recipes in the list
3114 * @cd: pointer to command details structure or NULL
3119 ice_aq_add_recipe(struct ice_hw *hw,
3120 struct ice_aqc_recipe_data_elem *s_recipe_list,
3121 u16 num_recipes, struct ice_sq_cd *cd)
3123 struct ice_aqc_add_get_recipe *cmd;
3124 struct ice_aq_desc desc;
3127 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3128 cmd = &desc.params.add_get_recipe;
3129 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
3131 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
/* RD flag: the recipe list is a write buffer consumed by firmware */
3132 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3134 buf_size = num_recipes * sizeof(*s_recipe_list);
3136 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
3140 * ice_aq_get_recipe - get switch recipe
3141 * @hw: pointer to the HW struct
3142 * @s_recipe_list: pointer to switch rule population list
3143 * @num_recipes: pointer to the number of recipes (input and output)
3144 * @recipe_root: root recipe number of recipe(s) to retrieve
3145 * @cd: pointer to command details structure or NULL
3149 * On input, *num_recipes should equal the number of entries in s_recipe_list.
3150 * On output, *num_recipes will equal the number of entries returned in
3153 * The caller must supply enough space in s_recipe_list to hold all possible
3154 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
3157 ice_aq_get_recipe(struct ice_hw *hw,
3158 struct ice_aqc_recipe_data_elem *s_recipe_list,
3159 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
3161 struct ice_aqc_add_get_recipe *cmd;
3162 struct ice_aq_desc desc;
3163 enum ice_status status;
/* Enforce the full-size buffer contract documented above */
3166 if (*num_recipes != ICE_MAX_NUM_RECIPES)
3167 return ICE_ERR_PARAM;
3169 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3170 cmd = &desc.params.add_get_recipe;
3171 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
3173 cmd->return_index = CPU_TO_LE16(recipe_root);
3174 cmd->num_sub_recipes = 0;
3176 buf_size = *num_recipes * sizeof(*s_recipe_list);
3178 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
3179 /* cppcheck-suppress constArgument */
/* Firmware writes back how many sub-recipes it actually returned */
3180 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
3186 * ice_update_recipe_lkup_idx - update a default recipe based on the lkup_idx
3187 * @hw: pointer to the HW struct
3188 * @params: parameters used to update the default recipe
3190 * This function only supports updating default recipes and it only supports
3191 * updating a single recipe based on the lkup_idx at a time.
3193 * This is done as a read-modify-write operation. First, get the current recipe
3194 * contents based on the recipe's ID. Then modify the field vector index and
3195 * mask if it's valid at the lkup_idx. Finally, use the add recipe AQ to update
3196 * the pre-existing recipe with the modifications.
3199 ice_update_recipe_lkup_idx(struct ice_hw *hw,
3200 struct ice_update_recipe_lkup_idx_params *params)
3202 struct ice_aqc_recipe_data_elem *rcp_list;
3203 u16 num_recps = ICE_MAX_NUM_RECIPES;
3204 enum ice_status status;
/* Buffer must hold the max recipe count per the get-recipe contract */
3206 rcp_list = (struct ice_aqc_recipe_data_elem *)ice_malloc(hw, num_recps * sizeof(*rcp_list));
3208 return ICE_ERR_NO_MEMORY;
3210 /* read current recipe list from firmware */
3211 rcp_list->recipe_indx = params->rid;
3212 status = ice_aq_get_recipe(hw, rcp_list, &num_recps, params->rid, NULL);
3214 ice_debug(hw, ICE_DBG_SW, "Failed to get recipe %d, status %d\n",
3215 params->rid, status);
3219 /* only modify existing recipe's lkup_idx and mask if valid, while
3220 * leaving all other fields the same, then update the recipe firmware
3222 rcp_list->content.lkup_indx[params->lkup_idx] = params->fv_idx;
3223 if (params->mask_valid)
3224 rcp_list->content.mask[params->lkup_idx] =
3225 CPU_TO_LE16(params->mask);
/* IGNORE bit tells HW to skip matching on this lookup index */
3227 if (params->ignore_valid)
3228 rcp_list->content.lkup_indx[params->lkup_idx] |=
3229 ICE_AQ_RECIPE_LKUP_IGNORE;
/* Write-back: add-recipe with an existing recipe index updates in place */
3231 status = ice_aq_add_recipe(hw, &rcp_list[0], 1, NULL);
3233 ice_debug(hw, ICE_DBG_SW, "Failed to update recipe %d lkup_idx %d fv_idx %d mask %d mask_valid %s, status %d\n",
3234 params->rid, params->lkup_idx, params->fv_idx,
3235 params->mask, params->mask_valid ? "true" : "false",
3239 ice_free(hw, rcp_list);
3244 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
3245 * @hw: pointer to the HW struct
3246 * @profile_id: package profile ID to associate the recipe with
3247 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
3248 * @cd: pointer to command details structure or NULL
3249 * Recipe to profile association (0x0291)
3252 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
3253 struct ice_sq_cd *cd)
3255 struct ice_aqc_recipe_to_profile *cmd;
3256 struct ice_aq_desc desc;
3258 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3259 cmd = &desc.params.recipe_to_profile;
3260 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
3261 cmd->profile_id = CPU_TO_LE16(profile_id);
3262 /* Set the recipe ID bit in the bitmask to let the device know which
3263 * profile we are associating the recipe to
3265 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
3266 ICE_NONDMA_TO_NONDMA);
3268 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3272 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
3273 * @hw: pointer to the HW struct
3274 * @profile_id: package profile ID to associate the recipe with
3275 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
3276 * @cd: pointer to command details structure or NULL
3277 * Associate profile ID with given recipe (0x0293)
3280 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
3281 struct ice_sq_cd *cd)
3283 struct ice_aqc_recipe_to_profile *cmd;
3284 struct ice_aq_desc desc;
3285 enum ice_status status;
3287 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3288 cmd = &desc.params.recipe_to_profile;
3289 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
3290 cmd->profile_id = CPU_TO_LE16(profile_id);
3292 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/* Copy the recipe association bitmap out of the response descriptor */
3294 ice_memcpy(r_bitmap, cmd->recipe_assoc,
3295 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
3301 * ice_alloc_recipe - add recipe resource
3302 * @hw: pointer to the hardware structure
3303 * @rid: recipe ID returned as response to AQ call
3305 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
3307 struct ice_aqc_alloc_free_res_elem *sw_buf;
3308 enum ice_status status;
3311 buf_len = ice_struct_size(sw_buf, elem, 1);
3312 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
3314 return ICE_ERR_NO_MEMORY;
/* Request one shared recipe resource from the firmware pool */
3316 sw_buf->num_elems = CPU_TO_LE16(1);
3317 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
3318 ICE_AQC_RES_TYPE_S) |
3319 ICE_AQC_RES_TYPE_FLAG_SHARED);
3320 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
3321 ice_aqc_opc_alloc_res, NULL);
/* Firmware returns the allocated recipe ID in the response element */
3323 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
3324 ice_free(hw, sw_buf);
3329 /* ice_init_port_info - Initialize port_info with switch configuration data
3330 * @pi: pointer to port_info
3331 * @vsi_port_num: VSI number or port number
3332 * @type: Type of switch element (port or VSI)
3333 * @swid: switch ID of the switch the element is attached to
3334 * @pf_vf_num: PF or VF number
3335 * @is_vf: true if the element is a VF, false otherwise
3338 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
3339 u16 swid, u16 pf_vf_num, bool is_vf)
3342 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
3343 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
3345 pi->pf_vf_num = pf_vf_num;
/* No default VSI assigned yet for either direction */
3347 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3348 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
/* Unrecognized element types are logged and otherwise ignored */
3351 ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
3356 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
3357 * @hw: pointer to the hardware structure
3359 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
3361 struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
3362 enum ice_status status;
/* Only one physical port is expected by this base-code path */
3369 num_total_ports = 1;
3371 rbuf = (struct ice_aqc_get_sw_cfg_resp_elem *)
3372 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
3375 return ICE_ERR_NO_MEMORY;
3377 /* Multiple calls to ice_aq_get_sw_cfg may be required
3378 * to get all the switch configuration information. The need
3379 * for additional calls is indicated by ice_aq_get_sw_cfg
3380 * writing a non-zero value in req_desc
3383 struct ice_aqc_get_sw_cfg_resp_elem *ele;
3385 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
3386 &req_desc, &num_elems, NULL);
3391 for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
3392 u16 pf_vf_num, swid, vsi_port_num;
/* Decode each element: port/VSI number, owning function, and SWID
 * are packed into masked fields of the response
 */
3396 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
3397 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
3399 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
3400 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
3402 swid = LE16_TO_CPU(ele->swid);
3404 if (LE16_TO_CPU(ele->pf_vf_num) &
3405 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
3408 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
3409 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
3412 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
3413 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
3414 if (j == num_total_ports) {
3415 ice_debug(hw, ICE_DBG_SW, "more ports than expected\n");
3416 status = ICE_ERR_CFG;
3419 ice_init_port_info(hw->port_info,
3420 vsi_port_num, res_type, swid,
/* Keep looping while firmware signals more descriptors remain */
3428 } while (req_desc && !status);
3436 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
3437 * @hw: pointer to the hardware structure
3438 * @fi: filter info structure to fill/update
3440 * This helper function populates the lb_en and lan_en elements of the provided
3441 * ice_fltr_info struct using the switch's type and characteristics of the
3442 * switch rule being configured.
3444 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
/* RX forwarding rules with the LAST lookup type take the first branch
 * NOTE(review): the branch bodies appear elided in this chunk
 */
3446 if ((fi->flag & ICE_FLTR_RX) &&
3447 (fi->fltr_act == ICE_FWD_TO_VSI ||
3448 fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
3449 fi->lkup_type == ICE_SW_LKUP_LAST)
3453 if ((fi->flag & ICE_FLTR_TX) &&
3454 (fi->fltr_act == ICE_FWD_TO_VSI ||
3455 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
3456 fi->fltr_act == ICE_FWD_TO_Q ||
3457 fi->fltr_act == ICE_FWD_TO_QGRP)) {
3458 /* Setting LB for prune actions will result in replicated
3459 * packets to the internal switch that will be dropped.
3461 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
3464 /* Set lan_en to TRUE if
3465 * 1. The switch is a VEB AND
3467 * 2.1 The lookup is a directional lookup like ethertype,
3468 * promiscuous, ethertype-MAC, promiscuous-VLAN
3469 * and default-port OR
3470 * 2.2 The lookup is VLAN, OR
3471 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
3472 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
3476 * The switch is a VEPA.
3478 * In all other cases, the LAN enable has to be set to false.
3481 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3482 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
3483 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3484 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3485 fi->lkup_type == ICE_SW_LKUP_DFLT ||
3486 fi->lkup_type == ICE_SW_LKUP_VLAN ||
3487 (fi->lkup_type == ICE_SW_LKUP_MAC &&
3488 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
3489 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
3490 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
3499 * ice_fill_sw_rule - Helper function to fill switch rule structure
3500 * @hw: pointer to the hardware structure
3501 * @f_info: entry containing packet forwarding information
3502 * @s_rule: switch rule structure to be filled in based on mac_entry
3503 * @opc: switch rules population command type - pass in the command opcode
3506 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
3507 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
/* vlan_id starts out-of-range so "was a VLAN requested?" can be tested
 * later with (vlan_id > ICE_MAX_VLAN_ID); TPID defaults to 802.1Q
 */
3509 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
3510 u16 vlan_tpid = ICE_ETH_P_8021Q;
/* Remove rules only need the rule index — no header or action fields */
3518 if (opc == ice_aqc_opc_remove_sw_rules) {
3519 s_rule->pdata.lkup_tx_rx.act = 0;
3520 s_rule->pdata.lkup_tx_rx.index =
3521 CPU_TO_LE16(f_info->fltr_rule_id);
3522 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
3526 eth_hdr_sz = sizeof(dummy_eth_header);
3527 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
3529 /* initialize the ether header with a dummy header */
3530 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
3531 ice_fill_sw_info(hw, f_info);
/* Build the 32-bit action word from the filter's forwarding action */
3533 switch (f_info->fltr_act) {
3534 case ICE_FWD_TO_VSI:
3535 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
3536 ICE_SINGLE_ACT_VSI_ID_M;
3537 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
3538 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3539 ICE_SINGLE_ACT_VALID_BIT;
3541 case ICE_FWD_TO_VSI_LIST:
3542 act |= ICE_SINGLE_ACT_VSI_LIST;
3543 act |= (f_info->fwd_id.vsi_list_id <<
3544 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
3545 ICE_SINGLE_ACT_VSI_LIST_ID_M;
3546 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
3547 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3548 ICE_SINGLE_ACT_VALID_BIT;
3551 act |= ICE_SINGLE_ACT_TO_Q;
3552 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3553 ICE_SINGLE_ACT_Q_INDEX_M;
3555 case ICE_DROP_PACKET:
3556 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
3557 ICE_SINGLE_ACT_VALID_BIT;
3559 case ICE_FWD_TO_QGRP:
/* Queue-group size is encoded as log2 in the Q_REGION field */
3560 q_rgn = f_info->qgrp_size > 0 ?
3561 (u8)ice_ilog2(f_info->qgrp_size) : 0;
3562 act |= ICE_SINGLE_ACT_TO_Q;
3563 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3564 ICE_SINGLE_ACT_Q_INDEX_M;
3565 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
3566 ICE_SINGLE_ACT_Q_REGION_M;
/* lb_en/lan_en decided earlier by ice_fill_sw_info */
3573 act |= ICE_SINGLE_ACT_LB_ENABLE;
3575 act |= ICE_SINGLE_ACT_LAN_ENABLE;
/* Pull match data (DA / VLAN / ethertype) out of the lookup union */
3577 switch (f_info->lkup_type) {
3578 case ICE_SW_LKUP_MAC:
3579 daddr = f_info->l_data.mac.mac_addr;
3581 case ICE_SW_LKUP_VLAN:
3582 vlan_id = f_info->l_data.vlan.vlan_id;
3583 if (f_info->l_data.vlan.tpid_valid)
3584 vlan_tpid = f_info->l_data.vlan.tpid;
3585 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
3586 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
3587 act |= ICE_SINGLE_ACT_PRUNE;
3588 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
3591 case ICE_SW_LKUP_ETHERTYPE_MAC:
3592 daddr = f_info->l_data.ethertype_mac.mac_addr;
3594 case ICE_SW_LKUP_ETHERTYPE:
3595 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
3596 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
3598 case ICE_SW_LKUP_MAC_VLAN:
3599 daddr = f_info->l_data.mac_vlan.mac_addr;
3600 vlan_id = f_info->l_data.mac_vlan.vlan_id;
3602 case ICE_SW_LKUP_PROMISC_VLAN:
3603 vlan_id = f_info->l_data.mac_vlan.vlan_id;
3605 case ICE_SW_LKUP_PROMISC:
3606 daddr = f_info->l_data.mac_vlan.mac_addr;
/* RX vs TX selects the rule element type */
3612 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
3613 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
3614 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
3616 /* Recipe set depending on lookup type */
3617 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
3618 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
3619 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* Patch the destination MAC into the dummy header when one was set */
3622 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
3623 ICE_NONDMA_TO_NONDMA);
/* vlan_id still at its sentinel means no VLAN match was requested */
3625 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
3626 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
3627 *off = CPU_TO_BE16(vlan_id);
3628 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
3629 *off = CPU_TO_BE16(vlan_tpid);
3632 /* Create the switch rule with the final dummy Ethernet header */
3633 if (opc != ice_aqc_opc_update_sw_rules)
3634 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
3638 * ice_add_marker_act
3639 * @hw: pointer to the hardware structure
3640 * @m_ent: the management entry for which sw marker needs to be added
3641 * @sw_marker: sw marker to tag the Rx descriptor with
3642 * @l_id: large action resource ID
3644 * Create a large action to hold software marker and update the switch rule
3645 * entry pointed by m_ent with newly created large action
3647 static enum ice_status
3648 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3649 u16 sw_marker, u16 l_id)
3651 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
3652 /* For software marker we need 3 large actions
3653 * 1. FWD action: FWD TO VSI or VSI LIST
3654 * 2. GENERIC VALUE action to hold the profile ID
3655 * 3. GENERIC VALUE action to hold the software marker ID
3657 const u16 num_lg_acts = 3;
3658 enum ice_status status;
/* Software markers are only attached to MAC lookup rules */
3664 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3665 return ICE_ERR_PARAM;
3667 /* Create two back-to-back switch rules and submit them to the HW using
3668 * one memory buffer:
3672 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
3673 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3674 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3676 return ICE_ERR_NO_MEMORY;
/* The lookup rule immediately follows the large action in the buffer */
3678 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3680 /* Fill in the first switch rule i.e. large action */
3681 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3682 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3683 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
3685 /* First action VSI forwarding or VSI list forwarding depending on how
/* A vsi_count > 1 means this entry already forwards to a VSI list */
3688 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
3689 m_ent->fltr_info.fwd_id.hw_vsi_id;
3691 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3692 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
3693 if (m_ent->vsi_count > 1)
3694 act |= ICE_LG_ACT_VSI_LIST;
3695 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3697 /* Second action descriptor type */
3698 act = ICE_LG_ACT_GENERIC;
3700 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
3701 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
/* Offset selects the Rx-descriptor profile-index field (per macro name)
 * that the generic marker value is written into.
 */
3703 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
3704 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
3706 /* Third action Marker value */
3707 act |= ICE_LG_ACT_GENERIC;
3708 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
3709 ICE_LG_ACT_GENERIC_VALUE_M;
3711 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
3713 /* call the fill switch rule to fill the lookup Tx Rx structure */
3714 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3715 ice_aqc_opc_update_sw_rules);
3717 /* Update the action to point to the large action ID */
3718 rx_tx->pdata.lkup_tx_rx.act =
3719 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
3720 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
3721 ICE_SINGLE_ACT_PTR_VAL_M));
3723 /* Use the filter rule ID of the previously created rule with single
3724 * act. Once the update happens, hardware will treat this as large
3727 rx_tx->pdata.lkup_tx_rx.index =
3728 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
/* Submit both rules (large action + updated lookup rule) in one AQ call */
3730 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3731 ice_aqc_opc_update_sw_rules, NULL);
/* Record the marker and large-action index on the management entry
 * (NOTE(review): in the full source this looks success-guarded; the
 * guard line is elided from this view — confirm against upstream).
 */
3733 m_ent->lg_act_idx = l_id;
3734 m_ent->sw_marker_id = sw_marker;
3737 ice_free(hw, lg_act);
3742 * ice_add_counter_act - add/update filter rule with counter action
3743 * @hw: pointer to the hardware structure
3744 * @m_ent: the management entry for which counter needs to be added
3745 * @counter_id: VLAN counter ID returned as part of allocate resource
3746 * @l_id: large action resource ID
3748 static enum ice_status
3749 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3750 u16 counter_id, u16 l_id)
3752 struct ice_aqc_sw_rules_elem *lg_act;
3753 struct ice_aqc_sw_rules_elem *rx_tx;
3754 enum ice_status status;
3755 /* 2 actions will be added while adding a large action counter */
3756 const int num_acts = 2;
/* Counters are only attached to MAC lookup rules */
3763 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3764 return ICE_ERR_PARAM;
3766 /* Create two back-to-back switch rules and submit them to the HW using
3767 * one memory buffer:
3771 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
3772 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3773 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3775 return ICE_ERR_NO_MEMORY;
/* The lookup rule is laid out right after the large action */
3777 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3779 /* Fill in the first switch rule i.e. large action */
3780 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3781 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3782 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
3784 /* First action VSI forwarding or VSI list forwarding depending on how
/* vsi_count > 1 means this entry forwards to a VSI list, not one VSI */
3787 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
3788 m_ent->fltr_info.fwd_id.hw_vsi_id;
3790 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3791 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
3792 ICE_LG_ACT_VSI_LIST_ID_M;
3793 if (m_ent->vsi_count > 1)
3794 act |= ICE_LG_ACT_VSI_LIST;
3795 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3797 /* Second action counter ID */
3798 act = ICE_LG_ACT_STAT_COUNT;
3799 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
3800 ICE_LG_ACT_STAT_COUNT_M;
3801 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3803 /* call the fill switch rule to fill the lookup Tx Rx structure */
3804 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3805 ice_aqc_opc_update_sw_rules);
/* Redirect the lookup rule's action to the large action just built */
3807 act = ICE_SINGLE_ACT_PTR;
3808 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
3809 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
3811 /* Use the filter rule ID of the previously created rule with single
3812 * act. Once the update happens, hardware will treat this as large
3815 f_rule_id = m_ent->fltr_info.fltr_rule_id;
3816 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
/* Submit both rules in a single AQ request */
3818 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3819 ice_aqc_opc_update_sw_rules, NULL);
/* Record the counter and large-action index on the management entry */
3821 m_ent->lg_act_idx = l_id;
3822 m_ent->counter_index = counter_id;
3825 ice_free(hw, lg_act);
3830 * ice_create_vsi_list_map
3831 * @hw: pointer to the hardware structure
3832 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
3833 * @num_vsi: number of VSI handles in the array
3834 * @vsi_list_id: VSI list ID generated as part of allocate resource
3836 * Helper function to create a new entry of VSI list ID to VSI mapping
3837 * using the given VSI list ID
3839 static struct ice_vsi_list_map_info *
3840 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3843 struct ice_switch_info *sw = hw->switch_info;
3844 struct ice_vsi_list_map_info *v_map;
3847 v_map = (struct ice_vsi_list_map_info *)ice_malloc(hw, sizeof(*v_map));
3851 v_map->vsi_list_id = vsi_list_id;
/* Record each VSI handle in the map's bitmap */
3853 for (i = 0; i < num_vsi; i++)
3854 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
/* Track the map on the switch-wide list of VSI list mappings */
3856 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
3861 * ice_update_vsi_list_rule
3862 * @hw: pointer to the hardware structure
3863 * @vsi_handle_arr: array of VSI handles to form a VSI list
3864 * @num_vsi: number of VSI handles in the array
3865 * @vsi_list_id: VSI list ID generated as part of allocate resource
3866 * @remove: Boolean value to indicate if this is a remove action
3867 * @opc: switch rules population command type - pass in the command opcode
3868 * @lkup_type: lookup type of the filter
3870 * Call AQ command to add a new switch rule or update existing switch rule
3871 * using the given VSI list ID
3873 static enum ice_status
3874 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3875 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
3876 enum ice_sw_lkup_type lkup_type)
3878 struct ice_aqc_sw_rules_elem *s_rule;
3879 enum ice_status status;
3885 return ICE_ERR_PARAM;
/* VLAN lookups are managed via prune-list rules; every other supported
 * lookup type uses VSI-list set/clear rules. Unknown types are rejected.
 */
3887 if (lkup_type == ICE_SW_LKUP_MAC ||
3888 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
3889 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3890 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3891 lkup_type == ICE_SW_LKUP_PROMISC ||
3892 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3893 lkup_type == ICE_SW_LKUP_LAST)
3894 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
3895 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
3896 else if (lkup_type == ICE_SW_LKUP_VLAN)
3897 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
3898 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
3900 return ICE_ERR_PARAM;
/* Rule buffer size scales with the number of VSIs in the list */
3902 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
3903 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3905 return ICE_ERR_NO_MEMORY;
/* Validate each handle and translate it to its HW VSI number */
3906 for (i = 0; i < num_vsi; i++) {
3907 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
3908 status = ICE_ERR_PARAM;
3911 /* AQ call requires hw_vsi_id(s) */
3912 s_rule->pdata.vsi_list.vsi[i] =
3913 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
3916 s_rule->type = CPU_TO_LE16(rule_type);
3917 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
3918 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
3920 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
3923 ice_free(hw, s_rule);
3928 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
3929 * @hw: pointer to the HW struct
3930 * @vsi_handle_arr: array of VSI handles to form a VSI list
3931 * @num_vsi: number of VSI handles in the array
3932 * @vsi_list_id: stores the ID of the VSI list to be created
3933 * @lkup_type: switch rule filter's lookup type
3935 static enum ice_status
3936 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3937 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
3939 enum ice_status status;
/* Allocate the VSI list resource; the new ID comes back in *vsi_list_id */
3941 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
3942 ice_aqc_opc_alloc_res);
3946 /* Update the newly created VSI list to include the specified VSIs */
3947 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
3948 *vsi_list_id, false,
3949 ice_aqc_opc_add_sw_rules, lkup_type);
3953 * ice_create_pkt_fwd_rule
3954 * @hw: pointer to the hardware structure
3955 * @recp_list: corresponding filter management list
3956 * @f_entry: entry containing packet forwarding information
3958 * Create switch rule with given filter information and add an entry
3959 * to the corresponding filter management list to track this switch rule
3962 static enum ice_status
3963 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3964 struct ice_fltr_list_entry *f_entry)
3966 struct ice_fltr_mgmt_list_entry *fm_entry;
3967 struct ice_aqc_sw_rules_elem *s_rule;
3968 enum ice_status status;
3970 s_rule = (struct ice_aqc_sw_rules_elem *)
3971 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
3973 return ICE_ERR_NO_MEMORY;
3974 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3975 ice_malloc(hw, sizeof(*fm_entry));
3977 status = ICE_ERR_NO_MEMORY;
3978 goto ice_create_pkt_fwd_rule_exit;
/* The bookkeeping entry starts as a copy of the caller's filter info */
3981 fm_entry->fltr_info = f_entry->fltr_info;
3983 /* Initialize all the fields for the management entry */
3984 fm_entry->vsi_count = 1;
3985 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
3986 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
3987 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
3989 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
3990 ice_aqc_opc_add_sw_rules);
3992 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3993 ice_aqc_opc_add_sw_rules, NULL);
3995 ice_free(hw, fm_entry);
3996 goto ice_create_pkt_fwd_rule_exit;
/* HW returns the assigned rule index in the response buffer; record it
 * in both the caller's entry and the bookkeeping entry.
 */
3999 f_entry->fltr_info.fltr_rule_id =
4000 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4001 fm_entry->fltr_info.fltr_rule_id =
4002 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4004 /* The book keeping entries will get removed when base driver
4005 * calls remove filter AQ command
4007 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
4009 ice_create_pkt_fwd_rule_exit:
4010 ice_free(hw, s_rule);
4015 * ice_update_pkt_fwd_rule
4016 * @hw: pointer to the hardware structure
4017 * @f_info: filter information for switch rule
4019 * Call AQ command to update a previously created switch rule with a
4022 static enum ice_status
4023 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
4025 struct ice_aqc_sw_rules_elem *s_rule;
4026 enum ice_status status;
4028 s_rule = (struct ice_aqc_sw_rules_elem *)
4029 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
4031 return ICE_ERR_NO_MEMORY;
4033 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
/* Reuse the existing rule index so HW updates the rule in place */
4035 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
4037 /* Update switch rule with new rule set to forward VSI list */
4038 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
4039 ice_aqc_opc_update_sw_rules, NULL);
4041 ice_free(hw, s_rule);
4046 * ice_update_sw_rule_bridge_mode
4047 * @hw: pointer to the HW struct
4049 * Updates unicast switch filter rules based on VEB/VEPA mode
4051 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
4053 struct ice_switch_info *sw = hw->switch_info;
4054 struct ice_fltr_mgmt_list_entry *fm_entry;
4055 enum ice_status status = ICE_SUCCESS;
4056 struct LIST_HEAD_TYPE *rule_head;
4057 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
/* Only the MAC recipe's rule list is walked for bridge-mode updates */
4059 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4060 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
4062 ice_acquire_lock(rule_lock);
4063 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
4065 struct ice_fltr_info *fi = &fm_entry->fltr_info;
4066 u8 *addr = fi->l_data.mac.mac_addr;
4068 /* Update unicast Tx rules to reflect the selected
/* Only unicast Tx forwarding rules are re-pushed to HW here */
4071 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
4072 (fi->fltr_act == ICE_FWD_TO_VSI ||
4073 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
4074 fi->fltr_act == ICE_FWD_TO_Q ||
4075 fi->fltr_act == ICE_FWD_TO_QGRP)) {
4076 status = ice_update_pkt_fwd_rule(hw, fi);
4082 ice_release_lock(rule_lock);
4088 * ice_add_update_vsi_list
4089 * @hw: pointer to the hardware structure
4090 * @m_entry: pointer to current filter management list entry
4091 * @cur_fltr: filter information from the book keeping entry
4092 * @new_fltr: filter information with the new VSI to be added
4094 * Call AQ command to add or update previously created VSI list with new VSI.
4096 * Helper function to do book keeping associated with adding filter information
4097 * The algorithm to do the book keeping is described below :
4098 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
4099 * if only one VSI has been added till now
4100 * Allocate a new VSI list and add two VSIs
4101 * to this list using switch rule command
4102 * Update the previously created switch rule with the
4103 * newly created VSI list ID
4104 * if a VSI list was previously created
4105 * Add the new VSI to the previously created VSI list set
4106 * using the update switch rule command
4108 static enum ice_status
4109 ice_add_update_vsi_list(struct ice_hw *hw,
4110 struct ice_fltr_mgmt_list_entry *m_entry,
4111 struct ice_fltr_info *cur_fltr,
4112 struct ice_fltr_info *new_fltr)
4114 enum ice_status status = ICE_SUCCESS;
4115 u16 vsi_list_id = 0;
/* Queue / queue-group actions cannot be aggregated into a VSI list */
4117 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
4118 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
4119 return ICE_ERR_NOT_IMPL;
/* Mixing a queue action with an existing VSI(-list) action is likewise
 * unsupported.
 */
4121 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
4122 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
4123 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
4124 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
4125 return ICE_ERR_NOT_IMPL;
4127 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
4128 /* Only one entry existed in the mapping and it was not already
4129 * a part of a VSI list. So, create a VSI list with the old and
4132 struct ice_fltr_info tmp_fltr;
4133 u16 vsi_handle_arr[2];
4135 /* A rule already exists with the new VSI being added */
4136 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
4137 return ICE_ERR_ALREADY_EXISTS;
4139 vsi_handle_arr[0] = cur_fltr->vsi_handle;
4140 vsi_handle_arr[1] = new_fltr->vsi_handle;
4141 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
4143 new_fltr->lkup_type);
4147 tmp_fltr = *new_fltr;
4148 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
4149 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4150 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
4151 /* Update the previous switch rule of "MAC forward to VSI" to
4152 * "MAC fwd to VSI list"
4154 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Keep the bookkeeping entry in sync with the updated HW rule */
4158 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
4159 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
4160 m_entry->vsi_list_info =
4161 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4164 if (!m_entry->vsi_list_info)
4165 return ICE_ERR_NO_MEMORY;
4167 /* If this entry was large action then the large action needs
4168 * to be updated to point to FWD to VSI list
4170 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
4172 ice_add_marker_act(hw, m_entry,
4173 m_entry->sw_marker_id,
4174 m_entry->lg_act_idx);
/* Entry already forwards to a VSI list: just add the new VSI to it */
4176 u16 vsi_handle = new_fltr->vsi_handle;
4177 enum ice_adminq_opc opcode;
4179 if (!m_entry->vsi_list_info)
4182 /* A rule already exists with the new VSI being added */
4183 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
4186 /* Update the previously created VSI list set with
4187 * the new VSI ID passed in
4189 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
4190 opcode = ice_aqc_opc_update_sw_rules;
4192 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
4193 vsi_list_id, false, opcode,
4194 new_fltr->lkup_type);
4195 /* update VSI list mapping info with new VSI ID */
4197 ice_set_bit(vsi_handle,
4198 m_entry->vsi_list_info->vsi_map);
/* One more VSI now subscribes to this filter */
4201 m_entry->vsi_count++;
4206 * ice_find_rule_entry - Search a rule entry
4207 * @list_head: head of rule list
4208 * @f_info: rule information
4210 * Helper function to search for a given rule entry
4211 * Returns pointer to entry storing the rule if found
4213 static struct ice_fltr_mgmt_list_entry *
4214 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
4215 struct ice_fltr_info *f_info)
4217 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
/* Match on the raw lookup data plus the direction flag; first hit wins */
4219 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4221 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4222 sizeof(f_info->l_data)) &&
4223 f_info->flag == list_itr->fltr_info.flag) {
4232 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
4233 * @recp_list: VSI lists needs to be searched
4234 * @vsi_handle: VSI handle to be found in VSI list
4235 * @vsi_list_id: VSI list ID found containing vsi_handle
4237 * Helper function to search a VSI list with single entry containing given VSI
4238 * handle element. This can be extended further to search VSI list with more
4239 * than 1 vsi_count. Returns pointer to VSI list entry if found.
4241 static struct ice_vsi_list_map_info *
4242 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
4245 struct ice_vsi_list_map_info *map_info = NULL;
4246 struct LIST_HEAD_TYPE *list_head;
4248 list_head = &recp_list->filt_rules;
/* Advanced recipes store rules in a different entry type, so the walk
 * differs; the bitmap membership test is the same in both branches.
 */
4249 if (recp_list->adv_rule) {
4250 struct ice_adv_fltr_mgmt_list_entry *list_itr;
4252 LIST_FOR_EACH_ENTRY(list_itr, list_head,
4253 ice_adv_fltr_mgmt_list_entry,
4255 if (list_itr->vsi_list_info) {
4256 map_info = list_itr->vsi_list_info;
4257 if (ice_is_bit_set(map_info->vsi_map,
4259 *vsi_list_id = map_info->vsi_list_id;
4265 struct ice_fltr_mgmt_list_entry *list_itr;
4267 LIST_FOR_EACH_ENTRY(list_itr, list_head,
4268 ice_fltr_mgmt_list_entry,
/* Non-advanced path additionally requires a single-VSI entry */
4270 if (list_itr->vsi_count == 1 &&
4271 list_itr->vsi_list_info) {
4272 map_info = list_itr->vsi_list_info;
4273 if (ice_is_bit_set(map_info->vsi_map,
4275 *vsi_list_id = map_info->vsi_list_id;
4285 * ice_add_rule_internal - add rule for a given lookup type
4286 * @hw: pointer to the hardware structure
4287 * @recp_list: recipe list for which rule has to be added
4288 * @lport: logic port number on which function add rule
4289 * @f_entry: structure containing MAC forwarding information
4291 * Adds or updates the rule lists for a given recipe
4293 static enum ice_status
4294 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4295 u8 lport, struct ice_fltr_list_entry *f_entry)
4297 struct ice_fltr_info *new_fltr, *cur_fltr;
4298 struct ice_fltr_mgmt_list_entry *m_entry;
4299 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4300 enum ice_status status = ICE_SUCCESS;
4302 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4303 return ICE_ERR_PARAM;
4305 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
4306 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
4307 f_entry->fltr_info.fwd_id.hw_vsi_id =
4308 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4310 rule_lock = &recp_list->filt_rule_lock;
4312 ice_acquire_lock(rule_lock);
4313 new_fltr = &f_entry->fltr_info;
/* Rx rules are sourced from the logical port; Tx rules from the HW VSI */
4314 if (new_fltr->flag & ICE_FLTR_RX)
4315 new_fltr->src = lport;
4316 else if (new_fltr->flag & ICE_FLTR_TX)
4318 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
/* No matching rule yet: create one; otherwise fold the new VSI into the
 * existing rule's VSI list.
 */
4320 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
4322 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
4323 goto exit_add_rule_internal;
4326 cur_fltr = &m_entry->fltr_info;
4327 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
4329 exit_add_rule_internal:
4330 ice_release_lock(rule_lock);
4335 * ice_remove_vsi_list_rule
4336 * @hw: pointer to the hardware structure
4337 * @vsi_list_id: VSI list ID generated as part of allocate resource
4338 * @lkup_type: switch rule filter lookup type
4340 * The VSI list should be emptied before this function is called to remove the
4343 static enum ice_status
4344 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
4345 enum ice_sw_lkup_type lkup_type)
4347 /* Free the vsi_list resource that we allocated. It is assumed that the
4348 * list is empty at this point.
/* Same AQ helper as allocation; the free opcode releases the list ID */
4350 return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
4351 ice_aqc_opc_free_res);
4355 * ice_rem_update_vsi_list
4356 * @hw: pointer to the hardware structure
4357 * @vsi_handle: VSI handle of the VSI to remove
4358 * @fm_list: filter management entry for which the VSI list management needs to
4361 static enum ice_status
4362 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
4363 struct ice_fltr_mgmt_list_entry *fm_list)
4365 enum ice_sw_lkup_type lkup_type;
4366 enum ice_status status = ICE_SUCCESS;
/* Only meaningful for rules that currently forward to a VSI list */
4369 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
4370 fm_list->vsi_count == 0)
4371 return ICE_ERR_PARAM;
4373 /* A rule with the VSI being removed does not exist */
4374 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
4375 return ICE_ERR_DOES_NOT_EXIST;
4377 lkup_type = fm_list->fltr_info.lkup_type;
4378 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
/* Drop this VSI from the HW VSI list (remove == true) */
4379 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
4380 ice_aqc_opc_update_sw_rules,
4385 fm_list->vsi_count--;
4386 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
/* Down to one VSI (non-VLAN): convert the rule back to a plain
 * forward-to-VSI so the list can be retired below.
 */
4388 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
4389 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
4390 struct ice_vsi_list_map_info *vsi_list_info =
4391 fm_list->vsi_list_info;
/* The single remaining VSI is whichever bit is still set in the map */
4394 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
4396 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
4397 return ICE_ERR_OUT_OF_RANGE;
4399 /* Make sure VSI list is empty before removing it below */
4400 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
4402 ice_aqc_opc_update_sw_rules,
4407 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
4408 tmp_fltr_info.fwd_id.hw_vsi_id =
4409 ice_get_hw_vsi_num(hw, rem_vsi_handle);
4410 tmp_fltr_info.vsi_handle = rem_vsi_handle;
4411 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
4413 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
4414 tmp_fltr_info.fwd_id.hw_vsi_id, status);
4418 fm_list->fltr_info = tmp_fltr_info;
/* List no longer referenced (single non-VLAN VSI, or an empty VLAN
 * prune list): free the VSI list resource and its bookkeeping map.
 */
4421 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
4422 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
4423 struct ice_vsi_list_map_info *vsi_list_info =
4424 fm_list->vsi_list_info;
4426 /* Remove the VSI list since it is no longer used */
4427 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
4429 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
4430 vsi_list_id, status);
4434 LIST_DEL(&vsi_list_info->list_entry);
4435 ice_free(hw, vsi_list_info);
4436 fm_list->vsi_list_info = NULL;
4443 * ice_remove_rule_internal - Remove a filter rule of a given type
4445 * @hw: pointer to the hardware structure
4446 * @recp_list: recipe list for which the rule needs to removed
4447 * @f_entry: rule entry containing filter information
4449 static enum ice_status
4450 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4451 struct ice_fltr_list_entry *f_entry)
4453 struct ice_fltr_mgmt_list_entry *list_elem;
4454 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4455 enum ice_status status = ICE_SUCCESS;
4456 bool remove_rule = false;
4459 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4460 return ICE_ERR_PARAM;
4461 f_entry->fltr_info.fwd_id.hw_vsi_id =
4462 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4464 rule_lock = &recp_list->filt_rule_lock;
4465 ice_acquire_lock(rule_lock);
4466 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
4467 &f_entry->fltr_info);
4469 status = ICE_ERR_DOES_NOT_EXIST;
/* A rule that is not backed by a VSI list can be deleted outright */
4473 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
4475 } else if (!list_elem->vsi_list_info) {
4476 status = ICE_ERR_DOES_NOT_EXIST;
4478 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
4479 /* a ref_cnt > 1 indicates that the vsi_list is being
4480 * shared by multiple rules. Decrement the ref_cnt and
4481 * remove this rule, but do not modify the list, as it
4482 * is in-use by other rules.
4484 list_elem->vsi_list_info->ref_cnt--;
4487 /* a ref_cnt of 1 indicates the vsi_list is only used
4488 * by one rule. However, the original removal request is only
4489 * for a single VSI. Update the vsi_list first, and only
4490 * remove the rule if there are no further VSIs in this list.
4492 vsi_handle = f_entry->fltr_info.vsi_handle;
4493 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
4496 /* if VSI count goes to zero after updating the VSI list */
4497 if (list_elem->vsi_count == 0)
4502 /* Remove the lookup rule */
4503 struct ice_aqc_sw_rules_elem *s_rule;
/* Removal needs only the rule index; no packet header is transmitted,
 * hence the smaller NO_HDR rule size.
 */
4505 s_rule = (struct ice_aqc_sw_rules_elem *)
4506 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
4508 status = ICE_ERR_NO_MEMORY;
4512 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
4513 ice_aqc_opc_remove_sw_rules);
4515 status = ice_aq_sw_rules(hw, s_rule,
4516 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
4517 ice_aqc_opc_remove_sw_rules, NULL);
4519 /* Remove a book keeping from the list */
4520 ice_free(hw, s_rule);
/* Drop the bookkeeping entry now that the HW rule is being removed */
4525 LIST_DEL(&list_elem->list_entry);
4526 ice_free(hw, list_elem);
4529 ice_release_lock(rule_lock);
4534 * ice_aq_get_res_alloc - get allocated resources
4535 * @hw: pointer to the HW struct
4536 * @num_entries: pointer to u16 to store the number of resource entries returned
4537 * @buf: pointer to buffer
4538 * @buf_size: size of buf
4539 * @cd: pointer to command details structure or NULL
4541 * The caller-supplied buffer must be large enough to store the resource
4542 * information for all resource types. Each resource type is an
4543 * ice_aqc_get_res_resp_elem structure.
4546 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
4547 struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
4548 struct ice_sq_cd *cd)
4550 struct ice_aqc_get_res_alloc *resp;
4551 enum ice_status status;
4552 struct ice_aq_desc desc;
4555 return ICE_ERR_BAD_PTR;
/* Buffer must hold a response element for every resource type */
4557 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
4558 return ICE_ERR_INVAL_SIZE;
4560 resp = &desc.params.get_res;
4562 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
4563 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* num_entries is optional; only report the count when requested */
4565 if (!status && num_entries)
4566 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
4572 * ice_aq_get_res_descs - get allocated resource descriptors
4573 * @hw: pointer to the hardware structure
4574 * @num_entries: number of resource entries in buffer
4575 * @buf: structure to hold response data buffer
4576 * @buf_size: size of buffer
4577 * @res_type: resource type
4578 * @res_shared: is resource shared
4579 * @desc_id: input - first desc ID to start; output - next desc ID
4580 * @cd: pointer to command details structure or NULL
4583 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
4584 struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
4585 bool res_shared, u16 *desc_id, struct ice_sq_cd *cd)
4587 struct ice_aqc_get_allocd_res_desc *cmd;
4588 struct ice_aq_desc desc;
4589 enum ice_status status;
4591 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4593 cmd = &desc.params.get_res_desc;
4596 return ICE_ERR_PARAM;
/* Buffer size must match the requested entry count exactly */
4598 if (buf_size != (num_entries * sizeof(*buf)))
4599 return ICE_ERR_PARAM;
4601 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
/* Encode the resource type plus the shared flag into the command word */
4603 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
4604 ICE_AQC_RES_TYPE_M) | (res_shared ?
4605 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
4606 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
4608 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* Hand back the next descriptor ID so the caller can page through */
4610 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
4616 * ice_add_mac_rule - Add a MAC address based filter rule
4617 * @hw: pointer to the hardware structure
4618 * @m_list: list of MAC addresses and forwarding information
4619 * @sw: pointer to switch info struct for which function add rule
4620 * @lport: logic port number on which function add rule
4622 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
4623 * multiple unicast addresses, the function assumes that all the
4624 * addresses are unique in a given add_mac call. It doesn't
4625 * check for duplicates in this case, removing duplicates from a given
4626 * list should be taken care of in the caller of this function.
4628 static enum ice_status
4629 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4630 struct ice_switch_info *sw, u8 lport)
4632 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
4633 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
4634 struct ice_fltr_list_entry *m_list_itr;
4635 struct LIST_HEAD_TYPE *rule_head;
4636 u16 total_elem_left, s_rule_size;
4637 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4638 enum ice_status status = ICE_SUCCESS;
4639 u16 num_unicast = 0;
4643 rule_lock = &recp_list->filt_rule_lock;
4644 rule_head = &recp_list->filt_rules;
/* First pass: validate every entry; multicast (and shared unicast) entries
 * are added one-by-one here, non-shared unicast entries are only counted so
 * they can be programmed in one bulk AQ call below.
 */
4646 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4648 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
4652 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
4653 vsi_handle = m_list_itr->fltr_info.vsi_handle;
4654 if (!ice_is_vsi_valid(hw, vsi_handle))
4655 return ICE_ERR_PARAM;
4656 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4657 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
4658 /* update the src in case it is VSI num */
4659 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
4660 return ICE_ERR_PARAM;
4661 m_list_itr->fltr_info.src = hw_vsi_id;
4662 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
4663 IS_ZERO_ETHER_ADDR(add))
4664 return ICE_ERR_PARAM;
4665 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4666 /* Don't overwrite the unicast address */
4667 ice_acquire_lock(rule_lock);
4668 if (ice_find_rule_entry(rule_head,
4669 &m_list_itr->fltr_info)) {
4670 ice_release_lock(rule_lock);
4671 return ICE_ERR_ALREADY_EXISTS;
4673 ice_release_lock(rule_lock);
4675 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
4676 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
4677 m_list_itr->status =
4678 ice_add_rule_internal(hw, recp_list, lport,
4680 if (m_list_itr->status)
4681 return m_list_itr->status;
/* NOTE(review): lock is held from here through the exit label; the bulk AQ
 * calls below run under rule_lock.
 */
4685 ice_acquire_lock(rule_lock);
4686 /* Exit if no suitable entries were found for adding bulk switch rule */
4688 status = ICE_SUCCESS;
4689 goto ice_add_mac_exit;
4692 /* Allocate switch rule buffer for the bulk update for unicast */
4693 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
4694 s_rule = (struct ice_aqc_sw_rules_elem *)
4695 ice_calloc(hw, num_unicast, s_rule_size);
4697 status = ICE_ERR_NO_MEMORY;
4698 goto ice_add_mac_exit;
/* Second pass: serialize each unicast filter into the rule buffer */
4702 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4704 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4705 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4707 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4708 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
4709 ice_aqc_opc_add_sw_rules);
4710 r_iter = (struct ice_aqc_sw_rules_elem *)
4711 ((u8 *)r_iter + s_rule_size);
4715 /* Call AQ bulk switch rule update for all unicast addresses */
4717 /* Call AQ switch rule in AQ_MAX chunk */
4718 for (total_elem_left = num_unicast; total_elem_left > 0;
4719 total_elem_left -= elem_sent) {
4720 struct ice_aqc_sw_rules_elem *entry = r_iter;
/* cap each AQ transaction at ICE_AQ_MAX_BUF_LEN worth of rules */
4722 elem_sent = MIN_T(u8, total_elem_left,
4723 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
4724 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
4725 elem_sent, ice_aqc_opc_add_sw_rules,
4728 goto ice_add_mac_exit;
4729 r_iter = (struct ice_aqc_sw_rules_elem *)
4730 ((u8 *)r_iter + (elem_sent * s_rule_size));
4733 /* Fill up rule ID based on the value returned from FW */
4735 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4737 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4738 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4739 struct ice_fltr_mgmt_list_entry *fm_entry;
4741 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4742 f_info->fltr_rule_id =
4743 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
4744 f_info->fltr_act = ICE_FWD_TO_VSI;
4745 /* Create an entry to track this MAC address */
4746 fm_entry = (struct ice_fltr_mgmt_list_entry *)
4747 ice_malloc(hw, sizeof(*fm_entry));
4749 status = ICE_ERR_NO_MEMORY;
4750 goto ice_add_mac_exit;
4752 fm_entry->fltr_info = *f_info;
4753 fm_entry->vsi_count = 1;
4754 /* The book keeping entries will get removed when
4755 * base driver calls remove filter AQ command
4758 LIST_ADD(&fm_entry->list_entry, rule_head);
4759 r_iter = (struct ice_aqc_sw_rules_elem *)
4760 ((u8 *)r_iter + s_rule_size);
4765 ice_release_lock(rule_lock);
4767 ice_free(hw, s_rule);
4772 * ice_add_mac - Add a MAC address based filter rule
4773 * @hw: pointer to the hardware structure
4774 * @m_list: list of MAC addresses and forwarding information
4776 * Function add MAC rule for logical port from HW struct
4778 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
/* NOTE(review): the guard condition before this return is elided in this
 * extract — presumably a NULL check on hw/m_list (cf. ice_add_mac_vlan).
 */
4781 return ICE_ERR_PARAM;
/* thin wrapper: resolve switch info and logical port from the HW struct */
4783 return ice_add_mac_rule(hw, m_list, hw->switch_info,
4784 hw->port_info->lport);
4788 * ice_add_vlan_internal - Add one VLAN based filter rule
4789 * @hw: pointer to the hardware structure
4790 * @recp_list: recipe list for which rule has to be added
4791 * @f_entry: filter entry containing one VLAN information
4793 static enum ice_status
4794 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4795 struct ice_fltr_list_entry *f_entry)
4797 struct ice_fltr_mgmt_list_entry *v_list_itr;
4798 struct ice_fltr_info *new_fltr, *cur_fltr;
4799 enum ice_sw_lkup_type lkup_type;
4800 u16 vsi_list_id = 0, vsi_handle;
4801 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4802 enum ice_status status = ICE_SUCCESS;
4804 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4805 return ICE_ERR_PARAM;
4807 f_entry->fltr_info.fwd_id.hw_vsi_id =
4808 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4809 new_fltr = &f_entry->fltr_info;
4811 /* VLAN ID should only be 12 bits */
4812 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
4813 return ICE_ERR_PARAM;
4815 if (new_fltr->src_id != ICE_SRC_ID_VSI)
4816 return ICE_ERR_PARAM;
4818 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
4819 lkup_type = new_fltr->lkup_type;
4820 vsi_handle = new_fltr->vsi_handle;
4821 rule_lock = &recp_list->filt_rule_lock;
/* the lock is held for the remainder of the function; all VSI-list
 * creation/reuse below must be atomic w.r.t. concurrent rule updates
 */
4822 ice_acquire_lock(rule_lock);
4823 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
/* Case 1 (no existing rule for this VLAN): create the packet-forwarding
 * rule, reusing an existing VSI list when one already contains this VSI.
 */
4825 struct ice_vsi_list_map_info *map_info = NULL;
4827 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
4828 /* All VLAN pruning rules use a VSI list. Check if
4829 * there is already a VSI list containing VSI that we
4830 * want to add. If found, use the same vsi_list_id for
4831 * this new VLAN rule or else create a new list.
4833 map_info = ice_find_vsi_list_entry(recp_list,
4837 status = ice_create_vsi_list_rule(hw,
4845 /* Convert the action to forwarding to a VSI list. */
4846 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
4847 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
4850 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
4852 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
4855 status = ICE_ERR_DOES_NOT_EXIST;
4858 /* reuse VSI list for new rule and increment ref_cnt */
4860 v_list_itr->vsi_list_info = map_info;
4861 map_info->ref_cnt++;
4863 v_list_itr->vsi_list_info =
4864 ice_create_vsi_list_map(hw, &vsi_handle,
/* Case 2: rule exists and its VSI list is used only by this rule —
 * safe to grow the list in place.
 */
4868 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
4869 /* Update existing VSI list to add new VSI ID only if it used
4872 cur_fltr = &v_list_itr->fltr_info;
4873 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
4876 /* If VLAN rule exists and VSI list being used by this rule is
4877 * referenced by more than 1 VLAN rule. Then create a new VSI
4878 * list appending previous VSI with new VSI and update existing
4879 * VLAN rule to point to new VSI list ID
4881 struct ice_fltr_info tmp_fltr;
4882 u16 vsi_handle_arr[2];
4885 /* Current implementation only supports reusing VSI list with
4886 * one VSI count. We should never hit below condition
4888 if (v_list_itr->vsi_count > 1 &&
4889 v_list_itr->vsi_list_info->ref_cnt > 1) {
4890 ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
4891 status = ICE_ERR_CFG;
/* recover the single VSI currently on the shared list */
4896 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
4899 /* A rule already exists with the new VSI being added */
4900 if (cur_handle == vsi_handle) {
4901 status = ICE_ERR_ALREADY_EXISTS;
4905 vsi_handle_arr[0] = cur_handle;
4906 vsi_handle_arr[1] = vsi_handle;
4907 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
4908 &vsi_list_id, lkup_type);
4912 tmp_fltr = v_list_itr->fltr_info;
4913 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
4914 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
4915 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4916 /* Update the previous switch rule to a new VSI list which
4917 * includes current VSI that is requested
4919 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
4923 /* before overriding VSI list map info. decrement ref_cnt of
4926 v_list_itr->vsi_list_info->ref_cnt--;
4928 /* now update to newly created list */
4929 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
4930 v_list_itr->vsi_list_info =
4931 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4933 v_list_itr->vsi_count++;
4937 ice_release_lock(rule_lock);
4942 * ice_add_vlan_rule - Add VLAN based filter rule
4943 * @hw: pointer to the hardware structure
4944 * @v_list: list of VLAN entries and forwarding information
4945 * @sw: pointer to switch info struct for which function add rule
4947 static enum ice_status
4948 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4949 struct ice_switch_info *sw)
4951 struct ice_fltr_list_entry *v_list_itr;
4952 struct ice_sw_recipe *recp_list;
4954 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
/* add entries one at a time; stop at the first failure — per-entry status
 * is recorded so the caller can see which entry failed
 */
4955 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
4957 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
4958 return ICE_ERR_PARAM;
4959 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
4960 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
4962 if (v_list_itr->status)
4963 return v_list_itr->status;
4969 * ice_add_vlan - Add a VLAN based filter rule
4970 * @hw: pointer to the hardware structure
4971 * @v_list: list of VLAN and forwarding information
4973 * Function add VLAN rule for logical port from HW struct
4975 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
/* NOTE(review): the guard condition before this return is elided in this
 * extract — presumably a NULL check on hw/v_list (cf. ice_add_mac_vlan).
 */
4978 return ICE_ERR_PARAM;
4980 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
4984 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
4985 * @hw: pointer to the hardware structure
4986 * @mv_list: list of MAC and VLAN filters
4987 * @sw: pointer to switch info struct for which function add rule
4988 * @lport: logic port number on which function add rule
4990 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
4991 * pruning bits enabled, then it is the responsibility of the caller to make
4992 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
4993 * VLAN won't be received on that VSI otherwise.
4995 static enum ice_status
4996 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
4997 struct ice_switch_info *sw, u8 lport)
4999 struct ice_fltr_list_entry *mv_list_itr;
5000 struct ice_sw_recipe *recp_list;
5002 if (!mv_list || !hw)
5003 return ICE_ERR_PARAM;
5005 recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
/* validate the lookup type of every entry, force Tx direction, then hand
 * each entry to the shared rule-add path; stop at first failure
 */
5006 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
5008 enum ice_sw_lkup_type l_type =
5009 mv_list_itr->fltr_info.lkup_type;
5011 if (l_type != ICE_SW_LKUP_MAC_VLAN)
5012 return ICE_ERR_PARAM;
5013 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
5014 mv_list_itr->status =
5015 ice_add_rule_internal(hw, recp_list, lport,
5017 if (mv_list_itr->status)
5018 return mv_list_itr->status;
5024 * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
5025 * @hw: pointer to the hardware structure
5026 * @mv_list: list of MAC VLAN addresses and forwarding information
5028 * Function add MAC VLAN rule for logical port from HW struct
5031 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
5033 if (!mv_list || !hw)
5034 return ICE_ERR_PARAM;
/* thin wrapper: resolve switch info and logical port from the HW struct */
5036 return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
5037 hw->port_info->lport);
5041 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
5042 * @hw: pointer to the hardware structure
5043 * @em_list: list of ether type MAC filter, MAC is optional
5044 * @sw: pointer to switch info struct for which function add rule
5045 * @lport: logic port number on which function add rule
5047 * This function requires the caller to populate the entries in
5048 * the filter list with the necessary fields (including flags to
5049 * indicate Tx or Rx rules).
5051 static enum ice_status
5052 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
5053 struct ice_switch_info *sw, u8 lport)
5055 struct ice_fltr_list_entry *em_list_itr;
5057 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
5059 struct ice_sw_recipe *recp_list;
5060 enum ice_sw_lkup_type l_type;
5062 l_type = em_list_itr->fltr_info.lkup_type;
/* recipe list is selected by the entry's own lookup type — either the
 * ethertype-only or the ethertype+MAC recipe is accepted below
 */
5063 recp_list = &sw->recp_list[l_type];
5065 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
5066 l_type != ICE_SW_LKUP_ETHERTYPE)
5067 return ICE_ERR_PARAM;
5069 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
5072 if (em_list_itr->status)
5073 return em_list_itr->status;
5079 * ice_add_eth_mac - Add a ethertype based filter rule
5080 * @hw: pointer to the hardware structure
5081 * @em_list: list of ethertype and forwarding information
5083 * Function add ethertype rule for logical port from HW struct
5086 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
5088 if (!em_list || !hw)
5089 return ICE_ERR_PARAM;
/* thin wrapper: resolve switch info and logical port from the HW struct */
5091 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
5092 hw->port_info->lport);
5096 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
5097 * @hw: pointer to the hardware structure
5098 * @em_list: list of ethertype or ethertype MAC entries
5099 * @sw: pointer to switch info struct for which function add rule
5101 static enum ice_status
5102 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
5103 struct ice_switch_info *sw)
5105 struct ice_fltr_list_entry *em_list_itr, *tmp;
/* SAFE iteration: removal may unlink entries while walking the list */
5107 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
5109 struct ice_sw_recipe *recp_list;
5110 enum ice_sw_lkup_type l_type;
5112 l_type = em_list_itr->fltr_info.lkup_type;
5114 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
5115 l_type != ICE_SW_LKUP_ETHERTYPE)
5116 return ICE_ERR_PARAM;
5118 recp_list = &sw->recp_list[l_type];
5119 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
5121 if (em_list_itr->status)
5122 return em_list_itr->status;
5128 * ice_remove_eth_mac - remove a ethertype based filter rule
5129 * @hw: pointer to the hardware structure
5130 * @em_list: list of ethertype and forwarding information
5134 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
5136 if (!em_list || !hw)
5137 return ICE_ERR_PARAM;
/* thin wrapper around the rule-removal helper using the HW switch info */
5139 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
5143 * ice_rem_sw_rule_info
5144 * @hw: pointer to the hardware structure
5145 * @rule_head: pointer to the switch list structure that we want to delete
5148 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
5150 if (!LIST_EMPTY(rule_head)) {
5151 struct ice_fltr_mgmt_list_entry *entry;
5152 struct ice_fltr_mgmt_list_entry *tmp;
/* SAFE iteration: entries are unlinked and freed as we walk the list */
5154 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
5155 ice_fltr_mgmt_list_entry, list_entry) {
5156 LIST_DEL(&entry->list_entry);
5157 ice_free(hw, entry);
5163 * ice_rem_adv_rule_info
5164 * @hw: pointer to the hardware structure
5165 * @rule_head: pointer to the switch list structure that we want to delete
5168 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
5170 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
5171 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
5173 if (LIST_EMPTY(rule_head))
/* SAFE iteration: entries are unlinked and freed as we walk the list;
 * the separately-allocated lkups array is freed before its owner
 */
5176 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
5177 ice_adv_fltr_mgmt_list_entry, list_entry) {
5178 LIST_DEL(&lst_itr->list_entry);
5179 ice_free(hw, lst_itr->lkups);
5180 ice_free(hw, lst_itr);
5185 * ice_rem_all_sw_rules_info
5186 * @hw: pointer to the hardware structure
5188 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
5190 struct ice_switch_info *sw = hw->switch_info;
5193 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5194 struct LIST_HEAD_TYPE *rule_head;
5196 rule_head = &sw->recp_list[i].filt_rules;
/* basic recipes use fltr_mgmt entries, advanced recipes use adv entries —
 * each needs the matching teardown helper
 */
5197 if (!sw->recp_list[i].adv_rule)
5198 ice_rem_sw_rule_info(hw, rule_head);
5200 ice_rem_adv_rule_info(hw, rule_head);
/* clear the adv_rule flag once the recipe's list is fully drained */
5201 if (sw->recp_list[i].adv_rule &&
5202 LIST_EMPTY(&sw->recp_list[i].filt_rules))
5203 sw->recp_list[i].adv_rule = false;
5208 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
5209 * @pi: pointer to the port_info structure
5210 * @vsi_handle: VSI handle to set as default
5211 * @set: true to add the above mentioned switch rule, false to remove it
5212 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
5214 * add filter rule to set/unset given VSI as default VSI for the switch
5215 * (represented by swid)
5218 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
5221 struct ice_aqc_sw_rules_elem *s_rule;
5222 struct ice_fltr_info f_info;
5223 struct ice_hw *hw = pi->hw;
5224 enum ice_adminq_opc opcode;
5225 enum ice_status status;
5229 if (!ice_is_vsi_valid(hw, vsi_handle))
5230 return ICE_ERR_PARAM;
5231 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
/* adding a rule carries the dummy ethernet header; removal does not */
5233 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
5234 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
5236 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
5238 return ICE_ERR_NO_MEMORY;
5240 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
5242 f_info.lkup_type = ICE_SW_LKUP_DFLT;
5243 f_info.flag = direction;
5244 f_info.fltr_act = ICE_FWD_TO_VSI;
5245 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
/* Rx default rules key off the logical port; Tx rules off the VSI.  The
 * stored rule ID is reused when un-setting the default VSI.
 */
5247 if (f_info.flag & ICE_FLTR_RX) {
5248 f_info.src = pi->lport;
5249 f_info.src_id = ICE_SRC_ID_LPORT;
5251 f_info.fltr_rule_id =
5252 pi->dflt_rx_vsi_rule_id;
5253 } else if (f_info.flag & ICE_FLTR_TX) {
5254 f_info.src_id = ICE_SRC_ID_VSI;
5255 f_info.src = hw_vsi_id;
5257 f_info.fltr_rule_id =
5258 pi->dflt_tx_vsi_rule_id;
5262 opcode = ice_aqc_opc_add_sw_rules;
5264 opcode = ice_aqc_opc_remove_sw_rules;
5266 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
5268 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
5269 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
/* on success, record (set) or invalidate (clear) the cached default-VSI
 * bookkeeping in the port info
 */
5272 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
5274 if (f_info.flag & ICE_FLTR_TX) {
5275 pi->dflt_tx_vsi_num = hw_vsi_id;
5276 pi->dflt_tx_vsi_rule_id = index;
5277 } else if (f_info.flag & ICE_FLTR_RX) {
5278 pi->dflt_rx_vsi_num = hw_vsi_id;
5279 pi->dflt_rx_vsi_rule_id = index;
5282 if (f_info.flag & ICE_FLTR_TX) {
5283 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
5284 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
5285 } else if (f_info.flag & ICE_FLTR_RX) {
5286 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
5287 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
5292 ice_free(hw, s_rule);
5297 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
5298 * @list_head: head of rule list
5299 * @f_info: rule information
5301 * Helper function to search for a unicast rule entry - this is to be used
5302 * to remove unicast MAC filter that is not shared with other VSIs on the
5305 * Returns pointer to entry storing the rule if found
5307 static struct ice_fltr_mgmt_list_entry *
5308 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
5309 struct ice_fltr_info *f_info)
5311 struct ice_fltr_mgmt_list_entry *list_itr;
/* match on lookup data, destination HW VSI, and direction flag — unlike
 * ice_find_rule_entry this also requires the same hw_vsi_id, so a unicast
 * filter owned by another VSI is not returned
 */
5313 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
5315 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
5316 sizeof(f_info->l_data)) &&
5317 f_info->fwd_id.hw_vsi_id ==
5318 list_itr->fltr_info.fwd_id.hw_vsi_id &&
5319 f_info->flag == list_itr->fltr_info.flag)
5326 * ice_remove_mac_rule - remove a MAC based filter rule
5327 * @hw: pointer to the hardware structure
5328 * @m_list: list of MAC addresses and forwarding information
5329 * @recp_list: list from which function remove MAC address
5331 * This function removes either a MAC filter rule or a specific VSI from a
5332 * VSI list for a multicast MAC address.
5334 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
5335 * ice_add_mac. Caller should be aware that this call will only work if all
5336 * the entries passed into m_list were added previously. It will not attempt to
5337 * do a partial remove of entries that were found.
5339 static enum ice_status
5340 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
5341 struct ice_sw_recipe *recp_list)
5343 struct ice_fltr_list_entry *list_itr, *tmp;
5344 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5347 return ICE_ERR_PARAM;
5349 rule_lock = &recp_list->filt_rule_lock;
/* SAFE iteration: removal may unlink entries while walking the list */
5350 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
5352 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
5353 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
5356 if (l_type != ICE_SW_LKUP_MAC)
5357 return ICE_ERR_PARAM;
5359 vsi_handle = list_itr->fltr_info.vsi_handle;
5360 if (!ice_is_vsi_valid(hw, vsi_handle))
5361 return ICE_ERR_PARAM;
5363 list_itr->fltr_info.fwd_id.hw_vsi_id =
5364 ice_get_hw_vsi_num(hw, vsi_handle);
5365 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
5366 /* Don't remove the unicast address that belongs to
5367 * another VSI on the switch, since it is not being
5370 ice_acquire_lock(rule_lock);
/* require an exact per-VSI match before removal (see
 * ice_find_ucast_rule_entry)
 */
5371 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
5372 &list_itr->fltr_info)) {
5373 ice_release_lock(rule_lock);
5374 return ICE_ERR_DOES_NOT_EXIST;
5376 ice_release_lock(rule_lock);
5378 list_itr->status = ice_remove_rule_internal(hw, recp_list,
5380 if (list_itr->status)
5381 return list_itr->status;
5387 * ice_remove_mac - remove a MAC address based filter rule
5388 * @hw: pointer to the hardware structure
5389 * @m_list: list of MAC addresses and forwarding information
5392 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
5394 struct ice_sw_recipe *recp_list;
/* thin wrapper: select the MAC recipe list from the HW switch info */
5396 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5397 return ice_remove_mac_rule(hw, m_list, recp_list);
5401 * ice_remove_vlan_rule - Remove VLAN based filter rule
5402 * @hw: pointer to the hardware structure
5403 * @v_list: list of VLAN entries and forwarding information
5404 * @recp_list: list from which function remove VLAN
5406 static enum ice_status
5407 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5408 struct ice_sw_recipe *recp_list)
5410 struct ice_fltr_list_entry *v_list_itr, *tmp;
/* SAFE iteration: removal may unlink entries while walking the list */
5412 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5414 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
5416 if (l_type != ICE_SW_LKUP_VLAN)
5417 return ICE_ERR_PARAM;
5418 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
5420 if (v_list_itr->status)
5421 return v_list_itr->status;
5427 * ice_remove_vlan - remove a VLAN address based filter rule
5428 * @hw: pointer to the hardware structure
5429 * @v_list: list of VLAN and forwarding information
5433 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
5435 struct ice_sw_recipe *recp_list;
/* NOTE(review): the guard condition before this return is elided in this
 * extract — presumably a NULL check on hw/v_list (cf. ice_remove_mac_vlan).
 */
5438 return ICE_ERR_PARAM;
5440 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
5441 return ice_remove_vlan_rule(hw, v_list, recp_list);
5445 * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
5446 * @hw: pointer to the hardware structure
5447 * @v_list: list of MAC VLAN entries and forwarding information
5448 * @recp_list: list from which function remove MAC VLAN
5450 static enum ice_status
5451 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5452 struct ice_sw_recipe *recp_list)
5454 struct ice_fltr_list_entry *v_list_itr, *tmp;
/* NOTE(review): the recp_list parameter is dead — it is unconditionally
 * overwritten here, so every caller effectively gets the MAC_VLAN recipe.
 * Harmless today (the sole caller passes the same list) but worth cleaning
 * up: either drop the parameter or drop this reassignment.
 */
5456 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5457 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5459 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
5461 if (l_type != ICE_SW_LKUP_MAC_VLAN)
5462 return ICE_ERR_PARAM;
5463 v_list_itr->status =
5464 ice_remove_rule_internal(hw, recp_list,
5466 if (v_list_itr->status)
5467 return v_list_itr->status;
5473 * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
5474 * @hw: pointer to the hardware structure
5475 * @mv_list: list of MAC VLAN and forwarding information
5478 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
5480 struct ice_sw_recipe *recp_list;
5482 if (!mv_list || !hw)
5483 return ICE_ERR_PARAM;
/* thin wrapper: select the MAC_VLAN recipe list from the HW switch info */
5485 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5486 return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
5490 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
5491 * @fm_entry: filter entry to inspect
5492 * @vsi_handle: VSI handle to compare with filter info
5495 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
/* true when the filter forwards directly to this VSI, or forwards to a
 * VSI list whose bitmap contains this VSI handle
 */
5497 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
5498 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
5499 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
5500 fm_entry->vsi_list_info &&
5501 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
5506 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
5507 * @hw: pointer to the hardware structure
5508 * @vsi_handle: VSI handle to remove filters from
5509 * @vsi_list_head: pointer to the list to add entry to
5510 * @fi: pointer to fltr_info of filter entry to copy & add
5512 * Helper function, used when creating a list of filters to remove from
5513 * a specific VSI. The entry added to vsi_list_head is a COPY of the
5514 * original filter entry, with the exception of fltr_info.fltr_act and
5515 * fltr_info.fwd_id fields. These are set such that later logic can
5516 * extract which VSI to remove the fltr from, and pass on that information.
5518 static enum ice_status
5519 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5520 struct LIST_HEAD_TYPE *vsi_list_head,
5521 struct ice_fltr_info *fi)
5523 struct ice_fltr_list_entry *tmp;
5525 /* this memory is freed up in the caller function
5526 * once filters for this VSI are removed
5528 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
5530 return ICE_ERR_NO_MEMORY;
/* copy the original filter info, then retarget the copy at this VSI */
5532 tmp->fltr_info = *fi;
5534 /* Overwrite these fields to indicate which VSI to remove filter from,
5535 * so find and remove logic can extract the information from the
5536 * list entries. Note that original entries will still have proper
5539 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
5540 tmp->fltr_info.vsi_handle = vsi_handle;
5541 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5543 LIST_ADD(&tmp->list_entry, vsi_list_head);
5549 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
5550 * @hw: pointer to the hardware structure
5551 * @vsi_handle: VSI handle to remove filters from
5552 * @lkup_list_head: pointer to the list that has certain lookup type filters
5553 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
5555 * Locates all filters in lkup_list_head that are used by the given VSI,
5556 * and adds COPIES of those entries to vsi_list_head (intended to be used
5557 * to remove the listed filters).
5558 * Note that this means all entries in vsi_list_head must be explicitly
5559 * deallocated by the caller when done with list.
5561 static enum ice_status
5562 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5563 struct LIST_HEAD_TYPE *lkup_list_head,
5564 struct LIST_HEAD_TYPE *vsi_list_head)
5566 struct ice_fltr_mgmt_list_entry *fm_entry;
5567 enum ice_status status = ICE_SUCCESS;
5569 /* check to make sure VSI ID is valid and within boundary */
5570 if (!ice_is_vsi_valid(hw, vsi_handle))
5571 return ICE_ERR_PARAM;
/* copy each filter that targets this VSI onto the caller's removal list;
 * the copies are heap-allocated and must be freed by the caller
 */
5573 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
5574 ice_fltr_mgmt_list_entry, list_entry) {
5575 if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
5578 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5580 &fm_entry->fltr_info);
5588 * ice_determine_promisc_mask
5589 * @fi: filter info to parse
5591 * Helper function to determine which ICE_PROMISC_ mask corresponds
5592 * to given filter into.
5594 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
5596 u16 vid = fi->l_data.mac_vlan.vlan_id;
5597 u8 *macaddr = fi->l_data.mac.mac_addr;
5598 bool is_tx_fltr = false;
5599 u8 promisc_mask = 0;
/* direction first (Tx vs Rx), then address class picks the bit */
5601 if (fi->flag == ICE_FLTR_TX)
5604 if (IS_BROADCAST_ETHER_ADDR(macaddr))
5605 promisc_mask |= is_tx_fltr ?
5606 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
5607 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
5608 promisc_mask |= is_tx_fltr ?
5609 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
5610 else if (IS_UNICAST_ETHER_ADDR(macaddr))
5611 promisc_mask |= is_tx_fltr ?
5612 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
/* NOTE(review): the condition selecting the VLAN case (presumably keyed
 * on vid) is elided in this extract — confirm against the full source.
 */
5614 promisc_mask |= is_tx_fltr ?
5615 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
5617 return promisc_mask;
5621 * _ice_get_vsi_promisc - get promiscuous mode of given VSI
5622 * @hw: pointer to the hardware structure
5623 * @vsi_handle: VSI handle to retrieve info from
5624 * @promisc_mask: pointer to mask to be filled in
5625 * @vid: VLAN ID of promisc VLAN VSI
5626 * @sw: pointer to switch info struct for which function add rule
5628 static enum ice_status
5629 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5630 u16 *vid, struct ice_switch_info *sw)
5632 struct ice_fltr_mgmt_list_entry *itr;
5633 struct LIST_HEAD_TYPE *rule_head;
5634 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5636 if (!ice_is_vsi_valid(hw, vsi_handle))
5637 return ICE_ERR_PARAM;
5641 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
5642 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
/* walk the promisc rule list under lock and OR together the mask bits of
 * every rule that applies to this VSI
 */
5644 ice_acquire_lock(rule_lock);
5645 LIST_FOR_EACH_ENTRY(itr, rule_head,
5646 ice_fltr_mgmt_list_entry, list_entry) {
5647 /* Continue if this filter doesn't apply to this VSI or the
5648 * VSI ID is not in the VSI map for this filter
5650 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5653 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5655 ice_release_lock(rule_lock);
5661 * ice_get_vsi_promisc - get promiscuous mode of given VSI
5662 * @hw: pointer to the hardware structure
5663 * @vsi_handle: VSI handle to retrieve info from
5664 * @promisc_mask: pointer to mask to be filled in
5665 * @vid: VLAN ID of promisc VLAN VSI
5668 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
/* thin wrapper: use the HW struct's switch info */
5671 return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
5672 vid, hw->switch_info)
5676 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5677 * @hw: pointer to the hardware structure
5678 * @vsi_handle: VSI handle to retrieve info from
5679 * @promisc_mask: pointer to mask to be filled in
5680 * @vid: VLAN ID of promisc VLAN VSI
5681 * @sw: pointer to switch info struct for which function add rule
5683 static enum ice_status
5684 _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5685 u16 *vid, struct ice_switch_info *sw)
5687 struct ice_fltr_mgmt_list_entry *itr;
5688 struct LIST_HEAD_TYPE *rule_head;
5689 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5691 if (!ice_is_vsi_valid(hw, vsi_handle))
5692 return ICE_ERR_PARAM;
/* same walk as _ice_get_vsi_promisc but over the PROMISC_VLAN recipe */
5696 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
5697 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
5699 ice_acquire_lock(rule_lock);
5700 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
5702 /* Continue if this filter doesn't apply to this VSI or the
5703 * VSI ID is not in the VSI map for this filter
5705 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5708 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5710 ice_release_lock(rule_lock);
5716 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5717 * @hw: pointer to the hardware structure
5718 * @vsi_handle: VSI handle to retrieve info from
5719 * @promisc_mask: pointer to mask to be filled in
5720 * @vid: VLAN ID of promisc VLAN VSI
5723 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5726 return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
5727 vid, hw->switch_info);
5731 * ice_remove_promisc - Remove promisc based filter rules
5732 * @hw: pointer to the hardware structure
5733 * @recp_id: recipe ID for which the rule needs to removed
5734 * @v_list: list of promisc entries
5736 static enum ice_status
5737 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
5738 struct LIST_HEAD_TYPE *v_list)
5740 struct ice_fltr_list_entry *v_list_itr, *tmp;
5741 struct ice_sw_recipe *recp_list;
5743 recp_list = &hw->switch_info->recp_list[recp_id];
5744 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5746 v_list_itr->status =
5747 ice_remove_rule_internal(hw, recp_list, v_list_itr);
5748 if (v_list_itr->status)
5749 return v_list_itr->status;
5755 * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
5756 * @hw: pointer to the hardware structure
5757 * @vsi_handle: VSI handle to clear mode
5758 * @promisc_mask: mask of promiscuous config bits to clear
5759 * @vid: VLAN ID to clear VLAN promiscuous
5760 * @sw: pointer to switch info struct for which function add rule
5762 static enum ice_status
5763 _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5764 u16 vid, struct ice_switch_info *sw)
5766 struct ice_fltr_list_entry *fm_entry, *tmp;
5767 struct LIST_HEAD_TYPE remove_list_head;
5768 struct ice_fltr_mgmt_list_entry *itr;
5769 struct LIST_HEAD_TYPE *rule_head;
5770 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5771 enum ice_status status = ICE_SUCCESS;
5774 if (!ice_is_vsi_valid(hw, vsi_handle))
5775 return ICE_ERR_PARAM;
5777 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
5778 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
5780 recipe_id = ICE_SW_LKUP_PROMISC;
5782 rule_head = &sw->recp_list[recipe_id].filt_rules;
5783 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
5785 INIT_LIST_HEAD(&remove_list_head);
5787 ice_acquire_lock(rule_lock);
5788 LIST_FOR_EACH_ENTRY(itr, rule_head,
5789 ice_fltr_mgmt_list_entry, list_entry) {
5790 struct ice_fltr_info *fltr_info;
5791 u8 fltr_promisc_mask = 0;
5793 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5795 fltr_info = &itr->fltr_info;
5797 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
5798 vid != fltr_info->l_data.mac_vlan.vlan_id)
5801 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
5803 /* Skip if filter is not completely specified by given mask */
5804 if (fltr_promisc_mask & ~promisc_mask)
5807 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5811 ice_release_lock(rule_lock);
5812 goto free_fltr_list;
5815 ice_release_lock(rule_lock);
5817 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
5820 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
5821 ice_fltr_list_entry, list_entry) {
5822 LIST_DEL(&fm_entry->list_entry);
5823 ice_free(hw, fm_entry);
5830 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
5831 * @hw: pointer to the hardware structure
5832 * @vsi_handle: VSI handle to clear mode
5833 * @promisc_mask: mask of promiscuous config bits to clear
5834 * @vid: VLAN ID to clear VLAN promiscuous
5837 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
5838 u8 promisc_mask, u16 vid)
5840 return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
5841 vid, hw->switch_info);
5845 * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5846 * @hw: pointer to the hardware structure
5847 * @vsi_handle: VSI handle to configure
5848 * @promisc_mask: mask of promiscuous config bits
5849 * @vid: VLAN ID to set VLAN promiscuous
5850 * @lport: logical port number to configure promisc mode
5851 * @sw: pointer to switch info struct for which function add rule
5853 static enum ice_status
5854 _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5855 u16 vid, u8 lport, struct ice_switch_info *sw)
5857 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
5858 struct ice_fltr_list_entry f_list_entry;
5859 struct ice_fltr_info new_fltr;
5860 enum ice_status status = ICE_SUCCESS;
5866 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
5868 if (!ice_is_vsi_valid(hw, vsi_handle))
5869 return ICE_ERR_PARAM;
5870 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5872 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
5874 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
5875 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
5876 new_fltr.l_data.mac_vlan.vlan_id = vid;
5877 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
5879 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
5880 recipe_id = ICE_SW_LKUP_PROMISC;
5883 /* Separate filters must be set for each direction/packet type
5884 * combination, so we will loop over the mask value, store the
5885 * individual type, and clear it out in the input mask as it
5888 while (promisc_mask) {
5889 struct ice_sw_recipe *recp_list;
5895 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
5896 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
5897 pkt_type = UCAST_FLTR;
5898 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
5899 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
5900 pkt_type = UCAST_FLTR;
5902 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
5903 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
5904 pkt_type = MCAST_FLTR;
5905 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
5906 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
5907 pkt_type = MCAST_FLTR;
5909 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
5910 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
5911 pkt_type = BCAST_FLTR;
5912 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
5913 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
5914 pkt_type = BCAST_FLTR;
5918 /* Check for VLAN promiscuous flag */
5919 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
5920 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
5921 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
5922 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
5926 /* Set filter DA based on packet type */
5927 mac_addr = new_fltr.l_data.mac.mac_addr;
5928 if (pkt_type == BCAST_FLTR) {
5929 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
5930 } else if (pkt_type == MCAST_FLTR ||
5931 pkt_type == UCAST_FLTR) {
5932 /* Use the dummy ether header DA */
5933 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
5934 ICE_NONDMA_TO_NONDMA);
5935 if (pkt_type == MCAST_FLTR)
5936 mac_addr[0] |= 0x1; /* Set multicast bit */
5939 /* Need to reset this to zero for all iterations */
5942 new_fltr.flag |= ICE_FLTR_TX;
5943 new_fltr.src = hw_vsi_id;
5945 new_fltr.flag |= ICE_FLTR_RX;
5946 new_fltr.src = lport;
5949 new_fltr.fltr_act = ICE_FWD_TO_VSI;
5950 new_fltr.vsi_handle = vsi_handle;
5951 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
5952 f_list_entry.fltr_info = new_fltr;
5953 recp_list = &sw->recp_list[recipe_id];
5955 status = ice_add_rule_internal(hw, recp_list, lport,
5957 if (status != ICE_SUCCESS)
5958 goto set_promisc_exit;
5966 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5967 * @hw: pointer to the hardware structure
5968 * @vsi_handle: VSI handle to configure
5969 * @promisc_mask: mask of promiscuous config bits
5970 * @vid: VLAN ID to set VLAN promiscuous
5973 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5976 return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
5977 hw->port_info->lport,
5982 * _ice_set_vlan_vsi_promisc
5983 * @hw: pointer to the hardware structure
5984 * @vsi_handle: VSI handle to configure
5985 * @promisc_mask: mask of promiscuous config bits
5986 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5987 * @lport: logical port number to configure promisc mode
5988 * @sw: pointer to switch info struct for which function add rule
5990 * Configure VSI with all associated VLANs to given promiscuous mode(s)
5992 static enum ice_status
5993 _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5994 bool rm_vlan_promisc, u8 lport,
5995 struct ice_switch_info *sw)
5997 struct ice_fltr_list_entry *list_itr, *tmp;
5998 struct LIST_HEAD_TYPE vsi_list_head;
5999 struct LIST_HEAD_TYPE *vlan_head;
6000 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
6001 enum ice_status status;
6004 INIT_LIST_HEAD(&vsi_list_head);
6005 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
6006 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
6007 ice_acquire_lock(vlan_lock);
6008 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
6010 ice_release_lock(vlan_lock);
6012 goto free_fltr_list;
6014 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
6016 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
6017 if (rm_vlan_promisc)
6018 status = _ice_clear_vsi_promisc(hw, vsi_handle,
6022 status = _ice_set_vsi_promisc(hw, vsi_handle,
6023 promisc_mask, vlan_id,
6030 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
6031 ice_fltr_list_entry, list_entry) {
6032 LIST_DEL(&list_itr->list_entry);
6033 ice_free(hw, list_itr);
6039 * ice_set_vlan_vsi_promisc
6040 * @hw: pointer to the hardware structure
6041 * @vsi_handle: VSI handle to configure
6042 * @promisc_mask: mask of promiscuous config bits
6043 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
6045 * Configure VSI with all associated VLANs to given promiscuous mode(s)
6048 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
6049 bool rm_vlan_promisc)
6051 return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
6052 rm_vlan_promisc, hw->port_info->lport,
6057 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
6058 * @hw: pointer to the hardware structure
6059 * @vsi_handle: VSI handle to remove filters from
6060 * @recp_list: recipe list from which function remove fltr
6061 * @lkup: switch rule filter lookup type
6064 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
6065 struct ice_sw_recipe *recp_list,
6066 enum ice_sw_lkup_type lkup)
6068 struct ice_fltr_list_entry *fm_entry;
6069 struct LIST_HEAD_TYPE remove_list_head;
6070 struct LIST_HEAD_TYPE *rule_head;
6071 struct ice_fltr_list_entry *tmp;
6072 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6073 enum ice_status status;
6075 INIT_LIST_HEAD(&remove_list_head);
6076 rule_lock = &recp_list[lkup].filt_rule_lock;
6077 rule_head = &recp_list[lkup].filt_rules;
6078 ice_acquire_lock(rule_lock);
6079 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
6081 ice_release_lock(rule_lock);
6086 case ICE_SW_LKUP_MAC:
6087 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
6089 case ICE_SW_LKUP_VLAN:
6090 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
6092 case ICE_SW_LKUP_PROMISC:
6093 case ICE_SW_LKUP_PROMISC_VLAN:
6094 ice_remove_promisc(hw, lkup, &remove_list_head);
6096 case ICE_SW_LKUP_MAC_VLAN:
6097 ice_remove_mac_vlan(hw, &remove_list_head);
6099 case ICE_SW_LKUP_ETHERTYPE:
6100 case ICE_SW_LKUP_ETHERTYPE_MAC:
6101 ice_remove_eth_mac(hw, &remove_list_head);
6103 case ICE_SW_LKUP_DFLT:
6104 ice_debug(hw, ICE_DBG_SW, "Remove filters for this lookup type hasn't been implemented yet\n");
6106 case ICE_SW_LKUP_LAST:
6107 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
6111 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
6112 ice_fltr_list_entry, list_entry) {
6113 LIST_DEL(&fm_entry->list_entry);
6114 ice_free(hw, fm_entry);
6119 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
6120 * @hw: pointer to the hardware structure
6121 * @vsi_handle: VSI handle to remove filters from
6122 * @sw: pointer to switch info struct
6125 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
6126 struct ice_switch_info *sw)
6128 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
6130 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6131 sw->recp_list, ICE_SW_LKUP_MAC);
6132 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6133 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
6134 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6135 sw->recp_list, ICE_SW_LKUP_PROMISC);
6136 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6137 sw->recp_list, ICE_SW_LKUP_VLAN);
6138 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6139 sw->recp_list, ICE_SW_LKUP_DFLT);
6140 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6141 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
6142 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6143 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
6144 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6145 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
6149 * ice_remove_vsi_fltr - Remove all filters for a VSI
6150 * @hw: pointer to the hardware structure
6151 * @vsi_handle: VSI handle to remove filters from
6153 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
6155 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
6159 * ice_alloc_res_cntr - allocating resource counter
6160 * @hw: pointer to the hardware structure
6161 * @type: type of resource
6162 * @alloc_shared: if set it is shared else dedicated
6163 * @num_items: number of entries requested for FD resource type
6164 * @counter_id: counter index returned by AQ call
6167 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
6170 struct ice_aqc_alloc_free_res_elem *buf;
6171 enum ice_status status;
6174 /* Allocate resource */
6175 buf_len = ice_struct_size(buf, elem, 1);
6176 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6178 return ICE_ERR_NO_MEMORY;
6180 buf->num_elems = CPU_TO_LE16(num_items);
6181 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
6182 ICE_AQC_RES_TYPE_M) | alloc_shared);
6184 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
6185 ice_aqc_opc_alloc_res, NULL);
6189 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
6197 * ice_free_res_cntr - free resource counter
6198 * @hw: pointer to the hardware structure
6199 * @type: type of resource
6200 * @alloc_shared: if set it is shared else dedicated
6201 * @num_items: number of entries to be freed for FD resource type
6202 * @counter_id: counter ID resource which needs to be freed
6205 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
6208 struct ice_aqc_alloc_free_res_elem *buf;
6209 enum ice_status status;
6213 buf_len = ice_struct_size(buf, elem, 1);
6214 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6216 return ICE_ERR_NO_MEMORY;
6218 buf->num_elems = CPU_TO_LE16(num_items);
6219 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
6220 ICE_AQC_RES_TYPE_M) | alloc_shared);
6221 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
6223 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
6224 ice_aqc_opc_free_res, NULL);
6226 ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
6233 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
6234 * @hw: pointer to the hardware structure
6235 * @counter_id: returns counter index
6237 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
6239 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
6240 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
6245 * ice_free_vlan_res_counter - Free counter resource for VLAN type
6246 * @hw: pointer to the hardware structure
6247 * @counter_id: counter index to be freed
6249 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
6251 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
6252 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
6257 * ice_alloc_res_lg_act - add large action resource
6258 * @hw: pointer to the hardware structure
6259 * @l_id: large action ID to fill it in
6260 * @num_acts: number of actions to hold with a large action entry
6262 static enum ice_status
6263 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
6265 struct ice_aqc_alloc_free_res_elem *sw_buf;
6266 enum ice_status status;
6269 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
6270 return ICE_ERR_PARAM;
6272 /* Allocate resource for large action */
6273 buf_len = ice_struct_size(sw_buf, elem, 1);
6274 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6276 return ICE_ERR_NO_MEMORY;
6278 sw_buf->num_elems = CPU_TO_LE16(1);
6280 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
6281 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
6282 * If num_acts is greater than 2, then use
6283 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
6284 * The num_acts cannot exceed 4. This was ensured at the
6285 * beginning of the function.
6288 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
6289 else if (num_acts == 2)
6290 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
6292 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
6294 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
6295 ice_aqc_opc_alloc_res, NULL);
6297 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
6299 ice_free(hw, sw_buf);
6304 * ice_add_mac_with_sw_marker - add filter with sw marker
6305 * @hw: pointer to the hardware structure
6306 * @f_info: filter info structure containing the MAC filter information
6307 * @sw_marker: sw marker to tag the Rx descriptor with
6310 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
6313 struct ice_fltr_mgmt_list_entry *m_entry;
6314 struct ice_fltr_list_entry fl_info;
6315 struct ice_sw_recipe *recp_list;
6316 struct LIST_HEAD_TYPE l_head;
6317 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6318 enum ice_status ret;
6322 if (f_info->fltr_act != ICE_FWD_TO_VSI)
6323 return ICE_ERR_PARAM;
6325 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
6326 return ICE_ERR_PARAM;
6328 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
6329 return ICE_ERR_PARAM;
6331 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
6332 return ICE_ERR_PARAM;
6333 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
6335 /* Add filter if it doesn't exist so then the adding of large
6336 * action always results in update
6339 INIT_LIST_HEAD(&l_head);
6340 fl_info.fltr_info = *f_info;
6341 LIST_ADD(&fl_info.list_entry, &l_head);
6343 entry_exists = false;
6344 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
6345 hw->port_info->lport);
6346 if (ret == ICE_ERR_ALREADY_EXISTS)
6347 entry_exists = true;
6351 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
6352 rule_lock = &recp_list->filt_rule_lock;
6353 ice_acquire_lock(rule_lock);
6354 /* Get the book keeping entry for the filter */
6355 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
6359 /* If counter action was enabled for this rule then don't enable
6360 * sw marker large action
6362 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
6363 ret = ICE_ERR_PARAM;
6367 /* if same marker was added before */
6368 if (m_entry->sw_marker_id == sw_marker) {
6369 ret = ICE_ERR_ALREADY_EXISTS;
6373 /* Allocate a hardware table entry to hold large act. Three actions
6374 * for marker based large action
6376 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
6380 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
6383 /* Update the switch rule to add the marker action */
6384 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
6386 ice_release_lock(rule_lock);
6391 ice_release_lock(rule_lock);
6392 /* only remove entry if it did not exist previously */
6394 ret = ice_remove_mac(hw, &l_head);
6400 * ice_add_mac_with_counter - add filter with counter enabled
6401 * @hw: pointer to the hardware structure
6402 * @f_info: pointer to filter info structure containing the MAC filter
6406 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
6408 struct ice_fltr_mgmt_list_entry *m_entry;
6409 struct ice_fltr_list_entry fl_info;
6410 struct ice_sw_recipe *recp_list;
6411 struct LIST_HEAD_TYPE l_head;
6412 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6413 enum ice_status ret;
6418 if (f_info->fltr_act != ICE_FWD_TO_VSI)
6419 return ICE_ERR_PARAM;
6421 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
6422 return ICE_ERR_PARAM;
6424 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
6425 return ICE_ERR_PARAM;
6426 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
6427 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
6429 entry_exist = false;
6431 rule_lock = &recp_list->filt_rule_lock;
6433 /* Add filter if it doesn't exist so then the adding of large
6434 * action always results in update
6436 INIT_LIST_HEAD(&l_head);
6438 fl_info.fltr_info = *f_info;
6439 LIST_ADD(&fl_info.list_entry, &l_head);
6441 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
6442 hw->port_info->lport);
6443 if (ret == ICE_ERR_ALREADY_EXISTS)
6448 ice_acquire_lock(rule_lock);
6449 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
6451 ret = ICE_ERR_BAD_PTR;
6455 /* Don't enable counter for a filter for which sw marker was enabled */
6456 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
6457 ret = ICE_ERR_PARAM;
6461 /* If a counter was already enabled then don't need to add again */
6462 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
6463 ret = ICE_ERR_ALREADY_EXISTS;
6467 /* Allocate a hardware table entry to VLAN counter */
6468 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
6472 /* Allocate a hardware table entry to hold large act. Two actions for
6473 * counter based large action
6475 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
6479 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
6482 /* Update the switch rule to add the counter action */
6483 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
6485 ice_release_lock(rule_lock);
6490 ice_release_lock(rule_lock);
6491 /* only remove entry if it did not exist previously */
6493 ret = ice_remove_mac(hw, &l_head);
6498 /* This is mapping table entry that maps every word within a given protocol
6499 * structure to the real byte offset as per the specification of that
6501 * for example dst address is 3 words in ethertype header and corresponding
6502 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
6503 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
6504 * matching entry describing its field. This needs to be updated if new
6505 * structure is added to that union.
6507 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
6508 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
6509 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
6510 { ICE_ETYPE_OL, { 0 } },
6511 { ICE_VLAN_OFOS, { 0, 2 } },
6512 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
6513 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
6514 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
6515 26, 28, 30, 32, 34, 36, 38 } },
6516 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
6517 26, 28, 30, 32, 34, 36, 38 } },
6518 { ICE_TCP_IL, { 0, 2 } },
6519 { ICE_UDP_OF, { 0, 2 } },
6520 { ICE_UDP_ILOS, { 0, 2 } },
6521 { ICE_SCTP_IL, { 0, 2 } },
6522 { ICE_VXLAN, { 8, 10, 12, 14 } },
6523 { ICE_GENEVE, { 8, 10, 12, 14 } },
6524 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
6525 { ICE_NVGRE, { 0, 2, 4, 6 } },
6526 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
6527 { ICE_PPPOE, { 0, 2, 4, 6 } },
6528 { ICE_PFCP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
6529 { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
6530 { ICE_ESP, { 0, 2, 4, 6 } },
6531 { ICE_AH, { 0, 2, 4, 6, 8, 10 } },
6532 { ICE_NAT_T, { 8, 10, 12, 14 } },
6533 { ICE_GTP_NO_PAY, { 8, 10, 12, 14 } },
6534 { ICE_VLAN_EX, { 0, 2 } },
6537 /* The following table describes preferred grouping of recipes.
6538 * If a recipe that needs to be programmed is a superset or matches one of the
6539 * following combinations, then the recipe needs to be chained as per the
6543 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
6544 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
6545 { ICE_MAC_IL, ICE_MAC_IL_HW },
6546 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
6547 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
6548 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
6549 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
6550 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
6551 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
6552 { ICE_TCP_IL, ICE_TCP_IL_HW },
6553 { ICE_UDP_OF, ICE_UDP_OF_HW },
6554 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
6555 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
6556 { ICE_VXLAN, ICE_UDP_OF_HW },
6557 { ICE_GENEVE, ICE_UDP_OF_HW },
6558 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
6559 { ICE_NVGRE, ICE_GRE_OF_HW },
6560 { ICE_GTP, ICE_UDP_OF_HW },
6561 { ICE_PPPOE, ICE_PPPOE_HW },
6562 { ICE_PFCP, ICE_UDP_ILOS_HW },
6563 { ICE_L2TPV3, ICE_L2TPV3_HW },
6564 { ICE_ESP, ICE_ESP_HW },
6565 { ICE_AH, ICE_AH_HW },
6566 { ICE_NAT_T, ICE_UDP_ILOS_HW },
6567 { ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
6568 { ICE_VLAN_EX, ICE_VLAN_OF_HW },
6572 * ice_find_recp - find a recipe
6573 * @hw: pointer to the hardware structure
6574 * @lkup_exts: extension sequence to match
6576 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
6578 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
6579 enum ice_sw_tunnel_type tun_type)
6581 bool refresh_required = true;
6582 struct ice_sw_recipe *recp;
6585 /* Walk through existing recipes to find a match */
6586 recp = hw->switch_info->recp_list;
6587 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6588 /* If recipe was not created for this ID, in SW bookkeeping,
6589 * check if FW has an entry for this recipe. If the FW has an
6590 * entry update it in our SW bookkeeping and continue with the
6593 if (!recp[i].recp_created)
6594 if (ice_get_recp_frm_fw(hw,
6595 hw->switch_info->recp_list, i,
6599 /* Skip inverse action recipes */
6600 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
6601 ICE_AQ_RECIPE_ACT_INV_ACT)
6604 /* if number of words we are looking for match */
6605 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
6606 struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
6607 struct ice_fv_word *be = lkup_exts->fv_words;
6608 u16 *cr = recp[i].lkup_exts.field_mask;
6609 u16 *de = lkup_exts->field_mask;
6613 /* ar, cr, and qr are related to the recipe words, while
6614 * be, de, and pe are related to the lookup words
6616 for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
6617 for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
6619 if (ar[qr].off == be[pe].off &&
6620 ar[qr].prot_id == be[pe].prot_id &&
6622 /* Found the "pe"th word in the
6627 /* After walking through all the words in the
6628 * "i"th recipe if "p"th word was not found then
6629 * this recipe is not what we are looking for.
6630 * So break out from this loop and try the next
6633 if (qr >= recp[i].lkup_exts.n_val_words) {
6638 /* If for "i"th recipe the found was never set to false
6639 * then it means we found our match
6641 if (tun_type == recp[i].tun_type && found)
6642 return i; /* Return the recipe ID */
6645 return ICE_MAX_NUM_RECIPES;
6649 * ice_prot_type_to_id - get protocol ID from protocol type
6650 * @type: protocol type
6651 * @id: pointer to variable that will receive the ID
6653 * Returns true if found, false otherwise
6655 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
6659 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
6660 if (ice_prot_id_tbl[i].type == type) {
6661 *id = ice_prot_id_tbl[i].protocol_id;
6668 * ice_find_valid_words - count valid words
6669 * @rule: advanced rule with lookup information
6670 * @lkup_exts: byte offset extractions of the words that are valid
6672 * calculate valid words in a lookup rule using mask value
6675 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
6676 struct ice_prot_lkup_ext *lkup_exts)
6678 u8 j, word, prot_id, ret_val;
6680 if (!ice_prot_type_to_id(rule->type, &prot_id))
6683 word = lkup_exts->n_val_words;
6685 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
6686 if (((u16 *)&rule->m_u)[j] &&
6687 rule->type < ARRAY_SIZE(ice_prot_ext)) {
6688 /* No more space to accommodate */
6689 if (word >= ICE_MAX_CHAIN_WORDS)
6691 lkup_exts->fv_words[word].off =
6692 ice_prot_ext[rule->type].offs[j];
6693 lkup_exts->fv_words[word].prot_id =
6694 ice_prot_id_tbl[rule->type].protocol_id;
6695 lkup_exts->field_mask[word] =
6696 BE16_TO_CPU(((_FORCE_ __be16 *)&rule->m_u)[j]);
6700 ret_val = word - lkup_exts->n_val_words;
6701 lkup_exts->n_val_words = word;
6707 * ice_create_first_fit_recp_def - Create a recipe grouping
6708 * @hw: pointer to the hardware structure
6709 * @lkup_exts: an array of protocol header extractions
6710 * @rg_list: pointer to a list that stores new recipe groups
6711 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
6713 * Using first fit algorithm, take all the words that are still not done
6714 * and start grouping them in 4-word groups. Each group makes up one
6717 static enum ice_status
6718 ice_create_first_fit_recp_def(struct ice_hw *hw,
6719 struct ice_prot_lkup_ext *lkup_exts,
6720 struct LIST_HEAD_TYPE *rg_list,
6723 struct ice_pref_recipe_group *grp = NULL;
6728 if (!lkup_exts->n_val_words) {
6729 struct ice_recp_grp_entry *entry;
6731 entry = (struct ice_recp_grp_entry *)
6732 ice_malloc(hw, sizeof(*entry));
6734 return ICE_ERR_NO_MEMORY;
6735 LIST_ADD(&entry->l_entry, rg_list);
6736 grp = &entry->r_group;
6738 grp->n_val_pairs = 0;
6741 /* Walk through every word in the rule to check if it is not done. If so
6742 * then this word needs to be part of a new recipe.
6744 for (j = 0; j < lkup_exts->n_val_words; j++)
6745 if (!ice_is_bit_set(lkup_exts->done, j)) {
6747 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
6748 struct ice_recp_grp_entry *entry;
6750 entry = (struct ice_recp_grp_entry *)
6751 ice_malloc(hw, sizeof(*entry));
6753 return ICE_ERR_NO_MEMORY;
6754 LIST_ADD(&entry->l_entry, rg_list);
6755 grp = &entry->r_group;
6759 grp->pairs[grp->n_val_pairs].prot_id =
6760 lkup_exts->fv_words[j].prot_id;
6761 grp->pairs[grp->n_val_pairs].off =
6762 lkup_exts->fv_words[j].off;
6763 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
6771 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
6772 * @hw: pointer to the hardware structure
6773 * @fv_list: field vector with the extraction sequence information
6774 * @rg_list: recipe groupings with protocol-offset pairs
6776 * Helper function to fill in the field vector indices for protocol-offset
6777 * pairs. These indexes are then ultimately programmed into a recipe.
6779 static enum ice_status
6780 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
6781 struct LIST_HEAD_TYPE *rg_list)
6783 struct ice_sw_fv_list_entry *fv;
6784 struct ice_recp_grp_entry *rg;
6785 struct ice_fv_word *fv_ext;
6787 if (LIST_EMPTY(fv_list))
6790 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
6791 fv_ext = fv->fv_ptr->ew;
6793 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
6796 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
6797 struct ice_fv_word *pr;
6802 pr = &rg->r_group.pairs[i];
6803 mask = rg->r_group.mask[i];
6805 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
6806 if (fv_ext[j].prot_id == pr->prot_id &&
6807 fv_ext[j].off == pr->off) {
6810 /* Store index of field vector */
6812 rg->fv_mask[i] = mask;
6816 /* Protocol/offset could not be found, caller gave an
6820 return ICE_ERR_PARAM;
6828 * ice_find_free_recp_res_idx - find free result indexes for recipe
6829 * @hw: pointer to hardware structure
6830 * @profiles: bitmap of profiles that will be associated with the new recipe
6831 * @free_idx: pointer to variable to receive the free index bitmap
6833 * The algorithm used here is:
6834 * 1. When creating a new recipe, create a set P which contains all
6835 * Profiles that will be associated with our new recipe
6837 * 2. For each Profile p in set P:
6838 * a. Add all recipes associated with Profile p into set R
6839 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
6840 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
6841 * i. Or just assume they all have the same possible indexes:
6843 * i.e., PossibleIndexes = 0x0000F00000000000
6845 * 3. For each Recipe r in set R:
6846 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
6847 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
6849 * FreeIndexes will contain the bits indicating the indexes free for use,
6850 * then the code needs to update the recipe[r].used_result_idx_bits to
6851 * indicate which indexes were selected for use by this recipe.
/* NOTE(review): the return-type line of this definition is not visible in
 * this excerpt; the final statement returns a u16 count — confirm the
 * declared return type against the full source.
 */
6854 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
6855 ice_bitmap_t *free_idx)
6857 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6858 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6859 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
/* Start from clean bitmaps; free_idx is an output parameter. */
6862 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6863 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6864 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
6865 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
/* Initially assume every result index is possible; this is narrowed
 * below by AND-ing with each profile's usable-index bitmap.
 */
6867 ice_bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
6869 /* For each profile we are going to associate the recipe with, add the
6870 * recipes that are associated with that profile. This will give us
6871 * the set of recipes that our recipe may collide with. Also, determine
6872 * what possible result indexes are usable given this set of profiles.
6874 ice_for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
6875 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
6876 ICE_MAX_NUM_RECIPES);
6877 ice_and_bitmap(possible_idx, possible_idx,
6878 hw->switch_info->prof_res_bm[bit],
6882 /* For each recipe that our new recipe may collide with, determine
6883 * which indexes have been used.
6885 ice_for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
6886 ice_or_bitmap(used_idx, used_idx,
6887 hw->switch_info->recp_list[bit].res_idxs,
/* FreeIndexes = UsedIndexes ^ PossibleIndexes, per the algorithm
 * described in the function header above.
 */
6890 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
6892 /* return number of free indexes */
6893 return (u16)ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS);
6897 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
6898 * @hw: pointer to hardware structure
6899 * @rm: recipe management list entry
6900 * @profiles: bitmap of profiles that will be associated.
/* Build and program the hardware recipe(s) for this recipe-management
 * entry, chaining sub-recipes through result indexes when more than one
 * recipe group is needed, then mirror the programmed state into the
 * software recp_list.
 * NOTE(review): this excerpt omits a number of interior lines (several
 * local declarations, error-path labels, 'goto' targets and closing
 * braces) — verify line-level details against the full source.
 */
6902 static enum ice_status
6903 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
6904 ice_bitmap_t *profiles)
6906 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6907 struct ice_aqc_recipe_data_elem *tmp;
6908 struct ice_aqc_recipe_data_elem *buf;
6909 struct ice_recp_grp_entry *entry;
6910 enum ice_status status;
6916 /* When more than one recipe is required, another recipe is needed to
6917 * chain them together. Matching a tunnel metadata ID takes up one of
6918 * the match fields in the chaining recipe reducing the number of
6919 * chained recipes by one.
6921 /* check number of free result indices */
6922 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6923 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
6925 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
6926 free_res_idx, rm->n_grp_count);
/* A chained (multi-group) recipe needs one free result index per group
 * and cannot exceed the hardware chaining limit.
 */
6928 if (rm->n_grp_count > 1) {
6929 if (rm->n_grp_count > free_res_idx)
6930 return ICE_ERR_MAX_LIMIT;
6935 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
6936 return ICE_ERR_MAX_LIMIT;
/* tmp receives the firmware's current recipe table; buf is the set of
 * recipe entries this function programs.
 */
6938 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
6939 ICE_MAX_NUM_RECIPES,
6942 return ICE_ERR_NO_MEMORY;
6944 buf = (struct ice_aqc_recipe_data_elem *)
6945 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
6947 status = ICE_ERR_NO_MEMORY;
6951 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
6952 recipe_count = ICE_MAX_NUM_RECIPES;
6953 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
6955 if (status || recipe_count == 0)
6958 /* Allocate the recipe resources, and configure them according to the
6959 * match fields from protocol headers and extracted field vectors.
6961 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
6962 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
6965 status = ice_alloc_recipe(hw, &entry->rid);
6969 /* Clear the result index of the located recipe, as this will be
6970 * updated, if needed, later in the recipe creation process.
6972 tmp[0].content.result_indx = 0;
6974 buf[recps] = tmp[0];
6975 buf[recps].recipe_indx = (u8)entry->rid;
6976 /* if the recipe is a non-root recipe RID should be programmed
6977 * as 0 for the rules to be applied correctly.
6979 buf[recps].content.rid = 0;
6980 ice_memset(&buf[recps].content.lkup_indx, 0,
6981 sizeof(buf[recps].content.lkup_indx),
6984 /* All recipes use look-up index 0 to match switch ID. */
6985 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
6986 buf[recps].content.mask[0] =
6987 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
6988 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
6991 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
6992 buf[recps].content.lkup_indx[i] = 0x80;
6993 buf[recps].content.mask[i] = 0;
/* Fill the real lookup words (offset by one past the switch-ID
 * slot) from the indices resolved by ice_fill_fv_word_index().
 */
6996 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
6997 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
6998 buf[recps].content.mask[i + 1] =
6999 CPU_TO_LE16(entry->fv_mask[i]);
7002 if (rm->n_grp_count > 1) {
7003 /* Checks to see if there really is a valid result index
7006 if (chain_idx >= ICE_MAX_FV_WORDS) {
7007 ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
7008 status = ICE_ERR_MAX_LIMIT;
/* Consume one free result index for this sub-recipe and
 * advance to the next free one for the following group.
 */
7012 entry->chain_idx = chain_idx;
7013 buf[recps].content.result_indx =
7014 ICE_AQ_RECIPE_RESULT_EN |
7015 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
7016 ICE_AQ_RECIPE_RESULT_DATA_M);
7017 ice_clear_bit(chain_idx, result_idx_bm);
7018 chain_idx = ice_find_first_bit(result_idx_bm,
7022 /* fill recipe dependencies */
7023 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
7024 ICE_MAX_NUM_RECIPES);
7025 ice_set_bit(buf[recps].recipe_indx,
7026 (ice_bitmap_t *)buf[recps].recipe_bitmap);
7027 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
/* Single-group case: the one recipe is itself the root. */
7031 if (rm->n_grp_count == 1) {
7032 rm->root_rid = buf[0].recipe_indx;
7033 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
7034 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
7035 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
7036 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
7037 sizeof(buf[0].recipe_bitmap),
7038 ICE_NONDMA_TO_NONDMA);
7040 status = ICE_ERR_BAD_PTR;
7043 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
7044 * the recipe which is getting created if specified
7045 * by user. Usually any advanced switch filter, which results
7046 * into new extraction sequence, ended up creating a new recipe
7047 * of type ROOT and usually recipes are associated with profiles
7048 * Switch rule referring newly created recipe, needs to have
7049 * either/or 'fwd' or 'join' priority, otherwise switch rule
7050 * evaluation will not happen correctly. In other words, if
7051 * switch rule to be evaluated on priority basis, then recipe
7052 * needs to have priority, otherwise it will be evaluated last.
7054 buf[0].content.act_ctrl_fwd_priority = rm->priority;
/* Multi-group case: allocate one extra root recipe whose lookup
 * words are the chain result indexes of the sub-recipes.
 */
7056 struct ice_recp_grp_entry *last_chain_entry;
7059 /* Allocate the last recipe that will chain the outcomes of the
7060 * other recipes together
7062 status = ice_alloc_recipe(hw, &rid);
7066 buf[recps].recipe_indx = (u8)rid;
7067 buf[recps].content.rid = (u8)rid;
7068 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
7069 /* the new entry created should also be part of rg_list to
7070 * make sure we have complete recipe
7072 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
7073 sizeof(*last_chain_entry));
7074 if (!last_chain_entry) {
7075 status = ICE_ERR_NO_MEMORY;
7078 last_chain_entry->rid = rid;
7079 ice_memset(&buf[recps].content.lkup_indx, 0,
7080 sizeof(buf[recps].content.lkup_indx),
7082 /* All recipes use look-up index 0 to match switch ID. */
7083 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
7084 buf[recps].content.mask[0] =
7085 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
7086 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
7087 buf[recps].content.lkup_indx[i] =
7088 ICE_AQ_RECIPE_LKUP_IGNORE;
7089 buf[recps].content.mask[i] = 0;
7093 /* update r_bitmap with the recp that is used for chaining */
7094 ice_set_bit(rid, rm->r_bitmap);
7095 /* this is the recipe that chains all the other recipes so it
7096 * should not have a chaining ID to indicate the same
7098 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
7099 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
7101 last_chain_entry->fv_idx[i] = entry->chain_idx;
7102 buf[recps].content.lkup_indx[i] = entry->chain_idx;
7103 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
7104 ice_set_bit(entry->rid, rm->r_bitmap);
7106 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
7107 if (sizeof(buf[recps].recipe_bitmap) >=
7108 sizeof(rm->r_bitmap)) {
7109 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
7110 sizeof(buf[recps].recipe_bitmap),
7111 ICE_NONDMA_TO_NONDMA);
7113 status = ICE_ERR_BAD_PTR;
7116 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
7119 rm->root_rid = (u8)rid;
/* Programming recipes requires the shared change lock. */
7121 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7125 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
7126 ice_release_change_lock(hw);
7130 /* Every recipe that just got created add it to the recipe
7133 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
7134 struct ice_switch_info *sw = hw->switch_info;
7135 bool is_root, idx_found = false;
7136 struct ice_sw_recipe *recp;
7137 u16 idx, buf_idx = 0;
7139 /* find buffer index for copying some data */
7140 for (idx = 0; idx < rm->n_grp_count; idx++)
7141 if (buf[idx].recipe_indx == entry->rid) {
7147 status = ICE_ERR_OUT_OF_RANGE;
/* Mirror the programmed recipe into the software recipe list so
 * later lookups (ice_find_recp) can match it without re-reading
 * firmware state.
 */
7151 recp = &sw->recp_list[entry->rid];
7152 is_root = (rm->root_rid == entry->rid);
7153 recp->is_root = is_root;
7155 recp->root_rid = entry->rid;
7156 recp->big_recp = (is_root && rm->n_grp_count > 1);
7158 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
7159 entry->r_group.n_val_pairs *
7160 sizeof(struct ice_fv_word),
7161 ICE_NONDMA_TO_NONDMA);
7163 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
7164 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
7166 /* Copy non-result fv index values and masks to recipe. This
7167 * call will also update the result recipe bitmask.
7169 ice_collect_result_idx(&buf[buf_idx], recp);
7171 /* for non-root recipes, also copy to the root, this allows
7172 * easier matching of a complete chained recipe
7175 ice_collect_result_idx(&buf[buf_idx],
7176 &sw->recp_list[rm->root_rid]);
7178 recp->n_ext_words = entry->r_group.n_val_pairs;
7179 recp->chain_idx = entry->chain_idx;
7180 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
7181 recp->n_grp_count = rm->n_grp_count;
7182 recp->tun_type = rm->tun_type;
7183 recp->recp_created = true;
7197 * ice_create_recipe_group - creates recipe group
7198 * @hw: pointer to hardware structure
7199 * @rm: recipe management list entry
7200 * @lkup_exts: lookup elements
/* Pack the valid lookup words of lkup_exts into recipe groups on
 * rm->rg_list (first-fit), record the group count, and cache the word
 * list and field masks on the recipe-management entry.
 * NOTE(review): the status check after ice_create_first_fit_recp_def()
 * and the final return are not visible in this excerpt — verify against
 * the full source.
 */
7202 static enum ice_status
7203 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
7204 struct ice_prot_lkup_ext *lkup_exts)
7206 enum ice_status status;
7209 rm->n_grp_count = 0;
7211 /* Create recipes for words that are marked not done by packing them
7214 status = ice_create_first_fit_recp_def(hw, lkup_exts,
7215 &rm->rg_list, &recp_count);
7217 rm->n_grp_count += recp_count;
7218 rm->n_ext_words = lkup_exts->n_val_words;
/* Cache the raw extraction words and their masks for later recipe
 * matching (ice_find_recp compares against these).
 */
7219 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
7220 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
7221 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
7222 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
7229 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
7230 * @hw: pointer to hardware structure
7231 * @lkups: lookup elements or match criteria for the advanced recipe, one
7232 * structure per protocol header
7233 * @lkups_cnt: number of protocols
7234 * @bm: bitmap of field vectors to consider
7235 * @fv_list: pointer to a list that holds the returned field vectors
/* Translate each lookup element's protocol type to a hardware protocol
 * ID, then collect the field vectors (restricted to bitmap bm) that
 * contain all of those protocols into fv_list.
 * NOTE(review): the zero-lkups early return and the 'free_mem'-style
 * error label are not visible in this excerpt — verify against the full
 * source.
 */
7237 static enum ice_status
7238 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7239 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
7241 enum ice_status status;
7248 prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
7250 return ICE_ERR_NO_MEMORY;
/* A lookup type with no hardware protocol ID is a caller error. */
7252 for (i = 0; i < lkups_cnt; i++)
7253 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
7254 status = ICE_ERR_CFG;
7258 /* Find field vectors that include all specified protocol types */
7259 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
/* prot_ids is a temporary scratch array; always freed before return. */
7262 ice_free(hw, prot_ids);
7267 * ice_tun_type_match_word - determine if tun type needs a match mask
7268 * @tun_type: tunnel type
7269 * @mask: mask to be used for the tunnel
/* Return true when the given tunnel type requires matching the packet
 * metadata flags word, writing the mask to use through *mask.
 * NOTE(review): the 'return true/false' and default-case lines are not
 * visible in this excerpt — verify against the full source.
 */
7271 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
/* These tunnel/QinQ types match the full tunnel flag mask. */
7274 case ICE_SW_TUN_VXLAN_GPE:
7275 case ICE_SW_TUN_GENEVE:
7276 case ICE_SW_TUN_VXLAN:
7277 case ICE_SW_TUN_NVGRE:
7278 case ICE_SW_TUN_UDP:
7279 case ICE_ALL_TUNNELS:
7280 case ICE_SW_TUN_AND_NON_TUN_QINQ:
7281 case ICE_NON_TUN_QINQ:
7282 case ICE_SW_TUN_PPPOE_QINQ:
7283 case ICE_SW_TUN_PPPOE_PAY_QINQ:
7284 case ICE_SW_TUN_PPPOE_IPV4_QINQ:
7285 case ICE_SW_TUN_PPPOE_IPV6_QINQ:
7286 *mask = ICE_TUN_FLAG_MASK;
/* VLAN-tagged tunnel variants must not match on the VLAN flag bits. */
7289 case ICE_SW_TUN_GENEVE_VLAN:
7290 case ICE_SW_TUN_VXLAN_VLAN:
7291 *mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
7301 * ice_add_special_words - Add words that are not protocols, such as metadata
7302 * @rinfo: other information regarding the rule e.g. priority and action info
7303 * @lkup_exts: lookup word structure
/* NOTE(review): the success return at the end of this function is not
 * visible in this excerpt — verify against the full source.
 */
7305 static enum ice_status
7306 ice_add_special_words(struct ice_adv_rule_info *rinfo,
7307 struct ice_prot_lkup_ext *lkup_exts)
7311 /* If this is a tunneled packet, then add recipe index to match the
7312 * tunnel bit in the packet metadata flags.
7314 if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
/* Append one metadata word (protocol ICE_META_DATA_ID_HW at the
 * tunnel-flag offset) if there is room in the lookup structure.
 */
7315 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
7316 u8 word = lkup_exts->n_val_words++;
7318 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
7319 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
7320 lkup_exts->field_mask[word] = mask;
/* No free lookup words left for the metadata match. */
7322 return ICE_ERR_MAX_LIMIT;
7329 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
7330 * @hw: pointer to hardware structure
7331 * @rinfo: other information regarding the rule e.g. priority and action info
7332 * @bm: pointer to memory for returning the bitmap of field vectors
/* Translate the rule's tunnel type into a bitmap of candidate field
 * vectors (profiles): either by selecting a profile-type class that is
 * resolved at the end via ice_get_sw_fv_bitmap(), or by setting explicit
 * profile ID bits directly in bm.
 * NOTE(review): the return-type line and the 'return'/'break' statements
 * between cases are not visible in this excerpt — verify against the
 * full source.
 */
7335 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
7338 enum ice_prof_type prof_type;
7340 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
7342 switch (rinfo->tun_type) {
7344 case ICE_NON_TUN_QINQ:
7345 prof_type = ICE_PROF_NON_TUN;
7347 case ICE_ALL_TUNNELS:
7348 prof_type = ICE_PROF_TUN_ALL;
7350 case ICE_SW_TUN_VXLAN_GPE:
7351 case ICE_SW_TUN_GENEVE:
7352 case ICE_SW_TUN_GENEVE_VLAN:
7353 case ICE_SW_TUN_VXLAN:
7354 case ICE_SW_TUN_VXLAN_VLAN:
7355 case ICE_SW_TUN_UDP:
7356 case ICE_SW_TUN_GTP:
7357 prof_type = ICE_PROF_TUN_UDP;
7359 case ICE_SW_TUN_NVGRE:
7360 prof_type = ICE_PROF_TUN_GRE;
7362 case ICE_SW_TUN_PPPOE:
7363 case ICE_SW_TUN_PPPOE_QINQ:
7364 prof_type = ICE_PROF_TUN_PPPOE;
/* From here on, cases set explicit profile ID bits instead of a
 * profile-type class.
 */
7366 case ICE_SW_TUN_PPPOE_PAY:
7367 case ICE_SW_TUN_PPPOE_PAY_QINQ:
7368 ice_set_bit(ICE_PROFID_PPPOE_PAY, bm);
7370 case ICE_SW_TUN_PPPOE_IPV4:
7371 case ICE_SW_TUN_PPPOE_IPV4_QINQ:
7372 ice_set_bit(ICE_PROFID_PPPOE_IPV4_OTHER, bm);
7373 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
7374 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
7376 case ICE_SW_TUN_PPPOE_IPV4_TCP:
7377 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
7379 case ICE_SW_TUN_PPPOE_IPV4_UDP:
7380 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
7382 case ICE_SW_TUN_PPPOE_IPV6:
7383 case ICE_SW_TUN_PPPOE_IPV6_QINQ:
7384 ice_set_bit(ICE_PROFID_PPPOE_IPV6_OTHER, bm);
7385 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
7386 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
7388 case ICE_SW_TUN_PPPOE_IPV6_TCP:
7389 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
7391 case ICE_SW_TUN_PPPOE_IPV6_UDP:
7392 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
7394 case ICE_SW_TUN_PROFID_IPV6_ESP:
7395 case ICE_SW_TUN_IPV6_ESP:
7396 ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
7398 case ICE_SW_TUN_PROFID_IPV6_AH:
7399 case ICE_SW_TUN_IPV6_AH:
7400 ice_set_bit(ICE_PROFID_IPV6_AH, bm);
7402 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
7403 case ICE_SW_TUN_IPV6_L2TPV3:
7404 ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
7406 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
7407 case ICE_SW_TUN_IPV6_NAT_T:
7408 ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
7410 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
7411 ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
7413 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
7414 ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
7416 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
7417 ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
7419 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
7420 ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
7422 case ICE_SW_TUN_IPV4_NAT_T:
7423 ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
7425 case ICE_SW_TUN_IPV4_L2TPV3:
7426 ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
7428 case ICE_SW_TUN_IPV4_ESP:
7429 ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
7431 case ICE_SW_TUN_IPV4_AH:
7432 ice_set_bit(ICE_PROFID_IPV4_AH, bm);
7434 case ICE_SW_IPV4_TCP:
7435 ice_set_bit(ICE_PROFID_IPV4_TCP, bm);
7437 case ICE_SW_IPV4_UDP:
7438 ice_set_bit(ICE_PROFID_IPV4_UDP, bm);
7440 case ICE_SW_IPV6_TCP:
7441 ice_set_bit(ICE_PROFID_IPV6_TCP, bm);
7443 case ICE_SW_IPV6_UDP:
7444 ice_set_bit(ICE_PROFID_IPV6_UDP, bm);
/* GTPU cases select both the extension-header (EH) and non-EH
 * profile variants for each inner L4 type.
 */
7446 case ICE_SW_TUN_IPV4_GTPU_IPV4:
7447 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, bm);
7448 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_OTHER, bm);
7449 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
7450 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
7451 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
7452 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
7454 case ICE_SW_TUN_IPV6_GTPU_IPV4:
7455 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, bm);
7456 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_OTHER, bm);
7457 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
7458 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
7459 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
7460 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
7462 case ICE_SW_TUN_IPV4_GTPU_IPV6:
7463 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, bm);
7464 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_OTHER, bm);
7465 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
7466 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
7467 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
7468 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
7470 case ICE_SW_TUN_IPV6_GTPU_IPV6:
7471 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, bm);
7472 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_OTHER, bm);
7473 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
7474 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
7475 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
7476 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
/* Default/fallthrough bucket: consider every profile. */
7478 case ICE_SW_TUN_AND_NON_TUN:
7479 case ICE_SW_TUN_AND_NON_TUN_QINQ:
7481 prof_type = ICE_PROF_ALL;
/* Resolve the selected profile-type class into profile ID bits. */
7485 ice_get_sw_fv_bitmap(hw, prof_type, bm);
7489 * ice_is_prof_rule - determine if rule type is a profile rule
7490 * @type: the rule type
7492 * if the rule type is a profile rule, that means that there no field value
7493 * match required, in this case just a profile hit is required.
/* NOTE(review): the shared 'return true' for the listed cases and the
 * default 'return false' are not visible in this excerpt — verify
 * against the full source.
 */
7495 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
/* Profile-ID rule types: matching is by profile hit only, with no
 * field-value lookups required.
 */
7498 case ICE_SW_TUN_PROFID_IPV6_ESP:
7499 case ICE_SW_TUN_PROFID_IPV6_AH:
7500 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
7501 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
7502 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
7503 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
7504 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
7505 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
7515 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
7516 * @hw: pointer to hardware structure
7517 * @lkups: lookup elements or match criteria for the advanced recipe, one
7518 * structure per protocol header
7519 * @lkups_cnt: number of protocols
7520 * @rinfo: other information regarding the rule e.g. priority and action info
7521 * @rid: return the recipe ID of the recipe created
/* Top-level entry for creating (or finding) an advanced recipe: validate
 * the lookups, collect compatible field vectors, add special metadata
 * words, group words into recipes, resolve field-vector indices, reuse an
 * existing matching recipe if one exists, otherwise program a new one and
 * associate it with all relevant profiles.
 * NOTE(review): this excerpt omits several interior lines (goto labels,
 * status checks, list cleanup of fv entries, closing braces) — verify
 * line-level details against the full source.
 */
7523 static enum ice_status
7524 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7525 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
7527 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
7528 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7529 struct ice_prot_lkup_ext *lkup_exts;
7530 struct ice_recp_grp_entry *r_entry;
7531 struct ice_sw_fv_list_entry *fvit;
7532 struct ice_recp_grp_entry *r_tmp;
7533 struct ice_sw_fv_list_entry *tmp;
7534 enum ice_status status = ICE_SUCCESS;
7535 struct ice_sw_recipe *rm;
/* Profile rules need no lookups; anything else must supply at least
 * one.
 */
7538 if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
7539 return ICE_ERR_PARAM;
7541 lkup_exts = (struct ice_prot_lkup_ext *)
7542 ice_malloc(hw, sizeof(*lkup_exts))
7544 return ICE_ERR_NO_MEMORY;
7546 /* Determine the number of words to be matched and if it exceeds a
7547 * recipe's restrictions
7549 for (i = 0; i < lkups_cnt; i++) {
7552 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
7553 status = ICE_ERR_CFG;
7554 goto err_free_lkup_exts;
7557 count = ice_fill_valid_words(&lkups[i], lkup_exts);
7559 status = ICE_ERR_CFG;
7560 goto err_free_lkup_exts;
7564 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
7566 status = ICE_ERR_NO_MEMORY;
7567 goto err_free_lkup_exts;
7570 /* Get field vectors that contain fields extracted from all the protocol
7571 * headers being programmed.
7573 INIT_LIST_HEAD(&rm->fv_list);
7574 INIT_LIST_HEAD(&rm->rg_list);
7576 /* Get bitmap of field vectors (profiles) that are compatible with the
7577 * rule request; only these will be searched in the subsequent call to
7580 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
7582 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
7586 /* Create any special protocol/offset pairs, such as looking at tunnel
7587 * bits by extracting metadata
7589 status = ice_add_special_words(rinfo, lkup_exts);
7591 goto err_free_lkup_exts;
7593 /* Group match words into recipes using preferred recipe grouping
7596 status = ice_create_recipe_group(hw, rm, lkup_exts);
7600 /* set the recipe priority if specified */
7601 rm->priority = (u8)rinfo->priority;
7603 /* Find offsets from the field vector. Pick the first one for all the
7606 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
7610 /* An empty FV list means to use all the profiles returned in the
7613 if (LIST_EMPTY(&rm->fv_list)) {
7616 ice_for_each_set_bit(j, fv_bitmap, ICE_MAX_NUM_PROFILES) {
7617 struct ice_sw_fv_list_entry *fvl;
7619 fvl = (struct ice_sw_fv_list_entry *)
7620 ice_malloc(hw, sizeof(*fvl));
7624 fvl->profile_id = j;
7625 LIST_ADD(&fvl->list_entry, &rm->fv_list);
7629 /* get bitmap of all profiles the recipe will be associated with */
7630 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7631 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7633 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
7634 ice_set_bit((u16)fvit->profile_id, profiles);
7637 /* Look for a recipe which matches our requested fv / mask list */
7638 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
7639 if (*rid < ICE_MAX_NUM_RECIPES)
7640 /* Success if found a recipe that match the existing criteria */
7643 rm->tun_type = rinfo->tun_type;
7644 /* Recipe we need does not exist, add a recipe */
7645 status = ice_add_sw_recipe(hw, rm, profiles);
7649 /* Associate all the recipes created with all the profiles in the
7650 * common field vector.
7652 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7654 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Merge our new recipe bits into the profile's existing
 * recipe-association bitmap rather than overwriting it.
 */
7657 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
7658 (u8 *)r_bitmap, NULL);
7662 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
7663 ICE_MAX_NUM_RECIPES);
7664 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7668 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
7671 ice_release_change_lock(hw);
7676 /* Update profile to recipe bitmap array */
7677 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
7678 ICE_MAX_NUM_RECIPES);
7680 /* Update recipe to profile bitmap array */
7681 ice_for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
7682 ice_set_bit((u16)fvit->profile_id,
7683 recipe_to_profile[j]);
7686 *rid = rm->root_rid;
/* Cache the lookup extraction info so ice_find_recp() can match this
 * recipe on later requests.
 */
7687 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
7688 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
/* Cleanup: free the temporary recipe-group and fv list entries. */
7690 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
7691 ice_recp_grp_entry, l_entry) {
7692 LIST_DEL(&r_entry->l_entry);
7693 ice_free(hw, r_entry);
7696 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
7698 LIST_DEL(&fvit->list_entry);
7703 ice_free(hw, rm->root_buf);
7708 ice_free(hw, lkup_exts);
7714 * ice_find_dummy_packet - find dummy packet by tunnel type
7716 * @lkups: lookup elements or match criteria for the advanced recipe, one
7717 * structure per protocol header
7718 * @lkups_cnt: number of protocols
7719 * @tun_type: tunnel type from the match criteria
7720 * @pkt: dummy packet to fill according to filter match criteria
7721 * @pkt_len: packet length of dummy packet
7722 * @offsets: pointer to receive the pointer to the offsets for the packet
7725 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7726 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
7728 const struct ice_dummy_pkt_offsets **offsets)
7730 bool tcp = false, udp = false, ipv6 = false, vlan = false;
7734 for (i = 0; i < lkups_cnt; i++) {
7735 if (lkups[i].type == ICE_UDP_ILOS)
7737 else if (lkups[i].type == ICE_TCP_IL)
7739 else if (lkups[i].type == ICE_IPV6_OFOS)
7741 else if (lkups[i].type == ICE_VLAN_OFOS)
7743 else if (lkups[i].type == ICE_IPV4_OFOS &&
7744 lkups[i].h_u.ipv4_hdr.protocol ==
7745 ICE_IPV4_NVGRE_PROTO_ID &&
7746 lkups[i].m_u.ipv4_hdr.protocol ==
7749 else if (lkups[i].type == ICE_PPPOE &&
7750 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
7751 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
7752 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
7755 else if (lkups[i].type == ICE_ETYPE_OL &&
7756 lkups[i].h_u.ethertype.ethtype_id ==
7757 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
7758 lkups[i].m_u.ethertype.ethtype_id ==
7761 else if (lkups[i].type == ICE_IPV4_IL &&
7762 lkups[i].h_u.ipv4_hdr.protocol ==
7764 lkups[i].m_u.ipv4_hdr.protocol ==
7769 if ((tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7770 tun_type == ICE_NON_TUN_QINQ) && ipv6) {
7771 *pkt = dummy_qinq_ipv6_pkt;
7772 *pkt_len = sizeof(dummy_qinq_ipv6_pkt);
7773 *offsets = dummy_qinq_ipv6_packet_offsets;
7775 } else if (tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7776 tun_type == ICE_NON_TUN_QINQ) {
7777 *pkt = dummy_qinq_ipv4_pkt;
7778 *pkt_len = sizeof(dummy_qinq_ipv4_pkt);
7779 *offsets = dummy_qinq_ipv4_packet_offsets;
7783 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_QINQ) {
7784 *pkt = dummy_qinq_pppoe_ipv6_packet;
7785 *pkt_len = sizeof(dummy_qinq_pppoe_ipv6_packet);
7786 *offsets = dummy_qinq_pppoe_packet_ipv6_offsets;
7788 } else if (tun_type == ICE_SW_TUN_PPPOE_IPV4_QINQ) {
7789 *pkt = dummy_qinq_pppoe_ipv4_pkt;
7790 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7791 *offsets = dummy_qinq_pppoe_ipv4_packet_offsets;
7793 } else if (tun_type == ICE_SW_TUN_PPPOE_QINQ ||
7794 tun_type == ICE_SW_TUN_PPPOE_PAY_QINQ) {
7795 *pkt = dummy_qinq_pppoe_ipv4_pkt;
7796 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7797 *offsets = dummy_qinq_pppoe_packet_offsets;
7801 if (tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY) {
7802 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7803 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7804 *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
7806 } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
7807 *pkt = dummy_ipv6_gtpu_ipv6_packet;
7808 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
7809 *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
7811 } else if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4) {
7812 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7813 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7814 *offsets = dummy_ipv4_gtpu_ipv4_packet_offsets;
7816 } else if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6) {
7817 *pkt = dummy_ipv4_gtpu_ipv6_packet;
7818 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_packet);
7819 *offsets = dummy_ipv4_gtpu_ipv6_packet_offsets;
7821 } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4) {
7822 *pkt = dummy_ipv6_gtpu_ipv4_packet;
7823 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_packet);
7824 *offsets = dummy_ipv6_gtpu_ipv4_packet_offsets;
7826 } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6) {
7827 *pkt = dummy_ipv6_gtpu_ipv6_packet;
7828 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
7829 *offsets = dummy_ipv6_gtpu_ipv6_packet_offsets;
7833 if (tun_type == ICE_SW_TUN_IPV4_ESP) {
7834 *pkt = dummy_ipv4_esp_pkt;
7835 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
7836 *offsets = dummy_ipv4_esp_packet_offsets;
7840 if (tun_type == ICE_SW_TUN_IPV6_ESP) {
7841 *pkt = dummy_ipv6_esp_pkt;
7842 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
7843 *offsets = dummy_ipv6_esp_packet_offsets;
7847 if (tun_type == ICE_SW_TUN_IPV4_AH) {
7848 *pkt = dummy_ipv4_ah_pkt;
7849 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
7850 *offsets = dummy_ipv4_ah_packet_offsets;
7854 if (tun_type == ICE_SW_TUN_IPV6_AH) {
7855 *pkt = dummy_ipv6_ah_pkt;
7856 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
7857 *offsets = dummy_ipv6_ah_packet_offsets;
7861 if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
7862 *pkt = dummy_ipv4_nat_pkt;
7863 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
7864 *offsets = dummy_ipv4_nat_packet_offsets;
7868 if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
7869 *pkt = dummy_ipv6_nat_pkt;
7870 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
7871 *offsets = dummy_ipv6_nat_packet_offsets;
7875 if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
7876 *pkt = dummy_ipv4_l2tpv3_pkt;
7877 *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
7878 *offsets = dummy_ipv4_l2tpv3_packet_offsets;
7882 if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
7883 *pkt = dummy_ipv6_l2tpv3_pkt;
7884 *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
7885 *offsets = dummy_ipv6_l2tpv3_packet_offsets;
7889 if (tun_type == ICE_SW_TUN_GTP) {
7890 *pkt = dummy_udp_gtp_packet;
7891 *pkt_len = sizeof(dummy_udp_gtp_packet);
7892 *offsets = dummy_udp_gtp_packet_offsets;
7896 if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
7897 *pkt = dummy_pppoe_ipv6_packet;
7898 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
7899 *offsets = dummy_pppoe_packet_offsets;
7901 } else if (tun_type == ICE_SW_TUN_PPPOE ||
7902 tun_type == ICE_SW_TUN_PPPOE_PAY) {
7903 *pkt = dummy_pppoe_ipv4_packet;
7904 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
7905 *offsets = dummy_pppoe_packet_offsets;
7909 if (tun_type == ICE_SW_TUN_PPPOE_IPV4) {
7910 *pkt = dummy_pppoe_ipv4_packet;
7911 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
7912 *offsets = dummy_pppoe_packet_ipv4_offsets;
7916 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_TCP) {
7917 *pkt = dummy_pppoe_ipv4_tcp_packet;
7918 *pkt_len = sizeof(dummy_pppoe_ipv4_tcp_packet);
7919 *offsets = dummy_pppoe_ipv4_tcp_packet_offsets;
7923 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_UDP) {
7924 *pkt = dummy_pppoe_ipv4_udp_packet;
7925 *pkt_len = sizeof(dummy_pppoe_ipv4_udp_packet);
7926 *offsets = dummy_pppoe_ipv4_udp_packet_offsets;
7930 if (tun_type == ICE_SW_TUN_PPPOE_IPV6) {
7931 *pkt = dummy_pppoe_ipv6_packet;
7932 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
7933 *offsets = dummy_pppoe_packet_ipv6_offsets;
7937 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_TCP) {
7938 *pkt = dummy_pppoe_ipv6_tcp_packet;
7939 *pkt_len = sizeof(dummy_pppoe_ipv6_tcp_packet);
7940 *offsets = dummy_pppoe_packet_ipv6_tcp_offsets;
7944 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_UDP) {
7945 *pkt = dummy_pppoe_ipv6_udp_packet;
7946 *pkt_len = sizeof(dummy_pppoe_ipv6_udp_packet);
7947 *offsets = dummy_pppoe_packet_ipv6_udp_offsets;
7951 if (tun_type == ICE_SW_IPV4_TCP) {
7952 *pkt = dummy_tcp_packet;
7953 *pkt_len = sizeof(dummy_tcp_packet);
7954 *offsets = dummy_tcp_packet_offsets;
7958 if (tun_type == ICE_SW_IPV4_UDP) {
7959 *pkt = dummy_udp_packet;
7960 *pkt_len = sizeof(dummy_udp_packet);
7961 *offsets = dummy_udp_packet_offsets;
7965 if (tun_type == ICE_SW_IPV6_TCP) {
7966 *pkt = dummy_tcp_ipv6_packet;
7967 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
7968 *offsets = dummy_tcp_ipv6_packet_offsets;
7972 if (tun_type == ICE_SW_IPV6_UDP) {
7973 *pkt = dummy_udp_ipv6_packet;
7974 *pkt_len = sizeof(dummy_udp_ipv6_packet);
7975 *offsets = dummy_udp_ipv6_packet_offsets;
7979 if (tun_type == ICE_ALL_TUNNELS) {
7980 *pkt = dummy_gre_udp_packet;
7981 *pkt_len = sizeof(dummy_gre_udp_packet);
7982 *offsets = dummy_gre_udp_packet_offsets;
7986 if (tun_type == ICE_SW_TUN_NVGRE || gre) {
7988 *pkt = dummy_gre_tcp_packet;
7989 *pkt_len = sizeof(dummy_gre_tcp_packet);
7990 *offsets = dummy_gre_tcp_packet_offsets;
7994 *pkt = dummy_gre_udp_packet;
7995 *pkt_len = sizeof(dummy_gre_udp_packet);
7996 *offsets = dummy_gre_udp_packet_offsets;
8000 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
8001 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
8002 tun_type == ICE_SW_TUN_GENEVE_VLAN ||
8003 tun_type == ICE_SW_TUN_VXLAN_VLAN) {
8005 *pkt = dummy_udp_tun_tcp_packet;
8006 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
8007 *offsets = dummy_udp_tun_tcp_packet_offsets;
8011 *pkt = dummy_udp_tun_udp_packet;
8012 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
8013 *offsets = dummy_udp_tun_udp_packet_offsets;
8019 *pkt = dummy_vlan_udp_packet;
8020 *pkt_len = sizeof(dummy_vlan_udp_packet);
8021 *offsets = dummy_vlan_udp_packet_offsets;
8024 *pkt = dummy_udp_packet;
8025 *pkt_len = sizeof(dummy_udp_packet);
8026 *offsets = dummy_udp_packet_offsets;
8028 } else if (udp && ipv6) {
8030 *pkt = dummy_vlan_udp_ipv6_packet;
8031 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
8032 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
8035 *pkt = dummy_udp_ipv6_packet;
8036 *pkt_len = sizeof(dummy_udp_ipv6_packet);
8037 *offsets = dummy_udp_ipv6_packet_offsets;
8039 } else if ((tcp && ipv6) || ipv6) {
8041 *pkt = dummy_vlan_tcp_ipv6_packet;
8042 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
8043 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
8046 *pkt = dummy_tcp_ipv6_packet;
8047 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
8048 *offsets = dummy_tcp_ipv6_packet_offsets;
8053 *pkt = dummy_vlan_tcp_packet;
8054 *pkt_len = sizeof(dummy_vlan_tcp_packet);
8055 *offsets = dummy_vlan_tcp_packet_offsets;
8057 *pkt = dummy_tcp_packet;
8058 *pkt_len = sizeof(dummy_tcp_packet);
8059 *offsets = dummy_tcp_packet_offsets;
8064 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
8066 * @lkups: lookup elements or match criteria for the advanced recipe, one
8067 * structure per protocol header
8068 * @lkups_cnt: number of protocols
8069 * @s_rule: stores rule information from the match criteria
8070 * @dummy_pkt: dummy packet to fill according to filter match criteria
8071 * @pkt_len: packet length of dummy packet
8072 * @offsets: offset info for the dummy packet
 *
 * Copies @dummy_pkt into the rule's header buffer, then for each lookup
 * overwrites only the 16-bit words selected by that lookup's mask with the
 * caller-supplied header values. Returns ICE_ERR_PARAM when a lookup's
 * protocol cannot be located in @offsets or its type is not recognized.
8074 static enum ice_status
8075 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
8076 struct ice_aqc_sw_rules_elem *s_rule,
8077 const u8 *dummy_pkt, u16 pkt_len,
8078 const struct ice_dummy_pkt_offsets *offsets)
8083 /* Start with a packet with a pre-defined/dummy content. Then, fill
8084 * in the header values to be looked up or matched.
8086 pkt = s_rule->pdata.lkup_tx_rx.hdr;
8088 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
8090 for (i = 0; i < lkups_cnt; i++) {
8091 enum ice_protocol_type type;
8092 u16 offset = 0, len = 0, j;
8095 /* find the start of this layer; it should be found since this
8096 * was already checked when search for the dummy packet
8098 type = lkups[i].type;
8099 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
8100 if (type == offsets[j].type) {
8101 offset = offsets[j].offset;
8106 /* this should never happen in a correct calling sequence */
8108 return ICE_ERR_PARAM;
/* number of bytes to patch depends on the protocol header's struct size */
8110 switch (lkups[i].type) {
8113 len = sizeof(struct ice_ether_hdr);
8116 len = sizeof(struct ice_ethtype_hdr);
8120 len = sizeof(struct ice_vlan_hdr);
8124 len = sizeof(struct ice_ipv4_hdr);
8128 len = sizeof(struct ice_ipv6_hdr);
8133 len = sizeof(struct ice_l4_hdr);
8136 len = sizeof(struct ice_sctp_hdr);
8139 len = sizeof(struct ice_nvgre);
8144 len = sizeof(struct ice_udp_tnl_hdr);
8148 case ICE_GTP_NO_PAY:
8149 len = sizeof(struct ice_udp_gtp_hdr);
8152 len = sizeof(struct ice_pppoe_hdr);
8155 len = sizeof(struct ice_esp_hdr);
8158 len = sizeof(struct ice_nat_t_hdr);
8161 len = sizeof(struct ice_ah_hdr);
8164 len = sizeof(struct ice_l2tpv3_sess_hdr);
8167 return ICE_ERR_PARAM;
8170 /* the length should be a word multiple */
8171 if (len % ICE_BYTES_PER_WORD)
8174 /* We have the offset to the header start, the length, the
8175 * caller's header values and mask. Use this information to
8176 * copy the data into the dummy packet appropriately based on
8177 * the mask. Note that we need to only write the bits as
8178 * indicated by the mask to make sure we don't improperly write
8179 * over any significant packet data.
8181 for (j = 0; j < len / sizeof(u16); j++)
8182 if (((u16 *)&lkups[i].m_u)[j])
8183 ((u16 *)(pkt + offset))[j] =
8184 (((u16 *)(pkt + offset))[j] &
8185 ~((u16 *)&lkups[i].m_u)[j]) |
8186 (((u16 *)&lkups[i].h_u)[j] &
8187 ((u16 *)&lkups[i].m_u)[j]);
/* record how many packet-header bytes follow the rule descriptor */
8190 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
8196 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
8197 * @hw: pointer to the hardware structure
8198 * @tun_type: tunnel type
8199 * @pkt: dummy packet to fill in
8200 * @offsets: offset info for the dummy packet
 *
 * Looks up the UDP tunnel port currently open for @tun_type (VXLAN family
 * or GENEVE) and writes it into the outer UDP destination port of @pkt.
8202 static enum ice_status
8203 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
8204 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
/* map the tunnel class to the UDP port that was opened for it */
8209 case ICE_SW_TUN_AND_NON_TUN:
8210 case ICE_SW_TUN_VXLAN_GPE:
8211 case ICE_SW_TUN_VXLAN:
8212 case ICE_SW_TUN_VXLAN_VLAN:
8213 case ICE_SW_TUN_UDP:
8214 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
8218 case ICE_SW_TUN_GENEVE:
8219 case ICE_SW_TUN_GENEVE_VLAN:
8220 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
8225 /* Nothing needs to be done for this tunnel type */
8229 /* Find the outer UDP protocol header and insert the port number */
8230 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
8231 if (offsets[i].type == ICE_UDP_OF) {
8232 struct ice_l4_hdr *hdr;
8235 offset = offsets[i].offset;
8236 hdr = (struct ice_l4_hdr *)&pkt[offset];
8237 hdr->dst_port = CPU_TO_BE16(open_port);
8247 * ice_find_adv_rule_entry - Search a rule entry
8248 * @hw: pointer to the hardware structure
8249 * @lkups: lookup elements or match criteria for the advanced recipe, one
8250 * structure per protocol header
8251 * @lkups_cnt: number of protocols
8252 * @recp_id: recipe ID for which we are finding the rule
8253 * @rinfo: other information regarding the rule e.g. priority and action info
8255 * Helper function to search for a given advance rule entry
8256 * Returns pointer to entry storing the rule if found
8258 static struct ice_adv_fltr_mgmt_list_entry *
8259 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8260 u16 lkups_cnt, u16 recp_id,
8261 struct ice_adv_rule_info *rinfo)
8263 struct ice_adv_fltr_mgmt_list_entry *list_itr;
8264 struct ice_switch_info *sw = hw->switch_info;
/* walk every filter already programmed for this recipe */
8267 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
8268 ice_adv_fltr_mgmt_list_entry, list_entry) {
8269 bool lkups_matched = true;
/* lookup arrays must have the same length and be byte-wise identical */
8271 if (lkups_cnt != list_itr->lkups_cnt)
8273 for (i = 0; i < list_itr->lkups_cnt; i++)
8274 if (memcmp(&list_itr->lkups[i], &lkups[i],
8276 lkups_matched = false;
/* rule attributes (sw_act flag, tunnel type, ...) must also agree */
8279 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
8280 rinfo->tun_type == list_itr->rule_info.tun_type &&
8288 * ice_adv_add_update_vsi_list
8289 * @hw: pointer to the hardware structure
8290 * @m_entry: pointer to current adv filter management list entry
8291 * @cur_fltr: filter information from the bookkeeping entry
8292 * @new_fltr: filter information with the new VSI to be added
8294 * Call AQ command to add or update previously created VSI list with new VSI.
8296 * Helper function to do bookkeeping associated with adding filter information
8297 * The algorithm to do the bookkeeping is described below:
8298 * When a VSI needs to subscribe to a given advanced filter
8299 * if only one VSI has been added till now
8300 * Allocate a new VSI list and add two VSIs
8301 * to this list using switch rule command
8302 * Update the previously created switch rule with the
8303 * newly created VSI list ID
8304 * if a VSI list was previously created
8305 * Add the new VSI to the previously created VSI list set
8306 * using the update switch rule command
8308 static enum ice_status
8309 ice_adv_add_update_vsi_list(struct ice_hw *hw,
8310 struct ice_adv_fltr_mgmt_list_entry *m_entry,
8311 struct ice_adv_rule_info *cur_fltr,
8312 struct ice_adv_rule_info *new_fltr)
8314 enum ice_status status;
8315 u16 vsi_list_id = 0;
/* queue/qgroup/drop actions cannot be widened to a VSI list */
8317 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
8318 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
8319 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
8320 return ICE_ERR_NOT_IMPL;
8322 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
8323 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
8324 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
8325 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
8326 return ICE_ERR_NOT_IMPL;
8328 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
8329 /* Only one entry existed in the mapping and it was not already
8330 * a part of a VSI list. So, create a VSI list with the old and
8333 struct ice_fltr_info tmp_fltr;
8334 u16 vsi_handle_arr[2];
8336 /* A rule already exists with the new VSI being added */
8337 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
8338 new_fltr->sw_act.fwd_id.hw_vsi_id)
8339 return ICE_ERR_ALREADY_EXISTS;
8341 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
8342 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
8343 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
8349 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
8350 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
8351 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
8352 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
8353 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
8354 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
8356 /* Update the previous switch rule of "forward to VSI" to
8359 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
8363 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
8364 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
8365 m_entry->vsi_list_info =
8366 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
8369 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
8371 if (!m_entry->vsi_list_info)
8374 /* A rule already exists with the new VSI being added */
8375 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
8378 /* Update the previously created VSI list set with
8379 * the new VSI ID passed in
8381 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
8383 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
8385 ice_aqc_opc_update_sw_rules,
8387 /* update VSI list mapping info with new VSI ID */
8389 ice_set_bit(vsi_handle,
8390 m_entry->vsi_list_info->vsi_map);
8393 m_entry->vsi_count++;
8398 * ice_add_adv_rule - helper function to create an advanced switch rule
8399 * @hw: pointer to the hardware structure
8400 * @lkups: information on the words that needs to be looked up. All words
8401 * together makes one recipe
8402 * @lkups_cnt: num of entries in the lkups array
8403 * @rinfo: other information related to the rule that needs to be programmed
8404 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
8405 * ignored in case of error.
8407 * This function can program only 1 rule at a time. The lkups is used to
8408 * describe all the words that form the "lookup" portion of the recipe.
8409 * These words can span multiple protocols. Callers to this function need to
8410 * pass in a list of protocol headers with lookup information along and mask
8411 * that determines which words are valid from the given protocol header.
8412 * rinfo describes other information related to this rule such as forwarding
8413 * IDs, priority of this rule, etc.
8416 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8417 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
8418 struct ice_rule_query_data *added_entry)
8420 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
8421 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
8422 const struct ice_dummy_pkt_offsets *pkt_offsets;
8423 struct ice_aqc_sw_rules_elem *s_rule = NULL;
8424 struct LIST_HEAD_TYPE *rule_head;
8425 struct ice_switch_info *sw;
8426 enum ice_status status;
8427 const u8 *pkt = NULL;
8433 /* Initialize profile to result index bitmap */
8434 if (!hw->switch_info->prof_res_bm_init) {
8435 hw->switch_info->prof_res_bm_init = 1;
8436 ice_init_prof_result_bm(hw);
8439 prof_rule = ice_is_prof_rule(rinfo->tun_type);
8440 if (!prof_rule && !lkups_cnt)
8441 return ICE_ERR_PARAM;
8443 /* get # of words we need to match */
/* count the non-zero 16-bit mask words across all lookups */
8445 for (i = 0; i < lkups_cnt; i++) {
8448 ptr = (u16 *)&lkups[i].m_u;
8449 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
8455 if (word_cnt > ICE_MAX_CHAIN_WORDS)
8456 return ICE_ERR_PARAM;
8458 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
8459 return ICE_ERR_PARAM;
8462 /* make sure that we can locate a dummy packet */
8463 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
8466 status = ICE_ERR_PARAM;
8467 goto err_ice_add_adv_rule;
/* only forward-to-VSI/queue/queue-group and drop actions are supported */
8470 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
8471 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
8472 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
8473 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
8476 vsi_handle = rinfo->sw_act.vsi_handle;
8477 if (!ice_is_vsi_valid(hw, vsi_handle))
8478 return ICE_ERR_PARAM;
8480 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
8481 rinfo->sw_act.fwd_id.hw_vsi_id =
8482 ice_get_hw_vsi_num(hw, vsi_handle);
8483 if (rinfo->sw_act.flag & ICE_FLTR_TX)
8484 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
8486 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
8489 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
8491 /* we have to add VSI to VSI_LIST and increment vsi_count.
8492 * Also Update VSI list so that we can change forwarding rule
8493 * if the rule already exists, we will check if it exists with
8494 * same vsi_id, if not then add it to the VSI list if it already
8495 * exists if not then create a VSI list and add the existing VSI
8496 * ID and the new VSI ID to the list
8497 * We will add that VSI to the list
8499 status = ice_adv_add_update_vsi_list(hw, m_entry,
8500 &m_entry->rule_info,
8503 added_entry->rid = rid;
8504 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
8505 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* rule buffer = fixed rule descriptor plus the dummy packet bytes */
8509 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
8510 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
8512 return ICE_ERR_NO_MEMORY;
8513 act |= ICE_SINGLE_ACT_LAN_ENABLE;
/* encode the caller's action into the rule's 'act' word */
8514 switch (rinfo->sw_act.fltr_act) {
8515 case ICE_FWD_TO_VSI:
8516 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
8517 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
8518 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
8521 act |= ICE_SINGLE_ACT_TO_Q;
8522 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
8523 ICE_SINGLE_ACT_Q_INDEX_M;
8525 case ICE_FWD_TO_QGRP:
8526 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
8527 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
8528 act |= ICE_SINGLE_ACT_TO_Q;
8529 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
8530 ICE_SINGLE_ACT_Q_INDEX_M;
8531 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
8532 ICE_SINGLE_ACT_Q_REGION_M;
8534 case ICE_DROP_PACKET:
8535 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
8536 ICE_SINGLE_ACT_VALID_BIT;
8539 status = ICE_ERR_CFG;
8540 goto err_ice_add_adv_rule;
8543 /* set the rule LOOKUP type based on caller specified 'RX'
8544 * instead of hardcoding it to be either LOOKUP_TX/RX
8546 * for 'RX' set the source to be the port number
8547 * for 'TX' set the source to be the source HW VSI number (determined
8551 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
8552 s_rule->pdata.lkup_tx_rx.src =
8553 CPU_TO_LE16(hw->port_info->lport);
8555 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
8556 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
8559 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
8560 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
8562 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
8563 pkt_len, pkt_offsets);
8565 goto err_ice_add_adv_rule;
/* tunneled rules additionally need the open tunnel port patched in */
8567 if (rinfo->tun_type != ICE_NON_TUN &&
8568 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
8569 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
8570 s_rule->pdata.lkup_tx_rx.hdr,
8573 goto err_ice_add_adv_rule;
8576 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
8577 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
8580 goto err_ice_add_adv_rule;
8581 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
8582 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
8584 status = ICE_ERR_NO_MEMORY;
8585 goto err_ice_add_adv_rule;
8588 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
8589 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
8590 ICE_NONDMA_TO_NONDMA);
8591 if (!adv_fltr->lkups && !prof_rule) {
8592 status = ICE_ERR_NO_MEMORY;
8593 goto err_ice_add_adv_rule;
8596 adv_fltr->lkups_cnt = lkups_cnt;
8597 adv_fltr->rule_info = *rinfo;
8598 adv_fltr->rule_info.fltr_rule_id =
8599 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
8600 sw = hw->switch_info;
8601 sw->recp_list[rid].adv_rule = true;
8602 rule_head = &sw->recp_list[rid].filt_rules;
8604 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
8605 adv_fltr->vsi_count = 1;
8607 /* Add rule entry to book keeping list */
8608 LIST_ADD(&adv_fltr->list_entry, rule_head);
8610 added_entry->rid = rid;
8611 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
8612 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
8614 err_ice_add_adv_rule:
8615 if (status && adv_fltr) {
8616 ice_free(hw, adv_fltr->lkups);
8617 ice_free(hw, adv_fltr);
8620 ice_free(hw, s_rule);
8626 * ice_adv_rem_update_vsi_list
8627 * @hw: pointer to the hardware structure
8628 * @vsi_handle: VSI handle of the VSI to remove
8629 * @fm_list: filter management entry for which the VSI list management needs to
 * be updated
 *
 * Removes @vsi_handle from the rule's VSI list; when only one VSI remains,
 * the rule is converted back to a plain forward-to-VSI rule and the now
 * unused VSI list is deleted.
8632 static enum ice_status
8633 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
8634 struct ice_adv_fltr_mgmt_list_entry *fm_list)
8636 struct ice_vsi_list_map_info *vsi_list_info;
8637 enum ice_sw_lkup_type lkup_type;
8638 enum ice_status status;
8641 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
8642 fm_list->vsi_count == 0)
8643 return ICE_ERR_PARAM;
8645 /* A rule with the VSI being removed does not exist */
8646 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
8647 return ICE_ERR_DOES_NOT_EXIST;
8649 lkup_type = ICE_SW_LKUP_LAST;
8650 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
8651 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
8652 ice_aqc_opc_update_sw_rules,
8657 fm_list->vsi_count--;
8658 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
8659 vsi_list_info = fm_list->vsi_list_info;
/* one VSI left: demote the rule to FWD_TO_VSI and drop the VSI list */
8660 if (fm_list->vsi_count == 1) {
8661 struct ice_fltr_info tmp_fltr;
8664 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
8666 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
8667 return ICE_ERR_OUT_OF_RANGE;
8669 /* Make sure VSI list is empty before removing it below */
8670 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
8672 ice_aqc_opc_update_sw_rules,
8677 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
8678 tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
8679 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
8680 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
8681 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
8682 tmp_fltr.fwd_id.hw_vsi_id =
8683 ice_get_hw_vsi_num(hw, rem_vsi_handle);
8684 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
8685 ice_get_hw_vsi_num(hw, rem_vsi_handle);
8686 fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
8688 /* Update the previous switch rule of "MAC forward to VSI" to
8689 * "MAC fwd to VSI list"
8691 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
8693 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
8694 tmp_fltr.fwd_id.hw_vsi_id, status);
8697 fm_list->vsi_list_info->ref_cnt--;
8699 /* Remove the VSI list since it is no longer used */
8700 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
8702 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
8703 vsi_list_id, status);
8707 LIST_DEL(&vsi_list_info->list_entry);
8708 ice_free(hw, vsi_list_info);
8709 fm_list->vsi_list_info = NULL;
8716 * ice_rem_adv_rule - removes existing advanced switch rule
8717 * @hw: pointer to the hardware structure
8718 * @lkups: information on the words that needs to be looked up. All words
8719 * together makes one recipe
8720 * @lkups_cnt: num of entries in the lkups array
8721 * @rinfo: pointer to the rule information for the rule to be removed
8723 * This function can be used to remove 1 rule at a time. The lkups is
8724 * used to describe all the words that forms the "lookup" portion of the
8725 * rule. These words can span multiple protocols. Callers to this function
8726 * need to pass in a list of protocol headers with lookup information along
8727 * and mask that determines which words are valid from the given protocol
8728 * header. rinfo describes other information related to this rule such as
8729 * forwarding IDs, priority of this rule, etc.
8732 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8733 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
8735 struct ice_adv_fltr_mgmt_list_entry *list_elem;
8736 struct ice_prot_lkup_ext lkup_exts;
8737 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
8738 enum ice_status status = ICE_SUCCESS;
8739 bool remove_rule = false;
8740 u16 i, rid, vsi_handle;
/* rebuild the word extraction list so the matching recipe can be found */
8742 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
8743 for (i = 0; i < lkups_cnt; i++) {
8746 if (lkups[i].type >= ICE_PROTOCOL_LAST)
8749 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
8754 /* Create any special protocol/offset pairs, such as looking at tunnel
8755 * bits by extracting metadata
8757 status = ice_add_special_words(rinfo, &lkup_exts);
8761 rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
8762 /* If did not find a recipe that match the existing criteria */
8763 if (rid == ICE_MAX_NUM_RECIPES)
8764 return ICE_ERR_PARAM;
8766 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
8767 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
8768 /* the rule is already removed */
8771 ice_acquire_lock(rule_lock);
8772 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
8774 } else if (list_elem->vsi_count > 1) {
/* several VSIs subscribed: only detach this VSI from the list */
8775 remove_rule = false;
8776 vsi_handle = rinfo->sw_act.vsi_handle;
8777 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
8779 vsi_handle = rinfo->sw_act.vsi_handle;
8780 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
8782 ice_release_lock(rule_lock);
8785 if (list_elem->vsi_count == 0)
8788 ice_release_lock(rule_lock);
8790 struct ice_aqc_sw_rules_elem *s_rule;
/* a zero 'act' and the stored rule index identify the rule to delete */
8793 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
8794 s_rule = (struct ice_aqc_sw_rules_elem *)
8795 ice_malloc(hw, rule_buf_sz);
8797 return ICE_ERR_NO_MEMORY;
8798 s_rule->pdata.lkup_tx_rx.act = 0;
8799 s_rule->pdata.lkup_tx_rx.index =
8800 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
8801 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
8802 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
8804 ice_aqc_opc_remove_sw_rules, NULL);
8805 if (status == ICE_SUCCESS || status == ICE_ERR_DOES_NOT_EXIST) {
8806 struct ice_switch_info *sw = hw->switch_info;
8808 ice_acquire_lock(rule_lock);
8809 LIST_DEL(&list_elem->list_entry);
8810 ice_free(hw, list_elem->lkups);
8811 ice_free(hw, list_elem);
8812 ice_release_lock(rule_lock);
8813 if (LIST_EMPTY(&sw->recp_list[rid].filt_rules))
8814 sw->recp_list[rid].adv_rule = false;
8816 ice_free(hw, s_rule);
8822 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
8823 * @hw: pointer to the hardware structure
8824 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
8826 * This function is used to remove 1 rule at a time. The removal is based on
8827 * the remove_entry parameter. This function will remove rule for a given
8828 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
8831 ice_rem_adv_rule_by_id(struct ice_hw *hw,
8832 struct ice_rule_query_data *remove_entry)
8834 struct ice_adv_fltr_mgmt_list_entry *list_itr;
8835 struct LIST_HEAD_TYPE *list_head;
8836 struct ice_adv_rule_info rinfo;
8837 struct ice_switch_info *sw;
8839 sw = hw->switch_info;
8840 if (!sw->recp_list[remove_entry->rid].recp_created)
8841 return ICE_ERR_PARAM;
8842 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
/* find the bookkeeping entry whose stored rule ID matches, then
 * delegate the actual removal to ice_rem_adv_rule()
 */
8843 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
8845 if (list_itr->rule_info.fltr_rule_id ==
8846 remove_entry->rule_id) {
8847 rinfo = list_itr->rule_info;
8848 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
8849 return ice_rem_adv_rule(hw, list_itr->lkups,
8850 list_itr->lkups_cnt, &rinfo);
8853 /* either list is empty or unable to find rule */
8854 return ICE_ERR_DOES_NOT_EXIST;
8858 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
 * given VSI
8860 * @hw: pointer to the hardware structure
8861 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
8863 * This function is used to remove all the rules for a given VSI and as soon
8864 * as removing a rule fails, it will return immediately with the error code,
8865 * else it will return ICE_SUCCESS
8867 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
8869 struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
8870 struct ice_vsi_list_map_info *map_info;
8871 struct LIST_HEAD_TYPE *list_head;
8872 struct ice_adv_rule_info rinfo;
8873 struct ice_switch_info *sw;
8874 enum ice_status status;
8877 sw = hw->switch_info;
/* scan every created recipe that holds advanced rules */
8878 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
8879 if (!sw->recp_list[rid].recp_created)
8881 if (!sw->recp_list[rid].adv_rule)
8884 list_head = &sw->recp_list[rid].filt_rules;
8885 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp_entry, list_head,
8886 ice_adv_fltr_mgmt_list_entry,
8888 rinfo = list_itr->rule_info;
/* a VSI-list rule applies to this VSI only if its map bit is set */
8890 if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
8891 map_info = list_itr->vsi_list_info;
8895 if (!ice_is_bit_set(map_info->vsi_map,
8898 } else if (rinfo.sw_act.vsi_handle != vsi_handle) {
8902 rinfo.sw_act.vsi_handle = vsi_handle;
8903 status = ice_rem_adv_rule(hw, list_itr->lkups,
8904 list_itr->lkups_cnt, &rinfo);
8914 * ice_replay_fltr - Replay all the filters stored by a specific list head
8915 * @hw: pointer to the hardware structure
8916 * @list_head: list for which filters need to be replayed
8917 * @recp_id: Recipe ID for which rules need to be replayed
8919 static enum ice_status
8920 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
8922 struct ice_fltr_mgmt_list_entry *itr;
8923 enum ice_status status = ICE_SUCCESS;
8924 struct ice_sw_recipe *recp_list;
8925 u8 lport = hw->port_info->lport;
8926 struct LIST_HEAD_TYPE l_head;
8928 if (LIST_EMPTY(list_head))
8931 recp_list = &hw->switch_info->recp_list[recp_id];
8932 /* Move entries from the given list_head to a temporary l_head so that
8933 * they can be replayed. Otherwise when trying to re-add the same
8934 * filter, the function will return already exists
8936 LIST_REPLACE_INIT(list_head, &l_head);
8938 /* Mark the given list_head empty by reinitializing it so filters
8939 * could be added again by *handler
8941 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
8943 struct ice_fltr_list_entry f_entry;
8946 f_entry.fltr_info = itr->fltr_info;
/* single-VSI non-VLAN rule: re-add it directly */
8947 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
8948 status = ice_add_rule_internal(hw, recp_list, lport,
8950 if (status != ICE_SUCCESS)
8955 /* Add a filter per VSI separately */
8956 ice_for_each_set_bit(vsi_handle, itr->vsi_list_info->vsi_map,
8958 if (!ice_is_vsi_valid(hw, vsi_handle))
/* clear the bit so replay re-adds the VSI through the normal path */
8961 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
8962 f_entry.fltr_info.vsi_handle = vsi_handle;
8963 f_entry.fltr_info.fwd_id.hw_vsi_id =
8964 ice_get_hw_vsi_num(hw, vsi_handle);
8965 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
8966 if (recp_id == ICE_SW_LKUP_VLAN)
8967 status = ice_add_vlan_internal(hw, recp_list,
8970 status = ice_add_rule_internal(hw, recp_list,
8973 if (status != ICE_SUCCESS)
8978 /* Clear the filter management list */
8979 ice_rem_sw_rule_info(hw, &l_head);
8984 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
8985 * @hw: pointer to the hardware structure
8987 * NOTE: This function does not clean up partially added filters on error.
8988 * It is up to caller of the function to issue a reset or fail early.
8990 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
8992 struct ice_switch_info *sw = hw->switch_info;
8993 enum ice_status status = ICE_SUCCESS;
/* replay every recipe's filter list; stop at the first failure */
8996 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8997 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
8999 status = ice_replay_fltr(hw, i, head);
9000 if (status != ICE_SUCCESS)
9007 * ice_replay_vsi_fltr - Replay filters for requested VSI
9008 * @hw: pointer to the hardware structure
9009 * @pi: pointer to port information structure
9010 * @sw: pointer to switch info struct for which function replays filters
9011 * @vsi_handle: driver VSI handle
9012 * @recp_id: Recipe ID for which rules need to be replayed
9013 * @list_head: list for which filters need to be replayed
9015 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
9016 * It is required to pass valid VSI handle.
9018 static enum ice_status
9019 ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
9020 struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
9021 struct LIST_HEAD_TYPE *list_head)
9023 struct ice_fltr_mgmt_list_entry *itr;
9024 enum ice_status status = ICE_SUCCESS;
9025 struct ice_sw_recipe *recp_list;
9028 if (LIST_EMPTY(list_head))
9030 recp_list = &sw->recp_list[recp_id];
9031 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
9033 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
9035 struct ice_fltr_list_entry f_entry;
9037 f_entry.fltr_info = itr->fltr_info;
/* single-VSI non-VLAN rule owned by this VSI: re-add it directly */
9038 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
9039 itr->fltr_info.vsi_handle == vsi_handle) {
9040 /* update the src in case it is VSI num */
9041 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
9042 f_entry.fltr_info.src = hw_vsi_id;
9043 status = ice_add_rule_internal(hw, recp_list,
9046 if (status != ICE_SUCCESS)
/* VSI-list rule: only replay if this VSI is a member of the list */
9050 if (!itr->vsi_list_info ||
9051 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
9053 /* Clearing it so that the logic can add it back */
9054 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
9055 f_entry.fltr_info.vsi_handle = vsi_handle;
9056 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
9057 /* update the src in case it is VSI num */
9058 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
9059 f_entry.fltr_info.src = hw_vsi_id;
9060 if (recp_id == ICE_SW_LKUP_VLAN)
9061 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
9063 status = ice_add_rule_internal(hw, recp_list,
9066 if (status != ICE_SUCCESS)
9074 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
9075 * @hw: pointer to the hardware structure
9076 * @vsi_handle: driver VSI handle
9077 * @list_head: list for which filters need to be replayed
9079 * Replay the advanced rule for the given VSI.
9081 static enum ice_status
9082 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
9083 struct LIST_HEAD_TYPE *list_head)
9085 struct ice_rule_query_data added_entry = { 0 };
9086 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
9087 enum ice_status status = ICE_SUCCESS;
9089 if (LIST_EMPTY(list_head))
/* re-add each stored advanced rule that targets this VSI */
9091 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
9093 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
9094 u16 lk_cnt = adv_fltr->lkups_cnt;
9096 if (vsi_handle != rinfo->sw_act.vsi_handle)
9098 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
9107 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
9108 * @hw: pointer to the hardware structure
9109 * @pi: pointer to port information structure
9110 * @vsi_handle: driver VSI handle
9112 * Replays filters for requested VSI via vsi_handle.
9115 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
9118 struct ice_switch_info *sw = hw->switch_info;
9119 enum ice_status status;
9122 /* Update the recipes that were created */
9123 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9124 struct LIST_HEAD_TYPE *head;
9126 head = &sw->recp_list[i].filt_replay_rules;
9127 if (!sw->recp_list[i].adv_rule)
9128 status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
9131 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
9132 if (status != ICE_SUCCESS)
9140 * ice_rm_all_sw_replay_rule - helper function to delete filter replay rules
9141 * @hw: pointer to the HW struct
9142 * @sw: pointer to switch info struct for which function removes filters
9144 * Deletes the filter replay rules for given switch
9146 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
9153 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9154 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
9155 struct LIST_HEAD_TYPE *l_head;
9157 l_head = &sw->recp_list[i].filt_replay_rules;
9158 if (!sw->recp_list[i].adv_rule)
9159 ice_rem_sw_rule_info(hw, l_head);
9161 ice_rem_adv_rule_info(hw, l_head);
9167 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
9168 * @hw: pointer to the HW struct
9170 * Deletes the filter replay rules.
9172 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
9174 ice_rm_sw_replay_rule_info(hw, hw->switch_info);