1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2021 Intel Corporation
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
12 #define ICE_MAX_VLAN_ID 0xFFF
13 #define ICE_IPV6_ETHER_ID 0x86DD
14 #define ICE_IPV4_NVGRE_PROTO_ID 0x002F
15 #define ICE_PPP_IPV6_PROTO_ID 0x0057
16 #define ICE_TCP_PROTO_ID 0x06
17 #define ICE_GTPU_PROFILE 24
18 #define ICE_ETH_P_8021Q 0x8100
19 #define ICE_MPLS_ETHER_ID 0x8847
21 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
22 * struct to configure any switch filter rules.
23 * {DA (6 bytes), SA(6 bytes),
24 * Ether type (2 bytes for header without VLAN tag) OR
25 * VLAN tag (4 bytes for header with VLAN tag) }
27 * Word on Hardcoded values
28 * byte 0 = 0x2: to identify it as locally administered DA MAC
29 * byte 6 = 0x2: to identify it as locally administered SA MAC
30 * byte 12 = 0x81 & byte 13 = 0x00:
31 * In case of VLAN filter first two bytes defines ether type (0x8100)
32 * and remaining two bytes are placeholder for programming a given VLAN ID
33 * In case of Ether type filter it is treated as header without VLAN tag
34 * and byte 12 and 13 is used to program a given Ether type instead
36 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
40 struct ice_dummy_pkt_offsets {
41 enum ice_protocol_type type;
42 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
45 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
48 { ICE_IPV4_OFOS, 14 },
53 { ICE_PROTOCOL_LAST, 0 },
56 static const u8 dummy_gre_tcp_packet[] = {
57 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
58 0x00, 0x00, 0x00, 0x00,
59 0x00, 0x00, 0x00, 0x00,
61 0x08, 0x00, /* ICE_ETYPE_OL 12 */
63 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
64 0x00, 0x00, 0x00, 0x00,
65 0x00, 0x2F, 0x00, 0x00,
66 0x00, 0x00, 0x00, 0x00,
67 0x00, 0x00, 0x00, 0x00,
69 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
70 0x00, 0x00, 0x00, 0x00,
72 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
73 0x00, 0x00, 0x00, 0x00,
74 0x00, 0x00, 0x00, 0x00,
77 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
78 0x00, 0x00, 0x00, 0x00,
79 0x00, 0x06, 0x00, 0x00,
80 0x00, 0x00, 0x00, 0x00,
81 0x00, 0x00, 0x00, 0x00,
83 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
84 0x00, 0x00, 0x00, 0x00,
85 0x00, 0x00, 0x00, 0x00,
86 0x50, 0x02, 0x20, 0x00,
87 0x00, 0x00, 0x00, 0x00
90 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
93 { ICE_IPV4_OFOS, 14 },
98 { ICE_PROTOCOL_LAST, 0 },
101 static const u8 dummy_gre_udp_packet[] = {
102 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
103 0x00, 0x00, 0x00, 0x00,
104 0x00, 0x00, 0x00, 0x00,
106 0x08, 0x00, /* ICE_ETYPE_OL 12 */
108 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
109 0x00, 0x00, 0x00, 0x00,
110 0x00, 0x2F, 0x00, 0x00,
111 0x00, 0x00, 0x00, 0x00,
112 0x00, 0x00, 0x00, 0x00,
114 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
115 0x00, 0x00, 0x00, 0x00,
117 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
118 0x00, 0x00, 0x00, 0x00,
119 0x00, 0x00, 0x00, 0x00,
122 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
123 0x00, 0x00, 0x00, 0x00,
124 0x00, 0x11, 0x00, 0x00,
125 0x00, 0x00, 0x00, 0x00,
126 0x00, 0x00, 0x00, 0x00,
128 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
129 0x00, 0x08, 0x00, 0x00,
132 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
134 { ICE_ETYPE_OL, 12 },
135 { ICE_IPV4_OFOS, 14 },
139 { ICE_VXLAN_GPE, 42 },
143 { ICE_PROTOCOL_LAST, 0 },
146 static const u8 dummy_udp_tun_tcp_packet[] = {
147 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
148 0x00, 0x00, 0x00, 0x00,
149 0x00, 0x00, 0x00, 0x00,
151 0x08, 0x00, /* ICE_ETYPE_OL 12 */
153 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
154 0x00, 0x01, 0x00, 0x00,
155 0x40, 0x11, 0x00, 0x00,
156 0x00, 0x00, 0x00, 0x00,
157 0x00, 0x00, 0x00, 0x00,
159 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
160 0x00, 0x46, 0x00, 0x00,
162 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
163 0x00, 0x00, 0x00, 0x00,
165 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
166 0x00, 0x00, 0x00, 0x00,
167 0x00, 0x00, 0x00, 0x00,
170 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
171 0x00, 0x01, 0x00, 0x00,
172 0x40, 0x06, 0x00, 0x00,
173 0x00, 0x00, 0x00, 0x00,
174 0x00, 0x00, 0x00, 0x00,
176 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
177 0x00, 0x00, 0x00, 0x00,
178 0x00, 0x00, 0x00, 0x00,
179 0x50, 0x02, 0x20, 0x00,
180 0x00, 0x00, 0x00, 0x00
183 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
185 { ICE_ETYPE_OL, 12 },
186 { ICE_IPV4_OFOS, 14 },
190 { ICE_VXLAN_GPE, 42 },
193 { ICE_UDP_ILOS, 84 },
194 { ICE_PROTOCOL_LAST, 0 },
197 static const u8 dummy_udp_tun_udp_packet[] = {
198 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
199 0x00, 0x00, 0x00, 0x00,
200 0x00, 0x00, 0x00, 0x00,
202 0x08, 0x00, /* ICE_ETYPE_OL 12 */
204 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
205 0x00, 0x01, 0x00, 0x00,
206 0x00, 0x11, 0x00, 0x00,
207 0x00, 0x00, 0x00, 0x00,
208 0x00, 0x00, 0x00, 0x00,
210 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
211 0x00, 0x3a, 0x00, 0x00,
213 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
214 0x00, 0x00, 0x00, 0x00,
216 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
217 0x00, 0x00, 0x00, 0x00,
218 0x00, 0x00, 0x00, 0x00,
221 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
222 0x00, 0x01, 0x00, 0x00,
223 0x00, 0x11, 0x00, 0x00,
224 0x00, 0x00, 0x00, 0x00,
225 0x00, 0x00, 0x00, 0x00,
227 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
228 0x00, 0x08, 0x00, 0x00,
231 /* offset info for MAC + IPv4 + UDP dummy packet */
232 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
234 { ICE_ETYPE_OL, 12 },
235 { ICE_IPV4_OFOS, 14 },
236 { ICE_UDP_ILOS, 34 },
237 { ICE_PROTOCOL_LAST, 0 },
240 /* Dummy packet for MAC + IPv4 + UDP */
241 static const u8 dummy_udp_packet[] = {
242 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
243 0x00, 0x00, 0x00, 0x00,
244 0x00, 0x00, 0x00, 0x00,
246 0x08, 0x00, /* ICE_ETYPE_OL 12 */
248 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
249 0x00, 0x01, 0x00, 0x00,
250 0x00, 0x11, 0x00, 0x00,
251 0x00, 0x00, 0x00, 0x00,
252 0x00, 0x00, 0x00, 0x00,
254 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
255 0x00, 0x08, 0x00, 0x00,
257 0x00, 0x00, /* 2 bytes for 4 byte alignment */
260 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
261 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
263 { ICE_VLAN_OFOS, 12 },
264 { ICE_ETYPE_OL, 16 },
265 { ICE_IPV4_OFOS, 18 },
266 { ICE_UDP_ILOS, 38 },
267 { ICE_PROTOCOL_LAST, 0 },
270 /* C-tag (801.1Q), IPv4:UDP dummy packet */
271 static const u8 dummy_vlan_udp_packet[] = {
272 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
273 0x00, 0x00, 0x00, 0x00,
274 0x00, 0x00, 0x00, 0x00,
276 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
278 0x08, 0x00, /* ICE_ETYPE_OL 16 */
280 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
281 0x00, 0x01, 0x00, 0x00,
282 0x00, 0x11, 0x00, 0x00,
283 0x00, 0x00, 0x00, 0x00,
284 0x00, 0x00, 0x00, 0x00,
286 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
287 0x00, 0x08, 0x00, 0x00,
289 0x00, 0x00, /* 2 bytes for 4 byte alignment */
292 /* offset info for MAC + IPv4 + TCP dummy packet */
293 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
295 { ICE_ETYPE_OL, 12 },
296 { ICE_IPV4_OFOS, 14 },
298 { ICE_PROTOCOL_LAST, 0 },
301 /* Dummy packet for MAC + IPv4 + TCP */
302 static const u8 dummy_tcp_packet[] = {
303 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
304 0x00, 0x00, 0x00, 0x00,
305 0x00, 0x00, 0x00, 0x00,
307 0x08, 0x00, /* ICE_ETYPE_OL 12 */
309 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
310 0x00, 0x01, 0x00, 0x00,
311 0x00, 0x06, 0x00, 0x00,
312 0x00, 0x00, 0x00, 0x00,
313 0x00, 0x00, 0x00, 0x00,
315 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
316 0x00, 0x00, 0x00, 0x00,
317 0x00, 0x00, 0x00, 0x00,
318 0x50, 0x00, 0x00, 0x00,
319 0x00, 0x00, 0x00, 0x00,
321 0x00, 0x00, /* 2 bytes for 4 byte alignment */
324 /* offset info for MAC + MPLS dummy packet */
325 static const struct ice_dummy_pkt_offsets dummy_mpls_packet_offsets[] = {
327 { ICE_ETYPE_OL, 12 },
328 { ICE_PROTOCOL_LAST, 0 },
331 /* Dummy packet for MAC + MPLS */
332 static const u8 dummy_mpls_packet[] = {
333 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
334 0x00, 0x00, 0x00, 0x00,
335 0x00, 0x00, 0x00, 0x00,
337 0x88, 0x47, /* ICE_ETYPE_OL 12 */
338 0x00, 0x00, 0x01, 0x00,
340 0x00, 0x00, /* 2 bytes for 4 byte alignment */
343 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
344 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
346 { ICE_VLAN_OFOS, 12 },
347 { ICE_ETYPE_OL, 16 },
348 { ICE_IPV4_OFOS, 18 },
350 { ICE_PROTOCOL_LAST, 0 },
353 /* C-tag (801.1Q), IPv4:TCP dummy packet */
354 static const u8 dummy_vlan_tcp_packet[] = {
355 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
356 0x00, 0x00, 0x00, 0x00,
357 0x00, 0x00, 0x00, 0x00,
359 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
361 0x08, 0x00, /* ICE_ETYPE_OL 16 */
363 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
364 0x00, 0x01, 0x00, 0x00,
365 0x00, 0x06, 0x00, 0x00,
366 0x00, 0x00, 0x00, 0x00,
367 0x00, 0x00, 0x00, 0x00,
369 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
370 0x00, 0x00, 0x00, 0x00,
371 0x00, 0x00, 0x00, 0x00,
372 0x50, 0x00, 0x00, 0x00,
373 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, /* 2 bytes for 4 byte alignment */
378 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
380 { ICE_ETYPE_OL, 12 },
381 { ICE_IPV6_OFOS, 14 },
383 { ICE_PROTOCOL_LAST, 0 },
386 static const u8 dummy_tcp_ipv6_packet[] = {
387 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
388 0x00, 0x00, 0x00, 0x00,
389 0x00, 0x00, 0x00, 0x00,
391 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
393 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
394 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
395 0x00, 0x00, 0x00, 0x00,
396 0x00, 0x00, 0x00, 0x00,
397 0x00, 0x00, 0x00, 0x00,
398 0x00, 0x00, 0x00, 0x00,
399 0x00, 0x00, 0x00, 0x00,
400 0x00, 0x00, 0x00, 0x00,
401 0x00, 0x00, 0x00, 0x00,
402 0x00, 0x00, 0x00, 0x00,
404 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
405 0x00, 0x00, 0x00, 0x00,
406 0x00, 0x00, 0x00, 0x00,
407 0x50, 0x00, 0x00, 0x00,
408 0x00, 0x00, 0x00, 0x00,
410 0x00, 0x00, /* 2 bytes for 4 byte alignment */
413 /* C-tag (802.1Q): IPv6 + TCP */
414 static const struct ice_dummy_pkt_offsets
415 dummy_vlan_tcp_ipv6_packet_offsets[] = {
417 { ICE_VLAN_OFOS, 12 },
418 { ICE_ETYPE_OL, 16 },
419 { ICE_IPV6_OFOS, 18 },
421 { ICE_PROTOCOL_LAST, 0 },
424 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
425 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
426 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
427 0x00, 0x00, 0x00, 0x00,
428 0x00, 0x00, 0x00, 0x00,
430 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
432 0x86, 0xDD, /* ICE_ETYPE_OL 16 */
434 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
435 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
436 0x00, 0x00, 0x00, 0x00,
437 0x00, 0x00, 0x00, 0x00,
438 0x00, 0x00, 0x00, 0x00,
439 0x00, 0x00, 0x00, 0x00,
440 0x00, 0x00, 0x00, 0x00,
441 0x00, 0x00, 0x00, 0x00,
442 0x00, 0x00, 0x00, 0x00,
443 0x00, 0x00, 0x00, 0x00,
445 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
446 0x00, 0x00, 0x00, 0x00,
447 0x00, 0x00, 0x00, 0x00,
448 0x50, 0x00, 0x00, 0x00,
449 0x00, 0x00, 0x00, 0x00,
451 0x00, 0x00, /* 2 bytes for 4 byte alignment */
455 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
457 { ICE_ETYPE_OL, 12 },
458 { ICE_IPV6_OFOS, 14 },
459 { ICE_UDP_ILOS, 54 },
460 { ICE_PROTOCOL_LAST, 0 },
463 /* IPv6 + UDP dummy packet */
464 static const u8 dummy_udp_ipv6_packet[] = {
465 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
466 0x00, 0x00, 0x00, 0x00,
467 0x00, 0x00, 0x00, 0x00,
469 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
471 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
472 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
473 0x00, 0x00, 0x00, 0x00,
474 0x00, 0x00, 0x00, 0x00,
475 0x00, 0x00, 0x00, 0x00,
476 0x00, 0x00, 0x00, 0x00,
477 0x00, 0x00, 0x00, 0x00,
478 0x00, 0x00, 0x00, 0x00,
479 0x00, 0x00, 0x00, 0x00,
480 0x00, 0x00, 0x00, 0x00,
482 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
483 0x00, 0x10, 0x00, 0x00,
485 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
486 0x00, 0x00, 0x00, 0x00,
488 0x00, 0x00, /* 2 bytes for 4 byte alignment */
491 /* C-tag (802.1Q): IPv6 + UDP */
492 static const struct ice_dummy_pkt_offsets
493 dummy_vlan_udp_ipv6_packet_offsets[] = {
495 { ICE_VLAN_OFOS, 12 },
496 { ICE_ETYPE_OL, 16 },
497 { ICE_IPV6_OFOS, 18 },
498 { ICE_UDP_ILOS, 58 },
499 { ICE_PROTOCOL_LAST, 0 },
502 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
503 static const u8 dummy_vlan_udp_ipv6_packet[] = {
504 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
505 0x00, 0x00, 0x00, 0x00,
506 0x00, 0x00, 0x00, 0x00,
508 0x81, 0x00, 0x00, 0x00,/* ICE_VLAN_OFOS 12 */
510 0x86, 0xDD, /* ICE_ETYPE_OL 16 */
512 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
513 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
514 0x00, 0x00, 0x00, 0x00,
515 0x00, 0x00, 0x00, 0x00,
516 0x00, 0x00, 0x00, 0x00,
517 0x00, 0x00, 0x00, 0x00,
518 0x00, 0x00, 0x00, 0x00,
519 0x00, 0x00, 0x00, 0x00,
520 0x00, 0x00, 0x00, 0x00,
521 0x00, 0x00, 0x00, 0x00,
523 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
524 0x00, 0x08, 0x00, 0x00,
526 0x00, 0x00, /* 2 bytes for 4 byte alignment */
529 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
530 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_tcp_packet_offsets[] = {
532 { ICE_IPV4_OFOS, 14 },
537 { ICE_PROTOCOL_LAST, 0 },
540 static const u8 dummy_ipv4_gtpu_ipv4_tcp_packet[] = {
541 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
542 0x00, 0x00, 0x00, 0x00,
543 0x00, 0x00, 0x00, 0x00,
546 0x45, 0x00, 0x00, 0x58, /* IP 14 */
547 0x00, 0x00, 0x00, 0x00,
548 0x00, 0x11, 0x00, 0x00,
549 0x00, 0x00, 0x00, 0x00,
550 0x00, 0x00, 0x00, 0x00,
552 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
553 0x00, 0x44, 0x00, 0x00,
555 0x34, 0xff, 0x00, 0x34, /* GTP-U Header 42 */
556 0x00, 0x00, 0x00, 0x00,
557 0x00, 0x00, 0x00, 0x85,
559 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
560 0x00, 0x00, 0x00, 0x00,
562 0x45, 0x00, 0x00, 0x28, /* IP 62 */
563 0x00, 0x00, 0x00, 0x00,
564 0x00, 0x06, 0x00, 0x00,
565 0x00, 0x00, 0x00, 0x00,
566 0x00, 0x00, 0x00, 0x00,
568 0x00, 0x00, 0x00, 0x00, /* TCP 82 */
569 0x00, 0x00, 0x00, 0x00,
570 0x00, 0x00, 0x00, 0x00,
571 0x50, 0x00, 0x00, 0x00,
572 0x00, 0x00, 0x00, 0x00,
574 0x00, 0x00, /* 2 bytes for 4 byte alignment */
577 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
578 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_udp_packet_offsets[] = {
580 { ICE_IPV4_OFOS, 14 },
584 { ICE_UDP_ILOS, 82 },
585 { ICE_PROTOCOL_LAST, 0 },
588 static const u8 dummy_ipv4_gtpu_ipv4_udp_packet[] = {
589 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
590 0x00, 0x00, 0x00, 0x00,
591 0x00, 0x00, 0x00, 0x00,
594 0x45, 0x00, 0x00, 0x4c, /* IP 14 */
595 0x00, 0x00, 0x00, 0x00,
596 0x00, 0x11, 0x00, 0x00,
597 0x00, 0x00, 0x00, 0x00,
598 0x00, 0x00, 0x00, 0x00,
600 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
601 0x00, 0x38, 0x00, 0x00,
603 0x34, 0xff, 0x00, 0x28, /* GTP-U Header 42 */
604 0x00, 0x00, 0x00, 0x00,
605 0x00, 0x00, 0x00, 0x85,
607 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
608 0x00, 0x00, 0x00, 0x00,
610 0x45, 0x00, 0x00, 0x1c, /* IP 62 */
611 0x00, 0x00, 0x00, 0x00,
612 0x00, 0x11, 0x00, 0x00,
613 0x00, 0x00, 0x00, 0x00,
614 0x00, 0x00, 0x00, 0x00,
616 0x00, 0x00, 0x00, 0x00, /* UDP 82 */
617 0x00, 0x08, 0x00, 0x00,
619 0x00, 0x00, /* 2 bytes for 4 byte alignment */
622 /* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
623 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_tcp_packet_offsets[] = {
625 { ICE_IPV4_OFOS, 14 },
630 { ICE_PROTOCOL_LAST, 0 },
633 static const u8 dummy_ipv4_gtpu_ipv6_tcp_packet[] = {
634 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
635 0x00, 0x00, 0x00, 0x00,
636 0x00, 0x00, 0x00, 0x00,
639 0x45, 0x00, 0x00, 0x6c, /* IP 14 */
640 0x00, 0x00, 0x00, 0x00,
641 0x00, 0x11, 0x00, 0x00,
642 0x00, 0x00, 0x00, 0x00,
643 0x00, 0x00, 0x00, 0x00,
645 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
646 0x00, 0x58, 0x00, 0x00,
648 0x34, 0xff, 0x00, 0x48, /* GTP-U Header 42 */
649 0x00, 0x00, 0x00, 0x00,
650 0x00, 0x00, 0x00, 0x85,
652 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
653 0x00, 0x00, 0x00, 0x00,
655 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
656 0x00, 0x14, 0x06, 0x00,
657 0x00, 0x00, 0x00, 0x00,
658 0x00, 0x00, 0x00, 0x00,
659 0x00, 0x00, 0x00, 0x00,
660 0x00, 0x00, 0x00, 0x00,
661 0x00, 0x00, 0x00, 0x00,
662 0x00, 0x00, 0x00, 0x00,
663 0x00, 0x00, 0x00, 0x00,
664 0x00, 0x00, 0x00, 0x00,
666 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
667 0x00, 0x00, 0x00, 0x00,
668 0x00, 0x00, 0x00, 0x00,
669 0x50, 0x00, 0x00, 0x00,
670 0x00, 0x00, 0x00, 0x00,
672 0x00, 0x00, /* 2 bytes for 4 byte alignment */
675 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_udp_packet_offsets[] = {
677 { ICE_IPV4_OFOS, 14 },
681 { ICE_UDP_ILOS, 102 },
682 { ICE_PROTOCOL_LAST, 0 },
685 static const u8 dummy_ipv4_gtpu_ipv6_udp_packet[] = {
686 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
687 0x00, 0x00, 0x00, 0x00,
688 0x00, 0x00, 0x00, 0x00,
691 0x45, 0x00, 0x00, 0x60, /* IP 14 */
692 0x00, 0x00, 0x00, 0x00,
693 0x00, 0x11, 0x00, 0x00,
694 0x00, 0x00, 0x00, 0x00,
695 0x00, 0x00, 0x00, 0x00,
697 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
698 0x00, 0x4c, 0x00, 0x00,
700 0x34, 0xff, 0x00, 0x3c, /* GTP-U Header 42 */
701 0x00, 0x00, 0x00, 0x00,
702 0x00, 0x00, 0x00, 0x85,
704 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
705 0x00, 0x00, 0x00, 0x00,
707 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
708 0x00, 0x08, 0x11, 0x00,
709 0x00, 0x00, 0x00, 0x00,
710 0x00, 0x00, 0x00, 0x00,
711 0x00, 0x00, 0x00, 0x00,
712 0x00, 0x00, 0x00, 0x00,
713 0x00, 0x00, 0x00, 0x00,
714 0x00, 0x00, 0x00, 0x00,
715 0x00, 0x00, 0x00, 0x00,
716 0x00, 0x00, 0x00, 0x00,
718 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
719 0x00, 0x08, 0x00, 0x00,
721 0x00, 0x00, /* 2 bytes for 4 byte alignment */
724 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_tcp_packet_offsets[] = {
726 { ICE_IPV6_OFOS, 14 },
731 { ICE_PROTOCOL_LAST, 0 },
734 static const u8 dummy_ipv6_gtpu_ipv4_tcp_packet[] = {
735 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
736 0x00, 0x00, 0x00, 0x00,
737 0x00, 0x00, 0x00, 0x00,
740 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
741 0x00, 0x44, 0x11, 0x00,
742 0x00, 0x00, 0x00, 0x00,
743 0x00, 0x00, 0x00, 0x00,
744 0x00, 0x00, 0x00, 0x00,
745 0x00, 0x00, 0x00, 0x00,
746 0x00, 0x00, 0x00, 0x00,
747 0x00, 0x00, 0x00, 0x00,
748 0x00, 0x00, 0x00, 0x00,
749 0x00, 0x00, 0x00, 0x00,
751 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
752 0x00, 0x44, 0x00, 0x00,
754 0x34, 0xff, 0x00, 0x34, /* GTP-U Header 62 */
755 0x00, 0x00, 0x00, 0x00,
756 0x00, 0x00, 0x00, 0x85,
758 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
759 0x00, 0x00, 0x00, 0x00,
761 0x45, 0x00, 0x00, 0x28, /* IP 82 */
762 0x00, 0x00, 0x00, 0x00,
763 0x00, 0x06, 0x00, 0x00,
764 0x00, 0x00, 0x00, 0x00,
765 0x00, 0x00, 0x00, 0x00,
767 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
768 0x00, 0x00, 0x00, 0x00,
769 0x00, 0x00, 0x00, 0x00,
770 0x50, 0x00, 0x00, 0x00,
771 0x00, 0x00, 0x00, 0x00,
773 0x00, 0x00, /* 2 bytes for 4 byte alignment */
776 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_udp_packet_offsets[] = {
778 { ICE_IPV6_OFOS, 14 },
782 { ICE_UDP_ILOS, 102 },
783 { ICE_PROTOCOL_LAST, 0 },
786 static const u8 dummy_ipv6_gtpu_ipv4_udp_packet[] = {
787 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
788 0x00, 0x00, 0x00, 0x00,
789 0x00, 0x00, 0x00, 0x00,
792 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
793 0x00, 0x38, 0x11, 0x00,
794 0x00, 0x00, 0x00, 0x00,
795 0x00, 0x00, 0x00, 0x00,
796 0x00, 0x00, 0x00, 0x00,
797 0x00, 0x00, 0x00, 0x00,
798 0x00, 0x00, 0x00, 0x00,
799 0x00, 0x00, 0x00, 0x00,
800 0x00, 0x00, 0x00, 0x00,
801 0x00, 0x00, 0x00, 0x00,
803 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
804 0x00, 0x38, 0x00, 0x00,
806 0x34, 0xff, 0x00, 0x28, /* GTP-U Header 62 */
807 0x00, 0x00, 0x00, 0x00,
808 0x00, 0x00, 0x00, 0x85,
810 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
811 0x00, 0x00, 0x00, 0x00,
813 0x45, 0x00, 0x00, 0x1c, /* IP 82 */
814 0x00, 0x00, 0x00, 0x00,
815 0x00, 0x11, 0x00, 0x00,
816 0x00, 0x00, 0x00, 0x00,
817 0x00, 0x00, 0x00, 0x00,
819 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
820 0x00, 0x08, 0x00, 0x00,
822 0x00, 0x00, /* 2 bytes for 4 byte alignment */
825 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_tcp_packet_offsets[] = {
827 { ICE_IPV6_OFOS, 14 },
832 { ICE_PROTOCOL_LAST, 0 },
835 static const u8 dummy_ipv6_gtpu_ipv6_tcp_packet[] = {
836 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
837 0x00, 0x00, 0x00, 0x00,
838 0x00, 0x00, 0x00, 0x00,
841 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
842 0x00, 0x58, 0x11, 0x00,
843 0x00, 0x00, 0x00, 0x00,
844 0x00, 0x00, 0x00, 0x00,
845 0x00, 0x00, 0x00, 0x00,
846 0x00, 0x00, 0x00, 0x00,
847 0x00, 0x00, 0x00, 0x00,
848 0x00, 0x00, 0x00, 0x00,
849 0x00, 0x00, 0x00, 0x00,
850 0x00, 0x00, 0x00, 0x00,
852 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
853 0x00, 0x58, 0x00, 0x00,
855 0x34, 0xff, 0x00, 0x48, /* GTP-U Header 62 */
856 0x00, 0x00, 0x00, 0x00,
857 0x00, 0x00, 0x00, 0x85,
859 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
860 0x00, 0x00, 0x00, 0x00,
862 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
863 0x00, 0x14, 0x06, 0x00,
864 0x00, 0x00, 0x00, 0x00,
865 0x00, 0x00, 0x00, 0x00,
866 0x00, 0x00, 0x00, 0x00,
867 0x00, 0x00, 0x00, 0x00,
868 0x00, 0x00, 0x00, 0x00,
869 0x00, 0x00, 0x00, 0x00,
870 0x00, 0x00, 0x00, 0x00,
871 0x00, 0x00, 0x00, 0x00,
873 0x00, 0x00, 0x00, 0x00, /* TCP 122 */
874 0x00, 0x00, 0x00, 0x00,
875 0x00, 0x00, 0x00, 0x00,
876 0x50, 0x00, 0x00, 0x00,
877 0x00, 0x00, 0x00, 0x00,
879 0x00, 0x00, /* 2 bytes for 4 byte alignment */
882 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_udp_packet_offsets[] = {
884 { ICE_IPV6_OFOS, 14 },
888 { ICE_UDP_ILOS, 122 },
889 { ICE_PROTOCOL_LAST, 0 },
892 static const u8 dummy_ipv6_gtpu_ipv6_udp_packet[] = {
893 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
894 0x00, 0x00, 0x00, 0x00,
895 0x00, 0x00, 0x00, 0x00,
898 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
899 0x00, 0x4c, 0x11, 0x00,
900 0x00, 0x00, 0x00, 0x00,
901 0x00, 0x00, 0x00, 0x00,
902 0x00, 0x00, 0x00, 0x00,
903 0x00, 0x00, 0x00, 0x00,
904 0x00, 0x00, 0x00, 0x00,
905 0x00, 0x00, 0x00, 0x00,
906 0x00, 0x00, 0x00, 0x00,
907 0x00, 0x00, 0x00, 0x00,
909 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
910 0x00, 0x4c, 0x00, 0x00,
912 0x34, 0xff, 0x00, 0x3c, /* GTP-U Header 62 */
913 0x00, 0x00, 0x00, 0x00,
914 0x00, 0x00, 0x00, 0x85,
916 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
917 0x00, 0x00, 0x00, 0x00,
919 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
920 0x00, 0x08, 0x11, 0x00,
921 0x00, 0x00, 0x00, 0x00,
922 0x00, 0x00, 0x00, 0x00,
923 0x00, 0x00, 0x00, 0x00,
924 0x00, 0x00, 0x00, 0x00,
925 0x00, 0x00, 0x00, 0x00,
926 0x00, 0x00, 0x00, 0x00,
927 0x00, 0x00, 0x00, 0x00,
928 0x00, 0x00, 0x00, 0x00,
930 0x00, 0x00, 0x00, 0x00, /* UDP 122 */
931 0x00, 0x08, 0x00, 0x00,
933 0x00, 0x00, /* 2 bytes for 4 byte alignment */
936 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_packet_offsets[] = {
938 { ICE_IPV4_OFOS, 14 },
942 { ICE_PROTOCOL_LAST, 0 },
945 static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
946 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
947 0x00, 0x00, 0x00, 0x00,
948 0x00, 0x00, 0x00, 0x00,
951 0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
952 0x00, 0x00, 0x40, 0x00,
953 0x40, 0x11, 0x00, 0x00,
954 0x00, 0x00, 0x00, 0x00,
955 0x00, 0x00, 0x00, 0x00,
957 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
958 0x00, 0x00, 0x00, 0x00,
960 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
961 0x00, 0x00, 0x00, 0x00,
962 0x00, 0x00, 0x00, 0x85,
964 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
965 0x00, 0x00, 0x00, 0x00,
967 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
968 0x00, 0x00, 0x40, 0x00,
969 0x40, 0x00, 0x00, 0x00,
970 0x00, 0x00, 0x00, 0x00,
971 0x00, 0x00, 0x00, 0x00,
976 struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_packet_offsets[] = {
978 { ICE_IPV4_OFOS, 14 },
982 { ICE_PROTOCOL_LAST, 0 },
985 static const u8 dummy_ipv4_gtpu_ipv6_packet[] = {
986 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
987 0x00, 0x00, 0x00, 0x00,
988 0x00, 0x00, 0x00, 0x00,
991 0x45, 0x00, 0x00, 0x58, /* ICE_IPV4_OFOS 14 */
992 0x00, 0x00, 0x40, 0x00,
993 0x40, 0x11, 0x00, 0x00,
994 0x00, 0x00, 0x00, 0x00,
995 0x00, 0x00, 0x00, 0x00,
997 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
998 0x00, 0x00, 0x00, 0x00,
1000 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
1001 0x00, 0x00, 0x00, 0x00,
1002 0x00, 0x00, 0x00, 0x85,
1004 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1005 0x00, 0x00, 0x00, 0x00,
1007 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 62 */
1008 0x00, 0x00, 0x3b, 0x00,
1009 0x00, 0x00, 0x00, 0x00,
1010 0x00, 0x00, 0x00, 0x00,
1011 0x00, 0x00, 0x00, 0x00,
1012 0x00, 0x00, 0x00, 0x00,
1013 0x00, 0x00, 0x00, 0x00,
1014 0x00, 0x00, 0x00, 0x00,
1015 0x00, 0x00, 0x00, 0x00,
1016 0x00, 0x00, 0x00, 0x00,
1022 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_packet_offsets[] = {
1023 { ICE_MAC_OFOS, 0 },
1024 { ICE_IPV6_OFOS, 14 },
1027 { ICE_IPV4_IL, 82 },
1028 { ICE_PROTOCOL_LAST, 0 },
1031 static const u8 dummy_ipv6_gtpu_ipv4_packet[] = {
1032 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1033 0x00, 0x00, 0x00, 0x00,
1034 0x00, 0x00, 0x00, 0x00,
1037 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1038 0x00, 0x58, 0x11, 0x00, /* Next header UDP*/
1039 0x00, 0x00, 0x00, 0x00,
1040 0x00, 0x00, 0x00, 0x00,
1041 0x00, 0x00, 0x00, 0x00,
1042 0x00, 0x00, 0x00, 0x00,
1043 0x00, 0x00, 0x00, 0x00,
1044 0x00, 0x00, 0x00, 0x00,
1045 0x00, 0x00, 0x00, 0x00,
1046 0x00, 0x00, 0x00, 0x00,
1048 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1049 0x00, 0x00, 0x00, 0x00,
1051 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 62 */
1052 0x00, 0x00, 0x00, 0x00,
1053 0x00, 0x00, 0x00, 0x85,
1055 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1056 0x00, 0x00, 0x00, 0x00,
1058 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 82 */
1059 0x00, 0x00, 0x40, 0x00,
1060 0x40, 0x00, 0x00, 0x00,
1061 0x00, 0x00, 0x00, 0x00,
1062 0x00, 0x00, 0x00, 0x00,
1068 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_packet_offsets[] = {
1069 { ICE_MAC_OFOS, 0 },
1070 { ICE_IPV6_OFOS, 14 },
1073 { ICE_IPV6_IL, 82 },
1074 { ICE_PROTOCOL_LAST, 0 },
1077 static const u8 dummy_ipv6_gtpu_ipv6_packet[] = {
1078 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1079 0x00, 0x00, 0x00, 0x00,
1080 0x00, 0x00, 0x00, 0x00,
1083 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1084 0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
1085 0x00, 0x00, 0x00, 0x00,
1086 0x00, 0x00, 0x00, 0x00,
1087 0x00, 0x00, 0x00, 0x00,
1088 0x00, 0x00, 0x00, 0x00,
1089 0x00, 0x00, 0x00, 0x00,
1090 0x00, 0x00, 0x00, 0x00,
1091 0x00, 0x00, 0x00, 0x00,
1092 0x00, 0x00, 0x00, 0x00,
1094 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1095 0x00, 0x00, 0x00, 0x00,
1097 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 62 */
1098 0x00, 0x00, 0x00, 0x00,
1099 0x00, 0x00, 0x00, 0x85,
1101 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1102 0x00, 0x00, 0x00, 0x00,
1104 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFIL 82 */
1105 0x00, 0x00, 0x3b, 0x00,
1106 0x00, 0x00, 0x00, 0x00,
1107 0x00, 0x00, 0x00, 0x00,
1108 0x00, 0x00, 0x00, 0x00,
1109 0x00, 0x00, 0x00, 0x00,
1110 0x00, 0x00, 0x00, 0x00,
1111 0x00, 0x00, 0x00, 0x00,
1112 0x00, 0x00, 0x00, 0x00,
1113 0x00, 0x00, 0x00, 0x00,
1118 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
1119 { ICE_MAC_OFOS, 0 },
1120 { ICE_IPV4_OFOS, 14 },
1123 { ICE_PROTOCOL_LAST, 0 },
1126 static const u8 dummy_udp_gtp_packet[] = {
1127 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1128 0x00, 0x00, 0x00, 0x00,
1129 0x00, 0x00, 0x00, 0x00,
1132 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
1133 0x00, 0x00, 0x00, 0x00,
1134 0x00, 0x11, 0x00, 0x00,
1135 0x00, 0x00, 0x00, 0x00,
1136 0x00, 0x00, 0x00, 0x00,
1138 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
1139 0x00, 0x1c, 0x00, 0x00,
1141 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
1142 0x00, 0x00, 0x00, 0x00,
1143 0x00, 0x00, 0x00, 0x85,
1145 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1146 0x00, 0x00, 0x00, 0x00,
1150 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
1151 { ICE_MAC_OFOS, 0 },
1152 { ICE_IPV4_OFOS, 14 },
1154 { ICE_GTP_NO_PAY, 42 },
1155 { ICE_PROTOCOL_LAST, 0 },
1159 struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
1160 { ICE_MAC_OFOS, 0 },
1161 { ICE_IPV6_OFOS, 14 },
1163 { ICE_GTP_NO_PAY, 62 },
1164 { ICE_PROTOCOL_LAST, 0 },
1167 static const u8 dummy_ipv6_gtp_packet[] = {
1168 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1169 0x00, 0x00, 0x00, 0x00,
1170 0x00, 0x00, 0x00, 0x00,
1173 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1174 0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
1175 0x00, 0x00, 0x00, 0x00,
1176 0x00, 0x00, 0x00, 0x00,
1177 0x00, 0x00, 0x00, 0x00,
1178 0x00, 0x00, 0x00, 0x00,
1179 0x00, 0x00, 0x00, 0x00,
1180 0x00, 0x00, 0x00, 0x00,
1181 0x00, 0x00, 0x00, 0x00,
1182 0x00, 0x00, 0x00, 0x00,
1184 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1185 0x00, 0x00, 0x00, 0x00,
1187 0x30, 0x00, 0x00, 0x28, /* ICE_GTP 62 */
1188 0x00, 0x00, 0x00, 0x00,
1193 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
1194 { ICE_MAC_OFOS, 0 },
1195 { ICE_VLAN_OFOS, 12 },
1196 { ICE_ETYPE_OL, 16 },
1198 { ICE_PROTOCOL_LAST, 0 },
1201 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv4_offsets[] = {
1202 { ICE_MAC_OFOS, 0 },
1203 { ICE_VLAN_OFOS, 12 },
1204 { ICE_ETYPE_OL, 16 },
1206 { ICE_IPV4_OFOS, 26 },
1207 { ICE_PROTOCOL_LAST, 0 },
1210 static const u8 dummy_pppoe_ipv4_packet[] = {
1211 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1212 0x00, 0x00, 0x00, 0x00,
1213 0x00, 0x00, 0x00, 0x00,
1215 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
1217 0x88, 0x64, /* ICE_ETYPE_OL 16 */
1219 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1222 0x00, 0x21, /* PPP Link Layer 24 */
1224 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
1225 0x00, 0x00, 0x00, 0x00,
1226 0x00, 0x00, 0x00, 0x00,
1227 0x00, 0x00, 0x00, 0x00,
1228 0x00, 0x00, 0x00, 0x00,
1230 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1234 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_tcp_packet_offsets[] = {
1235 { ICE_MAC_OFOS, 0 },
1236 { ICE_VLAN_OFOS, 12 },
1237 { ICE_ETYPE_OL, 16 },
1239 { ICE_IPV4_OFOS, 26 },
1241 { ICE_PROTOCOL_LAST, 0 },
1244 static const u8 dummy_pppoe_ipv4_tcp_packet[] = {
1245 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1246 0x00, 0x00, 0x00, 0x00,
1247 0x00, 0x00, 0x00, 0x00,
1249 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
1251 0x88, 0x64, /* ICE_ETYPE_OL 16 */
1253 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1256 0x00, 0x21, /* PPP Link Layer 24 */
1258 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 26 */
1259 0x00, 0x01, 0x00, 0x00,
1260 0x00, 0x06, 0x00, 0x00,
1261 0x00, 0x00, 0x00, 0x00,
1262 0x00, 0x00, 0x00, 0x00,
1264 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 46 */
1265 0x00, 0x00, 0x00, 0x00,
1266 0x00, 0x00, 0x00, 0x00,
1267 0x50, 0x00, 0x00, 0x00,
1268 0x00, 0x00, 0x00, 0x00,
1270 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1274 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_udp_packet_offsets[] = {
1275 { ICE_MAC_OFOS, 0 },
1276 { ICE_VLAN_OFOS, 12 },
1277 { ICE_ETYPE_OL, 16 },
1279 { ICE_IPV4_OFOS, 26 },
1280 { ICE_UDP_ILOS, 46 },
1281 { ICE_PROTOCOL_LAST, 0 },
1284 static const u8 dummy_pppoe_ipv4_udp_packet[] = {
1285 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1286 0x00, 0x00, 0x00, 0x00,
1287 0x00, 0x00, 0x00, 0x00,
1289 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
1291 0x88, 0x64, /* ICE_ETYPE_OL 16 */
1293 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1296 0x00, 0x21, /* PPP Link Layer 24 */
1298 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 26 */
1299 0x00, 0x01, 0x00, 0x00,
1300 0x00, 0x11, 0x00, 0x00,
1301 0x00, 0x00, 0x00, 0x00,
1302 0x00, 0x00, 0x00, 0x00,
1304 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 46 */
1305 0x00, 0x08, 0x00, 0x00,
1307 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1310 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_offsets[] = {
1311 { ICE_MAC_OFOS, 0 },
1312 { ICE_VLAN_OFOS, 12 },
1313 { ICE_ETYPE_OL, 16 },
1315 { ICE_IPV6_OFOS, 26 },
1316 { ICE_PROTOCOL_LAST, 0 },
1319 static const u8 dummy_pppoe_ipv6_packet[] = {
1320 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1321 0x00, 0x00, 0x00, 0x00,
1322 0x00, 0x00, 0x00, 0x00,
1324 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
1326 0x88, 0x64, /* ICE_ETYPE_OL 16 */
1328 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1331 0x00, 0x57, /* PPP Link Layer 24 */
1333 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
1334 0x00, 0x00, 0x3b, 0x00,
1335 0x00, 0x00, 0x00, 0x00,
1336 0x00, 0x00, 0x00, 0x00,
1337 0x00, 0x00, 0x00, 0x00,
1338 0x00, 0x00, 0x00, 0x00,
1339 0x00, 0x00, 0x00, 0x00,
1340 0x00, 0x00, 0x00, 0x00,
1341 0x00, 0x00, 0x00, 0x00,
1342 0x00, 0x00, 0x00, 0x00,
1344 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1348 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_tcp_offsets[] = {
1349 { ICE_MAC_OFOS, 0 },
1350 { ICE_VLAN_OFOS, 12 },
1351 { ICE_ETYPE_OL, 16 },
1353 { ICE_IPV6_OFOS, 26 },
1355 { ICE_PROTOCOL_LAST, 0 },
1358 static const u8 dummy_pppoe_ipv6_tcp_packet[] = {
1359 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1360 0x00, 0x00, 0x00, 0x00,
1361 0x00, 0x00, 0x00, 0x00,
1363 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
1365 0x88, 0x64, /* ICE_ETYPE_OL 16 */
1367 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1370 0x00, 0x57, /* PPP Link Layer 24 */
1372 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
1373 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
1374 0x00, 0x00, 0x00, 0x00,
1375 0x00, 0x00, 0x00, 0x00,
1376 0x00, 0x00, 0x00, 0x00,
1377 0x00, 0x00, 0x00, 0x00,
1378 0x00, 0x00, 0x00, 0x00,
1379 0x00, 0x00, 0x00, 0x00,
1380 0x00, 0x00, 0x00, 0x00,
1381 0x00, 0x00, 0x00, 0x00,
1383 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 66 */
1384 0x00, 0x00, 0x00, 0x00,
1385 0x00, 0x00, 0x00, 0x00,
1386 0x50, 0x00, 0x00, 0x00,
1387 0x00, 0x00, 0x00, 0x00,
1389 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1393 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_udp_offsets[] = {
1394 { ICE_MAC_OFOS, 0 },
1395 { ICE_VLAN_OFOS, 12 },
1396 { ICE_ETYPE_OL, 16 },
1398 { ICE_IPV6_OFOS, 26 },
1399 { ICE_UDP_ILOS, 66 },
1400 { ICE_PROTOCOL_LAST, 0 },
1403 static const u8 dummy_pppoe_ipv6_udp_packet[] = {
1404 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1405 0x00, 0x00, 0x00, 0x00,
1406 0x00, 0x00, 0x00, 0x00,
1408 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
1410 0x88, 0x64, /* ICE_ETYPE_OL 16 */
1412 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1415 0x00, 0x57, /* PPP Link Layer 24 */
1417 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
1418 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
1419 0x00, 0x00, 0x00, 0x00,
1420 0x00, 0x00, 0x00, 0x00,
1421 0x00, 0x00, 0x00, 0x00,
1422 0x00, 0x00, 0x00, 0x00,
1423 0x00, 0x00, 0x00, 0x00,
1424 0x00, 0x00, 0x00, 0x00,
1425 0x00, 0x00, 0x00, 0x00,
1426 0x00, 0x00, 0x00, 0x00,
1428 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 66 */
1429 0x00, 0x08, 0x00, 0x00,
1431 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1434 static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
1435 { ICE_MAC_OFOS, 0 },
1436 { ICE_IPV4_OFOS, 14 },
1438 { ICE_PROTOCOL_LAST, 0 },
1441 static const u8 dummy_ipv4_esp_pkt[] = {
1442 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1443 0x00, 0x00, 0x00, 0x00,
1444 0x00, 0x00, 0x00, 0x00,
1447 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 14 */
1448 0x00, 0x00, 0x40, 0x00,
1449 0x40, 0x32, 0x00, 0x00,
1450 0x00, 0x00, 0x00, 0x00,
1451 0x00, 0x00, 0x00, 0x00,
1453 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
1454 0x00, 0x00, 0x00, 0x00,
1455 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1458 static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
1459 { ICE_MAC_OFOS, 0 },
1460 { ICE_IPV6_OFOS, 14 },
1462 { ICE_PROTOCOL_LAST, 0 },
1465 static const u8 dummy_ipv6_esp_pkt[] = {
1466 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1467 0x00, 0x00, 0x00, 0x00,
1468 0x00, 0x00, 0x00, 0x00,
1471 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1472 0x00, 0x08, 0x32, 0x00, /* Next header ESP */
1473 0x00, 0x00, 0x00, 0x00,
1474 0x00, 0x00, 0x00, 0x00,
1475 0x00, 0x00, 0x00, 0x00,
1476 0x00, 0x00, 0x00, 0x00,
1477 0x00, 0x00, 0x00, 0x00,
1478 0x00, 0x00, 0x00, 0x00,
1479 0x00, 0x00, 0x00, 0x00,
1480 0x00, 0x00, 0x00, 0x00,
1482 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
1483 0x00, 0x00, 0x00, 0x00,
1484 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1487 static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
1488 { ICE_MAC_OFOS, 0 },
1489 { ICE_IPV4_OFOS, 14 },
1491 { ICE_PROTOCOL_LAST, 0 },
1494 static const u8 dummy_ipv4_ah_pkt[] = {
1495 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1496 0x00, 0x00, 0x00, 0x00,
1497 0x00, 0x00, 0x00, 0x00,
1500 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1501 0x00, 0x00, 0x40, 0x00,
1502 0x40, 0x33, 0x00, 0x00,
1503 0x00, 0x00, 0x00, 0x00,
1504 0x00, 0x00, 0x00, 0x00,
1506 0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
1507 0x00, 0x00, 0x00, 0x00,
1508 0x00, 0x00, 0x00, 0x00,
1509 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1512 static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
1513 { ICE_MAC_OFOS, 0 },
1514 { ICE_IPV6_OFOS, 14 },
1516 { ICE_PROTOCOL_LAST, 0 },
1519 static const u8 dummy_ipv6_ah_pkt[] = {
1520 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1521 0x00, 0x00, 0x00, 0x00,
1522 0x00, 0x00, 0x00, 0x00,
1525 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1526 0x00, 0x0c, 0x33, 0x00, /* Next header AH */
1527 0x00, 0x00, 0x00, 0x00,
1528 0x00, 0x00, 0x00, 0x00,
1529 0x00, 0x00, 0x00, 0x00,
1530 0x00, 0x00, 0x00, 0x00,
1531 0x00, 0x00, 0x00, 0x00,
1532 0x00, 0x00, 0x00, 0x00,
1533 0x00, 0x00, 0x00, 0x00,
1534 0x00, 0x00, 0x00, 0x00,
1536 0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
1537 0x00, 0x00, 0x00, 0x00,
1538 0x00, 0x00, 0x00, 0x00,
1539 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1542 static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
1543 { ICE_MAC_OFOS, 0 },
1544 { ICE_IPV4_OFOS, 14 },
1545 { ICE_UDP_ILOS, 34 },
1547 { ICE_PROTOCOL_LAST, 0 },
1550 static const u8 dummy_ipv4_nat_pkt[] = {
1551 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1552 0x00, 0x00, 0x00, 0x00,
1553 0x00, 0x00, 0x00, 0x00,
1556 0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_IL 14 */
1557 0x00, 0x00, 0x40, 0x00,
1558 0x40, 0x11, 0x00, 0x00,
1559 0x00, 0x00, 0x00, 0x00,
1560 0x00, 0x00, 0x00, 0x00,
1562 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34 */
1563 0x00, 0x00, 0x00, 0x00,
1565 0x00, 0x00, 0x00, 0x00,
1566 0x00, 0x00, 0x00, 0x00,
1567 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1570 static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
1571 { ICE_MAC_OFOS, 0 },
1572 { ICE_IPV6_OFOS, 14 },
1573 { ICE_UDP_ILOS, 54 },
1575 { ICE_PROTOCOL_LAST, 0 },
1578 static const u8 dummy_ipv6_nat_pkt[] = {
1579 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1580 0x00, 0x00, 0x00, 0x00,
1581 0x00, 0x00, 0x00, 0x00,
1584 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1585 0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */
1586 0x00, 0x00, 0x00, 0x00,
1587 0x00, 0x00, 0x00, 0x00,
1588 0x00, 0x00, 0x00, 0x00,
1589 0x00, 0x00, 0x00, 0x00,
1590 0x00, 0x00, 0x00, 0x00,
1591 0x00, 0x00, 0x00, 0x00,
1592 0x00, 0x00, 0x00, 0x00,
1593 0x00, 0x00, 0x00, 0x00,
1595 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54 */
1596 0x00, 0x00, 0x00, 0x00,
1598 0x00, 0x00, 0x00, 0x00,
1599 0x00, 0x00, 0x00, 0x00,
1600 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1604 static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
1605 { ICE_MAC_OFOS, 0 },
1606 { ICE_IPV4_OFOS, 14 },
1608 { ICE_PROTOCOL_LAST, 0 },
1611 static const u8 dummy_ipv4_l2tpv3_pkt[] = {
1612 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1613 0x00, 0x00, 0x00, 0x00,
1614 0x00, 0x00, 0x00, 0x00,
1617 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1618 0x00, 0x00, 0x40, 0x00,
1619 0x40, 0x73, 0x00, 0x00,
1620 0x00, 0x00, 0x00, 0x00,
1621 0x00, 0x00, 0x00, 0x00,
1623 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
1624 0x00, 0x00, 0x00, 0x00,
1625 0x00, 0x00, 0x00, 0x00,
1626 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1629 static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
1630 { ICE_MAC_OFOS, 0 },
1631 { ICE_IPV6_OFOS, 14 },
1633 { ICE_PROTOCOL_LAST, 0 },
1636 static const u8 dummy_ipv6_l2tpv3_pkt[] = {
1637 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1638 0x00, 0x00, 0x00, 0x00,
1639 0x00, 0x00, 0x00, 0x00,
1642 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
1643 0x00, 0x0c, 0x73, 0x40,
1644 0x00, 0x00, 0x00, 0x00,
1645 0x00, 0x00, 0x00, 0x00,
1646 0x00, 0x00, 0x00, 0x00,
1647 0x00, 0x00, 0x00, 0x00,
1648 0x00, 0x00, 0x00, 0x00,
1649 0x00, 0x00, 0x00, 0x00,
1650 0x00, 0x00, 0x00, 0x00,
1651 0x00, 0x00, 0x00, 0x00,
1653 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
1654 0x00, 0x00, 0x00, 0x00,
1655 0x00, 0x00, 0x00, 0x00,
1656 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1659 static const struct ice_dummy_pkt_offsets dummy_qinq_ipv4_packet_offsets[] = {
1660 { ICE_MAC_OFOS, 0 },
1661 { ICE_VLAN_EX, 12 },
1662 { ICE_VLAN_IN, 16 },
1663 { ICE_ETYPE_OL, 20 },
1664 { ICE_IPV4_OFOS, 22 },
1665 { ICE_PROTOCOL_LAST, 0 },
1668 static const u8 dummy_qinq_ipv4_pkt[] = {
1669 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1670 0x00, 0x00, 0x00, 0x00,
1671 0x00, 0x00, 0x00, 0x00,
1673 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
1674 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
1675 0x08, 0x00, /* ICE_ETYPE_OL 20 */
1677 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_OFOS 22 */
1678 0x00, 0x01, 0x00, 0x00,
1679 0x00, 0x00, 0x00, 0x00,
1680 0x00, 0x00, 0x00, 0x00,
1681 0x00, 0x00, 0x00, 0x00,
1683 0x00, 0x00, /* 2 bytes for 4 byte alignment */
1687 struct ice_dummy_pkt_offsets dummy_qinq_ipv4_udp_packet_offsets[] = {
1688 { ICE_MAC_OFOS, 0 },
1689 { ICE_VLAN_EX, 12 },
1690 { ICE_VLAN_IN, 16 },
1691 { ICE_ETYPE_OL, 20 },
1692 { ICE_IPV4_OFOS, 22 },
1693 { ICE_UDP_ILOS, 42 },
1694 { ICE_PROTOCOL_LAST, 0 },
1697 static const u8 dummy_qinq_ipv4_udp_pkt[] = {
1698 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1699 0x00, 0x00, 0x00, 0x00,
1700 0x00, 0x00, 0x00, 0x00,
1702 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
1703 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
1704 0x08, 0x00, /* ICE_ETYPE_OL 20 */
1706 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
1707 0x00, 0x01, 0x00, 0x00,
1708 0x00, 0x11, 0x00, 0x00,
1709 0x00, 0x00, 0x00, 0x00,
1710 0x00, 0x00, 0x00, 0x00,
1712 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
1713 0x00, 0x08, 0x00, 0x00,
1715 0x00, 0x00, /* 2 bytes for 4 byte alignment */
1719 struct ice_dummy_pkt_offsets dummy_qinq_ipv4_tcp_packet_offsets[] = {
1720 { ICE_MAC_OFOS, 0 },
1721 { ICE_VLAN_EX, 12 },
1722 { ICE_VLAN_IN, 16 },
1723 { ICE_ETYPE_OL, 20 },
1724 { ICE_IPV4_OFOS, 22 },
1726 { ICE_PROTOCOL_LAST, 0 },
1729 static const u8 dummy_qinq_ipv4_tcp_pkt[] = {
1730 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1731 0x00, 0x00, 0x00, 0x00,
1732 0x00, 0x00, 0x00, 0x00,
1734 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
1735 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
1736 0x08, 0x00, /* ICE_ETYPE_OL 20 */
1738 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 22 */
1739 0x00, 0x01, 0x00, 0x00,
1740 0x00, 0x06, 0x00, 0x00,
1741 0x00, 0x00, 0x00, 0x00,
1742 0x00, 0x00, 0x00, 0x00,
1744 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 42 */
1745 0x00, 0x00, 0x00, 0x00,
1746 0x00, 0x00, 0x00, 0x00,
1747 0x50, 0x00, 0x00, 0x00,
1748 0x00, 0x00, 0x00, 0x00,
1750 0x00, 0x00, /* 2 bytes for 4 byte alignment */
1753 static const struct ice_dummy_pkt_offsets dummy_qinq_ipv6_packet_offsets[] = {
1754 { ICE_MAC_OFOS, 0 },
1755 { ICE_VLAN_EX, 12 },
1756 { ICE_VLAN_IN, 16 },
1757 { ICE_ETYPE_OL, 20 },
1758 { ICE_IPV6_OFOS, 22 },
1759 { ICE_PROTOCOL_LAST, 0 },
1762 static const u8 dummy_qinq_ipv6_pkt[] = {
1763 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1764 0x00, 0x00, 0x00, 0x00,
1765 0x00, 0x00, 0x00, 0x00,
1767 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
1768 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
1769 0x86, 0xDD, /* ICE_ETYPE_OL 20 */
1771 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
1772 0x00, 0x00, 0x3b, 0x00,
1773 0x00, 0x00, 0x00, 0x00,
1774 0x00, 0x00, 0x00, 0x00,
1775 0x00, 0x00, 0x00, 0x00,
1776 0x00, 0x00, 0x00, 0x00,
1777 0x00, 0x00, 0x00, 0x00,
1778 0x00, 0x00, 0x00, 0x00,
1779 0x00, 0x00, 0x00, 0x00,
1780 0x00, 0x00, 0x00, 0x00,
1782 0x00, 0x00, /* 2 bytes for 4 byte alignment */
1786 struct ice_dummy_pkt_offsets dummy_qinq_ipv6_udp_packet_offsets[] = {
1787 { ICE_MAC_OFOS, 0 },
1788 { ICE_VLAN_EX, 12 },
1789 { ICE_VLAN_IN, 16 },
1790 { ICE_ETYPE_OL, 20 },
1791 { ICE_IPV6_OFOS, 22 },
1792 { ICE_UDP_ILOS, 62 },
1793 { ICE_PROTOCOL_LAST, 0 },
1796 static const u8 dummy_qinq_ipv6_udp_pkt[] = {
1797 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1798 0x00, 0x00, 0x00, 0x00,
1799 0x00, 0x00, 0x00, 0x00,
1801 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
1802 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
1803 0x86, 0xDD, /* ICE_ETYPE_OL 20 */
1805 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
1806 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
1807 0x00, 0x00, 0x00, 0x00,
1808 0x00, 0x00, 0x00, 0x00,
1809 0x00, 0x00, 0x00, 0x00,
1810 0x00, 0x00, 0x00, 0x00,
1811 0x00, 0x00, 0x00, 0x00,
1812 0x00, 0x00, 0x00, 0x00,
1813 0x00, 0x00, 0x00, 0x00,
1814 0x00, 0x00, 0x00, 0x00,
1816 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
1817 0x00, 0x08, 0x00, 0x00,
1819 0x00, 0x00, /* 2 bytes for 4 byte alignment */
1823 struct ice_dummy_pkt_offsets dummy_qinq_ipv6_tcp_packet_offsets[] = {
1824 { ICE_MAC_OFOS, 0 },
1825 { ICE_VLAN_EX, 12 },
1826 { ICE_VLAN_IN, 16 },
1827 { ICE_ETYPE_OL, 20 },
1828 { ICE_IPV6_OFOS, 22 },
1830 { ICE_PROTOCOL_LAST, 0 },
1833 static const u8 dummy_qinq_ipv6_tcp_pkt[] = {
1834 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1835 0x00, 0x00, 0x00, 0x00,
1836 0x00, 0x00, 0x00, 0x00,
1838 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
1839 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
1840 0x86, 0xDD, /* ICE_ETYPE_OL 20 */
1842 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
1843 0x00, 0x14, 0x06, 0x00, /* Next header TCP */
1844 0x00, 0x00, 0x00, 0x00,
1845 0x00, 0x00, 0x00, 0x00,
1846 0x00, 0x00, 0x00, 0x00,
1847 0x00, 0x00, 0x00, 0x00,
1848 0x00, 0x00, 0x00, 0x00,
1849 0x00, 0x00, 0x00, 0x00,
1850 0x00, 0x00, 0x00, 0x00,
1851 0x00, 0x00, 0x00, 0x00,
1853 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 62 */
1854 0x00, 0x00, 0x00, 0x00,
1855 0x00, 0x00, 0x00, 0x00,
1856 0x50, 0x00, 0x00, 0x00,
1857 0x00, 0x00, 0x00, 0x00,
1859 0x00, 0x00, /* 2 bytes for 4 byte alignment */
1862 static const struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_offsets[] = {
1863 { ICE_MAC_OFOS, 0 },
1864 { ICE_VLAN_EX, 12 },
1865 { ICE_VLAN_IN, 16 },
1866 { ICE_ETYPE_OL, 20 },
1868 { ICE_PROTOCOL_LAST, 0 },
1872 struct ice_dummy_pkt_offsets dummy_qinq_pppoe_ipv4_packet_offsets[] = {
1873 { ICE_MAC_OFOS, 0 },
1874 { ICE_VLAN_EX, 12 },
1875 { ICE_VLAN_IN, 16 },
1876 { ICE_ETYPE_OL, 20 },
1878 { ICE_IPV4_OFOS, 30 },
1879 { ICE_PROTOCOL_LAST, 0 },
1882 static const u8 dummy_qinq_pppoe_ipv4_pkt[] = {
1883 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1884 0x00, 0x00, 0x00, 0x00,
1885 0x00, 0x00, 0x00, 0x00,
1887 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
1888 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
1889 0x88, 0x64, /* ICE_ETYPE_OL 20 */
1891 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
1894 0x00, 0x21, /* PPP Link Layer 28 */
1896 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_OFOS 30 */
1897 0x00, 0x00, 0x00, 0x00,
1898 0x00, 0x00, 0x00, 0x00,
1899 0x00, 0x00, 0x00, 0x00,
1900 0x00, 0x00, 0x00, 0x00,
1902 0x00, 0x00, /* 2 bytes for 4 byte alignment */
1906 struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_ipv6_offsets[] = {
1907 { ICE_MAC_OFOS, 0 },
1908 { ICE_VLAN_EX, 12 },
1909 { ICE_VLAN_IN, 16 },
1910 { ICE_ETYPE_OL, 20 },
1912 { ICE_IPV6_OFOS, 30 },
1913 { ICE_PROTOCOL_LAST, 0 },
1916 static const u8 dummy_qinq_pppoe_ipv6_packet[] = {
1917 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1918 0x00, 0x00, 0x00, 0x00,
1919 0x00, 0x00, 0x00, 0x00,
1921 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
1922 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
1923 0x88, 0x64, /* ICE_ETYPE_OL 20 */
1925 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
1928 0x00, 0x57, /* PPP Link Layer 28*/
1930 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 30 */
1931 0x00, 0x00, 0x3b, 0x00,
1932 0x00, 0x00, 0x00, 0x00,
1933 0x00, 0x00, 0x00, 0x00,
1934 0x00, 0x00, 0x00, 0x00,
1935 0x00, 0x00, 0x00, 0x00,
1936 0x00, 0x00, 0x00, 0x00,
1937 0x00, 0x00, 0x00, 0x00,
1938 0x00, 0x00, 0x00, 0x00,
1939 0x00, 0x00, 0x00, 0x00,
1941 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1944 /* this is a recipe to profile association bitmap */
1945 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
1946 ICE_MAX_NUM_PROFILES);
1948 /* this is a profile to recipe association bitmap */
1949 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
1950 ICE_MAX_NUM_RECIPES);
1952 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
1955 * ice_collect_result_idx - copy result index values
1956 * @buf: buffer that contains the result index
1957 * @recp: the recipe struct to copy data into
1959 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
1960 struct ice_sw_recipe *recp)
1962 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1963 ice_set_bit(buf->content.result_indx &
1964 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
1967 static struct ice_prof_type_entry ice_prof_type_tbl[ICE_GTPU_PROFILE] = {
1968 { ICE_PROFID_IPV4_GTPU_IPV4_OTHER, ICE_SW_TUN_IPV4_GTPU_IPV4},
1969 { ICE_PROFID_IPV4_GTPU_IPV4_UDP, ICE_SW_TUN_IPV4_GTPU_IPV4_UDP},
1970 { ICE_PROFID_IPV4_GTPU_IPV4_TCP, ICE_SW_TUN_IPV4_GTPU_IPV4_TCP},
1971 { ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, ICE_SW_TUN_IPV4_GTPU_EH_IPV4},
1972 { ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP},
1973 { ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP},
1974 { ICE_PROFID_IPV4_GTPU_IPV6_OTHER, ICE_SW_TUN_IPV4_GTPU_IPV6},
1975 { ICE_PROFID_IPV4_GTPU_IPV6_UDP, ICE_SW_TUN_IPV4_GTPU_IPV6_UDP},
1976 { ICE_PROFID_IPV4_GTPU_IPV6_TCP, ICE_SW_TUN_IPV4_GTPU_IPV6_TCP},
1977 { ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, ICE_SW_TUN_IPV4_GTPU_EH_IPV6},
1978 { ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP},
1979 { ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP},
1980 { ICE_PROFID_IPV6_GTPU_IPV4_OTHER, ICE_SW_TUN_IPV6_GTPU_IPV4},
1981 { ICE_PROFID_IPV6_GTPU_IPV4_UDP, ICE_SW_TUN_IPV6_GTPU_IPV4_UDP},
1982 { ICE_PROFID_IPV6_GTPU_IPV4_TCP, ICE_SW_TUN_IPV6_GTPU_IPV4_TCP},
1983 { ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, ICE_SW_TUN_IPV6_GTPU_EH_IPV4},
1984 { ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP},
1985 { ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP},
1986 { ICE_PROFID_IPV6_GTPU_IPV6_OTHER, ICE_SW_TUN_IPV6_GTPU_IPV6},
1987 { ICE_PROFID_IPV6_GTPU_IPV6_UDP, ICE_SW_TUN_IPV6_GTPU_IPV6_UDP},
1988 { ICE_PROFID_IPV6_GTPU_IPV6_TCP, ICE_SW_TUN_IPV6_GTPU_IPV6_TCP},
1989 { ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, ICE_SW_TUN_IPV6_GTPU_EH_IPV6},
1990 { ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP},
1991 { ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP},
1995 * ice_get_tun_type_for_recipe - get tunnel type for the recipe
1996 * @rid: recipe ID that we are populating
1998 static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid, bool vlan)
2000 u8 vxlan_profile[12] = {10, 11, 12, 16, 17, 18, 22, 23, 24, 25, 26, 27};
2001 u8 gre_profile[12] = {13, 14, 15, 19, 20, 21, 28, 29, 30, 31, 32, 33};
2002 u8 pppoe_profile[7] = {34, 35, 36, 37, 38, 39, 40};
2003 u8 non_tun_profile[6] = {4, 5, 6, 7, 8, 9};
2004 enum ice_sw_tunnel_type tun_type;
2005 u16 i, j, k, profile_num = 0;
2006 bool non_tun_valid = false;
2007 bool pppoe_valid = false;
2008 bool vxlan_valid = false;
2009 bool gre_valid = false;
2010 bool gtp_valid = false;
2011 bool flag_valid = false;
2013 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
2014 if (!ice_is_bit_set(recipe_to_profile[rid], j))
2019 for (i = 0; i < 12; i++) {
2020 if (gre_profile[i] == j)
2024 for (i = 0; i < 12; i++) {
2025 if (vxlan_profile[i] == j)
2029 for (i = 0; i < 7; i++) {
2030 if (pppoe_profile[i] == j)
2034 for (i = 0; i < 6; i++) {
2035 if (non_tun_profile[i] == j)
2036 non_tun_valid = true;
2039 if (j >= ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER &&
2040 j <= ICE_PROFID_IPV6_GTPU_IPV6_TCP)
2043 if ((j >= ICE_PROFID_IPV4_ESP &&
2044 j <= ICE_PROFID_IPV6_PFCP_SESSION) ||
2045 (j >= ICE_PROFID_IPV4_GTPC_TEID &&
2046 j <= ICE_PROFID_IPV6_GTPU_TEID))
2050 if (!non_tun_valid && vxlan_valid)
2051 tun_type = ICE_SW_TUN_VXLAN;
2052 else if (!non_tun_valid && gre_valid)
2053 tun_type = ICE_SW_TUN_NVGRE;
2054 else if (!non_tun_valid && pppoe_valid)
2055 tun_type = ICE_SW_TUN_PPPOE;
2056 else if (!non_tun_valid && gtp_valid)
2057 tun_type = ICE_SW_TUN_GTP;
2058 else if (non_tun_valid &&
2059 (vxlan_valid || gre_valid || gtp_valid || pppoe_valid))
2060 tun_type = ICE_SW_TUN_AND_NON_TUN;
2061 else if (non_tun_valid && !vxlan_valid && !gre_valid && !gtp_valid &&
2063 tun_type = ICE_NON_TUN;
2065 tun_type = ICE_NON_TUN;
2067 if (profile_num > 1 && tun_type == ICE_SW_TUN_PPPOE) {
2068 i = ice_is_bit_set(recipe_to_profile[rid],
2069 ICE_PROFID_PPPOE_IPV4_OTHER);
2070 j = ice_is_bit_set(recipe_to_profile[rid],
2071 ICE_PROFID_PPPOE_IPV6_OTHER);
2073 tun_type = ICE_SW_TUN_PPPOE_IPV4;
2075 tun_type = ICE_SW_TUN_PPPOE_IPV6;
2078 if (tun_type == ICE_SW_TUN_GTP) {
2079 for (k = 0; k < ARRAY_SIZE(ice_prof_type_tbl); k++)
2080 if (ice_is_bit_set(recipe_to_profile[rid],
2081 ice_prof_type_tbl[k].prof_id)) {
2082 tun_type = ice_prof_type_tbl[k].type;
2087 if (profile_num == 1 && (flag_valid || non_tun_valid || pppoe_valid)) {
2088 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
2089 if (ice_is_bit_set(recipe_to_profile[rid], j)) {
2091 case ICE_PROFID_IPV4_TCP:
2092 tun_type = ICE_SW_IPV4_TCP;
2094 case ICE_PROFID_IPV4_UDP:
2095 tun_type = ICE_SW_IPV4_UDP;
2097 case ICE_PROFID_IPV6_TCP:
2098 tun_type = ICE_SW_IPV6_TCP;
2100 case ICE_PROFID_IPV6_UDP:
2101 tun_type = ICE_SW_IPV6_UDP;
2103 case ICE_PROFID_PPPOE_PAY:
2104 tun_type = ICE_SW_TUN_PPPOE_PAY;
2106 case ICE_PROFID_PPPOE_IPV4_TCP:
2107 tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
2109 case ICE_PROFID_PPPOE_IPV4_UDP:
2110 tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
2112 case ICE_PROFID_PPPOE_IPV4_OTHER:
2113 tun_type = ICE_SW_TUN_PPPOE_IPV4;
2115 case ICE_PROFID_PPPOE_IPV6_TCP:
2116 tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
2118 case ICE_PROFID_PPPOE_IPV6_UDP:
2119 tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
2121 case ICE_PROFID_PPPOE_IPV6_OTHER:
2122 tun_type = ICE_SW_TUN_PPPOE_IPV6;
2124 case ICE_PROFID_IPV4_ESP:
2125 tun_type = ICE_SW_TUN_IPV4_ESP;
2127 case ICE_PROFID_IPV6_ESP:
2128 tun_type = ICE_SW_TUN_IPV6_ESP;
2130 case ICE_PROFID_IPV4_AH:
2131 tun_type = ICE_SW_TUN_IPV4_AH;
2133 case ICE_PROFID_IPV6_AH:
2134 tun_type = ICE_SW_TUN_IPV6_AH;
2136 case ICE_PROFID_IPV4_NAT_T:
2137 tun_type = ICE_SW_TUN_IPV4_NAT_T;
2139 case ICE_PROFID_IPV6_NAT_T:
2140 tun_type = ICE_SW_TUN_IPV6_NAT_T;
2142 case ICE_PROFID_IPV4_PFCP_NODE:
2144 ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
2146 case ICE_PROFID_IPV6_PFCP_NODE:
2148 ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
2150 case ICE_PROFID_IPV4_PFCP_SESSION:
2152 ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
2154 case ICE_PROFID_IPV6_PFCP_SESSION:
2156 ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
2158 case ICE_PROFID_MAC_IPV4_L2TPV3:
2159 tun_type = ICE_SW_TUN_IPV4_L2TPV3;
2161 case ICE_PROFID_MAC_IPV6_L2TPV3:
2162 tun_type = ICE_SW_TUN_IPV6_L2TPV3;
2164 case ICE_PROFID_IPV4_GTPU_TEID:
2165 tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
2167 case ICE_PROFID_IPV6_GTPU_TEID:
2168 tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
2179 if (vlan && tun_type == ICE_SW_TUN_PPPOE)
2180 tun_type = ICE_SW_TUN_PPPOE_QINQ;
2181 else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV6)
2182 tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
2183 else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV4)
2184 tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
2185 else if (vlan && tun_type == ICE_SW_TUN_PPPOE_PAY)
2186 tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
2187 else if (vlan && tun_type == ICE_SW_TUN_AND_NON_TUN)
2188 tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
2189 else if (vlan && tun_type == ICE_NON_TUN)
2190 tun_type = ICE_NON_TUN_QINQ;
2196 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
2197 * @hw: pointer to hardware structure
2198 * @recps: struct that we need to populate
2199 * @rid: recipe ID that we are populating
2200 * @refresh_required: true if we should get recipe to profile mapping from FW
2202 * This function is used to populate all the necessary entries into our
2203 * bookkeeping so that we have a current list of all the recipes that are
2204 * programmed in the firmware.
2206 static enum ice_status
2207 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
2208 bool *refresh_required)
2210 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
2211 struct ice_aqc_recipe_data_elem *tmp;
2212 u16 num_recps = ICE_MAX_NUM_RECIPES;
2213 struct ice_prot_lkup_ext *lkup_exts;
2214 enum ice_status status;
2219 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
2221 /* we need a buffer big enough to accommodate all the recipes */
2222 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
2223 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
2225 return ICE_ERR_NO_MEMORY;
2227 tmp[0].recipe_indx = rid;
2228 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
2229 /* non-zero status meaning recipe doesn't exist */
2233 /* Get recipe to profile map so that we can get the fv from lkups that
2234 * we read for a recipe from FW. Since we want to minimize the number of
2235 * times we make this FW call, just make one call and cache the copy
2236 * until a new recipe is added. This operation is only required the
2237 * first time to get the changes from FW. Then to search existing
2238 * entries we don't need to update the cache again until another recipe
2241 if (*refresh_required) {
2242 ice_get_recp_to_prof_map(hw);
2243 *refresh_required = false;
2246 /* Start populating all the entries for recps[rid] based on lkups from
2247 * firmware. Note that we are only creating the root recipe in our
2250 lkup_exts = &recps[rid].lkup_exts;
2252 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
2253 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
2254 struct ice_recp_grp_entry *rg_entry;
2255 u8 i, prof, idx, prot = 0;
2259 rg_entry = (struct ice_recp_grp_entry *)
2260 ice_malloc(hw, sizeof(*rg_entry));
2262 status = ICE_ERR_NO_MEMORY;
2266 idx = root_bufs.recipe_indx;
2267 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
2269 /* Mark all result indices in this chain */
2270 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
2271 ice_set_bit(root_bufs.content.result_indx &
2272 ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
2274 /* get the first profile that is associated with rid */
2275 prof = ice_find_first_bit(recipe_to_profile[idx],
2276 ICE_MAX_NUM_PROFILES);
2277 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
2278 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
2280 rg_entry->fv_idx[i] = lkup_indx;
2281 rg_entry->fv_mask[i] =
2282 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
2284 /* If the recipe is a chained recipe then all its
2285 * child recipe's result will have a result index.
2286 * To fill fv_words we should not use those result
2287 * index, we only need the protocol ids and offsets.
2288 * We will skip all the fv_idx which stores result
2289 * index in them. We also need to skip any fv_idx which
2290 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
2291 * valid offset value.
2293 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
2294 rg_entry->fv_idx[i]) ||
2295 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
2296 rg_entry->fv_idx[i] == 0)
2299 ice_find_prot_off(hw, ICE_BLK_SW, prof,
2300 rg_entry->fv_idx[i], &prot, &off);
2301 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
2302 lkup_exts->fv_words[fv_word_idx].off = off;
2303 lkup_exts->field_mask[fv_word_idx] =
2304 rg_entry->fv_mask[i];
2305 if (prot == ICE_META_DATA_ID_HW &&
2306 off == ICE_TUN_FLAG_MDID_OFF)
2310 /* populate rg_list with the data from the child entry of this
2313 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
2315 /* Propagate some data to the recipe database */
2316 recps[idx].is_root = !!is_root;
2317 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2318 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
2319 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
2320 recps[idx].chain_idx = root_bufs.content.result_indx &
2321 ~ICE_AQ_RECIPE_RESULT_EN;
2322 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
2324 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
2330 /* Only do the following for root recipes entries */
2331 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
2332 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
2333 recps[idx].root_rid = root_bufs.content.rid &
2334 ~ICE_AQ_RECIPE_ID_IS_ROOT;
2335 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2338 /* Complete initialization of the root recipe entry */
2339 lkup_exts->n_val_words = fv_word_idx;
2340 recps[rid].big_recp = (num_recps > 1);
2341 recps[rid].n_grp_count = (u8)num_recps;
2342 recps[rid].tun_type = ice_get_tun_type_for_recipe(rid, vlan);
2343 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
2344 ice_memdup(hw, tmp, recps[rid].n_grp_count *
2345 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
2346 if (!recps[rid].root_buf)
2349 /* Copy result indexes */
2350 ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
2351 recps[rid].recp_created = true;
2359 * ice_get_recp_to_prof_map - updates recipe to profile mapping
2360 * @hw: pointer to hardware structure
2362 * This function is used to populate recipe_to_profile matrix where index to
2363 * this array is the recipe ID and the element is the mapping of which profiles
2364 * is this recipe mapped to.
2366 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
2368 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
2371 for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
2374 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
2375 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
2376 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
2378 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
2379 ICE_MAX_NUM_RECIPES);
2380 ice_for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
2381 ice_set_bit(i, recipe_to_profile[j]);
2386 * ice_init_def_sw_recp - initialize the recipe book keeping tables
2387 * @hw: pointer to the HW struct
2388 * @recp_list: pointer to sw recipe list
2390 * Allocate memory for the entire recipe table and initialize the structures/
2391 * entries corresponding to basic recipes.
2394 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
2396 struct ice_sw_recipe *recps;
2399 recps = (struct ice_sw_recipe *)
2400 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
2402 return ICE_ERR_NO_MEMORY;
2404 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
2405 recps[i].root_rid = i;
2406 INIT_LIST_HEAD(&recps[i].filt_rules);
2407 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
2408 INIT_LIST_HEAD(&recps[i].rg_list);
2409 ice_init_lock(&recps[i].filt_rule_lock);
2418 * ice_aq_get_sw_cfg - get switch configuration
2419 * @hw: pointer to the hardware structure
2420 * @buf: pointer to the result buffer
2421 * @buf_size: length of the buffer available for response
2422 * @req_desc: pointer to requested descriptor
2423 * @num_elems: pointer to number of elements
2424 * @cd: pointer to command details structure or NULL
2426 * Get switch configuration (0x0200) to be placed in buf.
2427 * This admin command returns information such as initial VSI/port number
2428 * and switch ID it belongs to.
2430 * NOTE: *req_desc is both an input/output parameter.
2431 * The caller of this function first calls this function with *request_desc set
2432 * to 0. If the response from f/w has *req_desc set to 0, all the switch
2433 * configuration information has been returned; if non-zero (meaning not all
2434 * the information was returned), the caller should call this function again
2435 * with *req_desc set to the previous value returned by f/w to get the
2436 * next block of switch configuration information.
2438 * *num_elems is output only parameter. This reflects the number of elements
2439 * in response buffer. The caller of this function to use *num_elems while
2440 * parsing the response buffer.
2442 static enum ice_status
2443 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
2444 u16 buf_size, u16 *req_desc, u16 *num_elems,
2445 struct ice_sq_cd *cd)
2447 struct ice_aqc_get_sw_cfg *cmd;
2448 struct ice_aq_desc desc;
2449 enum ice_status status;
2451 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
2452 cmd = &desc.params.get_sw_conf;
2453 cmd->element = CPU_TO_LE16(*req_desc);
2455 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2457 *req_desc = LE16_TO_CPU(cmd->element);
2458 *num_elems = LE16_TO_CPU(cmd->num_elems);
2465 * ice_alloc_rss_global_lut - allocate a RSS global LUT
2466 * @hw: pointer to the HW struct
2467 * @shared_res: true to allocate as a shared resource and false to allocate as a dedicated resource
2468 * @global_lut_id: output parameter for the RSS global LUT's ID
2470 enum ice_status ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id)
2472 struct ice_aqc_alloc_free_res_elem *sw_buf;
2473 enum ice_status status;
2476 buf_len = ice_struct_size(sw_buf, elem, 1);
2477 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2479 return ICE_ERR_NO_MEMORY;
2481 sw_buf->num_elems = CPU_TO_LE16(1);
2482 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH |
2483 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
2484 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
2486 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, ice_aqc_opc_alloc_res, NULL);
2488 ice_debug(hw, ICE_DBG_RES, "Failed to allocate %s RSS global LUT, status %d\n",
2489 shared_res ? "shared" : "dedicated", status);
2490 goto ice_alloc_global_lut_exit;
2493 *global_lut_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2495 ice_alloc_global_lut_exit:
2496 ice_free(hw, sw_buf);
2501 * ice_free_rss_global_lut - free a RSS global LUT
2502 * @hw: pointer to the HW struct
2503 * @global_lut_id: ID of the RSS global LUT to free
2505 enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id)
2507 struct ice_aqc_alloc_free_res_elem *sw_buf;
2508 u16 buf_len, num_elems = 1;
2509 enum ice_status status;
2511 buf_len = ice_struct_size(sw_buf, elem, num_elems);
2512 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2514 return ICE_ERR_NO_MEMORY;
2516 sw_buf->num_elems = CPU_TO_LE16(num_elems);
2517 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH);
2518 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(global_lut_id);
2520 status = ice_aq_alloc_free_res(hw, num_elems, sw_buf, buf_len, ice_aqc_opc_free_res, NULL);
2522 ice_debug(hw, ICE_DBG_RES, "Failed to free RSS global LUT %d, status %d\n",
2523 global_lut_id, status);
2525 ice_free(hw, sw_buf);
2530 * ice_alloc_sw - allocate resources specific to switch
2531 * @hw: pointer to the HW struct
2532 * @ena_stats: true to turn on VEB stats
2533 * @shared_res: true for shared resource, false for dedicated resource
2534 * @sw_id: switch ID returned
2535 * @counter_id: VEB counter ID returned
2537 * allocates switch resources (SWID and VEB counter) (0x0208)
2540 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
2543 struct ice_aqc_alloc_free_res_elem *sw_buf;
2544 struct ice_aqc_res_elem *sw_ele;
2545 enum ice_status status;
2548 buf_len = ice_struct_size(sw_buf, elem, 1);
2549 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2551 return ICE_ERR_NO_MEMORY;
2553 /* Prepare buffer for switch ID.
2554 * The number of resource entries in buffer is passed as 1 since only a
2555 * single switch/VEB instance is allocated, and hence a single sw_id
2558 sw_buf->num_elems = CPU_TO_LE16(1);
2560 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
2561 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
2562 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
2564 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2565 ice_aqc_opc_alloc_res, NULL);
2568 goto ice_alloc_sw_exit;
2570 sw_ele = &sw_buf->elem[0];
2571 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
2574 /* Prepare buffer for VEB Counter */
2575 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
2576 struct ice_aqc_alloc_free_res_elem *counter_buf;
2577 struct ice_aqc_res_elem *counter_ele;
2579 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
2580 ice_malloc(hw, buf_len);
2582 status = ICE_ERR_NO_MEMORY;
2583 goto ice_alloc_sw_exit;
2586 /* The number of resource entries in buffer is passed as 1 since
2587 * only a single switch/VEB instance is allocated, and hence a
2588 * single VEB counter is requested.
2590 counter_buf->num_elems = CPU_TO_LE16(1);
2591 counter_buf->res_type =
2592 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
2593 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
2594 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
2598 ice_free(hw, counter_buf);
2599 goto ice_alloc_sw_exit;
2601 counter_ele = &counter_buf->elem[0];
2602 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
2603 ice_free(hw, counter_buf);
2607 ice_free(hw, sw_buf);
2612 * ice_free_sw - free resources specific to switch
2613 * @hw: pointer to the HW struct
2614 * @sw_id: switch ID returned
2615 * @counter_id: VEB counter ID returned
2617 * free switch resources (SWID and VEB counter) (0x0209)
2619 * NOTE: This function frees multiple resources. It continues
2620 * releasing other resources even after it encounters error.
2621 * The error code returned is the last error it encountered.
2623 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
2625 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
2626 enum ice_status status, ret_status;
2629 buf_len = ice_struct_size(sw_buf, elem, 1);
2630 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2632 return ICE_ERR_NO_MEMORY;
2634 /* Prepare buffer to free for switch ID res.
2635 * The number of resource entries in buffer is passed as 1 since only a
2636 * single switch/VEB instance is freed, and hence a single sw_id
2639 sw_buf->num_elems = CPU_TO_LE16(1);
2640 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
2641 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
2643 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2644 ice_aqc_opc_free_res, NULL);
2647 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
2649 /* Prepare buffer to free for VEB Counter resource */
2650 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
2651 ice_malloc(hw, buf_len);
2653 ice_free(hw, sw_buf);
2654 return ICE_ERR_NO_MEMORY;
2657 /* The number of resource entries in buffer is passed as 1 since only a
2658 * single switch/VEB instance is freed, and hence a single VEB counter
2661 counter_buf->num_elems = CPU_TO_LE16(1);
2662 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
2663 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
2665 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
2666 ice_aqc_opc_free_res, NULL);
2668 ice_debug(hw, ICE_DBG_SW, "VEB counter resource could not be freed\n");
2669 ret_status = status;
2672 ice_free(hw, counter_buf);
2673 ice_free(hw, sw_buf);
2679 * @hw: pointer to the HW struct
2680 * @vsi_ctx: pointer to a VSI context struct
2681 * @cd: pointer to command details structure or NULL
2683 * Add a VSI context to the hardware (0x0210)
2686 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2687 struct ice_sq_cd *cd)
2689 struct ice_aqc_add_update_free_vsi_resp *res;
2690 struct ice_aqc_add_get_update_free_vsi *cmd;
2691 struct ice_aq_desc desc;
2692 enum ice_status status;
2694 cmd = &desc.params.vsi_cmd;
2695 res = &desc.params.add_update_free_vsi_res;
2697 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
2699 if (!vsi_ctx->alloc_from_pool)
2700 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
2701 ICE_AQ_VSI_IS_VALID);
2703 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
2705 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2707 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2708 sizeof(vsi_ctx->info), cd);
2711 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
2712 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
2713 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
2721 * @hw: pointer to the HW struct
2722 * @vsi_ctx: pointer to a VSI context struct
2723 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2724 * @cd: pointer to command details structure or NULL
2726 * Free VSI context info from hardware (0x0213)
2729 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2730 bool keep_vsi_alloc, struct ice_sq_cd *cd)
2732 struct ice_aqc_add_update_free_vsi_resp *resp;
2733 struct ice_aqc_add_get_update_free_vsi *cmd;
2734 struct ice_aq_desc desc;
2735 enum ice_status status;
2737 cmd = &desc.params.vsi_cmd;
2738 resp = &desc.params.add_update_free_vsi_res;
2740 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
2742 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2744 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
2746 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2748 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2749 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2757 * @hw: pointer to the HW struct
2758 * @vsi_ctx: pointer to a VSI context struct
2759 * @cd: pointer to command details structure or NULL
2761 * Update VSI context in the hardware (0x0211)
2764 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2765 struct ice_sq_cd *cd)
2767 struct ice_aqc_add_update_free_vsi_resp *resp;
2768 struct ice_aqc_add_get_update_free_vsi *cmd;
2769 struct ice_aq_desc desc;
2770 enum ice_status status;
2772 cmd = &desc.params.vsi_cmd;
2773 resp = &desc.params.add_update_free_vsi_res;
2775 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
2777 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2779 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2781 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2782 sizeof(vsi_ctx->info), cd);
2785 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2786 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2793 * ice_is_vsi_valid - check whether the VSI is valid or not
2794 * @hw: pointer to the HW struct
2795 * @vsi_handle: VSI handle
2797 * check whether the VSI is valid or not
2799 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
2801 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
2805 * ice_get_hw_vsi_num - return the HW VSI number
2806 * @hw: pointer to the HW struct
2807 * @vsi_handle: VSI handle
2809 * return the HW VSI number
2810 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
2812 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
2814 return hw->vsi_ctx[vsi_handle]->vsi_num;
2818 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
2819 * @hw: pointer to the HW struct
2820 * @vsi_handle: VSI handle
2822 * return the VSI context entry for a given VSI handle
2824 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2826 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
2830 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
2831 * @hw: pointer to the HW struct
2832 * @vsi_handle: VSI handle
2833 * @vsi: VSI context pointer
2835 * save the VSI context entry for a given VSI handle
2838 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
2840 hw->vsi_ctx[vsi_handle] = vsi;
2844 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
2845 * @hw: pointer to the HW struct
2846 * @vsi_handle: VSI handle
2848 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
2850 struct ice_vsi_ctx *vsi;
2853 vsi = ice_get_vsi_ctx(hw, vsi_handle);
2856 ice_for_each_traffic_class(i) {
2857 if (vsi->lan_q_ctx[i]) {
2858 ice_free(hw, vsi->lan_q_ctx[i]);
2859 vsi->lan_q_ctx[i] = NULL;
2865 * ice_clear_vsi_ctx - clear the VSI context entry
2866 * @hw: pointer to the HW struct
2867 * @vsi_handle: VSI handle
2869 * clear the VSI context entry
2871 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2873 struct ice_vsi_ctx *vsi;
2875 vsi = ice_get_vsi_ctx(hw, vsi_handle);
2877 ice_clear_vsi_q_ctx(hw, vsi_handle);
2879 hw->vsi_ctx[vsi_handle] = NULL;
2884 * ice_clear_all_vsi_ctx - clear all the VSI context entries
2885 * @hw: pointer to the HW struct
2887 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
2891 for (i = 0; i < ICE_MAX_VSI; i++)
2892 ice_clear_vsi_ctx(hw, i);
2896 * ice_add_vsi - add VSI context to the hardware and VSI handle list
2897 * @hw: pointer to the HW struct
2898 * @vsi_handle: unique VSI handle provided by drivers
2899 * @vsi_ctx: pointer to a VSI context struct
2900 * @cd: pointer to command details structure or NULL
2902 * Add a VSI context to the hardware also add it into the VSI handle list.
2903 * If this function gets called after reset for existing VSIs then update
2904 * with the new HW VSI number in the corresponding VSI handle list entry.
2907 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2908 struct ice_sq_cd *cd)
2910 struct ice_vsi_ctx *tmp_vsi_ctx;
2911 enum ice_status status;
2913 if (vsi_handle >= ICE_MAX_VSI)
2914 return ICE_ERR_PARAM;
2915 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
2918 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
2920 /* Create a new VSI context */
2921 tmp_vsi_ctx = (struct ice_vsi_ctx *)
2922 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
2924 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
2925 return ICE_ERR_NO_MEMORY;
2927 *tmp_vsi_ctx = *vsi_ctx;
2929 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
2931 /* update with new HW VSI num */
2932 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
2939 * ice_free_vsi- free VSI context from hardware and VSI handle list
2940 * @hw: pointer to the HW struct
2941 * @vsi_handle: unique VSI handle
2942 * @vsi_ctx: pointer to a VSI context struct
2943 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2944 * @cd: pointer to command details structure or NULL
2946 * Free VSI context info from hardware as well as from VSI handle list
2949 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2950 bool keep_vsi_alloc, struct ice_sq_cd *cd)
2952 enum ice_status status;
2954 if (!ice_is_vsi_valid(hw, vsi_handle))
2955 return ICE_ERR_PARAM;
2956 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2957 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
2959 ice_clear_vsi_ctx(hw, vsi_handle);
2965 * @hw: pointer to the HW struct
2966 * @vsi_handle: unique VSI handle
2967 * @vsi_ctx: pointer to a VSI context struct
2968 * @cd: pointer to command details structure or NULL
2970 * Update VSI context in the hardware
2973 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2974 struct ice_sq_cd *cd)
2976 if (!ice_is_vsi_valid(hw, vsi_handle))
2977 return ICE_ERR_PARAM;
2978 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2979 return ice_aq_update_vsi(hw, vsi_ctx, cd);
2983 * ice_aq_get_vsi_params
2984 * @hw: pointer to the HW struct
2985 * @vsi_ctx: pointer to a VSI context struct
2986 * @cd: pointer to command details structure or NULL
2988 * Get VSI context info from hardware (0x0212)
2991 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2992 struct ice_sq_cd *cd)
2994 struct ice_aqc_add_get_update_free_vsi *cmd;
2995 struct ice_aqc_get_vsi_resp *resp;
2996 struct ice_aq_desc desc;
2997 enum ice_status status;
2999 cmd = &desc.params.vsi_cmd;
3000 resp = &desc.params.get_vsi_resp;
3002 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
3004 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
3006 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
3007 sizeof(vsi_ctx->info), cd);
3009 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
3011 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
3012 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
3019 * ice_aq_add_update_mir_rule - add/update a mirror rule
3020 * @hw: pointer to the HW struct
3021 * @rule_type: Rule Type
3022 * @dest_vsi: VSI number to which packets will be mirrored
3023 * @count: length of the list
3024 * @mr_buf: buffer for list of mirrored VSI numbers
3025 * @cd: pointer to command details structure or NULL
3028 * Add/Update Mirror Rule (0x260).
3031 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
3032 u16 count, struct ice_mir_rule_buf *mr_buf,
3033 struct ice_sq_cd *cd, u16 *rule_id)
3035 struct ice_aqc_add_update_mir_rule *cmd;
3036 struct ice_aq_desc desc;
3037 enum ice_status status;
3038 __le16 *mr_list = NULL;
3041 switch (rule_type) {
3042 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
3043 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
3044 /* Make sure count and mr_buf are set for these rule_types */
3045 if (!(count && mr_buf))
3046 return ICE_ERR_PARAM;
3048 buf_size = count * sizeof(__le16);
3049 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
3051 return ICE_ERR_NO_MEMORY;
3053 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
3054 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
3055 /* Make sure count and mr_buf are not set for these
3058 if (count || mr_buf)
3059 return ICE_ERR_PARAM;
3062 ice_debug(hw, ICE_DBG_SW, "Error due to unsupported rule_type %u\n", rule_type);
3063 return ICE_ERR_OUT_OF_RANGE;
3066 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
3068 /* Pre-process 'mr_buf' items for add/update of virtual port
3069 * ingress/egress mirroring (but not physical port ingress/egress
3075 for (i = 0; i < count; i++) {
3078 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
3080 /* Validate specified VSI number, make sure it is less
3081 * than ICE_MAX_VSI, if not return with error.
3083 if (id >= ICE_MAX_VSI) {
3084 ice_debug(hw, ICE_DBG_SW, "Error VSI index (%u) out-of-range\n",
3086 ice_free(hw, mr_list);
3087 return ICE_ERR_OUT_OF_RANGE;
3090 /* add VSI to mirror rule */
3093 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
3094 else /* remove VSI from mirror rule */
3095 mr_list[i] = CPU_TO_LE16(id);
3099 cmd = &desc.params.add_update_rule;
3100 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
3101 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
3102 ICE_AQC_RULE_ID_VALID_M);
3103 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
3104 cmd->num_entries = CPU_TO_LE16(count);
3105 cmd->dest = CPU_TO_LE16(dest_vsi);
3107 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
3109 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
3111 ice_free(hw, mr_list);
3117 * ice_aq_delete_mir_rule - delete a mirror rule
3118 * @hw: pointer to the HW struct
3119 * @rule_id: Mirror rule ID (to be deleted)
3120 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
3121 * otherwise it is returned to the shared pool
3122 * @cd: pointer to command details structure or NULL
3124 * Delete Mirror Rule (0x261).
3127 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
3128 struct ice_sq_cd *cd)
3130 struct ice_aqc_delete_mir_rule *cmd;
3131 struct ice_aq_desc desc;
3133 /* rule_id should be in the range 0...63 */
3134 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
3135 return ICE_ERR_OUT_OF_RANGE;
3137 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
3139 cmd = &desc.params.del_rule;
3140 rule_id |= ICE_AQC_RULE_ID_VALID_M;
3141 cmd->rule_id = CPU_TO_LE16(rule_id);
3144 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
3146 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3150 * ice_aq_alloc_free_vsi_list
3151 * @hw: pointer to the HW struct
3152 * @vsi_list_id: VSI list ID returned or used for lookup
3153 * @lkup_type: switch rule filter lookup type
3154 * @opc: switch rules population command type - pass in the command opcode
3156 * allocates or free a VSI list resource
3158 static enum ice_status
3159 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
3160 enum ice_sw_lkup_type lkup_type,
3161 enum ice_adminq_opc opc)
3163 struct ice_aqc_alloc_free_res_elem *sw_buf;
3164 struct ice_aqc_res_elem *vsi_ele;
3165 enum ice_status status;
3168 buf_len = ice_struct_size(sw_buf, elem, 1);
3169 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
3171 return ICE_ERR_NO_MEMORY;
3172 sw_buf->num_elems = CPU_TO_LE16(1);
3174 if (lkup_type == ICE_SW_LKUP_MAC ||
3175 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
3176 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3177 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3178 lkup_type == ICE_SW_LKUP_PROMISC ||
3179 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3180 lkup_type == ICE_SW_LKUP_LAST) {
3181 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
3182 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
3184 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
3186 status = ICE_ERR_PARAM;
3187 goto ice_aq_alloc_free_vsi_list_exit;
3190 if (opc == ice_aqc_opc_free_res)
3191 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
3193 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
3195 goto ice_aq_alloc_free_vsi_list_exit;
3197 if (opc == ice_aqc_opc_alloc_res) {
3198 vsi_ele = &sw_buf->elem[0];
3199 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
3202 ice_aq_alloc_free_vsi_list_exit:
3203 ice_free(hw, sw_buf);
3208 * ice_aq_set_storm_ctrl - Sets storm control configuration
3209 * @hw: pointer to the HW struct
3210 * @bcast_thresh: represents the upper threshold for broadcast storm control
3211 * @mcast_thresh: represents the upper threshold for multicast storm control
3212 * @ctl_bitmask: storm control knobs
3214 * Sets the storm control configuration (0x0280)
3217 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
3220 struct ice_aqc_storm_cfg *cmd;
3221 struct ice_aq_desc desc;
3223 cmd = &desc.params.storm_conf;
3225 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
3227 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
3228 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
3229 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
3231 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3235 * ice_aq_get_storm_ctrl - gets storm control configuration
3236 * @hw: pointer to the HW struct
3237 * @bcast_thresh: represents the upper threshold for broadcast storm control
3238 * @mcast_thresh: represents the upper threshold for multicast storm control
3239 * @ctl_bitmask: storm control knobs
3241 * Gets the storm control configuration (0x0281)
3244 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
3247 enum ice_status status;
3248 struct ice_aq_desc desc;
3250 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
3252 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3254 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
3257 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
3260 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
3263 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
3270 * ice_aq_sw_rules - add/update/remove switch rules
3271 * @hw: pointer to the HW struct
3272 * @rule_list: pointer to switch rule population list
3273 * @rule_list_sz: total size of the rule list in bytes
3274 * @num_rules: number of switch rules in the rule_list
3275 * @opc: switch rules population command type - pass in the command opcode
3276 * @cd: pointer to command details structure or NULL
3278 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
3280 static enum ice_status
3281 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
3282 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
3284 struct ice_aq_desc desc;
3285 enum ice_status status;
3287 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3289 if (opc != ice_aqc_opc_add_sw_rules &&
3290 opc != ice_aqc_opc_update_sw_rules &&
3291 opc != ice_aqc_opc_remove_sw_rules)
3292 return ICE_ERR_PARAM;
3294 ice_fill_dflt_direct_cmd_desc(&desc, opc);
3296 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3297 desc.params.sw_rules.num_rules_fltr_entry_index =
3298 CPU_TO_LE16(num_rules);
3299 status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
3300 if (opc != ice_aqc_opc_add_sw_rules &&
3301 hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
3302 status = ICE_ERR_DOES_NOT_EXIST;
3308 * ice_aq_add_recipe - add switch recipe
3309 * @hw: pointer to the HW struct
3310 * @s_recipe_list: pointer to switch rule population list
3311 * @num_recipes: number of switch recipes in the list
3312 * @cd: pointer to command details structure or NULL
3317 ice_aq_add_recipe(struct ice_hw *hw,
3318 struct ice_aqc_recipe_data_elem *s_recipe_list,
3319 u16 num_recipes, struct ice_sq_cd *cd)
3321 struct ice_aqc_add_get_recipe *cmd;
3322 struct ice_aq_desc desc;
3325 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3326 cmd = &desc.params.add_get_recipe;
3327 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
3329 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
3330 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3332 buf_size = num_recipes * sizeof(*s_recipe_list);
3334 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
3338 * ice_aq_get_recipe - get switch recipe
3339 * @hw: pointer to the HW struct
3340 * @s_recipe_list: pointer to switch rule population list
3341 * @num_recipes: pointer to the number of recipes (input and output)
3342 * @recipe_root: root recipe number of recipe(s) to retrieve
3343 * @cd: pointer to command details structure or NULL
3347 * On input, *num_recipes should equal the number of entries in s_recipe_list.
3348 * On output, *num_recipes will equal the number of entries returned in
3351 * The caller must supply enough space in s_recipe_list to hold all possible
3352 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
3355 ice_aq_get_recipe(struct ice_hw *hw,
3356 struct ice_aqc_recipe_data_elem *s_recipe_list,
3357 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
3359 struct ice_aqc_add_get_recipe *cmd;
3360 struct ice_aq_desc desc;
3361 enum ice_status status;
3364 if (*num_recipes != ICE_MAX_NUM_RECIPES)
3365 return ICE_ERR_PARAM;
3367 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3368 cmd = &desc.params.add_get_recipe;
3369 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
3371 cmd->return_index = CPU_TO_LE16(recipe_root);
3372 cmd->num_sub_recipes = 0;
3374 buf_size = *num_recipes * sizeof(*s_recipe_list);
3376 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
3377 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
3383 * ice_update_recipe_lkup_idx - update a default recipe based on the lkup_idx
3384 * @hw: pointer to the HW struct
3385 * @params: parameters used to update the default recipe
3387 * This function only supports updating default recipes and it only supports
3388 * updating a single recipe based on the lkup_idx at a time.
3390 * This is done as a read-modify-write operation. First, get the current recipe
3391 * contents based on the recipe's ID. Then modify the field vector index and
3392 * mask if it's valid at the lkup_idx. Finally, use the add recipe AQ to update
3393 * the pre-existing recipe with the modifications.
3396 ice_update_recipe_lkup_idx(struct ice_hw *hw,
3397 struct ice_update_recipe_lkup_idx_params *params)
3399 struct ice_aqc_recipe_data_elem *rcp_list;
3400 u16 num_recps = ICE_MAX_NUM_RECIPES;
3401 enum ice_status status;
3403 rcp_list = (struct ice_aqc_recipe_data_elem *)ice_malloc(hw, num_recps * sizeof(*rcp_list));
3405 return ICE_ERR_NO_MEMORY;
3407 /* read current recipe list from firmware */
3408 rcp_list->recipe_indx = params->rid;
3409 status = ice_aq_get_recipe(hw, rcp_list, &num_recps, params->rid, NULL);
3411 ice_debug(hw, ICE_DBG_SW, "Failed to get recipe %d, status %d\n",
3412 params->rid, status);
3416 /* only modify existing recipe's lkup_idx and mask if valid, while
3417 * leaving all other fields the same, then update the recipe firmware
3419 rcp_list->content.lkup_indx[params->lkup_idx] = params->fv_idx;
3420 if (params->mask_valid)
3421 rcp_list->content.mask[params->lkup_idx] =
3422 CPU_TO_LE16(params->mask);
3424 if (params->ignore_valid)
3425 rcp_list->content.lkup_indx[params->lkup_idx] |=
3426 ICE_AQ_RECIPE_LKUP_IGNORE;
3428 status = ice_aq_add_recipe(hw, &rcp_list[0], 1, NULL);
3430 ice_debug(hw, ICE_DBG_SW, "Failed to update recipe %d lkup_idx %d fv_idx %d mask %d mask_valid %s, status %d\n",
3431 params->rid, params->lkup_idx, params->fv_idx,
3432 params->mask, params->mask_valid ? "true" : "false",
3436 ice_free(hw, rcp_list);
3441 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
3442 * @hw: pointer to the HW struct
3443 * @profile_id: package profile ID to associate the recipe with
3444 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
3445 * @cd: pointer to command details structure or NULL
3446 * Recipe to profile association (0x0291)
3449 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
3450 struct ice_sq_cd *cd)
3452 struct ice_aqc_recipe_to_profile *cmd;
3453 struct ice_aq_desc desc;
3455 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3456 cmd = &desc.params.recipe_to_profile;
3457 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
3458 cmd->profile_id = CPU_TO_LE16(profile_id);
3459 /* Set the recipe ID bit in the bitmask to let the device know which
3460 * profile we are associating the recipe to
3462 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
3463 ICE_NONDMA_TO_NONDMA);
3465 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3469 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
3470 * @hw: pointer to the HW struct
3471 * @profile_id: package profile ID to associate the recipe with
3472 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
3473 * @cd: pointer to command details structure or NULL
3474 * Associate profile ID with given recipe (0x0293)
3477 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
3478 struct ice_sq_cd *cd)
3480 struct ice_aqc_recipe_to_profile *cmd;
3481 struct ice_aq_desc desc;
3482 enum ice_status status;
3484 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3485 cmd = &desc.params.recipe_to_profile;
3486 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
3487 cmd->profile_id = CPU_TO_LE16(profile_id);
3489 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3491 ice_memcpy(r_bitmap, cmd->recipe_assoc,
3492 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
3498 * ice_alloc_recipe - add recipe resource
3499 * @hw: pointer to the hardware structure
3500 * @rid: recipe ID returned as response to AQ call
3502 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
3504 struct ice_aqc_alloc_free_res_elem *sw_buf;
3505 enum ice_status status;
3508 buf_len = ice_struct_size(sw_buf, elem, 1);
3509 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
3511 return ICE_ERR_NO_MEMORY;
3513 sw_buf->num_elems = CPU_TO_LE16(1);
3514 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
3515 ICE_AQC_RES_TYPE_S) |
3516 ICE_AQC_RES_TYPE_FLAG_SHARED);
3517 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
3518 ice_aqc_opc_alloc_res, NULL);
3520 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
3521 ice_free(hw, sw_buf);
3526 /* ice_init_port_info - Initialize port_info with switch configuration data
3527 * @pi: pointer to port_info
3528 * @vsi_port_num: VSI number or port number
3529 * @type: Type of switch element (port or VSI)
3530 * @swid: switch ID of the switch the element is attached to
3531 * @pf_vf_num: PF or VF number
3532 * @is_vf: true if the element is a VF, false otherwise
3535 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
3536 u16 swid, u16 pf_vf_num, bool is_vf)
3539 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
3540 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
3542 pi->pf_vf_num = pf_vf_num;
3544 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3545 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3548 ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
3553 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
3554 * @hw: pointer to the hardware structure
3556 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
3558 struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
3559 enum ice_status status;
3566 num_total_ports = 1;
3568 rbuf = (struct ice_aqc_get_sw_cfg_resp_elem *)
3569 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
3572 return ICE_ERR_NO_MEMORY;
3574 /* Multiple calls to ice_aq_get_sw_cfg may be required
3575 * to get all the switch configuration information. The need
3576 * for additional calls is indicated by ice_aq_get_sw_cfg
3577 * writing a non-zero value in req_desc
3580 struct ice_aqc_get_sw_cfg_resp_elem *ele;
3582 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
3583 &req_desc, &num_elems, NULL);
3588 for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
3589 u16 pf_vf_num, swid, vsi_port_num;
3593 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
3594 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
3596 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
3597 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
3599 swid = LE16_TO_CPU(ele->swid);
3601 if (LE16_TO_CPU(ele->pf_vf_num) &
3602 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
3605 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
3606 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
3609 case ICE_AQC_GET_SW_CONF_RESP_VSI:
3610 if (hw->dcf_enabled && !is_vf)
3611 hw->pf_id = pf_vf_num;
3613 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
3614 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
3615 if (j == num_total_ports) {
3616 ice_debug(hw, ICE_DBG_SW, "more ports than expected\n");
3617 status = ICE_ERR_CFG;
3620 ice_init_port_info(hw->port_info,
3621 vsi_port_num, res_type, swid,
3629 } while (req_desc && !status);
3637 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
3638 * @hw: pointer to the hardware structure
3639 * @fi: filter info structure to fill/update
3641 * This helper function populates the lb_en and lan_en elements of the provided
3642 * ice_fltr_info struct using the switch's type and characteristics of the
3643 * switch rule being configured.
3645 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
3647 if ((fi->flag & ICE_FLTR_RX) &&
3648 (fi->fltr_act == ICE_FWD_TO_VSI ||
3649 fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
3650 fi->lkup_type == ICE_SW_LKUP_LAST)
3654 if ((fi->flag & ICE_FLTR_TX) &&
3655 (fi->fltr_act == ICE_FWD_TO_VSI ||
3656 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
3657 fi->fltr_act == ICE_FWD_TO_Q ||
3658 fi->fltr_act == ICE_FWD_TO_QGRP)) {
3659 /* Setting LB for prune actions will result in replicated
3660 * packets to the internal switch that will be dropped.
3662 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
3665 /* Set lan_en to TRUE if
3666 * 1. The switch is a VEB AND
3668 * 2.1 The lookup is a directional lookup like ethertype,
3669 * promiscuous, ethertype-MAC, promiscuous-VLAN
3670 * and default-port OR
3671 * 2.2 The lookup is VLAN, OR
3672 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
3673 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
3677 * The switch is a VEPA.
3679 * In all other cases, the LAN enable has to be set to false.
3682 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3683 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
3684 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3685 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3686 fi->lkup_type == ICE_SW_LKUP_DFLT ||
3687 fi->lkup_type == ICE_SW_LKUP_VLAN ||
3688 (fi->lkup_type == ICE_SW_LKUP_MAC &&
3689 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
3690 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
3691 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
3700 * ice_fill_sw_rule - Helper function to fill switch rule structure
3701 * @hw: pointer to the hardware structure
3702 * @f_info: entry containing packet forwarding information
3703 * @s_rule: switch rule structure to be filled in based on mac_entry
3704 * @opc: switch rules population command type - pass in the command opcode
3707 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
3708 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
3710 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
3711 u16 vlan_tpid = ICE_ETH_P_8021Q;
3719 if (opc == ice_aqc_opc_remove_sw_rules) {
3720 s_rule->pdata.lkup_tx_rx.act = 0;
3721 s_rule->pdata.lkup_tx_rx.index =
3722 CPU_TO_LE16(f_info->fltr_rule_id);
3723 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
3727 eth_hdr_sz = sizeof(dummy_eth_header);
3728 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
3730 /* initialize the ether header with a dummy header */
3731 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
3732 ice_fill_sw_info(hw, f_info);
3734 switch (f_info->fltr_act) {
3735 case ICE_FWD_TO_VSI:
3736 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
3737 ICE_SINGLE_ACT_VSI_ID_M;
3738 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
3739 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3740 ICE_SINGLE_ACT_VALID_BIT;
3742 case ICE_FWD_TO_VSI_LIST:
3743 act |= ICE_SINGLE_ACT_VSI_LIST;
3744 act |= (f_info->fwd_id.vsi_list_id <<
3745 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
3746 ICE_SINGLE_ACT_VSI_LIST_ID_M;
3747 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
3748 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3749 ICE_SINGLE_ACT_VALID_BIT;
3752 act |= ICE_SINGLE_ACT_TO_Q;
3753 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3754 ICE_SINGLE_ACT_Q_INDEX_M;
3756 case ICE_DROP_PACKET:
3757 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
3758 ICE_SINGLE_ACT_VALID_BIT;
3760 case ICE_FWD_TO_QGRP:
3761 q_rgn = f_info->qgrp_size > 0 ?
3762 (u8)ice_ilog2(f_info->qgrp_size) : 0;
3763 act |= ICE_SINGLE_ACT_TO_Q;
3764 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3765 ICE_SINGLE_ACT_Q_INDEX_M;
3766 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
3767 ICE_SINGLE_ACT_Q_REGION_M;
3774 act |= ICE_SINGLE_ACT_LB_ENABLE;
3776 act |= ICE_SINGLE_ACT_LAN_ENABLE;
3778 switch (f_info->lkup_type) {
3779 case ICE_SW_LKUP_MAC:
3780 daddr = f_info->l_data.mac.mac_addr;
3782 case ICE_SW_LKUP_VLAN:
3783 vlan_id = f_info->l_data.vlan.vlan_id;
3784 if (f_info->l_data.vlan.tpid_valid)
3785 vlan_tpid = f_info->l_data.vlan.tpid;
3786 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
3787 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
3788 act |= ICE_SINGLE_ACT_PRUNE;
3789 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
3792 case ICE_SW_LKUP_ETHERTYPE_MAC:
3793 daddr = f_info->l_data.ethertype_mac.mac_addr;
3795 case ICE_SW_LKUP_ETHERTYPE:
3796 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
3797 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
3799 case ICE_SW_LKUP_MAC_VLAN:
3800 daddr = f_info->l_data.mac_vlan.mac_addr;
3801 vlan_id = f_info->l_data.mac_vlan.vlan_id;
3803 case ICE_SW_LKUP_PROMISC_VLAN:
3804 vlan_id = f_info->l_data.mac_vlan.vlan_id;
3806 case ICE_SW_LKUP_PROMISC:
3807 daddr = f_info->l_data.mac_vlan.mac_addr;
3813 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
3814 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
3815 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
3817 /* Recipe set depending on lookup type */
3818 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
3819 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
3820 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
3823 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
3824 ICE_NONDMA_TO_NONDMA);
3826 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
3827 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
3828 *off = CPU_TO_BE16(vlan_id);
3829 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
3830 *off = CPU_TO_BE16(vlan_tpid);
3833 /* Create the switch rule with the final dummy Ethernet header */
3834 if (opc != ice_aqc_opc_update_sw_rules)
3835 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
3839 * ice_add_marker_act
3840 * @hw: pointer to the hardware structure
3841 * @m_ent: the management entry for which sw marker needs to be added
3842 * @sw_marker: sw marker to tag the Rx descriptor with
3843 * @l_id: large action resource ID
3845 * Create a large action to hold software marker and update the switch rule
3846 * entry pointed by m_ent with newly created large action
3848 static enum ice_status
3849 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3850 u16 sw_marker, u16 l_id)
3852 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
3853 /* For software marker we need 3 large actions
3854 * 1. FWD action: FWD TO VSI or VSI LIST
3855 * 2. GENERIC VALUE action to hold the profile ID
3856 * 3. GENERIC VALUE action to hold the software marker ID
3858 const u16 num_lg_acts = 3;
3859 enum ice_status status;
3865 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3866 return ICE_ERR_PARAM;
3868 /* Create two back-to-back switch rules and submit them to the HW using
3869 * one memory buffer:
3873 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
3874 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3875 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3877 return ICE_ERR_NO_MEMORY;
3879 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3881 /* Fill in the first switch rule i.e. large action */
3882 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3883 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3884 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
3886 /* First action VSI forwarding or VSI list forwarding depending on how
3889 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
3890 m_ent->fltr_info.fwd_id.hw_vsi_id;
3892 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3893 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
3894 if (m_ent->vsi_count > 1)
3895 act |= ICE_LG_ACT_VSI_LIST;
3896 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3898 /* Second action descriptor type */
3899 act = ICE_LG_ACT_GENERIC;
3901 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
3902 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3904 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
3905 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
3907 /* Third action Marker value */
3908 act |= ICE_LG_ACT_GENERIC;
3909 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
3910 ICE_LG_ACT_GENERIC_VALUE_M;
3912 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
3914 /* call the fill switch rule to fill the lookup Tx Rx structure */
3915 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3916 ice_aqc_opc_update_sw_rules);
3918 /* Update the action to point to the large action ID */
3919 rx_tx->pdata.lkup_tx_rx.act =
3920 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
3921 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
3922 ICE_SINGLE_ACT_PTR_VAL_M));
3924 /* Use the filter rule ID of the previously created rule with single
3925 * act. Once the update happens, hardware will treat this as large
3928 rx_tx->pdata.lkup_tx_rx.index =
3929 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
3931 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3932 ice_aqc_opc_update_sw_rules, NULL);
3934 m_ent->lg_act_idx = l_id;
3935 m_ent->sw_marker_id = sw_marker;
3938 ice_free(hw, lg_act);
3943 * ice_add_counter_act - add/update filter rule with counter action
3944 * @hw: pointer to the hardware structure
3945 * @m_ent: the management entry for which counter needs to be added
3946 * @counter_id: VLAN counter ID returned as part of allocate resource
3947 * @l_id: large action resource ID
3949 static enum ice_status
3950 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3951 u16 counter_id, u16 l_id)
3953 struct ice_aqc_sw_rules_elem *lg_act;
3954 struct ice_aqc_sw_rules_elem *rx_tx;
3955 enum ice_status status;
3956 /* 2 actions will be added while adding a large action counter */
3957 const int num_acts = 2;
3964 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3965 return ICE_ERR_PARAM;
3967 /* Create two back-to-back switch rules and submit them to the HW using
3968 * one memory buffer:
3972 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
3973 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3974 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3976 return ICE_ERR_NO_MEMORY;
3978 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3980 /* Fill in the first switch rule i.e. large action */
3981 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3982 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3983 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
3985 /* First action VSI forwarding or VSI list forwarding depending on how
3988 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
3989 m_ent->fltr_info.fwd_id.hw_vsi_id;
3991 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3992 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
3993 ICE_LG_ACT_VSI_LIST_ID_M;
3994 if (m_ent->vsi_count > 1)
3995 act |= ICE_LG_ACT_VSI_LIST;
3996 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3998 /* Second action counter ID */
3999 act = ICE_LG_ACT_STAT_COUNT;
4000 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
4001 ICE_LG_ACT_STAT_COUNT_M;
4002 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
4004 /* call the fill switch rule to fill the lookup Tx Rx structure */
4005 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
4006 ice_aqc_opc_update_sw_rules);
4008 act = ICE_SINGLE_ACT_PTR;
4009 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
4010 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
4012 /* Use the filter rule ID of the previously created rule with single
4013 * act. Once the update happens, hardware will treat this as large
4016 f_rule_id = m_ent->fltr_info.fltr_rule_id;
4017 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
4019 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
4020 ice_aqc_opc_update_sw_rules, NULL);
4022 m_ent->lg_act_idx = l_id;
4023 m_ent->counter_index = counter_id;
4026 ice_free(hw, lg_act);
4031 * ice_create_vsi_list_map
4032 * @hw: pointer to the hardware structure
4033 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
4034 * @num_vsi: number of VSI handles in the array
4035 * @vsi_list_id: VSI list ID generated as part of allocate resource
4037 * Helper function to create a new entry of VSI list ID to VSI mapping
4038 * using the given VSI list ID
4040 static struct ice_vsi_list_map_info *
4041 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
4044 struct ice_switch_info *sw = hw->switch_info;
4045 struct ice_vsi_list_map_info *v_map;
4048 v_map = (struct ice_vsi_list_map_info *)ice_malloc(hw, sizeof(*v_map));
4052 v_map->vsi_list_id = vsi_list_id;
4054 for (i = 0; i < num_vsi; i++)
4055 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
4057 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
4062 * ice_update_vsi_list_rule
4063 * @hw: pointer to the hardware structure
4064 * @vsi_handle_arr: array of VSI handles to form a VSI list
4065 * @num_vsi: number of VSI handles in the array
4066 * @vsi_list_id: VSI list ID generated as part of allocate resource
4067 * @remove: Boolean value to indicate if this is a remove action
4068 * @opc: switch rules population command type - pass in the command opcode
4069 * @lkup_type: lookup type of the filter
4071 * Call AQ command to add a new switch rule or update existing switch rule
4072 * using the given VSI list ID
4074 static enum ice_status
4075 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
4076 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
4077 enum ice_sw_lkup_type lkup_type)
4079 struct ice_aqc_sw_rules_elem *s_rule;
4080 enum ice_status status;
4086 return ICE_ERR_PARAM;
4088 if (lkup_type == ICE_SW_LKUP_MAC ||
4089 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
4090 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
4091 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
4092 lkup_type == ICE_SW_LKUP_PROMISC ||
4093 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
4094 lkup_type == ICE_SW_LKUP_LAST)
4095 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
4096 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
4097 else if (lkup_type == ICE_SW_LKUP_VLAN)
4098 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
4099 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
4101 return ICE_ERR_PARAM;
4103 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
4104 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
4106 return ICE_ERR_NO_MEMORY;
4107 for (i = 0; i < num_vsi; i++) {
4108 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
4109 status = ICE_ERR_PARAM;
4112 /* AQ call requires hw_vsi_id(s) */
4113 s_rule->pdata.vsi_list.vsi[i] =
4114 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
4117 s_rule->type = CPU_TO_LE16(rule_type);
4118 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
4119 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
4121 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
4124 ice_free(hw, s_rule);
4129 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
4130 * @hw: pointer to the HW struct
4131 * @vsi_handle_arr: array of VSI handles to form a VSI list
4132 * @num_vsi: number of VSI handles in the array
4133 * @vsi_list_id: stores the ID of the VSI list to be created
4134 * @lkup_type: switch rule filter's lookup type
4136 static enum ice_status
4137 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
4138 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
4140 enum ice_status status;
4142 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
4143 ice_aqc_opc_alloc_res);
4147 /* Update the newly created VSI list to include the specified VSIs */
4148 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
4149 *vsi_list_id, false,
4150 ice_aqc_opc_add_sw_rules, lkup_type);
4154 * ice_create_pkt_fwd_rule
4155 * @hw: pointer to the hardware structure
4156 * @recp_list: corresponding filter management list
4157 * @f_entry: entry containing packet forwarding information
4159 * Create switch rule with given filter information and add an entry
4160 * to the corresponding filter management list to track this switch rule
4163 static enum ice_status
4164 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4165 struct ice_fltr_list_entry *f_entry)
4167 struct ice_fltr_mgmt_list_entry *fm_entry;
4168 struct ice_aqc_sw_rules_elem *s_rule;
4169 enum ice_status status;
4171 s_rule = (struct ice_aqc_sw_rules_elem *)
4172 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
4174 return ICE_ERR_NO_MEMORY;
4175 fm_entry = (struct ice_fltr_mgmt_list_entry *)
4176 ice_malloc(hw, sizeof(*fm_entry));
4178 status = ICE_ERR_NO_MEMORY;
4179 goto ice_create_pkt_fwd_rule_exit;
4182 fm_entry->fltr_info = f_entry->fltr_info;
4184 /* Initialize all the fields for the management entry */
4185 fm_entry->vsi_count = 1;
4186 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
4187 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
4188 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
4190 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
4191 ice_aqc_opc_add_sw_rules);
4193 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
4194 ice_aqc_opc_add_sw_rules, NULL);
4196 ice_free(hw, fm_entry);
4197 goto ice_create_pkt_fwd_rule_exit;
4200 f_entry->fltr_info.fltr_rule_id =
4201 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4202 fm_entry->fltr_info.fltr_rule_id =
4203 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4205 /* The book keeping entries will get removed when base driver
4206 * calls remove filter AQ command
4208 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
4210 ice_create_pkt_fwd_rule_exit:
4211 ice_free(hw, s_rule);
4216 * ice_update_pkt_fwd_rule
4217 * @hw: pointer to the hardware structure
4218 * @f_info: filter information for switch rule
4220 * Call AQ command to update a previously created switch rule with a
4223 static enum ice_status
4224 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
4226 struct ice_aqc_sw_rules_elem *s_rule;
4227 enum ice_status status;
4229 s_rule = (struct ice_aqc_sw_rules_elem *)
4230 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
4232 return ICE_ERR_NO_MEMORY;
4234 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
4236 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
4238 /* Update switch rule with new rule set to forward VSI list */
4239 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
4240 ice_aqc_opc_update_sw_rules, NULL);
4242 ice_free(hw, s_rule);
4247 * ice_update_sw_rule_bridge_mode
4248 * @hw: pointer to the HW struct
4250 * Updates unicast switch filter rules based on VEB/VEPA mode
4252 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
4254 struct ice_switch_info *sw = hw->switch_info;
4255 struct ice_fltr_mgmt_list_entry *fm_entry;
4256 enum ice_status status = ICE_SUCCESS;
4257 struct LIST_HEAD_TYPE *rule_head;
4258 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4260 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4261 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
4263 ice_acquire_lock(rule_lock);
4264 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
4266 struct ice_fltr_info *fi = &fm_entry->fltr_info;
4267 u8 *addr = fi->l_data.mac.mac_addr;
4269 /* Update unicast Tx rules to reflect the selected
4272 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
4273 (fi->fltr_act == ICE_FWD_TO_VSI ||
4274 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
4275 fi->fltr_act == ICE_FWD_TO_Q ||
4276 fi->fltr_act == ICE_FWD_TO_QGRP)) {
4277 status = ice_update_pkt_fwd_rule(hw, fi);
4283 ice_release_lock(rule_lock);
4289 * ice_add_update_vsi_list
4290 * @hw: pointer to the hardware structure
4291 * @m_entry: pointer to current filter management list entry
4292 * @cur_fltr: filter information from the book keeping entry
4293 * @new_fltr: filter information with the new VSI to be added
4295 * Call AQ command to add or update previously created VSI list with new VSI.
4297 * Helper function to do book keeping associated with adding filter information
4298 * The algorithm to do the book keeping is described below :
4299 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
4300 * if only one VSI has been added till now
4301 * Allocate a new VSI list and add two VSIs
4302 * to this list using switch rule command
4303 * Update the previously created switch rule with the
4304 * newly created VSI list ID
4305 * if a VSI list was previously created
4306 * Add the new VSI to the previously created VSI list set
4307 * using the update switch rule command
4309 static enum ice_status
4310 ice_add_update_vsi_list(struct ice_hw *hw,
4311 struct ice_fltr_mgmt_list_entry *m_entry,
4312 struct ice_fltr_info *cur_fltr,
4313 struct ice_fltr_info *new_fltr)
4315 enum ice_status status = ICE_SUCCESS;
4316 u16 vsi_list_id = 0;
4318 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
4319 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
4320 return ICE_ERR_NOT_IMPL;
4322 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
4323 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
4324 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
4325 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
4326 return ICE_ERR_NOT_IMPL;
4328 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
4329 /* Only one entry existed in the mapping and it was not already
4330 * a part of a VSI list. So, create a VSI list with the old and
4333 struct ice_fltr_info tmp_fltr;
4334 u16 vsi_handle_arr[2];
4336 /* A rule already exists with the new VSI being added */
4337 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
4338 return ICE_ERR_ALREADY_EXISTS;
4340 vsi_handle_arr[0] = cur_fltr->vsi_handle;
4341 vsi_handle_arr[1] = new_fltr->vsi_handle;
4342 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
4344 new_fltr->lkup_type);
4348 tmp_fltr = *new_fltr;
4349 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
4350 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4351 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
4352 /* Update the previous switch rule of "MAC forward to VSI" to
4353 * "MAC fwd to VSI list"
4355 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
4359 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
4360 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
4361 m_entry->vsi_list_info =
4362 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4365 if (!m_entry->vsi_list_info)
4366 return ICE_ERR_NO_MEMORY;
4368 /* If this entry was large action then the large action needs
4369 * to be updated to point to FWD to VSI list
4371 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
4373 ice_add_marker_act(hw, m_entry,
4374 m_entry->sw_marker_id,
4375 m_entry->lg_act_idx);
4377 u16 vsi_handle = new_fltr->vsi_handle;
4378 enum ice_adminq_opc opcode;
4380 if (!m_entry->vsi_list_info)
4383 /* A rule already exists with the new VSI being added */
4384 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
4387 /* Update the previously created VSI list set with
4388 * the new VSI ID passed in
4390 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
4391 opcode = ice_aqc_opc_update_sw_rules;
4393 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
4394 vsi_list_id, false, opcode,
4395 new_fltr->lkup_type);
4396 /* update VSI list mapping info with new VSI ID */
4398 ice_set_bit(vsi_handle,
4399 m_entry->vsi_list_info->vsi_map);
4402 m_entry->vsi_count++;
4407 * ice_find_rule_entry - Search a rule entry
4408 * @list_head: head of rule list
4409 * @f_info: rule information
4411 * Helper function to search for a given rule entry
4412 * Returns pointer to entry storing the rule if found
4414 static struct ice_fltr_mgmt_list_entry *
4415 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
4416 struct ice_fltr_info *f_info)
4418 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
4420 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4422 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4423 sizeof(f_info->l_data)) &&
4424 f_info->flag == list_itr->fltr_info.flag) {
4433 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
4434 * @recp_list: VSI lists needs to be searched
4435 * @vsi_handle: VSI handle to be found in VSI list
4436 * @vsi_list_id: VSI list ID found containing vsi_handle
4438 * Helper function to search a VSI list with single entry containing given VSI
4439 * handle element. This can be extended further to search VSI list with more
4440 * than 1 vsi_count. Returns pointer to VSI list entry if found.
4442 static struct ice_vsi_list_map_info *
4443 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
4446 struct ice_vsi_list_map_info *map_info = NULL;
4447 struct LIST_HEAD_TYPE *list_head;
4449 list_head = &recp_list->filt_rules;
4450 if (recp_list->adv_rule) {
4451 struct ice_adv_fltr_mgmt_list_entry *list_itr;
4453 LIST_FOR_EACH_ENTRY(list_itr, list_head,
4454 ice_adv_fltr_mgmt_list_entry,
4456 if (list_itr->vsi_list_info) {
4457 map_info = list_itr->vsi_list_info;
4458 if (ice_is_bit_set(map_info->vsi_map,
4460 *vsi_list_id = map_info->vsi_list_id;
4466 struct ice_fltr_mgmt_list_entry *list_itr;
4468 LIST_FOR_EACH_ENTRY(list_itr, list_head,
4469 ice_fltr_mgmt_list_entry,
4471 if (list_itr->vsi_count == 1 &&
4472 list_itr->vsi_list_info) {
4473 map_info = list_itr->vsi_list_info;
4474 if (ice_is_bit_set(map_info->vsi_map,
4476 *vsi_list_id = map_info->vsi_list_id;
4486 * ice_add_rule_internal - add rule for a given lookup type
4487 * @hw: pointer to the hardware structure
4488 * @recp_list: recipe list for which rule has to be added
4489 * @lport: logic port number on which function add rule
4490 * @f_entry: structure containing MAC forwarding information
4492 * Adds or updates the rule lists for a given recipe
4494 static enum ice_status
4495 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4496 u8 lport, struct ice_fltr_list_entry *f_entry)
4498 struct ice_fltr_info *new_fltr, *cur_fltr;
4499 struct ice_fltr_mgmt_list_entry *m_entry;
4500 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4501 enum ice_status status = ICE_SUCCESS;
4503 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4504 return ICE_ERR_PARAM;
4506 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
4507 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
4508 f_entry->fltr_info.fwd_id.hw_vsi_id =
4509 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4511 rule_lock = &recp_list->filt_rule_lock;
4513 ice_acquire_lock(rule_lock);
4514 new_fltr = &f_entry->fltr_info;
4515 if (new_fltr->flag & ICE_FLTR_RX)
4516 new_fltr->src = lport;
4517 else if (new_fltr->flag & ICE_FLTR_TX)
4519 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4521 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
4523 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
4524 goto exit_add_rule_internal;
4527 cur_fltr = &m_entry->fltr_info;
4528 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
4530 exit_add_rule_internal:
4531 ice_release_lock(rule_lock);
4536 * ice_remove_vsi_list_rule
4537 * @hw: pointer to the hardware structure
4538 * @vsi_list_id: VSI list ID generated as part of allocate resource
4539 * @lkup_type: switch rule filter lookup type
4541 * The VSI list should be emptied before this function is called to remove the
4544 static enum ice_status
4545 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
4546 enum ice_sw_lkup_type lkup_type)
4548 /* Free the vsi_list resource that we allocated. It is assumed that the
4549 * list is empty at this point.
4551 return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
4552 ice_aqc_opc_free_res);
4556 * ice_rem_update_vsi_list
4557 * @hw: pointer to the hardware structure
4558 * @vsi_handle: VSI handle of the VSI to remove
4559 * @fm_list: filter management entry for which the VSI list management needs to
4562 static enum ice_status
4563 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
4564 struct ice_fltr_mgmt_list_entry *fm_list)
4566 enum ice_sw_lkup_type lkup_type;
4567 enum ice_status status = ICE_SUCCESS;
4570 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
4571 fm_list->vsi_count == 0)
4572 return ICE_ERR_PARAM;
4574 /* A rule with the VSI being removed does not exist */
4575 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
4576 return ICE_ERR_DOES_NOT_EXIST;
4578 lkup_type = fm_list->fltr_info.lkup_type;
4579 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
4580 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
4581 ice_aqc_opc_update_sw_rules,
4586 fm_list->vsi_count--;
4587 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
4589 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
4590 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
4591 struct ice_vsi_list_map_info *vsi_list_info =
4592 fm_list->vsi_list_info;
4595 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
4597 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
4598 return ICE_ERR_OUT_OF_RANGE;
4600 /* Make sure VSI list is empty before removing it below */
4601 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
4603 ice_aqc_opc_update_sw_rules,
4608 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
4609 tmp_fltr_info.fwd_id.hw_vsi_id =
4610 ice_get_hw_vsi_num(hw, rem_vsi_handle);
4611 tmp_fltr_info.vsi_handle = rem_vsi_handle;
4612 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
4614 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
4615 tmp_fltr_info.fwd_id.hw_vsi_id, status);
4619 fm_list->fltr_info = tmp_fltr_info;
4622 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
4623 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
4624 struct ice_vsi_list_map_info *vsi_list_info =
4625 fm_list->vsi_list_info;
4627 /* Remove the VSI list since it is no longer used */
4628 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
4630 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
4631 vsi_list_id, status);
4635 LIST_DEL(&vsi_list_info->list_entry);
4636 ice_free(hw, vsi_list_info);
4637 fm_list->vsi_list_info = NULL;
4644 * ice_remove_rule_internal - Remove a filter rule of a given type
4646 * @hw: pointer to the hardware structure
4647 * @recp_list: recipe list for which the rule needs to removed
4648 * @f_entry: rule entry containing filter information
4650 static enum ice_status
4651 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4652 struct ice_fltr_list_entry *f_entry)
4654 struct ice_fltr_mgmt_list_entry *list_elem;
4655 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4656 enum ice_status status = ICE_SUCCESS;
4657 bool remove_rule = false;
4660 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4661 return ICE_ERR_PARAM;
4662 f_entry->fltr_info.fwd_id.hw_vsi_id =
4663 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4665 rule_lock = &recp_list->filt_rule_lock;
4666 ice_acquire_lock(rule_lock);
4667 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
4668 &f_entry->fltr_info);
4670 status = ICE_ERR_DOES_NOT_EXIST;
4674 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
4676 } else if (!list_elem->vsi_list_info) {
4677 status = ICE_ERR_DOES_NOT_EXIST;
4679 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
4680 /* a ref_cnt > 1 indicates that the vsi_list is being
4681 * shared by multiple rules. Decrement the ref_cnt and
4682 * remove this rule, but do not modify the list, as it
4683 * is in-use by other rules.
4685 list_elem->vsi_list_info->ref_cnt--;
4688 /* a ref_cnt of 1 indicates the vsi_list is only used
4689 * by one rule. However, the original removal request is only
4690 * for a single VSI. Update the vsi_list first, and only
4691 * remove the rule if there are no further VSIs in this list.
4693 vsi_handle = f_entry->fltr_info.vsi_handle;
4694 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
4697 /* if VSI count goes to zero after updating the VSI list */
4698 if (list_elem->vsi_count == 0)
4703 /* Remove the lookup rule */
4704 struct ice_aqc_sw_rules_elem *s_rule;
4706 s_rule = (struct ice_aqc_sw_rules_elem *)
4707 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
4709 status = ICE_ERR_NO_MEMORY;
4713 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
4714 ice_aqc_opc_remove_sw_rules);
4716 status = ice_aq_sw_rules(hw, s_rule,
4717 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
4718 ice_aqc_opc_remove_sw_rules, NULL);
4720 /* Remove a book keeping from the list */
4721 ice_free(hw, s_rule);
4726 LIST_DEL(&list_elem->list_entry);
4727 ice_free(hw, list_elem);
4730 ice_release_lock(rule_lock);
4735 * ice_aq_get_res_alloc - get allocated resources
4736 * @hw: pointer to the HW struct
4737 * @num_entries: pointer to u16 to store the number of resource entries returned
4738 * @buf: pointer to buffer
4739 * @buf_size: size of buf
4740 * @cd: pointer to command details structure or NULL
4742 * The caller-supplied buffer must be large enough to store the resource
4743 * information for all resource types. Each resource type is an
4744 * ice_aqc_get_res_resp_elem structure.
4747 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
4748 struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
4749 struct ice_sq_cd *cd)
4751 struct ice_aqc_get_res_alloc *resp;
4752 enum ice_status status;
4753 struct ice_aq_desc desc;
4756 return ICE_ERR_BAD_PTR;
4758 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
4759 return ICE_ERR_INVAL_SIZE;
4761 resp = &desc.params.get_res;
4763 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
4764 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4766 if (!status && num_entries)
4767 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
4773 * ice_aq_get_res_descs - get allocated resource descriptors
4774 * @hw: pointer to the hardware structure
4775 * @num_entries: number of resource entries in buffer
4776 * @buf: structure to hold response data buffer
4777 * @buf_size: size of buffer
4778 * @res_type: resource type
4779 * @res_shared: is resource shared
4780 * @desc_id: input - first desc ID to start; output - next desc ID
4781 * @cd: pointer to command details structure or NULL
4784 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
4785 struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
4786 bool res_shared, u16 *desc_id, struct ice_sq_cd *cd)
4788 struct ice_aqc_get_allocd_res_desc *cmd;
4789 struct ice_aq_desc desc;
4790 enum ice_status status;
4792 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4794 cmd = &desc.params.get_res_desc;
4797 return ICE_ERR_PARAM;
4799 if (buf_size != (num_entries * sizeof(*buf)))
4800 return ICE_ERR_PARAM;
4802 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
4804 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
4805 ICE_AQC_RES_TYPE_M) | (res_shared ?
4806 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
4807 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
4809 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4811 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
4817 * ice_add_mac_rule - Add a MAC address based filter rule
4818 * @hw: pointer to the hardware structure
4819 * @m_list: list of MAC addresses and forwarding information
4820 * @sw: pointer to switch info struct for which function add rule
4821 * @lport: logic port number on which function add rule
4823 * IMPORTANT: When the umac_shared flag is set to false and m_list has
4824 * multiple unicast addresses, the function assumes that all the
4825 * addresses are unique in a given add_mac call. It doesn't
4826 * check for duplicates in this case, removing duplicates from a given
4827 * list should be taken care of in the caller of this function.
4829 static enum ice_status
4830 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4831 struct ice_switch_info *sw, u8 lport)
4833 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
4834 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
4835 struct ice_fltr_list_entry *m_list_itr;
4836 struct LIST_HEAD_TYPE *rule_head;
4837 u16 total_elem_left, s_rule_size;
4838 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4839 enum ice_status status = ICE_SUCCESS;
4840 u16 num_unicast = 0;
4844 rule_lock = &recp_list->filt_rule_lock;
4845 rule_head = &recp_list->filt_rules;
4847 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4849 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
4853 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
4854 vsi_handle = m_list_itr->fltr_info.vsi_handle;
4855 if (!ice_is_vsi_valid(hw, vsi_handle))
4856 return ICE_ERR_PARAM;
4857 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4858 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
4859 /* update the src in case it is VSI num */
4860 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
4861 return ICE_ERR_PARAM;
4862 m_list_itr->fltr_info.src = hw_vsi_id;
4863 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
4864 IS_ZERO_ETHER_ADDR(add))
4865 return ICE_ERR_PARAM;
4866 if (IS_UNICAST_ETHER_ADDR(add) && !hw->umac_shared) {
4867 /* Don't overwrite the unicast address */
4868 ice_acquire_lock(rule_lock);
4869 if (ice_find_rule_entry(rule_head,
4870 &m_list_itr->fltr_info)) {
4871 ice_release_lock(rule_lock);
4874 ice_release_lock(rule_lock);
4876 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
4877 (IS_UNICAST_ETHER_ADDR(add) && hw->umac_shared)) {
4878 m_list_itr->status =
4879 ice_add_rule_internal(hw, recp_list, lport,
4881 if (m_list_itr->status)
4882 return m_list_itr->status;
4886 ice_acquire_lock(rule_lock);
4887 /* Exit if no suitable entries were found for adding bulk switch rule */
4889 status = ICE_SUCCESS;
4890 goto ice_add_mac_exit;
4893 /* Allocate switch rule buffer for the bulk update for unicast */
4894 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
4895 s_rule = (struct ice_aqc_sw_rules_elem *)
4896 ice_calloc(hw, num_unicast, s_rule_size);
4898 status = ICE_ERR_NO_MEMORY;
4899 goto ice_add_mac_exit;
4903 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4905 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4906 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4908 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4909 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
4910 ice_aqc_opc_add_sw_rules);
4911 r_iter = (struct ice_aqc_sw_rules_elem *)
4912 ((u8 *)r_iter + s_rule_size);
4916 /* Call AQ bulk switch rule update for all unicast addresses */
4918 /* Call AQ switch rule in AQ_MAX chunk */
4919 for (total_elem_left = num_unicast; total_elem_left > 0;
4920 total_elem_left -= elem_sent) {
4921 struct ice_aqc_sw_rules_elem *entry = r_iter;
4923 elem_sent = MIN_T(u8, total_elem_left,
4924 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
4925 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
4926 elem_sent, ice_aqc_opc_add_sw_rules,
4929 goto ice_add_mac_exit;
4930 r_iter = (struct ice_aqc_sw_rules_elem *)
4931 ((u8 *)r_iter + (elem_sent * s_rule_size));
4934 /* Fill up rule ID based on the value returned from FW */
4936 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4938 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4939 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4940 struct ice_fltr_mgmt_list_entry *fm_entry;
4942 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4943 f_info->fltr_rule_id =
4944 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
4945 f_info->fltr_act = ICE_FWD_TO_VSI;
4946 /* Create an entry to track this MAC address */
4947 fm_entry = (struct ice_fltr_mgmt_list_entry *)
4948 ice_malloc(hw, sizeof(*fm_entry));
4950 status = ICE_ERR_NO_MEMORY;
4951 goto ice_add_mac_exit;
4953 fm_entry->fltr_info = *f_info;
4954 fm_entry->vsi_count = 1;
4955 /* The book keeping entries will get removed when
4956 * base driver calls remove filter AQ command
4959 LIST_ADD(&fm_entry->list_entry, rule_head);
4960 r_iter = (struct ice_aqc_sw_rules_elem *)
4961 ((u8 *)r_iter + s_rule_size);
4966 ice_release_lock(rule_lock);
4968 ice_free(hw, s_rule);
4973 * ice_add_mac - Add a MAC address based filter rule
4974 * @hw: pointer to the hardware structure
4975 * @m_list: list of MAC addresses and forwarding information
4977 * Function add MAC rule for logical port from HW struct
4979 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4982 return ICE_ERR_PARAM;
4984 return ice_add_mac_rule(hw, m_list, hw->switch_info,
4985 hw->port_info->lport);
4989 * ice_add_vlan_internal - Add one VLAN based filter rule
4990 * @hw: pointer to the hardware structure
4991 * @recp_list: recipe list for which rule has to be added
4992 * @f_entry: filter entry containing one VLAN information
4994 static enum ice_status
4995 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4996 struct ice_fltr_list_entry *f_entry)
4998 struct ice_fltr_mgmt_list_entry *v_list_itr;
4999 struct ice_fltr_info *new_fltr, *cur_fltr;
5000 enum ice_sw_lkup_type lkup_type;
5001 u16 vsi_list_id = 0, vsi_handle;
5002 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5003 enum ice_status status = ICE_SUCCESS;
5005 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
5006 return ICE_ERR_PARAM;
5008 f_entry->fltr_info.fwd_id.hw_vsi_id =
5009 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
5010 new_fltr = &f_entry->fltr_info;
5012 /* VLAN ID should only be 12 bits */
5013 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
5014 return ICE_ERR_PARAM;
5016 if (new_fltr->src_id != ICE_SRC_ID_VSI)
5017 return ICE_ERR_PARAM;
5019 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
5020 lkup_type = new_fltr->lkup_type;
5021 vsi_handle = new_fltr->vsi_handle;
5022 rule_lock = &recp_list->filt_rule_lock;
5023 ice_acquire_lock(rule_lock);
5024 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
5026 struct ice_vsi_list_map_info *map_info = NULL;
5028 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
5029 /* All VLAN pruning rules use a VSI list. Check if
5030 * there is already a VSI list containing VSI that we
5031 * want to add. If found, use the same vsi_list_id for
5032 * this new VLAN rule or else create a new list.
5034 map_info = ice_find_vsi_list_entry(recp_list,
5038 status = ice_create_vsi_list_rule(hw,
5046 /* Convert the action to forwarding to a VSI list. */
5047 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
5048 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
5051 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
5053 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
5056 status = ICE_ERR_DOES_NOT_EXIST;
5059 /* reuse VSI list for new rule and increment ref_cnt */
5061 v_list_itr->vsi_list_info = map_info;
5062 map_info->ref_cnt++;
5064 v_list_itr->vsi_list_info =
5065 ice_create_vsi_list_map(hw, &vsi_handle,
5069 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
5070 /* Update existing VSI list to add new VSI ID only if it used
5073 cur_fltr = &v_list_itr->fltr_info;
5074 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
5077 /* If VLAN rule exists and VSI list being used by this rule is
5078 * referenced by more than 1 VLAN rule. Then create a new VSI
5079 * list appending previous VSI with new VSI and update existing
5080 * VLAN rule to point to new VSI list ID
5082 struct ice_fltr_info tmp_fltr;
5083 u16 vsi_handle_arr[2];
5086 /* Current implementation only supports reusing VSI list with
5087 * one VSI count. We should never hit below condition
5089 if (v_list_itr->vsi_count > 1 &&
5090 v_list_itr->vsi_list_info->ref_cnt > 1) {
5091 ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
5092 status = ICE_ERR_CFG;
5097 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
5100 /* A rule already exists with the new VSI being added */
5101 if (cur_handle == vsi_handle) {
5102 status = ICE_ERR_ALREADY_EXISTS;
5106 vsi_handle_arr[0] = cur_handle;
5107 vsi_handle_arr[1] = vsi_handle;
5108 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
5109 &vsi_list_id, lkup_type);
5113 tmp_fltr = v_list_itr->fltr_info;
5114 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
5115 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
5116 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
5117 /* Update the previous switch rule to a new VSI list which
5118 * includes current VSI that is requested
5120 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
5124 /* before overriding VSI list map info. decrement ref_cnt of
5127 v_list_itr->vsi_list_info->ref_cnt--;
5129 /* now update to newly created list */
5130 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
5131 v_list_itr->vsi_list_info =
5132 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
5134 v_list_itr->vsi_count++;
5138 ice_release_lock(rule_lock);
5143 * ice_add_vlan_rule - Add VLAN based filter rule
5144 * @hw: pointer to the hardware structure
5145 * @v_list: list of VLAN entries and forwarding information
5146 * @sw: pointer to switch info struct for which function add rule
5148 static enum ice_status
5149 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5150 struct ice_switch_info *sw)
5152 struct ice_fltr_list_entry *v_list_itr;
5153 struct ice_sw_recipe *recp_list;
5155 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
5156 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
5158 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
5159 return ICE_ERR_PARAM;
5160 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
5161 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
5163 if (v_list_itr->status)
5164 return v_list_itr->status;
5170 * ice_add_vlan - Add a VLAN based filter rule
5171 * @hw: pointer to the hardware structure
5172 * @v_list: list of VLAN and forwarding information
5174 * Function add VLAN rule for logical port from HW struct
5176 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
5179 return ICE_ERR_PARAM;
5181 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
5185 * ice_add_mac_vlan_rule - Add MAC and VLAN pair based filter rule
5186 * @hw: pointer to the hardware structure
5187 * @mv_list: list of MAC and VLAN filters
5188 * @sw: pointer to switch info struct for which function add rule
5189 * @lport: logic port number on which function add rule
5191 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
5192 * pruning bits enabled, then it is the responsibility of the caller to make
5193 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
5194 * VLAN won't be received on that VSI otherwise.
5196 static enum ice_status
5197 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
5198 struct ice_switch_info *sw, u8 lport)
5200 struct ice_fltr_list_entry *mv_list_itr;
5201 struct ice_sw_recipe *recp_list;
5203 if (!mv_list || !hw)
5204 return ICE_ERR_PARAM;
5206 recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
5207 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
5209 enum ice_sw_lkup_type l_type =
5210 mv_list_itr->fltr_info.lkup_type;
5212 if (l_type != ICE_SW_LKUP_MAC_VLAN)
5213 return ICE_ERR_PARAM;
5214 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
5215 mv_list_itr->status =
5216 ice_add_rule_internal(hw, recp_list, lport,
5218 if (mv_list_itr->status)
5219 return mv_list_itr->status;
5225 * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
5226 * @hw: pointer to the hardware structure
5227 * @mv_list: list of MAC VLAN addresses and forwarding information
5229 * Function add MAC VLAN rule for logical port from HW struct
5232 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
5234 if (!mv_list || !hw)
5235 return ICE_ERR_PARAM;
5237 return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
5238 hw->port_info->lport);
5242 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
5243 * @hw: pointer to the hardware structure
5244 * @em_list: list of ether type MAC filter, MAC is optional
5245 * @sw: pointer to switch info struct for which function add rule
5246 * @lport: logic port number on which function add rule
5248 * This function requires the caller to populate the entries in
5249 * the filter list with the necessary fields (including flags to
5250 * indicate Tx or Rx rules).
5252 static enum ice_status
5253 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
5254 struct ice_switch_info *sw, u8 lport)
5256 struct ice_fltr_list_entry *em_list_itr;
5258 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
5260 struct ice_sw_recipe *recp_list;
5261 enum ice_sw_lkup_type l_type;
5263 l_type = em_list_itr->fltr_info.lkup_type;
5264 recp_list = &sw->recp_list[l_type];
5266 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
5267 l_type != ICE_SW_LKUP_ETHERTYPE)
5268 return ICE_ERR_PARAM;
5270 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
5273 if (em_list_itr->status)
5274 return em_list_itr->status;
5280 * ice_add_eth_mac - Add a ethertype based filter rule
5281 * @hw: pointer to the hardware structure
5282 * @em_list: list of ethertype and forwarding information
5284 * Function add ethertype rule for logical port from HW struct
5287 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
5289 if (!em_list || !hw)
5290 return ICE_ERR_PARAM;
5292 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
5293 hw->port_info->lport);
5297 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
5298 * @hw: pointer to the hardware structure
5299 * @em_list: list of ethertype or ethertype MAC entries
5300 * @sw: pointer to switch info struct for which function add rule
5302 static enum ice_status
5303 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
5304 struct ice_switch_info *sw)
5306 struct ice_fltr_list_entry *em_list_itr, *tmp;
5308 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
5310 struct ice_sw_recipe *recp_list;
5311 enum ice_sw_lkup_type l_type;
5313 l_type = em_list_itr->fltr_info.lkup_type;
5315 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
5316 l_type != ICE_SW_LKUP_ETHERTYPE)
5317 return ICE_ERR_PARAM;
5319 recp_list = &sw->recp_list[l_type];
5320 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
5322 if (em_list_itr->status)
5323 return em_list_itr->status;
5329 * ice_remove_eth_mac - remove a ethertype based filter rule
5330 * @hw: pointer to the hardware structure
5331 * @em_list: list of ethertype and forwarding information
5335 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
5337 if (!em_list || !hw)
5338 return ICE_ERR_PARAM;
5340 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
5344 * ice_rem_sw_rule_info
5345 * @hw: pointer to the hardware structure
5346 * @rule_head: pointer to the switch list structure that we want to delete
5349 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
5351 if (!LIST_EMPTY(rule_head)) {
5352 struct ice_fltr_mgmt_list_entry *entry;
5353 struct ice_fltr_mgmt_list_entry *tmp;
5355 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
5356 ice_fltr_mgmt_list_entry, list_entry) {
5357 LIST_DEL(&entry->list_entry);
5358 ice_free(hw, entry);
5364 * ice_rem_adv_rule_info
5365 * @hw: pointer to the hardware structure
5366 * @rule_head: pointer to the switch list structure that we want to delete
5369 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
5371 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
5372 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
5374 if (LIST_EMPTY(rule_head))
5377 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
5378 ice_adv_fltr_mgmt_list_entry, list_entry) {
5379 LIST_DEL(&lst_itr->list_entry);
5380 ice_free(hw, lst_itr->lkups);
5381 ice_free(hw, lst_itr);
5386 * ice_rem_all_sw_rules_info
5387 * @hw: pointer to the hardware structure
5389 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
5391 struct ice_switch_info *sw = hw->switch_info;
5394 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5395 struct LIST_HEAD_TYPE *rule_head;
5397 rule_head = &sw->recp_list[i].filt_rules;
5398 if (!sw->recp_list[i].adv_rule)
5399 ice_rem_sw_rule_info(hw, rule_head);
5401 ice_rem_adv_rule_info(hw, rule_head);
5402 if (sw->recp_list[i].adv_rule &&
5403 LIST_EMPTY(&sw->recp_list[i].filt_rules))
5404 sw->recp_list[i].adv_rule = false;
5409 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
5410 * @pi: pointer to the port_info structure
5411 * @vsi_handle: VSI handle to set as default
5412 * @set: true to add the above mentioned switch rule, false to remove it
5413 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
5415 * add filter rule to set/unset given VSI as default VSI for the switch
5416 * (represented by swid)
5419 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
5422 struct ice_aqc_sw_rules_elem *s_rule;
5423 struct ice_fltr_info f_info;
5424 struct ice_hw *hw = pi->hw;
5425 enum ice_adminq_opc opcode;
5426 enum ice_status status;
5430 if (!ice_is_vsi_valid(hw, vsi_handle))
5431 return ICE_ERR_PARAM;
5432 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5434 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
5435 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
5437 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
5439 return ICE_ERR_NO_MEMORY;
5441 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
5443 f_info.lkup_type = ICE_SW_LKUP_DFLT;
5444 f_info.flag = direction;
5445 f_info.fltr_act = ICE_FWD_TO_VSI;
5446 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
5448 if (f_info.flag & ICE_FLTR_RX) {
5449 f_info.src = pi->lport;
5450 f_info.src_id = ICE_SRC_ID_LPORT;
5452 f_info.fltr_rule_id =
5453 pi->dflt_rx_vsi_rule_id;
5454 } else if (f_info.flag & ICE_FLTR_TX) {
5455 f_info.src_id = ICE_SRC_ID_VSI;
5456 f_info.src = hw_vsi_id;
5458 f_info.fltr_rule_id =
5459 pi->dflt_tx_vsi_rule_id;
5463 opcode = ice_aqc_opc_add_sw_rules;
5465 opcode = ice_aqc_opc_remove_sw_rules;
5467 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
5469 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
5470 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
5473 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
5475 if (f_info.flag & ICE_FLTR_TX) {
5476 pi->dflt_tx_vsi_num = hw_vsi_id;
5477 pi->dflt_tx_vsi_rule_id = index;
5478 } else if (f_info.flag & ICE_FLTR_RX) {
5479 pi->dflt_rx_vsi_num = hw_vsi_id;
5480 pi->dflt_rx_vsi_rule_id = index;
5483 if (f_info.flag & ICE_FLTR_TX) {
5484 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
5485 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
5486 } else if (f_info.flag & ICE_FLTR_RX) {
5487 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
5488 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
5493 ice_free(hw, s_rule);
5498 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
5499 * @list_head: head of rule list
5500 * @f_info: rule information
5502 * Helper function to search for a unicast rule entry - this is to be used
5503 * to remove unicast MAC filter that is not shared with other VSIs on the
5506 * Returns pointer to entry storing the rule if found
5508 static struct ice_fltr_mgmt_list_entry *
5509 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
5510 struct ice_fltr_info *f_info)
5512 struct ice_fltr_mgmt_list_entry *list_itr;
5514 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
5516 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
5517 sizeof(f_info->l_data)) &&
5518 f_info->fwd_id.hw_vsi_id ==
5519 list_itr->fltr_info.fwd_id.hw_vsi_id &&
5520 f_info->flag == list_itr->fltr_info.flag)
5527 * ice_remove_mac_rule - remove a MAC based filter rule
5528 * @hw: pointer to the hardware structure
5529 * @m_list: list of MAC addresses and forwarding information
5530 * @recp_list: list from which function remove MAC address
5532 * This function removes either a MAC filter rule or a specific VSI from a
5533 * VSI list for a multicast MAC address.
5535 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
5536 * ice_add_mac. Caller should be aware that this call will only work if all
5537 * the entries passed into m_list were added previously. It will not attempt to
5538 * do a partial remove of entries that were found.
5540 static enum ice_status
5541 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
5542 struct ice_sw_recipe *recp_list)
5544 struct ice_fltr_list_entry *list_itr, *tmp;
5545 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5548 return ICE_ERR_PARAM;
5550 rule_lock = &recp_list->filt_rule_lock;
5551 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
5553 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
5554 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
5557 if (l_type != ICE_SW_LKUP_MAC)
5558 return ICE_ERR_PARAM;
5560 vsi_handle = list_itr->fltr_info.vsi_handle;
5561 if (!ice_is_vsi_valid(hw, vsi_handle))
5562 return ICE_ERR_PARAM;
5564 list_itr->fltr_info.fwd_id.hw_vsi_id =
5565 ice_get_hw_vsi_num(hw, vsi_handle);
5566 if (IS_UNICAST_ETHER_ADDR(add) && !hw->umac_shared) {
5567 /* Don't remove the unicast address that belongs to
5568 * another VSI on the switch, since it is not being
5571 ice_acquire_lock(rule_lock);
5572 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
5573 &list_itr->fltr_info)) {
5574 ice_release_lock(rule_lock);
5575 return ICE_ERR_DOES_NOT_EXIST;
5577 ice_release_lock(rule_lock);
5579 list_itr->status = ice_remove_rule_internal(hw, recp_list,
5581 if (list_itr->status)
5582 return list_itr->status;
5588 * ice_remove_mac - remove a MAC address based filter rule
5589 * @hw: pointer to the hardware structure
5590 * @m_list: list of MAC addresses and forwarding information
5593 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
5595 struct ice_sw_recipe *recp_list;
5597 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5598 return ice_remove_mac_rule(hw, m_list, recp_list);
5602 * ice_remove_vlan_rule - Remove VLAN based filter rule
5603 * @hw: pointer to the hardware structure
5604 * @v_list: list of VLAN entries and forwarding information
5605 * @recp_list: list from which function remove VLAN
5607 static enum ice_status
5608 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5609 struct ice_sw_recipe *recp_list)
5611 struct ice_fltr_list_entry *v_list_itr, *tmp;
5613 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5615 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
5617 if (l_type != ICE_SW_LKUP_VLAN)
5618 return ICE_ERR_PARAM;
5619 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
5621 if (v_list_itr->status)
5622 return v_list_itr->status;
5628 * ice_remove_vlan - remove a VLAN address based filter rule
5629 * @hw: pointer to the hardware structure
5630 * @v_list: list of VLAN and forwarding information
5634 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
5636 struct ice_sw_recipe *recp_list;
5639 return ICE_ERR_PARAM;
5641 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
5642 return ice_remove_vlan_rule(hw, v_list, recp_list);
5646 * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
5647 * @hw: pointer to the hardware structure
5648 * @v_list: list of MAC VLAN entries and forwarding information
5649 * @recp_list: list from which function remove MAC VLAN
5651 static enum ice_status
5652 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5653 struct ice_sw_recipe *recp_list)
5655 struct ice_fltr_list_entry *v_list_itr, *tmp;
5657 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5658 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5660 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
5662 if (l_type != ICE_SW_LKUP_MAC_VLAN)
5663 return ICE_ERR_PARAM;
5664 v_list_itr->status =
5665 ice_remove_rule_internal(hw, recp_list,
5667 if (v_list_itr->status)
5668 return v_list_itr->status;
5674 * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
5675 * @hw: pointer to the hardware structure
5676 * @mv_list: list of MAC VLAN and forwarding information
5679 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
5681 struct ice_sw_recipe *recp_list;
5683 if (!mv_list || !hw)
5684 return ICE_ERR_PARAM;
5686 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5687 return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
5691 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
5692 * @fm_entry: filter entry to inspect
5693 * @vsi_handle: VSI handle to compare with filter info
5696 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
5698 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
5699 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
5700 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
5701 fm_entry->vsi_list_info &&
5702 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
5707 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
5708 * @hw: pointer to the hardware structure
5709 * @vsi_handle: VSI handle to remove filters from
5710 * @vsi_list_head: pointer to the list to add entry to
5711 * @fi: pointer to fltr_info of filter entry to copy & add
5713 * Helper function, used when creating a list of filters to remove from
5714 * a specific VSI. The entry added to vsi_list_head is a COPY of the
5715 * original filter entry, with the exception of fltr_info.fltr_act and
5716 * fltr_info.fwd_id fields. These are set such that later logic can
5717 * extract which VSI to remove the fltr from, and pass on that information.
5719 static enum ice_status
5720 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5721 struct LIST_HEAD_TYPE *vsi_list_head,
5722 struct ice_fltr_info *fi)
5724 struct ice_fltr_list_entry *tmp;
5726 /* this memory is freed up in the caller function
5727 * once filters for this VSI are removed
5729 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
5731 return ICE_ERR_NO_MEMORY;
5733 tmp->fltr_info = *fi;
5735 /* Overwrite these fields to indicate which VSI to remove filter from,
5736 * so find and remove logic can extract the information from the
5737 * list entries. Note that original entries will still have proper
5740 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
5741 tmp->fltr_info.vsi_handle = vsi_handle;
5742 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5744 LIST_ADD(&tmp->list_entry, vsi_list_head);
5750 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
5751 * @hw: pointer to the hardware structure
5752 * @vsi_handle: VSI handle to remove filters from
5753 * @lkup_list_head: pointer to the list that has certain lookup type filters
5754 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
5756 * Locates all filters in lkup_list_head that are used by the given VSI,
5757 * and adds COPIES of those entries to vsi_list_head (intended to be used
5758 * to remove the listed filters).
5759 * Note that this means all entries in vsi_list_head must be explicitly
5760 * deallocated by the caller when done with list.
5762 static enum ice_status
5763 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5764 struct LIST_HEAD_TYPE *lkup_list_head,
5765 struct LIST_HEAD_TYPE *vsi_list_head)
5767 struct ice_fltr_mgmt_list_entry *fm_entry;
5768 enum ice_status status = ICE_SUCCESS;
5770 /* check to make sure VSI ID is valid and within boundary */
5771 if (!ice_is_vsi_valid(hw, vsi_handle))
5772 return ICE_ERR_PARAM;
5774 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
5775 ice_fltr_mgmt_list_entry, list_entry) {
5776 if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
5779 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5781 &fm_entry->fltr_info);
5789 * ice_determine_promisc_mask
5790 * @fi: filter info to parse
5792 * Helper function to determine which ICE_PROMISC_ mask corresponds
5793 * to given filter into.
5795 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
5797 u16 vid = fi->l_data.mac_vlan.vlan_id;
5798 u8 *macaddr = fi->l_data.mac.mac_addr;
5799 bool is_tx_fltr = false;
5800 u8 promisc_mask = 0;
5802 if (fi->flag == ICE_FLTR_TX)
5805 if (IS_BROADCAST_ETHER_ADDR(macaddr))
5806 promisc_mask |= is_tx_fltr ?
5807 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
5808 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
5809 promisc_mask |= is_tx_fltr ?
5810 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
5811 else if (IS_UNICAST_ETHER_ADDR(macaddr))
5812 promisc_mask |= is_tx_fltr ?
5813 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
5815 promisc_mask |= is_tx_fltr ?
5816 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
5818 return promisc_mask;
5822 * _ice_get_vsi_promisc - get promiscuous mode of given VSI
5823 * @hw: pointer to the hardware structure
5824 * @vsi_handle: VSI handle to retrieve info from
5825 * @promisc_mask: pointer to mask to be filled in
5826 * @vid: VLAN ID of promisc VLAN VSI
5827 * @sw: pointer to switch info struct for which function add rule
5829 static enum ice_status
5830 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5831 u16 *vid, struct ice_switch_info *sw)
5833 struct ice_fltr_mgmt_list_entry *itr;
5834 struct LIST_HEAD_TYPE *rule_head;
5835 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5837 if (!ice_is_vsi_valid(hw, vsi_handle))
5838 return ICE_ERR_PARAM;
5842 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
5843 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
5845 ice_acquire_lock(rule_lock);
5846 LIST_FOR_EACH_ENTRY(itr, rule_head,
5847 ice_fltr_mgmt_list_entry, list_entry) {
5848 /* Continue if this filter doesn't apply to this VSI or the
5849 * VSI ID is not in the VSI map for this filter
5851 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5854 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5856 ice_release_lock(rule_lock);
5862 * ice_get_vsi_promisc - get promiscuous mode of given VSI
5863 * @hw: pointer to the hardware structure
5864 * @vsi_handle: VSI handle to retrieve info from
5865 * @promisc_mask: pointer to mask to be filled in
5866 * @vid: VLAN ID of promisc VLAN VSI
5869 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5872 return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
5873 vid, hw->switch_info);
5877 * _ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5878 * @hw: pointer to the hardware structure
5879 * @vsi_handle: VSI handle to retrieve info from
5880 * @promisc_mask: pointer to mask to be filled in
5881 * @vid: VLAN ID of promisc VLAN VSI
5882 * @sw: pointer to switch info struct for which function add rule
5884 static enum ice_status
5885 _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5886 u16 *vid, struct ice_switch_info *sw)
5888 struct ice_fltr_mgmt_list_entry *itr;
5889 struct LIST_HEAD_TYPE *rule_head;
5890 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5892 if (!ice_is_vsi_valid(hw, vsi_handle))
5893 return ICE_ERR_PARAM;
5897 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
5898 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
5900 ice_acquire_lock(rule_lock);
5901 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
5903 /* Continue if this filter doesn't apply to this VSI or the
5904 * VSI ID is not in the VSI map for this filter
5906 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5909 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5911 ice_release_lock(rule_lock);
5917 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5918 * @hw: pointer to the hardware structure
5919 * @vsi_handle: VSI handle to retrieve info from
5920 * @promisc_mask: pointer to mask to be filled in
5921 * @vid: VLAN ID of promisc VLAN VSI
5924 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5927 return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
5928 vid, hw->switch_info);
5932 * ice_remove_promisc - Remove promisc based filter rules
5933 * @hw: pointer to the hardware structure
5934 * @recp_id: recipe ID for which the rule needs to removed
5935 * @v_list: list of promisc entries
5937 static enum ice_status
5938 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
5939 struct LIST_HEAD_TYPE *v_list)
5941 struct ice_fltr_list_entry *v_list_itr, *tmp;
5942 struct ice_sw_recipe *recp_list;
5944 recp_list = &hw->switch_info->recp_list[recp_id];
5945 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5947 v_list_itr->status =
5948 ice_remove_rule_internal(hw, recp_list, v_list_itr);
5949 if (v_list_itr->status)
5950 return v_list_itr->status;
5956 * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
5957 * @hw: pointer to the hardware structure
5958 * @vsi_handle: VSI handle to clear mode
5959 * @promisc_mask: mask of promiscuous config bits to clear
5960 * @vid: VLAN ID to clear VLAN promiscuous
5961 * @sw: pointer to switch info struct for which function add rule
5963 static enum ice_status
5964 _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5965 u16 vid, struct ice_switch_info *sw)
5967 struct ice_fltr_list_entry *fm_entry, *tmp;
5968 struct LIST_HEAD_TYPE remove_list_head;
5969 struct ice_fltr_mgmt_list_entry *itr;
5970 struct LIST_HEAD_TYPE *rule_head;
5971 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5972 enum ice_status status = ICE_SUCCESS;
5975 if (!ice_is_vsi_valid(hw, vsi_handle))
5976 return ICE_ERR_PARAM;
5978 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
5979 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
5981 recipe_id = ICE_SW_LKUP_PROMISC;
5983 rule_head = &sw->recp_list[recipe_id].filt_rules;
5984 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
5986 INIT_LIST_HEAD(&remove_list_head);
5988 ice_acquire_lock(rule_lock);
5989 LIST_FOR_EACH_ENTRY(itr, rule_head,
5990 ice_fltr_mgmt_list_entry, list_entry) {
5991 struct ice_fltr_info *fltr_info;
5992 u8 fltr_promisc_mask = 0;
5994 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5996 fltr_info = &itr->fltr_info;
5998 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
5999 vid != fltr_info->l_data.mac_vlan.vlan_id)
6002 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
6004 /* Skip if filter is not completely specified by given mask */
6005 if (fltr_promisc_mask & ~promisc_mask)
6008 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
6012 ice_release_lock(rule_lock);
6013 goto free_fltr_list;
6016 ice_release_lock(rule_lock);
6018 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
6021 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
6022 ice_fltr_list_entry, list_entry) {
6023 LIST_DEL(&fm_entry->list_entry);
6024 ice_free(hw, fm_entry);
6031 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
6032 * @hw: pointer to the hardware structure
6033 * @vsi_handle: VSI handle to clear mode
6034 * @promisc_mask: mask of promiscuous config bits to clear
6035 * @vid: VLAN ID to clear VLAN promiscuous
6038 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
6039 u8 promisc_mask, u16 vid)
6041 return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
6042 vid, hw->switch_info);
6046 * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
6047 * @hw: pointer to the hardware structure
6048 * @vsi_handle: VSI handle to configure
6049 * @promisc_mask: mask of promiscuous config bits
6050 * @vid: VLAN ID to set VLAN promiscuous
6051 * @lport: logical port number to configure promisc mode
6052 * @sw: pointer to switch info struct for which function add rule
6054 static enum ice_status
6055 _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
6056 u16 vid, u8 lport, struct ice_switch_info *sw)
6058 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
6059 struct ice_fltr_list_entry f_list_entry;
6060 struct ice_fltr_info new_fltr;
6061 enum ice_status status = ICE_SUCCESS;
6067 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
6069 if (!ice_is_vsi_valid(hw, vsi_handle))
6070 return ICE_ERR_PARAM;
6071 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6073 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
6075 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
6076 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
6077 new_fltr.l_data.mac_vlan.vlan_id = vid;
6078 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
6080 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
6081 recipe_id = ICE_SW_LKUP_PROMISC;
6084 /* Separate filters must be set for each direction/packet type
6085 * combination, so we will loop over the mask value, store the
6086 * individual type, and clear it out in the input mask as it
6089 while (promisc_mask) {
6090 struct ice_sw_recipe *recp_list;
6096 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
6097 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
6098 pkt_type = UCAST_FLTR;
6099 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
6100 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
6101 pkt_type = UCAST_FLTR;
6103 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
6104 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
6105 pkt_type = MCAST_FLTR;
6106 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
6107 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
6108 pkt_type = MCAST_FLTR;
6110 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
6111 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
6112 pkt_type = BCAST_FLTR;
6113 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
6114 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
6115 pkt_type = BCAST_FLTR;
6119 /* Check for VLAN promiscuous flag */
6120 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
6121 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
6122 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
6123 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
6127 /* Set filter DA based on packet type */
6128 mac_addr = new_fltr.l_data.mac.mac_addr;
6129 if (pkt_type == BCAST_FLTR) {
6130 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
6131 } else if (pkt_type == MCAST_FLTR ||
6132 pkt_type == UCAST_FLTR) {
6133 /* Use the dummy ether header DA */
6134 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
6135 ICE_NONDMA_TO_NONDMA);
6136 if (pkt_type == MCAST_FLTR)
6137 mac_addr[0] |= 0x1; /* Set multicast bit */
6140 /* Need to reset this to zero for all iterations */
6143 new_fltr.flag |= ICE_FLTR_TX;
6144 new_fltr.src = hw_vsi_id;
6146 new_fltr.flag |= ICE_FLTR_RX;
6147 new_fltr.src = lport;
6150 new_fltr.fltr_act = ICE_FWD_TO_VSI;
6151 new_fltr.vsi_handle = vsi_handle;
6152 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
6153 f_list_entry.fltr_info = new_fltr;
6154 recp_list = &sw->recp_list[recipe_id];
6156 status = ice_add_rule_internal(hw, recp_list, lport,
6158 if (status != ICE_SUCCESS)
6159 goto set_promisc_exit;
6167 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
6168 * @hw: pointer to the hardware structure
6169 * @vsi_handle: VSI handle to configure
6170 * @promisc_mask: mask of promiscuous config bits
6171 * @vid: VLAN ID to set VLAN promiscuous
6174 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
6177 return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
6178 hw->port_info->lport,
6183 * _ice_set_vlan_vsi_promisc
6184 * @hw: pointer to the hardware structure
6185 * @vsi_handle: VSI handle to configure
6186 * @promisc_mask: mask of promiscuous config bits
6187 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
6188 * @lport: logical port number to configure promisc mode
6189 * @sw: pointer to switch info struct for which function add rule
6191 * Configure VSI with all associated VLANs to given promiscuous mode(s)
6193 static enum ice_status
6194 _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
6195 bool rm_vlan_promisc, u8 lport,
6196 struct ice_switch_info *sw)
6198 struct ice_fltr_list_entry *list_itr, *tmp;
6199 struct LIST_HEAD_TYPE vsi_list_head;
6200 struct LIST_HEAD_TYPE *vlan_head;
6201 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
6202 enum ice_status status;
6205 INIT_LIST_HEAD(&vsi_list_head);
6206 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
6207 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
6208 ice_acquire_lock(vlan_lock);
6209 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
6211 ice_release_lock(vlan_lock);
6213 goto free_fltr_list;
6215 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
6217 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
6218 if (rm_vlan_promisc)
6219 status = _ice_clear_vsi_promisc(hw, vsi_handle,
6223 status = _ice_set_vsi_promisc(hw, vsi_handle,
6224 promisc_mask, vlan_id,
6231 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
6232 ice_fltr_list_entry, list_entry) {
6233 LIST_DEL(&list_itr->list_entry);
6234 ice_free(hw, list_itr);
6240 * ice_set_vlan_vsi_promisc
6241 * @hw: pointer to the hardware structure
6242 * @vsi_handle: VSI handle to configure
6243 * @promisc_mask: mask of promiscuous config bits
6244 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
6246 * Configure VSI with all associated VLANs to given promiscuous mode(s)
6249 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
6250 bool rm_vlan_promisc)
6252 return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
6253 rm_vlan_promisc, hw->port_info->lport,
6258 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
6259 * @hw: pointer to the hardware structure
6260 * @vsi_handle: VSI handle to remove filters from
6261 * @recp_list: recipe list from which function remove fltr
6262 * @lkup: switch rule filter lookup type
6265 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
6266 struct ice_sw_recipe *recp_list,
6267 enum ice_sw_lkup_type lkup)
6269 struct ice_fltr_list_entry *fm_entry;
6270 struct LIST_HEAD_TYPE remove_list_head;
6271 struct LIST_HEAD_TYPE *rule_head;
6272 struct ice_fltr_list_entry *tmp;
6273 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6274 enum ice_status status;
6276 INIT_LIST_HEAD(&remove_list_head);
6277 rule_lock = &recp_list[lkup].filt_rule_lock;
6278 rule_head = &recp_list[lkup].filt_rules;
6279 ice_acquire_lock(rule_lock);
6280 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
6282 ice_release_lock(rule_lock);
6284 goto free_fltr_list;
6287 case ICE_SW_LKUP_MAC:
6288 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
6290 case ICE_SW_LKUP_VLAN:
6291 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
6293 case ICE_SW_LKUP_PROMISC:
6294 case ICE_SW_LKUP_PROMISC_VLAN:
6295 ice_remove_promisc(hw, lkup, &remove_list_head);
6297 case ICE_SW_LKUP_MAC_VLAN:
6298 ice_remove_mac_vlan(hw, &remove_list_head);
6300 case ICE_SW_LKUP_ETHERTYPE:
6301 case ICE_SW_LKUP_ETHERTYPE_MAC:
6302 ice_remove_eth_mac(hw, &remove_list_head);
6304 case ICE_SW_LKUP_DFLT:
6305 ice_debug(hw, ICE_DBG_SW, "Remove filters for this lookup type hasn't been implemented yet\n");
6307 case ICE_SW_LKUP_LAST:
6308 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
6313 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
6314 ice_fltr_list_entry, list_entry) {
6315 LIST_DEL(&fm_entry->list_entry);
6316 ice_free(hw, fm_entry);
6321 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
6322 * @hw: pointer to the hardware structure
6323 * @vsi_handle: VSI handle to remove filters from
6324 * @sw: pointer to switch info struct
6327 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
6328 struct ice_switch_info *sw)
6330 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
6332 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6333 sw->recp_list, ICE_SW_LKUP_MAC);
6334 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6335 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
6336 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6337 sw->recp_list, ICE_SW_LKUP_PROMISC);
6338 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6339 sw->recp_list, ICE_SW_LKUP_VLAN);
6340 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6341 sw->recp_list, ICE_SW_LKUP_DFLT);
6342 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6343 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
6344 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6345 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
6346 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6347 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
6351 * ice_remove_vsi_fltr - Remove all filters for a VSI
6352 * @hw: pointer to the hardware structure
6353 * @vsi_handle: VSI handle to remove filters from
6355 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
6357 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
6361 * ice_alloc_res_cntr - allocating resource counter
6362 * @hw: pointer to the hardware structure
6363 * @type: type of resource
6364 * @alloc_shared: if set it is shared else dedicated
6365 * @num_items: number of entries requested for FD resource type
6366 * @counter_id: counter index returned by AQ call
6369 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
6372 struct ice_aqc_alloc_free_res_elem *buf;
6373 enum ice_status status;
6376 /* Allocate resource */
6377 buf_len = ice_struct_size(buf, elem, 1);
6378 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6380 return ICE_ERR_NO_MEMORY;
6382 buf->num_elems = CPU_TO_LE16(num_items);
6383 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
6384 ICE_AQC_RES_TYPE_M) | alloc_shared);
6386 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
6387 ice_aqc_opc_alloc_res, NULL);
6391 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
6399 * ice_free_res_cntr - free resource counter
6400 * @hw: pointer to the hardware structure
6401 * @type: type of resource
6402 * @alloc_shared: if set it is shared else dedicated
6403 * @num_items: number of entries to be freed for FD resource type
6404 * @counter_id: counter ID resource which needs to be freed
6407 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
6410 struct ice_aqc_alloc_free_res_elem *buf;
6411 enum ice_status status;
6415 buf_len = ice_struct_size(buf, elem, 1);
6416 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6418 return ICE_ERR_NO_MEMORY;
6420 buf->num_elems = CPU_TO_LE16(num_items);
6421 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
6422 ICE_AQC_RES_TYPE_M) | alloc_shared);
6423 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
6425 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
6426 ice_aqc_opc_free_res, NULL);
6428 ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
6435 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
6436 * @hw: pointer to the hardware structure
6437 * @counter_id: returns counter index
6439 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
6441 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
6442 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
6447 * ice_free_vlan_res_counter - Free counter resource for VLAN type
6448 * @hw: pointer to the hardware structure
6449 * @counter_id: counter index to be freed
6451 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
6453 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
6454 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
6459 * ice_alloc_res_lg_act - add large action resource
6460 * @hw: pointer to the hardware structure
6461 * @l_id: large action ID to fill it in
6462 * @num_acts: number of actions to hold with a large action entry
6464 static enum ice_status
6465 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
6467 struct ice_aqc_alloc_free_res_elem *sw_buf;
6468 enum ice_status status;
6471 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
6472 return ICE_ERR_PARAM;
6474 /* Allocate resource for large action */
6475 buf_len = ice_struct_size(sw_buf, elem, 1);
6476 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6478 return ICE_ERR_NO_MEMORY;
6480 sw_buf->num_elems = CPU_TO_LE16(1);
6482 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
6483 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
6484 * If num_acts is greater than 2, then use
6485 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
6486 * The num_acts cannot exceed 4. This was ensured at the
6487 * beginning of the function.
6490 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
6491 else if (num_acts == 2)
6492 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
6494 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
6496 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
6497 ice_aqc_opc_alloc_res, NULL);
6499 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
6501 ice_free(hw, sw_buf);
6506 * ice_add_mac_with_sw_marker - add filter with sw marker
6507 * @hw: pointer to the hardware structure
6508 * @f_info: filter info structure containing the MAC filter information
6509 * @sw_marker: sw marker to tag the Rx descriptor with
6512 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
6515 struct ice_fltr_mgmt_list_entry *m_entry;
6516 struct ice_fltr_list_entry fl_info;
6517 struct ice_sw_recipe *recp_list;
6518 struct LIST_HEAD_TYPE l_head;
6519 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6520 enum ice_status ret;
6524 if (f_info->fltr_act != ICE_FWD_TO_VSI)
6525 return ICE_ERR_PARAM;
6527 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
6528 return ICE_ERR_PARAM;
6530 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
6531 return ICE_ERR_PARAM;
6533 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
6534 return ICE_ERR_PARAM;
6535 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
6537 /* Add filter if it doesn't exist so then the adding of large
6538 * action always results in update
6541 INIT_LIST_HEAD(&l_head);
6542 fl_info.fltr_info = *f_info;
6543 LIST_ADD(&fl_info.list_entry, &l_head);
6545 entry_exists = false;
6546 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
6547 hw->port_info->lport);
6548 if (ret == ICE_ERR_ALREADY_EXISTS)
6549 entry_exists = true;
6553 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
6554 rule_lock = &recp_list->filt_rule_lock;
6555 ice_acquire_lock(rule_lock);
6556 /* Get the book keeping entry for the filter */
6557 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
6561 /* If counter action was enabled for this rule then don't enable
6562 * sw marker large action
6564 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
6565 ret = ICE_ERR_PARAM;
6569 /* if same marker was added before */
6570 if (m_entry->sw_marker_id == sw_marker) {
6571 ret = ICE_ERR_ALREADY_EXISTS;
6575 /* Allocate a hardware table entry to hold large act. Three actions
6576 * for marker based large action
6578 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
6582 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
6585 /* Update the switch rule to add the marker action */
6586 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
6588 ice_release_lock(rule_lock);
6593 ice_release_lock(rule_lock);
6594 /* only remove entry if it did not exist previously */
6596 ret = ice_remove_mac(hw, &l_head);
6602 * ice_add_mac_with_counter - add filter with counter enabled
6603 * @hw: pointer to the hardware structure
6604 * @f_info: pointer to filter info structure containing the MAC filter
6608 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
6610 struct ice_fltr_mgmt_list_entry *m_entry;
6611 struct ice_fltr_list_entry fl_info;
6612 struct ice_sw_recipe *recp_list;
6613 struct LIST_HEAD_TYPE l_head;
6614 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6615 enum ice_status ret;
6620 if (f_info->fltr_act != ICE_FWD_TO_VSI)
6621 return ICE_ERR_PARAM;
6623 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
6624 return ICE_ERR_PARAM;
6626 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
6627 return ICE_ERR_PARAM;
6628 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
6629 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
6631 entry_exist = false;
6633 rule_lock = &recp_list->filt_rule_lock;
6635 /* Add filter if it doesn't exist so then the adding of large
6636 * action always results in update
6638 INIT_LIST_HEAD(&l_head);
6640 fl_info.fltr_info = *f_info;
6641 LIST_ADD(&fl_info.list_entry, &l_head);
6643 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
6644 hw->port_info->lport);
6645 if (ret == ICE_ERR_ALREADY_EXISTS)
6650 ice_acquire_lock(rule_lock);
6651 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
6653 ret = ICE_ERR_BAD_PTR;
6657 /* Don't enable counter for a filter for which sw marker was enabled */
6658 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
6659 ret = ICE_ERR_PARAM;
6663 /* If a counter was already enabled then don't need to add again */
6664 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
6665 ret = ICE_ERR_ALREADY_EXISTS;
6669 /* Allocate a hardware table entry to VLAN counter */
6670 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
6674 /* Allocate a hardware table entry to hold large act. Two actions for
6675 * counter based large action
6677 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
6681 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
6684 /* Update the switch rule to add the counter action */
6685 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
6687 ice_release_lock(rule_lock);
6692 ice_release_lock(rule_lock);
6693 /* only remove entry if it did not exist previously */
6695 ret = ice_remove_mac(hw, &l_head);
6700 /* This is mapping table entry that maps every word within a given protocol
6701 * structure to the real byte offset as per the specification of that
6703 * for example dst address is 3 words in ethertype header and corresponding
6704 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
6705 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
6706 * matching entry describing its field. This needs to be updated if new
6707 * structure is added to that union.
6709 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
6710 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
6711 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
6712 { ICE_ETYPE_OL, { 0 } },
6713 { ICE_VLAN_OFOS, { 2, 0 } },
6714 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
6715 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
6716 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
6717 26, 28, 30, 32, 34, 36, 38 } },
6718 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
6719 26, 28, 30, 32, 34, 36, 38 } },
6720 { ICE_TCP_IL, { 0, 2 } },
6721 { ICE_UDP_OF, { 0, 2 } },
6722 { ICE_UDP_ILOS, { 0, 2 } },
6723 { ICE_SCTP_IL, { 0, 2 } },
6724 { ICE_VXLAN, { 8, 10, 12, 14 } },
6725 { ICE_GENEVE, { 8, 10, 12, 14 } },
6726 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
6727 { ICE_NVGRE, { 0, 2, 4, 6 } },
6728 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
6729 { ICE_PPPOE, { 0, 2, 4, 6 } },
6730 { ICE_PFCP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
6731 { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
6732 { ICE_ESP, { 0, 2, 4, 6 } },
6733 { ICE_AH, { 0, 2, 4, 6, 8, 10 } },
6734 { ICE_NAT_T, { 8, 10, 12, 14 } },
6735 { ICE_GTP_NO_PAY, { 8, 10, 12, 14 } },
6736 { ICE_VLAN_EX, { 2, 0 } },
6737 { ICE_VLAN_IN, { 2, 0 } },
6740 /* The following table describes preferred grouping of recipes.
6741 * If a recipe that needs to be programmed is a superset or matches one of the
6742 * following combinations, then the recipe needs to be chained as per the
6746 static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
6747 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
6748 { ICE_MAC_IL, ICE_MAC_IL_HW },
6749 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
6750 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
6751 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
6752 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
6753 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
6754 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
6755 { ICE_TCP_IL, ICE_TCP_IL_HW },
6756 { ICE_UDP_OF, ICE_UDP_OF_HW },
6757 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
6758 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
6759 { ICE_VXLAN, ICE_UDP_OF_HW },
6760 { ICE_GENEVE, ICE_UDP_OF_HW },
6761 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
6762 { ICE_NVGRE, ICE_GRE_OF_HW },
6763 { ICE_GTP, ICE_UDP_OF_HW },
6764 { ICE_PPPOE, ICE_PPPOE_HW },
6765 { ICE_PFCP, ICE_UDP_ILOS_HW },
6766 { ICE_L2TPV3, ICE_L2TPV3_HW },
6767 { ICE_ESP, ICE_ESP_HW },
6768 { ICE_AH, ICE_AH_HW },
6769 { ICE_NAT_T, ICE_UDP_ILOS_HW },
6770 { ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
6771 { ICE_VLAN_EX, ICE_VLAN_OF_HW },
6772 { ICE_VLAN_IN, ICE_VLAN_OL_HW },
6776 * ice_find_recp - find a recipe
6777 * @hw: pointer to the hardware structure
6778 * @lkup_exts: extension sequence to match
6780 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
6782 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
6783 enum ice_sw_tunnel_type tun_type, u32 priority)
6785 bool refresh_required = true;
6786 struct ice_sw_recipe *recp;
6789 /* Walk through existing recipes to find a match */
6790 recp = hw->switch_info->recp_list;
6791 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6792 /* If recipe was not created for this ID, in SW bookkeeping,
6793 * check if FW has an entry for this recipe. If the FW has an
6794 * entry update it in our SW bookkeeping and continue with the
6797 if (!recp[i].recp_created)
6798 if (ice_get_recp_frm_fw(hw,
6799 hw->switch_info->recp_list, i,
6803 /* Skip inverse action recipes */
6804 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
6805 ICE_AQ_RECIPE_ACT_INV_ACT)
6808 /* if number of words we are looking for match */
6809 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
6810 struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
6811 struct ice_fv_word *be = lkup_exts->fv_words;
6812 u16 *cr = recp[i].lkup_exts.field_mask;
6813 u16 *de = lkup_exts->field_mask;
6817 /* ar, cr, and qr are related to the recipe words, while
6818 * be, de, and pe are related to the lookup words
6820 for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
6821 for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
6823 if (ar[qr].off == be[pe].off &&
6824 ar[qr].prot_id == be[pe].prot_id &&
6826 /* Found the "pe"th word in the
6831 /* After walking through all the words in the
6832 * "i"th recipe if "p"th word was not found then
6833 * this recipe is not what we are looking for.
6834 * So break out from this loop and try the next
6837 if (qr >= recp[i].lkup_exts.n_val_words) {
6842 /* If for "i"th recipe the found was never set to false
6843 * then it means we found our match
6845 if (tun_type == recp[i].tun_type && found &&
6846 priority == recp[i].priority)
6847 return i; /* Return the recipe ID */
6850 return ICE_MAX_NUM_RECIPES;
6854 * ice_change_proto_id_to_dvm - change proto id in prot_id_tbl
6856 * As protocol id for outer vlan is different in dvm and svm, if dvm is
6857 * supported protocol array record for outer vlan has to be modified to
6858 * reflect the value proper for DVM.
6860 void ice_change_proto_id_to_dvm(void)
6864 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
6865 if (ice_prot_id_tbl[i].type == ICE_VLAN_OFOS &&
6866 ice_prot_id_tbl[i].protocol_id != ICE_VLAN_OF_HW)
6867 ice_prot_id_tbl[i].protocol_id = ICE_VLAN_OF_HW;
6871 * ice_prot_type_to_id - get protocol ID from protocol type
6872 * @type: protocol type
6873 * @id: pointer to variable that will receive the ID
6875 * Returns true if found, false otherwise
6877 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
6881 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
6882 if (ice_prot_id_tbl[i].type == type) {
6883 *id = ice_prot_id_tbl[i].protocol_id;
6890 * ice_fill_valid_words - count valid words
6891 * @rule: advanced rule with lookup information
6892 * @lkup_exts: byte offset extractions of the words that are valid
6894 * calculate valid words in a lookup rule using mask value
6897 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
6898 struct ice_prot_lkup_ext *lkup_exts)
6900 u8 j, word, prot_id, ret_val;
6902 if (!ice_prot_type_to_id(rule->type, &prot_id))
6905 word = lkup_exts->n_val_words;
6907 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
6908 if (((u16 *)&rule->m_u)[j] &&
6909 (size_t)rule->type < ARRAY_SIZE(ice_prot_ext)) {
6910 /* No more space to accommodate */
6911 if (word >= ICE_MAX_CHAIN_WORDS)
6913 lkup_exts->fv_words[word].off =
6914 ice_prot_ext[rule->type].offs[j];
6915 lkup_exts->fv_words[word].prot_id =
6916 ice_prot_id_tbl[rule->type].protocol_id;
6917 lkup_exts->field_mask[word] =
6918 BE16_TO_CPU(((_FORCE_ __be16 *)&rule->m_u)[j]);
6922 ret_val = word - lkup_exts->n_val_words;
6923 lkup_exts->n_val_words = word;
6929 * ice_create_first_fit_recp_def - Create a recipe grouping
6930 * @hw: pointer to the hardware structure
6931 * @lkup_exts: an array of protocol header extractions
6932 * @rg_list: pointer to a list that stores new recipe groups
6933 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
6935 * Using first fit algorithm, take all the words that are still not done
6936 * and start grouping them in 4-word groups. Each group makes up one
6939 static enum ice_status
6940 ice_create_first_fit_recp_def(struct ice_hw *hw,
6941 struct ice_prot_lkup_ext *lkup_exts,
6942 struct LIST_HEAD_TYPE *rg_list,
6945 struct ice_pref_recipe_group *grp = NULL;
6950 if (!lkup_exts->n_val_words) {
6951 struct ice_recp_grp_entry *entry;
6953 entry = (struct ice_recp_grp_entry *)
6954 ice_malloc(hw, sizeof(*entry));
6956 return ICE_ERR_NO_MEMORY;
6957 LIST_ADD(&entry->l_entry, rg_list);
6958 grp = &entry->r_group;
6960 grp->n_val_pairs = 0;
6963 /* Walk through every word in the rule to check if it is not done. If so
6964 * then this word needs to be part of a new recipe.
6966 for (j = 0; j < lkup_exts->n_val_words; j++)
6967 if (!ice_is_bit_set(lkup_exts->done, j)) {
6969 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
6970 struct ice_recp_grp_entry *entry;
6972 entry = (struct ice_recp_grp_entry *)
6973 ice_malloc(hw, sizeof(*entry));
6975 return ICE_ERR_NO_MEMORY;
6976 LIST_ADD(&entry->l_entry, rg_list);
6977 grp = &entry->r_group;
6981 grp->pairs[grp->n_val_pairs].prot_id =
6982 lkup_exts->fv_words[j].prot_id;
6983 grp->pairs[grp->n_val_pairs].off =
6984 lkup_exts->fv_words[j].off;
6985 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
6993 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
6994 * @hw: pointer to the hardware structure
6995 * @fv_list: field vector with the extraction sequence information
6996 * @rg_list: recipe groupings with protocol-offset pairs
6998 * Helper function to fill in the field vector indices for protocol-offset
6999 * pairs. These indexes are then ultimately programmed into a recipe.
7001 static enum ice_status
7002 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
7003 struct LIST_HEAD_TYPE *rg_list)
7005 struct ice_sw_fv_list_entry *fv;
7006 struct ice_recp_grp_entry *rg;
7007 struct ice_fv_word *fv_ext;
7009 if (LIST_EMPTY(fv_list))
7012 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
7013 fv_ext = fv->fv_ptr->ew;
7015 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
7018 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
7019 struct ice_fv_word *pr;
7024 pr = &rg->r_group.pairs[i];
7025 mask = rg->r_group.mask[i];
7027 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
7028 if (fv_ext[j].prot_id == pr->prot_id &&
7029 fv_ext[j].off == pr->off) {
7032 /* Store index of field vector */
7034 rg->fv_mask[i] = mask;
7038 /* Protocol/offset could not be found, caller gave an
7042 return ICE_ERR_PARAM;
7050 * ice_find_free_recp_res_idx - find free result indexes for recipe
7051 * @hw: pointer to hardware structure
7052 * @profiles: bitmap of profiles that will be associated with the new recipe
7053 * @free_idx: pointer to variable to receive the free index bitmap
7055 * The algorithm used here is:
7056 * 1. When creating a new recipe, create a set P which contains all
7057 * Profiles that will be associated with our new recipe
7059 * 2. For each Profile p in set P:
7060 * a. Add all recipes associated with Profile p into set R
7061 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
7062 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
7063 * i. Or just assume they all have the same possible indexes:
7065 * i.e., PossibleIndexes = 0x0000F00000000000
7067 * 3. For each Recipe r in set R:
7068 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
7069 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
7071 * FreeIndexes will contain the bits indicating the indexes free for use,
7072 * then the code needs to update the recipe[r].used_result_idx_bits to
7073 * indicate which indexes were selected for use by this recipe.
7076 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
7077 ice_bitmap_t *free_idx)
7079 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
7080 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
7081 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
7084 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
7085 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
7086 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
7087 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
7089 ice_bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
7091 /* For each profile we are going to associate the recipe with, add the
7092 * recipes that are associated with that profile. This will give us
7093 * the set of recipes that our recipe may collide with. Also, determine
7094 * what possible result indexes are usable given this set of profiles.
7096 ice_for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
7097 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
7098 ICE_MAX_NUM_RECIPES);
7099 ice_and_bitmap(possible_idx, possible_idx,
7100 hw->switch_info->prof_res_bm[bit],
7104 /* For each recipe that our new recipe may collide with, determine
7105 * which indexes have been used.
7107 ice_for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
7108 ice_or_bitmap(used_idx, used_idx,
7109 hw->switch_info->recp_list[bit].res_idxs,
7112 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
7114 /* return number of free indexes */
7115 return (u16)ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS);
7119 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
7120 * @hw: pointer to hardware structure
7121 * @rm: recipe management list entry
7122 * @profiles: bitmap of profiles that will be associated.
7124 static enum ice_status
7125 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
7126 ice_bitmap_t *profiles)
7128 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
7129 struct ice_aqc_recipe_data_elem *tmp;
7130 struct ice_aqc_recipe_data_elem *buf;
7131 struct ice_recp_grp_entry *entry;
7132 enum ice_status status;
7138 /* When more than one recipe are required, another recipe is needed to
7139 * chain them together. Matching a tunnel metadata ID takes up one of
7140 * the match fields in the chaining recipe reducing the number of
7141 * chained recipes by one.
7143 /* check number of free result indices */
7144 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
7145 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
7147 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
7148 free_res_idx, rm->n_grp_count);
7150 if (rm->n_grp_count > 1) {
7151 if (rm->n_grp_count > free_res_idx)
7152 return ICE_ERR_MAX_LIMIT;
7157 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
7158 return ICE_ERR_MAX_LIMIT;
7160 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
7161 ICE_MAX_NUM_RECIPES,
7164 return ICE_ERR_NO_MEMORY;
7166 buf = (struct ice_aqc_recipe_data_elem *)
7167 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
7169 status = ICE_ERR_NO_MEMORY;
7173 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
7174 recipe_count = ICE_MAX_NUM_RECIPES;
7175 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
7177 if (status || recipe_count == 0)
7180 /* Allocate the recipe resources, and configure them according to the
7181 * match fields from protocol headers and extracted field vectors.
7183 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
7184 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
7187 status = ice_alloc_recipe(hw, &entry->rid);
7191 /* Clear the result index of the located recipe, as this will be
7192 * updated, if needed, later in the recipe creation process.
7194 tmp[0].content.result_indx = 0;
7196 buf[recps] = tmp[0];
7197 buf[recps].recipe_indx = (u8)entry->rid;
7198 /* if the recipe is a non-root recipe RID should be programmed
7199 * as 0 for the rules to be applied correctly.
7201 buf[recps].content.rid = 0;
7202 ice_memset(&buf[recps].content.lkup_indx, 0,
7203 sizeof(buf[recps].content.lkup_indx),
7206 /* All recipes use look-up index 0 to match switch ID. */
7207 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
7208 buf[recps].content.mask[0] =
7209 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
7210 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
7213 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
7214 buf[recps].content.lkup_indx[i] = 0x80;
7215 buf[recps].content.mask[i] = 0;
7218 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
7219 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
7220 buf[recps].content.mask[i + 1] =
7221 CPU_TO_LE16(entry->fv_mask[i]);
7224 if (rm->n_grp_count > 1) {
7225 /* Checks to see if there really is a valid result index
7228 if (chain_idx >= ICE_MAX_FV_WORDS) {
7229 ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
7230 status = ICE_ERR_MAX_LIMIT;
7234 entry->chain_idx = chain_idx;
7235 buf[recps].content.result_indx =
7236 ICE_AQ_RECIPE_RESULT_EN |
7237 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
7238 ICE_AQ_RECIPE_RESULT_DATA_M);
7239 ice_clear_bit(chain_idx, result_idx_bm);
7240 chain_idx = ice_find_first_bit(result_idx_bm,
7244 /* fill recipe dependencies */
7245 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
7246 ICE_MAX_NUM_RECIPES);
7247 ice_set_bit(buf[recps].recipe_indx,
7248 (ice_bitmap_t *)buf[recps].recipe_bitmap);
7249 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
7253 if (rm->n_grp_count == 1) {
7254 rm->root_rid = buf[0].recipe_indx;
7255 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
7256 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
7257 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
7258 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
7259 sizeof(buf[0].recipe_bitmap),
7260 ICE_NONDMA_TO_NONDMA);
7262 status = ICE_ERR_BAD_PTR;
7265 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
7266 * the recipe which is getting created if specified
7267 * by user. Usually any advanced switch filter, which results
7268 * into new extraction sequence, ended up creating a new recipe
7269 * of type ROOT and usually recipes are associated with profiles
7270 * Switch rule referreing newly created recipe, needs to have
7271 * either/or 'fwd' or 'join' priority, otherwise switch rule
7272 * evaluation will not happen correctly. In other words, if
7273 * switch rule to be evaluated on priority basis, then recipe
7274 * needs to have priority, otherwise it will be evaluated last.
7276 buf[0].content.act_ctrl_fwd_priority = rm->priority;
7278 struct ice_recp_grp_entry *last_chain_entry;
7281 /* Allocate the last recipe that will chain the outcomes of the
7282 * other recipes together
7284 status = ice_alloc_recipe(hw, &rid);
7288 buf[recps].recipe_indx = (u8)rid;
7289 buf[recps].content.rid = (u8)rid;
7290 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
7291 /* the new entry created should also be part of rg_list to
7292 * make sure we have complete recipe
7294 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
7295 sizeof(*last_chain_entry));
7296 if (!last_chain_entry) {
7297 status = ICE_ERR_NO_MEMORY;
7300 last_chain_entry->rid = rid;
7301 ice_memset(&buf[recps].content.lkup_indx, 0,
7302 sizeof(buf[recps].content.lkup_indx),
7304 /* All recipes use look-up index 0 to match switch ID. */
7305 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
7306 buf[recps].content.mask[0] =
7307 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
7308 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
7309 buf[recps].content.lkup_indx[i] =
7310 ICE_AQ_RECIPE_LKUP_IGNORE;
7311 buf[recps].content.mask[i] = 0;
7315 /* update r_bitmap with the recp that is used for chaining */
7316 ice_set_bit(rid, rm->r_bitmap);
7317 /* this is the recipe that chains all the other recipes so it
7318 * should not have a chaining ID to indicate the same
7320 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
7321 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
7323 last_chain_entry->fv_idx[i] = entry->chain_idx;
7324 buf[recps].content.lkup_indx[i] = entry->chain_idx;
7325 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
7326 ice_set_bit(entry->rid, rm->r_bitmap);
7328 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
7329 if (sizeof(buf[recps].recipe_bitmap) >=
7330 sizeof(rm->r_bitmap)) {
7331 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
7332 sizeof(buf[recps].recipe_bitmap),
7333 ICE_NONDMA_TO_NONDMA);
7335 status = ICE_ERR_BAD_PTR;
7338 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
7341 rm->root_rid = (u8)rid;
7343 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7347 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
7348 ice_release_change_lock(hw);
7352 /* Every recipe that just got created add it to the recipe
7355 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
7356 struct ice_switch_info *sw = hw->switch_info;
7357 bool is_root, idx_found = false;
7358 struct ice_sw_recipe *recp;
7359 u16 idx, buf_idx = 0;
7361 /* find buffer index for copying some data */
7362 for (idx = 0; idx < rm->n_grp_count; idx++)
7363 if (buf[idx].recipe_indx == entry->rid) {
7369 status = ICE_ERR_OUT_OF_RANGE;
7373 recp = &sw->recp_list[entry->rid];
7374 is_root = (rm->root_rid == entry->rid);
7375 recp->is_root = is_root;
7377 recp->root_rid = entry->rid;
7378 recp->big_recp = (is_root && rm->n_grp_count > 1);
7380 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
7381 entry->r_group.n_val_pairs *
7382 sizeof(struct ice_fv_word),
7383 ICE_NONDMA_TO_NONDMA);
7385 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
7386 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
7388 /* Copy non-result fv index values and masks to recipe. This
7389 * call will also update the result recipe bitmask.
7391 ice_collect_result_idx(&buf[buf_idx], recp);
7393 /* for non-root recipes, also copy to the root, this allows
7394 * easier matching of a complete chained recipe
7397 ice_collect_result_idx(&buf[buf_idx],
7398 &sw->recp_list[rm->root_rid]);
7400 recp->n_ext_words = entry->r_group.n_val_pairs;
7401 recp->chain_idx = entry->chain_idx;
7402 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
7403 recp->n_grp_count = rm->n_grp_count;
7404 recp->tun_type = rm->tun_type;
7405 recp->recp_created = true;
7419 * ice_create_recipe_group - creates recipe group
7420 * @hw: pointer to hardware structure
7421 * @rm: recipe management list entry
7422 * @lkup_exts: lookup elements
7424 static enum ice_status
7425 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
7426 struct ice_prot_lkup_ext *lkup_exts)
7428 enum ice_status status;
7431 rm->n_grp_count = 0;
7433 /* Create recipes for words that are marked not done by packing them
7436 status = ice_create_first_fit_recp_def(hw, lkup_exts,
7437 &rm->rg_list, &recp_count);
7439 rm->n_grp_count += recp_count;
7440 rm->n_ext_words = lkup_exts->n_val_words;
7441 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
7442 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
7443 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
7444 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
7451 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
7452 * @hw: pointer to hardware structure
7453 * @lkups: lookup elements or match criteria for the advanced recipe, one
7454 * structure per protocol header
7455 * @lkups_cnt: number of protocols
7456 * @bm: bitmap of field vectors to consider
7457 * @fv_list: pointer to a list that holds the returned field vectors
7459 static enum ice_status
7460 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7461 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
7463 enum ice_status status;
7470 prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
7472 return ICE_ERR_NO_MEMORY;
7474 for (i = 0; i < lkups_cnt; i++)
7475 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
7476 status = ICE_ERR_CFG;
7480 /* Find field vectors that include all specified protocol types */
7481 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
7484 ice_free(hw, prot_ids);
7489 * ice_tun_type_match_word - determine if tun type needs a match mask
7490 * @tun_type: tunnel type
7491 * @mask: mask to be used for the tunnel
7493 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
7496 case ICE_SW_TUN_VXLAN_GPE:
7497 case ICE_SW_TUN_GENEVE:
7498 case ICE_SW_TUN_VXLAN:
7499 case ICE_SW_TUN_NVGRE:
7500 case ICE_SW_TUN_UDP:
7501 case ICE_ALL_TUNNELS:
7502 case ICE_SW_TUN_AND_NON_TUN_QINQ:
7503 case ICE_NON_TUN_QINQ:
7504 case ICE_SW_TUN_PPPOE_QINQ:
7505 case ICE_SW_TUN_PPPOE_PAY_QINQ:
7506 case ICE_SW_TUN_PPPOE_IPV4_QINQ:
7507 case ICE_SW_TUN_PPPOE_IPV6_QINQ:
7508 *mask = ICE_TUN_FLAG_MASK;
7511 case ICE_SW_TUN_GENEVE_VLAN:
7512 case ICE_SW_TUN_VXLAN_VLAN:
7513 *mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
7523 * ice_add_special_words - Add words that are not protocols, such as metadata
7524 * @rinfo: other information regarding the rule e.g. priority and action info
7525 * @lkup_exts: lookup word structure
7527 static enum ice_status
7528 ice_add_special_words(struct ice_adv_rule_info *rinfo,
7529 struct ice_prot_lkup_ext *lkup_exts)
7533 /* If this is a tunneled packet, then add recipe index to match the
7534 * tunnel bit in the packet metadata flags.
7536 if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
7537 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
7538 u8 word = lkup_exts->n_val_words++;
7540 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
7541 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
7542 lkup_exts->field_mask[word] = mask;
7544 return ICE_ERR_MAX_LIMIT;
7551 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
7552 * @hw: pointer to hardware structure
7553 * @rinfo: other information regarding the rule e.g. priority and action info
7554 * @bm: pointer to memory for returning the bitmap of field vectors
7557 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
7560 enum ice_prof_type prof_type;
7562 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
7564 switch (rinfo->tun_type) {
7566 case ICE_NON_TUN_QINQ:
7567 prof_type = ICE_PROF_NON_TUN;
7569 case ICE_ALL_TUNNELS:
7570 prof_type = ICE_PROF_TUN_ALL;
7572 case ICE_SW_TUN_VXLAN_GPE:
7573 case ICE_SW_TUN_GENEVE:
7574 case ICE_SW_TUN_GENEVE_VLAN:
7575 case ICE_SW_TUN_VXLAN:
7576 case ICE_SW_TUN_VXLAN_VLAN:
7577 case ICE_SW_TUN_UDP:
7578 case ICE_SW_TUN_GTP:
7579 prof_type = ICE_PROF_TUN_UDP;
7581 case ICE_SW_TUN_NVGRE:
7582 prof_type = ICE_PROF_TUN_GRE;
7584 case ICE_SW_TUN_PPPOE:
7585 case ICE_SW_TUN_PPPOE_QINQ:
7586 prof_type = ICE_PROF_TUN_PPPOE;
7588 case ICE_SW_TUN_PPPOE_PAY:
7589 case ICE_SW_TUN_PPPOE_PAY_QINQ:
7590 ice_set_bit(ICE_PROFID_PPPOE_PAY, bm);
7592 case ICE_SW_TUN_PPPOE_IPV4:
7593 case ICE_SW_TUN_PPPOE_IPV4_QINQ:
7594 ice_set_bit(ICE_PROFID_PPPOE_IPV4_OTHER, bm);
7595 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
7596 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
7598 case ICE_SW_TUN_PPPOE_IPV4_TCP:
7599 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
7601 case ICE_SW_TUN_PPPOE_IPV4_UDP:
7602 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
7604 case ICE_SW_TUN_PPPOE_IPV6:
7605 case ICE_SW_TUN_PPPOE_IPV6_QINQ:
7606 ice_set_bit(ICE_PROFID_PPPOE_IPV6_OTHER, bm);
7607 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
7608 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
7610 case ICE_SW_TUN_PPPOE_IPV6_TCP:
7611 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
7613 case ICE_SW_TUN_PPPOE_IPV6_UDP:
7614 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
7616 case ICE_SW_TUN_PROFID_IPV6_ESP:
7617 case ICE_SW_TUN_IPV6_ESP:
7618 ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
7620 case ICE_SW_TUN_PROFID_IPV6_AH:
7621 case ICE_SW_TUN_IPV6_AH:
7622 ice_set_bit(ICE_PROFID_IPV6_AH, bm);
7624 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
7625 case ICE_SW_TUN_IPV6_L2TPV3:
7626 ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
7628 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
7629 case ICE_SW_TUN_IPV6_NAT_T:
7630 ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
7632 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
7633 ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
7635 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
7636 ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
7638 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
7639 ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
7641 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
7642 ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
7644 case ICE_SW_TUN_IPV4_NAT_T:
7645 ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
7647 case ICE_SW_TUN_IPV4_L2TPV3:
7648 ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
7650 case ICE_SW_TUN_IPV4_ESP:
7651 ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
7653 case ICE_SW_TUN_IPV4_AH:
7654 ice_set_bit(ICE_PROFID_IPV4_AH, bm);
7656 case ICE_SW_IPV4_TCP:
7657 ice_set_bit(ICE_PROFID_IPV4_TCP, bm);
7659 case ICE_SW_IPV4_UDP:
7660 ice_set_bit(ICE_PROFID_IPV4_UDP, bm);
7662 case ICE_SW_IPV6_TCP:
7663 ice_set_bit(ICE_PROFID_IPV6_TCP, bm);
7665 case ICE_SW_IPV6_UDP:
7666 ice_set_bit(ICE_PROFID_IPV6_UDP, bm);
7668 case ICE_SW_TUN_IPV4_GTPU_NO_PAY:
7669 ice_set_bit(ICE_PROFID_IPV4_GTPU_TEID, bm);
7671 case ICE_SW_TUN_IPV6_GTPU_NO_PAY:
7672 ice_set_bit(ICE_PROFID_IPV6_GTPU_TEID, bm);
7674 case ICE_SW_TUN_IPV4_GTPU_IPV4:
7675 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_OTHER, bm);
7676 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
7677 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
7679 case ICE_SW_TUN_IPV4_GTPU_IPV4_UDP:
7680 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
7682 case ICE_SW_TUN_IPV4_GTPU_IPV4_TCP:
7683 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
7685 case ICE_SW_TUN_IPV4_GTPU_EH_IPV4:
7686 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, bm);
7687 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
7688 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
7690 case ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP:
7691 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
7693 case ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP:
7694 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
7696 case ICE_SW_TUN_IPV6_GTPU_IPV4:
7697 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_OTHER, bm);
7698 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
7699 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
7701 case ICE_SW_TUN_IPV6_GTPU_IPV4_UDP:
7702 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
7704 case ICE_SW_TUN_IPV6_GTPU_IPV4_TCP:
7705 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
7707 case ICE_SW_TUN_IPV6_GTPU_EH_IPV4:
7708 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, bm);
7709 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
7710 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
7712 case ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP:
7713 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
7715 case ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP:
7716 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
7718 case ICE_SW_TUN_IPV4_GTPU_IPV6:
7719 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_OTHER, bm);
7720 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
7721 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
7723 case ICE_SW_TUN_IPV4_GTPU_IPV6_UDP:
7724 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
7726 case ICE_SW_TUN_IPV4_GTPU_IPV6_TCP:
7727 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
7729 case ICE_SW_TUN_IPV4_GTPU_EH_IPV6:
7730 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, bm);
7731 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
7732 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
7734 case ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP:
7735 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
7737 case ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP:
7738 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
7740 case ICE_SW_TUN_IPV6_GTPU_IPV6:
7741 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_OTHER, bm);
7742 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
7743 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
7745 case ICE_SW_TUN_IPV6_GTPU_IPV6_UDP:
7746 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
7748 case ICE_SW_TUN_IPV6_GTPU_IPV6_TCP:
7749 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
7751 case ICE_SW_TUN_IPV6_GTPU_EH_IPV6:
7752 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, bm);
7753 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
7754 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
7756 case ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP:
7757 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
7759 case ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP:
7760 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
7762 case ICE_SW_TUN_AND_NON_TUN:
7763 case ICE_SW_TUN_AND_NON_TUN_QINQ:
7765 prof_type = ICE_PROF_ALL;
7769 ice_get_sw_fv_bitmap(hw, prof_type, bm);
7773 * ice_is_prof_rule - determine if rule type is a profile rule
7774 * @type: the rule type
7776 * if the rule type is a profile rule, that means that there no field value
7777 * match required, in this case just a profile hit is required.
7779 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
7782 case ICE_SW_TUN_PROFID_IPV6_ESP:
7783 case ICE_SW_TUN_PROFID_IPV6_AH:
7784 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
7785 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
7786 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
7787 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
7788 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
7789 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
7799 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
7800 * @hw: pointer to hardware structure
7801 * @lkups: lookup elements or match criteria for the advanced recipe, one
7802 * structure per protocol header
7803 * @lkups_cnt: number of protocols
7804 * @rinfo: other information regarding the rule e.g. priority and action info
7805 * @rid: return the recipe ID of the recipe created
7807 static enum ice_status
7808 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7809 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
7811 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
7812 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7813 struct ice_prot_lkup_ext *lkup_exts;
7814 struct ice_recp_grp_entry *r_entry;
7815 struct ice_sw_fv_list_entry *fvit;
7816 struct ice_recp_grp_entry *r_tmp;
7817 struct ice_sw_fv_list_entry *tmp;
7818 enum ice_status status = ICE_SUCCESS;
7819 struct ice_sw_recipe *rm;
7822 if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
7823 return ICE_ERR_PARAM;
7825 lkup_exts = (struct ice_prot_lkup_ext *)
7826 ice_malloc(hw, sizeof(*lkup_exts));
7828 return ICE_ERR_NO_MEMORY;
7830 /* Determine the number of words to be matched and if it exceeds a
7831 * recipe's restrictions
7833 for (i = 0; i < lkups_cnt; i++) {
7836 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
7837 status = ICE_ERR_CFG;
7838 goto err_free_lkup_exts;
7841 count = ice_fill_valid_words(&lkups[i], lkup_exts);
7843 status = ICE_ERR_CFG;
7844 goto err_free_lkup_exts;
7848 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
7850 status = ICE_ERR_NO_MEMORY;
7851 goto err_free_lkup_exts;
7854 /* Get field vectors that contain fields extracted from all the protocol
7855 * headers being programmed.
7857 INIT_LIST_HEAD(&rm->fv_list);
7858 INIT_LIST_HEAD(&rm->rg_list);
7860 /* Get bitmap of field vectors (profiles) that are compatible with the
7861 * rule request; only these will be searched in the subsequent call to
7864 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
7866 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
7870 /* Create any special protocol/offset pairs, such as looking at tunnel
7871 * bits by extracting metadata
7873 status = ice_add_special_words(rinfo, lkup_exts);
7875 goto err_free_lkup_exts;
7877 /* Group match words into recipes using preferred recipe grouping
7880 status = ice_create_recipe_group(hw, rm, lkup_exts);
7884 /* set the recipe priority if specified */
7885 rm->priority = (u8)rinfo->priority;
7887 /* Find offsets from the field vector. Pick the first one for all the
7890 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
7894 /* An empty FV list means to use all the profiles returned in the
7897 if (LIST_EMPTY(&rm->fv_list)) {
7900 ice_for_each_set_bit(j, fv_bitmap, ICE_MAX_NUM_PROFILES) {
7901 struct ice_sw_fv_list_entry *fvl;
7903 fvl = (struct ice_sw_fv_list_entry *)
7904 ice_malloc(hw, sizeof(*fvl));
7908 fvl->profile_id = j;
7909 LIST_ADD(&fvl->list_entry, &rm->fv_list);
7913 /* get bitmap of all profiles the recipe will be associated with */
7914 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7915 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7917 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
7918 ice_set_bit((u16)fvit->profile_id, profiles);
7921 /* Look for a recipe which matches our requested fv / mask list */
7922 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type, rinfo->priority);
7923 if (*rid < ICE_MAX_NUM_RECIPES)
7924 /* Success if found a recipe that match the existing criteria */
7927 rm->tun_type = rinfo->tun_type;
7928 /* Recipe we need does not exist, add a recipe */
7929 status = ice_add_sw_recipe(hw, rm, profiles);
7933 /* Associate all the recipes created with all the profiles in the
7934 * common field vector.
7936 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7938 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
7941 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
7942 (u8 *)r_bitmap, NULL);
7946 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
7947 ICE_MAX_NUM_RECIPES);
7948 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7952 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
7955 ice_release_change_lock(hw);
7960 /* Update profile to recipe bitmap array */
7961 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
7962 ICE_MAX_NUM_RECIPES);
7964 /* Update recipe to profile bitmap array */
7965 ice_for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
7966 ice_set_bit((u16)fvit->profile_id,
7967 recipe_to_profile[j]);
7970 *rid = rm->root_rid;
7971 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
7972 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
7974 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
7975 ice_recp_grp_entry, l_entry) {
7976 LIST_DEL(&r_entry->l_entry);
7977 ice_free(hw, r_entry);
7980 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
7982 LIST_DEL(&fvit->list_entry);
7987 ice_free(hw, rm->root_buf);
7992 ice_free(hw, lkup_exts);
7998 * ice_find_dummy_packet - find dummy packet by tunnel type
8000 * @lkups: lookup elements or match criteria for the advanced recipe, one
8001 * structure per protocol header
8002 * @lkups_cnt: number of protocols
8003 * @tun_type: tunnel type from the match criteria
8004 * @pkt: dummy packet to fill according to filter match criteria
8005 * @pkt_len: packet length of dummy packet
8006 * @offsets: pointer to receive the pointer to the offsets for the packet
8009 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
8010 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
8012 const struct ice_dummy_pkt_offsets **offsets)
8014 bool tcp = false, udp = false, ipv6 = false, vlan = false;
8015 bool gre = false, mpls = false;
8018 for (i = 0; i < lkups_cnt; i++) {
8019 if (lkups[i].type == ICE_UDP_ILOS)
8021 else if (lkups[i].type == ICE_TCP_IL)
8023 else if (lkups[i].type == ICE_IPV6_OFOS)
8025 else if (lkups[i].type == ICE_VLAN_OFOS)
8027 else if (lkups[i].type == ICE_ETYPE_OL &&
8028 lkups[i].h_u.ethertype.ethtype_id ==
8029 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
8030 lkups[i].m_u.ethertype.ethtype_id ==
8031 CPU_TO_BE16(0xFFFF))
8033 else if (lkups[i].type == ICE_IPV4_OFOS &&
8034 lkups[i].h_u.ipv4_hdr.protocol ==
8035 ICE_IPV4_NVGRE_PROTO_ID &&
8036 lkups[i].m_u.ipv4_hdr.protocol ==
8039 else if (lkups[i].type == ICE_PPPOE &&
8040 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
8041 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
8042 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
8045 else if (lkups[i].type == ICE_IPV4_IL &&
8046 lkups[i].h_u.ipv4_hdr.protocol ==
8048 lkups[i].m_u.ipv4_hdr.protocol ==
8051 else if (lkups[i].type == ICE_ETYPE_OL &&
8052 lkups[i].h_u.ethertype.ethtype_id ==
8053 CPU_TO_BE16(ICE_MPLS_ETHER_ID) &&
8054 lkups[i].m_u.ethertype.ethtype_id == 0xFFFF)
8058 if ((tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
8059 tun_type == ICE_NON_TUN_QINQ) && ipv6) {
8061 *pkt = dummy_qinq_ipv6_tcp_pkt;
8062 *pkt_len = sizeof(dummy_qinq_ipv6_tcp_pkt);
8063 *offsets = dummy_qinq_ipv6_tcp_packet_offsets;
8068 *pkt = dummy_qinq_ipv6_udp_pkt;
8069 *pkt_len = sizeof(dummy_qinq_ipv6_udp_pkt);
8070 *offsets = dummy_qinq_ipv6_udp_packet_offsets;
8074 *pkt = dummy_qinq_ipv6_pkt;
8075 *pkt_len = sizeof(dummy_qinq_ipv6_pkt);
8076 *offsets = dummy_qinq_ipv6_packet_offsets;
8078 } else if (tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
8079 tun_type == ICE_NON_TUN_QINQ) {
8081 *pkt = dummy_qinq_ipv4_tcp_pkt;
8082 *pkt_len = sizeof(dummy_qinq_ipv4_tcp_pkt);
8083 *offsets = dummy_qinq_ipv4_tcp_packet_offsets;
8088 *pkt = dummy_qinq_ipv4_udp_pkt;
8089 *pkt_len = sizeof(dummy_qinq_ipv4_udp_pkt);
8090 *offsets = dummy_qinq_ipv4_udp_packet_offsets;
8094 *pkt = dummy_qinq_ipv4_pkt;
8095 *pkt_len = sizeof(dummy_qinq_ipv4_pkt);
8096 *offsets = dummy_qinq_ipv4_packet_offsets;
8100 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_QINQ) {
8101 *pkt = dummy_qinq_pppoe_ipv6_packet;
8102 *pkt_len = sizeof(dummy_qinq_pppoe_ipv6_packet);
8103 *offsets = dummy_qinq_pppoe_packet_ipv6_offsets;
8105 } else if (tun_type == ICE_SW_TUN_PPPOE_IPV4_QINQ) {
8106 *pkt = dummy_qinq_pppoe_ipv4_pkt;
8107 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
8108 *offsets = dummy_qinq_pppoe_ipv4_packet_offsets;
8110 } else if (tun_type == ICE_SW_TUN_PPPOE_QINQ && ipv6) {
8111 *pkt = dummy_qinq_pppoe_ipv6_packet;
8112 *pkt_len = sizeof(dummy_qinq_pppoe_ipv6_packet);
8113 *offsets = dummy_qinq_pppoe_packet_offsets;
8115 } else if (tun_type == ICE_SW_TUN_PPPOE_QINQ ||
8116 tun_type == ICE_SW_TUN_PPPOE_PAY_QINQ) {
8117 *pkt = dummy_qinq_pppoe_ipv4_pkt;
8118 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
8119 *offsets = dummy_qinq_pppoe_packet_offsets;
8123 if (tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY) {
8124 *pkt = dummy_ipv4_gtpu_ipv4_packet;
8125 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
8126 *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
8128 } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
8129 *pkt = dummy_ipv6_gtp_packet;
8130 *pkt_len = sizeof(dummy_ipv6_gtp_packet);
8131 *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
8135 if (tun_type == ICE_SW_TUN_IPV4_ESP) {
8136 *pkt = dummy_ipv4_esp_pkt;
8137 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
8138 *offsets = dummy_ipv4_esp_packet_offsets;
8142 if (tun_type == ICE_SW_TUN_IPV6_ESP) {
8143 *pkt = dummy_ipv6_esp_pkt;
8144 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
8145 *offsets = dummy_ipv6_esp_packet_offsets;
8149 if (tun_type == ICE_SW_TUN_IPV4_AH) {
8150 *pkt = dummy_ipv4_ah_pkt;
8151 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
8152 *offsets = dummy_ipv4_ah_packet_offsets;
8156 if (tun_type == ICE_SW_TUN_IPV6_AH) {
8157 *pkt = dummy_ipv6_ah_pkt;
8158 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
8159 *offsets = dummy_ipv6_ah_packet_offsets;
8163 if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
8164 *pkt = dummy_ipv4_nat_pkt;
8165 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
8166 *offsets = dummy_ipv4_nat_packet_offsets;
8170 if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
8171 *pkt = dummy_ipv6_nat_pkt;
8172 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
8173 *offsets = dummy_ipv6_nat_packet_offsets;
8177 if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
8178 *pkt = dummy_ipv4_l2tpv3_pkt;
8179 *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
8180 *offsets = dummy_ipv4_l2tpv3_packet_offsets;
8184 if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
8185 *pkt = dummy_ipv6_l2tpv3_pkt;
8186 *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
8187 *offsets = dummy_ipv6_l2tpv3_packet_offsets;
8191 if (tun_type == ICE_SW_TUN_GTP) {
8192 *pkt = dummy_udp_gtp_packet;
8193 *pkt_len = sizeof(dummy_udp_gtp_packet);
8194 *offsets = dummy_udp_gtp_packet_offsets;
8198 if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4 ||
8199 tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV4) {
8200 *pkt = dummy_ipv4_gtpu_ipv4_packet;
8201 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
8202 *offsets = dummy_ipv4_gtpu_ipv4_packet_offsets;
8206 if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4_UDP ||
8207 tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP) {
8208 *pkt = dummy_ipv4_gtpu_ipv4_udp_packet;
8209 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_udp_packet);
8210 *offsets = dummy_ipv4_gtpu_ipv4_udp_packet_offsets;
8214 if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4_TCP ||
8215 tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP) {
8216 *pkt = dummy_ipv4_gtpu_ipv4_tcp_packet;
8217 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_tcp_packet);
8218 *offsets = dummy_ipv4_gtpu_ipv4_tcp_packet_offsets;
8222 if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6 ||
8223 tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV6) {
8224 *pkt = dummy_ipv4_gtpu_ipv6_packet;
8225 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_packet);
8226 *offsets = dummy_ipv4_gtpu_ipv6_packet_offsets;
8230 if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6_UDP ||
8231 tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP) {
8232 *pkt = dummy_ipv4_gtpu_ipv6_udp_packet;
8233 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_udp_packet);
8234 *offsets = dummy_ipv4_gtpu_ipv6_udp_packet_offsets;
8238 if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6_TCP ||
8239 tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP) {
8240 *pkt = dummy_ipv4_gtpu_ipv6_tcp_packet;
8241 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_tcp_packet);
8242 *offsets = dummy_ipv4_gtpu_ipv6_tcp_packet_offsets;
8246 if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4 ||
8247 tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV4) {
8248 *pkt = dummy_ipv6_gtpu_ipv4_packet;
8249 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_packet);
8250 *offsets = dummy_ipv6_gtpu_ipv4_packet_offsets;
8254 if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4_UDP ||
8255 tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP) {
8256 *pkt = dummy_ipv6_gtpu_ipv4_udp_packet;
8257 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_udp_packet);
8258 *offsets = dummy_ipv6_gtpu_ipv4_udp_packet_offsets;
8262 if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4_TCP ||
8263 tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP) {
8264 *pkt = dummy_ipv6_gtpu_ipv4_tcp_packet;
8265 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_tcp_packet);
8266 *offsets = dummy_ipv6_gtpu_ipv4_tcp_packet_offsets;
8270 if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6 ||
8271 tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV6) {
8272 *pkt = dummy_ipv6_gtpu_ipv6_packet;
8273 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
8274 *offsets = dummy_ipv6_gtpu_ipv6_packet_offsets;
8278 if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6_UDP ||
8279 tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP) {
8280 *pkt = dummy_ipv6_gtpu_ipv6_udp_packet;
8281 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_udp_packet);
8282 *offsets = dummy_ipv6_gtpu_ipv6_udp_packet_offsets;
8286 if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6_TCP ||
8287 tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP) {
8288 *pkt = dummy_ipv6_gtpu_ipv6_tcp_packet;
8289 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_tcp_packet);
8290 *offsets = dummy_ipv6_gtpu_ipv6_tcp_packet_offsets;
8294 if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
8295 *pkt = dummy_pppoe_ipv6_packet;
8296 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
8297 *offsets = dummy_pppoe_packet_offsets;
8299 } else if (tun_type == ICE_SW_TUN_PPPOE ||
8300 tun_type == ICE_SW_TUN_PPPOE_PAY) {
8301 *pkt = dummy_pppoe_ipv4_packet;
8302 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
8303 *offsets = dummy_pppoe_packet_offsets;
8307 if (tun_type == ICE_SW_TUN_PPPOE_IPV4) {
8308 *pkt = dummy_pppoe_ipv4_packet;
8309 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
8310 *offsets = dummy_pppoe_packet_ipv4_offsets;
8314 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_TCP) {
8315 *pkt = dummy_pppoe_ipv4_tcp_packet;
8316 *pkt_len = sizeof(dummy_pppoe_ipv4_tcp_packet);
8317 *offsets = dummy_pppoe_ipv4_tcp_packet_offsets;
8321 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_UDP) {
8322 *pkt = dummy_pppoe_ipv4_udp_packet;
8323 *pkt_len = sizeof(dummy_pppoe_ipv4_udp_packet);
8324 *offsets = dummy_pppoe_ipv4_udp_packet_offsets;
8328 if (tun_type == ICE_SW_TUN_PPPOE_IPV6) {
8329 *pkt = dummy_pppoe_ipv6_packet;
8330 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
8331 *offsets = dummy_pppoe_packet_ipv6_offsets;
8335 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_TCP) {
8336 *pkt = dummy_pppoe_ipv6_tcp_packet;
8337 *pkt_len = sizeof(dummy_pppoe_ipv6_tcp_packet);
8338 *offsets = dummy_pppoe_packet_ipv6_tcp_offsets;
8342 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_UDP) {
8343 *pkt = dummy_pppoe_ipv6_udp_packet;
8344 *pkt_len = sizeof(dummy_pppoe_ipv6_udp_packet);
8345 *offsets = dummy_pppoe_packet_ipv6_udp_offsets;
8349 if (tun_type == ICE_SW_IPV4_TCP) {
8350 *pkt = dummy_tcp_packet;
8351 *pkt_len = sizeof(dummy_tcp_packet);
8352 *offsets = dummy_tcp_packet_offsets;
8356 if (tun_type == ICE_SW_IPV4_UDP) {
8357 *pkt = dummy_udp_packet;
8358 *pkt_len = sizeof(dummy_udp_packet);
8359 *offsets = dummy_udp_packet_offsets;
8363 if (tun_type == ICE_SW_IPV6_TCP) {
8364 *pkt = dummy_tcp_ipv6_packet;
8365 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
8366 *offsets = dummy_tcp_ipv6_packet_offsets;
8370 if (tun_type == ICE_SW_IPV6_UDP) {
8371 *pkt = dummy_udp_ipv6_packet;
8372 *pkt_len = sizeof(dummy_udp_ipv6_packet);
8373 *offsets = dummy_udp_ipv6_packet_offsets;
8377 if (tun_type == ICE_ALL_TUNNELS) {
8378 *pkt = dummy_gre_udp_packet;
8379 *pkt_len = sizeof(dummy_gre_udp_packet);
8380 *offsets = dummy_gre_udp_packet_offsets;
8384 if (tun_type == ICE_SW_TUN_NVGRE || gre) {
8386 *pkt = dummy_gre_tcp_packet;
8387 *pkt_len = sizeof(dummy_gre_tcp_packet);
8388 *offsets = dummy_gre_tcp_packet_offsets;
8392 *pkt = dummy_gre_udp_packet;
8393 *pkt_len = sizeof(dummy_gre_udp_packet);
8394 *offsets = dummy_gre_udp_packet_offsets;
8398 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
8399 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
8400 tun_type == ICE_SW_TUN_GENEVE_VLAN ||
8401 tun_type == ICE_SW_TUN_VXLAN_VLAN) {
8403 *pkt = dummy_udp_tun_tcp_packet;
8404 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
8405 *offsets = dummy_udp_tun_tcp_packet_offsets;
8409 *pkt = dummy_udp_tun_udp_packet;
8410 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
8411 *offsets = dummy_udp_tun_udp_packet_offsets;
8417 *pkt = dummy_vlan_udp_packet;
8418 *pkt_len = sizeof(dummy_vlan_udp_packet);
8419 *offsets = dummy_vlan_udp_packet_offsets;
8422 *pkt = dummy_udp_packet;
8423 *pkt_len = sizeof(dummy_udp_packet);
8424 *offsets = dummy_udp_packet_offsets;
8426 } else if (udp && ipv6) {
8428 *pkt = dummy_vlan_udp_ipv6_packet;
8429 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
8430 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
8433 *pkt = dummy_udp_ipv6_packet;
8434 *pkt_len = sizeof(dummy_udp_ipv6_packet);
8435 *offsets = dummy_udp_ipv6_packet_offsets;
8437 } else if ((tcp && ipv6) || ipv6) {
8439 *pkt = dummy_vlan_tcp_ipv6_packet;
8440 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
8441 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
8444 *pkt = dummy_tcp_ipv6_packet;
8445 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
8446 *offsets = dummy_tcp_ipv6_packet_offsets;
8451 *pkt = dummy_vlan_tcp_packet;
8452 *pkt_len = sizeof(dummy_vlan_tcp_packet);
8453 *offsets = dummy_vlan_tcp_packet_offsets;
8455 *pkt = dummy_mpls_packet;
8456 *pkt_len = sizeof(dummy_mpls_packet);
8457 *offsets = dummy_mpls_packet_offsets;
8459 *pkt = dummy_tcp_packet;
8460 *pkt_len = sizeof(dummy_tcp_packet);
8461 *offsets = dummy_tcp_packet_offsets;
8466 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
8468 * @lkups: lookup elements or match criteria for the advanced recipe, one
8469 * structure per protocol header
8470 * @lkups_cnt: number of protocols
8471 * @s_rule: stores rule information from the match criteria
8472 * @dummy_pkt: dummy packet to fill according to filter match criteria
8473 * @pkt_len: packet length of dummy packet
8474 * @offsets: offset info for the dummy packet
8476 static enum ice_status
8477 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
8478 struct ice_aqc_sw_rules_elem *s_rule,
8479 const u8 *dummy_pkt, u16 pkt_len,
8480 const struct ice_dummy_pkt_offsets *offsets)
8485 /* Start with a packet with a pre-defined/dummy content. Then, fill
8486 * in the header values to be looked up or matched.
8488 pkt = s_rule->pdata.lkup_tx_rx.hdr;
8490 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
8492 for (i = 0; i < lkups_cnt; i++) {
8493 enum ice_protocol_type type;
8494 u16 offset = 0, len = 0, j;
8497 /* find the start of this layer; it should be found since this
8498 * was already checked when search for the dummy packet
8500 type = lkups[i].type;
8501 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
8502 if (type == offsets[j].type) {
8503 offset = offsets[j].offset;
8508 /* this should never happen in a correct calling sequence */
8510 return ICE_ERR_PARAM;
8512 switch (lkups[i].type) {
8515 len = sizeof(struct ice_ether_hdr);
8518 len = sizeof(struct ice_ethtype_hdr);
8523 len = sizeof(struct ice_vlan_hdr);
8527 len = sizeof(struct ice_ipv4_hdr);
8531 len = sizeof(struct ice_ipv6_hdr);
8536 len = sizeof(struct ice_l4_hdr);
8539 len = sizeof(struct ice_sctp_hdr);
8542 len = sizeof(struct ice_nvgre);
8547 len = sizeof(struct ice_udp_tnl_hdr);
8551 case ICE_GTP_NO_PAY:
8552 len = sizeof(struct ice_udp_gtp_hdr);
8555 len = sizeof(struct ice_pppoe_hdr);
8558 len = sizeof(struct ice_esp_hdr);
8561 len = sizeof(struct ice_nat_t_hdr);
8564 len = sizeof(struct ice_ah_hdr);
8567 len = sizeof(struct ice_l2tpv3_sess_hdr);
8570 return ICE_ERR_PARAM;
8573 /* the length should be a word multiple */
8574 if (len % ICE_BYTES_PER_WORD)
8577 /* We have the offset to the header start, the length, the
8578 * caller's header values and mask. Use this information to
8579 * copy the data into the dummy packet appropriately based on
8580 * the mask. Note that we need to only write the bits as
8581 * indicated by the mask to make sure we don't improperly write
8582 * over any significant packet data.
8584 for (j = 0; j < len / sizeof(u16); j++)
8585 if (((u16 *)&lkups[i].m_u)[j])
8586 ((u16 *)(pkt + offset))[j] =
8587 (((u16 *)(pkt + offset))[j] &
8588 ~((u16 *)&lkups[i].m_u)[j]) |
8589 (((u16 *)&lkups[i].h_u)[j] &
8590 ((u16 *)&lkups[i].m_u)[j]);
8593 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
8599 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
8600 * @hw: pointer to the hardware structure
8601 * @tun_type: tunnel type
8602 * @pkt: dummy packet to fill in
8603 * @offsets: offset info for the dummy packet
8605 static enum ice_status
8606 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
8607 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
8612 case ICE_SW_TUN_AND_NON_TUN:
8613 case ICE_SW_TUN_VXLAN_GPE:
8614 case ICE_SW_TUN_VXLAN:
8615 case ICE_SW_TUN_VXLAN_VLAN:
8616 case ICE_SW_TUN_UDP:
8617 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
8621 case ICE_SW_TUN_GENEVE:
8622 case ICE_SW_TUN_GENEVE_VLAN:
8623 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
8628 /* Nothing needs to be done for this tunnel type */
8632 /* Find the outer UDP protocol header and insert the port number */
8633 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
8634 if (offsets[i].type == ICE_UDP_OF) {
8635 struct ice_l4_hdr *hdr;
8638 offset = offsets[i].offset;
8639 hdr = (struct ice_l4_hdr *)&pkt[offset];
8640 hdr->dst_port = CPU_TO_BE16(open_port);
8650 * ice_find_adv_rule_entry - Search a rule entry
8651 * @hw: pointer to the hardware structure
8652 * @lkups: lookup elements or match criteria for the advanced recipe, one
8653 * structure per protocol header
8654 * @lkups_cnt: number of protocols
8655 * @recp_id: recipe ID for which we are finding the rule
8656 * @rinfo: other information regarding the rule e.g. priority and action info
8658 * Helper function to search for a given advance rule entry
8659 * Returns pointer to entry storing the rule if found
8661 static struct ice_adv_fltr_mgmt_list_entry *
8662 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8663 u16 lkups_cnt, u16 recp_id,
8664 struct ice_adv_rule_info *rinfo)
8666 struct ice_adv_fltr_mgmt_list_entry *list_itr;
8667 struct ice_switch_info *sw = hw->switch_info;
8670 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
8671 ice_adv_fltr_mgmt_list_entry, list_entry) {
8672 bool lkups_matched = true;
8674 if (lkups_cnt != list_itr->lkups_cnt)
8676 for (i = 0; i < list_itr->lkups_cnt; i++)
8677 if (memcmp(&list_itr->lkups[i], &lkups[i],
8679 lkups_matched = false;
8682 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
8683 rinfo->tun_type == list_itr->rule_info.tun_type &&
8691 * ice_adv_add_update_vsi_list
8692 * @hw: pointer to the hardware structure
8693 * @m_entry: pointer to current adv filter management list entry
8694 * @cur_fltr: filter information from the book keeping entry
8695 * @new_fltr: filter information with the new VSI to be added
8697 * Call AQ command to add or update previously created VSI list with new VSI.
8699 * Helper function to do book keeping associated with adding filter information
8700 * The algorithm to do the booking keeping is described below :
8701 * When a VSI needs to subscribe to a given advanced filter
8702 * if only one VSI has been added till now
8703 * Allocate a new VSI list and add two VSIs
8704 * to this list using switch rule command
8705 * Update the previously created switch rule with the
8706 * newly created VSI list ID
8707 * if a VSI list was previously created
8708 * Add the new VSI to the previously created VSI list set
8709 * using the update switch rule command
8711 static enum ice_status
8712 ice_adv_add_update_vsi_list(struct ice_hw *hw,
8713 struct ice_adv_fltr_mgmt_list_entry *m_entry,
8714 struct ice_adv_rule_info *cur_fltr,
8715 struct ice_adv_rule_info *new_fltr)
8717 enum ice_status status;
8718 u16 vsi_list_id = 0;
8720 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
8721 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
8722 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
8723 return ICE_ERR_NOT_IMPL;
8725 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
8726 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
8727 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
8728 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
8729 return ICE_ERR_NOT_IMPL;
8731 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
8732 /* Only one entry existed in the mapping and it was not already
8733 * a part of a VSI list. So, create a VSI list with the old and
8736 struct ice_fltr_info tmp_fltr;
8737 u16 vsi_handle_arr[2];
8739 /* A rule already exists with the new VSI being added */
8740 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
8741 new_fltr->sw_act.fwd_id.hw_vsi_id)
8742 return ICE_ERR_ALREADY_EXISTS;
8744 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
8745 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
8746 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
8752 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
8753 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
8754 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
8755 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
8756 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
8757 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
8759 /* Update the previous switch rule of "forward to VSI" to
8762 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
8766 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
8767 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
8768 m_entry->vsi_list_info =
8769 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
8772 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
8774 if (!m_entry->vsi_list_info)
8777 /* A rule already exists with the new VSI being added */
8778 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
8781 /* Update the previously created VSI list set with
8782 * the new VSI ID passed in
8784 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
8786 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
8788 ice_aqc_opc_update_sw_rules,
8790 /* update VSI list mapping info with new VSI ID */
8792 ice_set_bit(vsi_handle,
8793 m_entry->vsi_list_info->vsi_map);
8796 m_entry->vsi_count++;
8801 * ice_add_adv_rule - helper function to create an advanced switch rule
8802 * @hw: pointer to the hardware structure
8803 * @lkups: information on the words that needs to be looked up. All words
8804 * together makes one recipe
8805 * @lkups_cnt: num of entries in the lkups array
8806 * @rinfo: other information related to the rule that needs to be programmed
8807 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
8808 * ignored is case of error.
8810 * This function can program only 1 rule at a time. The lkups is used to
8811 * describe the all the words that forms the "lookup" portion of the recipe.
8812 * These words can span multiple protocols. Callers to this function need to
8813 * pass in a list of protocol headers with lookup information along and mask
8814 * that determines which words are valid from the given protocol header.
8815 * rinfo describes other information related to this rule such as forwarding
8816 * IDs, priority of this rule, etc.
8819 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8820 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
8821 struct ice_rule_query_data *added_entry)
8823 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
8824 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
8825 const struct ice_dummy_pkt_offsets *pkt_offsets;
8826 struct ice_aqc_sw_rules_elem *s_rule = NULL;
8827 struct LIST_HEAD_TYPE *rule_head;
8828 struct ice_switch_info *sw;
8829 enum ice_status status;
8830 const u8 *pkt = NULL;
8836 /* Initialize profile to result index bitmap */
8837 if (!hw->switch_info->prof_res_bm_init) {
8838 hw->switch_info->prof_res_bm_init = 1;
8839 ice_init_prof_result_bm(hw);
8842 prof_rule = ice_is_prof_rule(rinfo->tun_type);
8843 if (!prof_rule && !lkups_cnt)
8844 return ICE_ERR_PARAM;
8846 /* get # of words we need to match */
8848 for (i = 0; i < lkups_cnt; i++) {
8851 ptr = (u16 *)&lkups[i].m_u;
8852 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
8858 if (word_cnt > ICE_MAX_CHAIN_WORDS)
8859 return ICE_ERR_PARAM;
8861 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
8862 return ICE_ERR_PARAM;
8865 /* make sure that we can locate a dummy packet */
8866 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
8869 status = ICE_ERR_PARAM;
8870 goto err_ice_add_adv_rule;
8873 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
8874 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
8875 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
8876 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
8879 vsi_handle = rinfo->sw_act.vsi_handle;
8880 if (!ice_is_vsi_valid(hw, vsi_handle))
8881 return ICE_ERR_PARAM;
8883 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
8884 rinfo->sw_act.fwd_id.hw_vsi_id =
8885 ice_get_hw_vsi_num(hw, vsi_handle);
8886 if (rinfo->sw_act.flag & ICE_FLTR_TX)
8887 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
8889 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
8892 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
8894 /* we have to add VSI to VSI_LIST and increment vsi_count.
8895 * Also Update VSI list so that we can change forwarding rule
8896 * if the rule already exists, we will check if it exists with
8897 * same vsi_id, if not then add it to the VSI list if it already
8898 * exists if not then create a VSI list and add the existing VSI
8899 * ID and the new VSI ID to the list
8900 * We will add that VSI to the list
8902 status = ice_adv_add_update_vsi_list(hw, m_entry,
8903 &m_entry->rule_info,
8906 added_entry->rid = rid;
8907 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
8908 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
8912 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
8913 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
8915 return ICE_ERR_NO_MEMORY;
8916 if (!rinfo->flags_info.act_valid)
8917 act |= ICE_SINGLE_ACT_LAN_ENABLE;
8919 act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
8920 ICE_SINGLE_ACT_LB_ENABLE);
8922 switch (rinfo->sw_act.fltr_act) {
8923 case ICE_FWD_TO_VSI:
8924 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
8925 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
8926 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
8929 act |= ICE_SINGLE_ACT_TO_Q;
8930 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
8931 ICE_SINGLE_ACT_Q_INDEX_M;
8933 case ICE_FWD_TO_QGRP:
8934 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
8935 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
8936 act |= ICE_SINGLE_ACT_TO_Q;
8937 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
8938 ICE_SINGLE_ACT_Q_INDEX_M;
8939 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
8940 ICE_SINGLE_ACT_Q_REGION_M;
8942 case ICE_DROP_PACKET:
8943 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
8944 ICE_SINGLE_ACT_VALID_BIT;
8947 status = ICE_ERR_CFG;
8948 goto err_ice_add_adv_rule;
8951 /* set the rule LOOKUP type based on caller specified 'RX'
8952 * instead of hardcoding it to be either LOOKUP_TX/RX
8954 * for 'RX' set the source to be the port number
8955 * for 'TX' set the source to be the source HW VSI number (determined
8959 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
8960 s_rule->pdata.lkup_tx_rx.src =
8961 CPU_TO_LE16(hw->port_info->lport);
8963 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
8964 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
8967 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
8968 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
8970 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
8971 pkt_len, pkt_offsets);
8973 goto err_ice_add_adv_rule;
8975 if (rinfo->tun_type != ICE_NON_TUN &&
8976 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
8977 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
8978 s_rule->pdata.lkup_tx_rx.hdr,
8981 goto err_ice_add_adv_rule;
8984 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
8985 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
8988 goto err_ice_add_adv_rule;
8989 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
8990 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
8992 status = ICE_ERR_NO_MEMORY;
8993 goto err_ice_add_adv_rule;
8996 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
8997 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
8998 ICE_NONDMA_TO_NONDMA);
8999 if (!adv_fltr->lkups && !prof_rule) {
9000 status = ICE_ERR_NO_MEMORY;
9001 goto err_ice_add_adv_rule;
9004 adv_fltr->lkups_cnt = lkups_cnt;
9005 adv_fltr->rule_info = *rinfo;
9006 adv_fltr->rule_info.fltr_rule_id =
9007 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
9008 sw = hw->switch_info;
9009 sw->recp_list[rid].adv_rule = true;
9010 rule_head = &sw->recp_list[rid].filt_rules;
9012 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
9013 adv_fltr->vsi_count = 1;
9015 /* Add rule entry to book keeping list */
9016 LIST_ADD(&adv_fltr->list_entry, rule_head);
9018 added_entry->rid = rid;
9019 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
9020 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
9022 err_ice_add_adv_rule:
9023 if (status && adv_fltr) {
9024 ice_free(hw, adv_fltr->lkups);
9025 ice_free(hw, adv_fltr);
9028 ice_free(hw, s_rule);
9034 * ice_adv_rem_update_vsi_list
9035 * @hw: pointer to the hardware structure
9036 * @vsi_handle: VSI handle of the VSI to remove
9037 * @fm_list: filter management entry for which the VSI list management needs to
9040 static enum ice_status
9041 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
9042 struct ice_adv_fltr_mgmt_list_entry *fm_list)
9044 struct ice_vsi_list_map_info *vsi_list_info;
9045 enum ice_sw_lkup_type lkup_type;
9046 enum ice_status status;
9049 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
9050 fm_list->vsi_count == 0)
9051 return ICE_ERR_PARAM;
9053 /* A rule with the VSI being removed does not exist */
9054 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
9055 return ICE_ERR_DOES_NOT_EXIST;
9057 lkup_type = ICE_SW_LKUP_LAST;
9058 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
9059 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
9060 ice_aqc_opc_update_sw_rules,
9065 fm_list->vsi_count--;
9066 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
9067 vsi_list_info = fm_list->vsi_list_info;
9068 if (fm_list->vsi_count == 1) {
9069 struct ice_fltr_info tmp_fltr;
9072 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
9074 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
9075 return ICE_ERR_OUT_OF_RANGE;
9077 /* Make sure VSI list is empty before removing it below */
9078 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
9080 ice_aqc_opc_update_sw_rules,
9085 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
9086 tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
9087 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
9088 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
9089 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
9090 tmp_fltr.fwd_id.hw_vsi_id =
9091 ice_get_hw_vsi_num(hw, rem_vsi_handle);
9092 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
9093 ice_get_hw_vsi_num(hw, rem_vsi_handle);
9094 fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
9096 /* Update the previous switch rule of "MAC forward to VSI" to
9097 * "MAC fwd to VSI list"
9099 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
9101 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
9102 tmp_fltr.fwd_id.hw_vsi_id, status);
9105 fm_list->vsi_list_info->ref_cnt--;
9107 /* Remove the VSI list since it is no longer used */
9108 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
9110 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
9111 vsi_list_id, status);
9115 LIST_DEL(&vsi_list_info->list_entry);
9116 ice_free(hw, vsi_list_info);
9117 fm_list->vsi_list_info = NULL;
9124 * ice_rem_adv_rule - removes existing advanced switch rule
9125 * @hw: pointer to the hardware structure
9126 * @lkups: information on the words that needs to be looked up. All words
9127 * together makes one recipe
9128 * @lkups_cnt: num of entries in the lkups array
9129 * @rinfo: Its the pointer to the rule information for the rule
9131 * This function can be used to remove 1 rule at a time. The lkups is
9132 * used to describe all the words that forms the "lookup" portion of the
9133 * rule. These words can span multiple protocols. Callers to this function
9134 * need to pass in a list of protocol headers with lookup information along
9135 * and mask that determines which words are valid from the given protocol
9136 * header. rinfo describes other information related to this rule such as
9137 * forwarding IDs, priority of this rule, etc.
9140 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
9141 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
9143 struct ice_adv_fltr_mgmt_list_entry *list_elem;
9144 struct ice_prot_lkup_ext lkup_exts;
9145 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
9146 enum ice_status status = ICE_SUCCESS;
9147 bool remove_rule = false;
9148 u16 i, rid, vsi_handle;
9150 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
9151 for (i = 0; i < lkups_cnt; i++) {
9154 if (lkups[i].type >= ICE_PROTOCOL_LAST)
9157 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
9162 /* Create any special protocol/offset pairs, such as looking at tunnel
9163 * bits by extracting metadata
9165 status = ice_add_special_words(rinfo, &lkup_exts);
9169 rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type, rinfo->priority);
9170 /* If did not find a recipe that match the existing criteria */
9171 if (rid == ICE_MAX_NUM_RECIPES)
9172 return ICE_ERR_PARAM;
9174 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
9175 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
9176 /* the rule is already removed */
9179 ice_acquire_lock(rule_lock);
9180 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
9182 } else if (list_elem->vsi_count > 1) {
9183 remove_rule = false;
9184 vsi_handle = rinfo->sw_act.vsi_handle;
9185 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
9187 vsi_handle = rinfo->sw_act.vsi_handle;
9188 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
9190 ice_release_lock(rule_lock);
9193 if (list_elem->vsi_count == 0)
9196 ice_release_lock(rule_lock);
9198 struct ice_aqc_sw_rules_elem *s_rule;
9201 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
9202 s_rule = (struct ice_aqc_sw_rules_elem *)
9203 ice_malloc(hw, rule_buf_sz);
9205 return ICE_ERR_NO_MEMORY;
9206 s_rule->pdata.lkup_tx_rx.act = 0;
9207 s_rule->pdata.lkup_tx_rx.index =
9208 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
9209 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
9210 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
9212 ice_aqc_opc_remove_sw_rules, NULL);
9213 if (status == ICE_SUCCESS || status == ICE_ERR_DOES_NOT_EXIST) {
9214 struct ice_switch_info *sw = hw->switch_info;
9216 ice_acquire_lock(rule_lock);
9217 LIST_DEL(&list_elem->list_entry);
9218 ice_free(hw, list_elem->lkups);
9219 ice_free(hw, list_elem);
9220 ice_release_lock(rule_lock);
9221 if (LIST_EMPTY(&sw->recp_list[rid].filt_rules))
9222 sw->recp_list[rid].adv_rule = false;
9224 ice_free(hw, s_rule);
9230 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
9231 * @hw: pointer to the hardware structure
9232 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
9234 * This function is used to remove 1 rule at a time. The removal is based on
9235 * the remove_entry parameter. This function will remove rule for a given
9236 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
9239 ice_rem_adv_rule_by_id(struct ice_hw *hw,
9240 struct ice_rule_query_data *remove_entry)
9242 struct ice_adv_fltr_mgmt_list_entry *list_itr;
9243 struct LIST_HEAD_TYPE *list_head;
9244 struct ice_adv_rule_info rinfo;
9245 struct ice_switch_info *sw;
9247 sw = hw->switch_info;
9248 if (!sw->recp_list[remove_entry->rid].recp_created)
9249 return ICE_ERR_PARAM;
9250 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
9251 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
9253 if (list_itr->rule_info.fltr_rule_id ==
9254 remove_entry->rule_id) {
9255 rinfo = list_itr->rule_info;
9256 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
9257 return ice_rem_adv_rule(hw, list_itr->lkups,
9258 list_itr->lkups_cnt, &rinfo);
9261 /* either list is empty or unable to find rule */
9262 return ICE_ERR_DOES_NOT_EXIST;
9266 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
9268 * @hw: pointer to the hardware structure
9269 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
9271 * This function is used to remove all the rules for a given VSI and as soon
9272 * as removing a rule fails, it will return immediately with the error code,
9273 * else it will return ICE_SUCCESS
9275 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
9277 struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
9278 struct ice_vsi_list_map_info *map_info;
9279 struct LIST_HEAD_TYPE *list_head;
9280 struct ice_adv_rule_info rinfo;
9281 struct ice_switch_info *sw;
9282 enum ice_status status;
9285 sw = hw->switch_info;
9286 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
9287 if (!sw->recp_list[rid].recp_created)
9289 if (!sw->recp_list[rid].adv_rule)
9292 list_head = &sw->recp_list[rid].filt_rules;
9293 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp_entry, list_head,
9294 ice_adv_fltr_mgmt_list_entry,
9296 rinfo = list_itr->rule_info;
9298 if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
9299 map_info = list_itr->vsi_list_info;
9303 if (!ice_is_bit_set(map_info->vsi_map,
9306 } else if (rinfo.sw_act.vsi_handle != vsi_handle) {
9310 rinfo.sw_act.vsi_handle = vsi_handle;
9311 status = ice_rem_adv_rule(hw, list_itr->lkups,
9312 list_itr->lkups_cnt, &rinfo);
9322 * ice_replay_fltr - Replay all the filters stored by a specific list head
9323 * @hw: pointer to the hardware structure
9324 * @list_head: list for which filters needs to be replayed
9325 * @recp_id: Recipe ID for which rules need to be replayed
9327 static enum ice_status
9328 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
9330 struct ice_fltr_mgmt_list_entry *itr;
9331 enum ice_status status = ICE_SUCCESS;
9332 struct ice_sw_recipe *recp_list;
9333 u8 lport = hw->port_info->lport;
9334 struct LIST_HEAD_TYPE l_head;
9336 if (LIST_EMPTY(list_head))
9339 recp_list = &hw->switch_info->recp_list[recp_id];
9340 /* Move entries from the given list_head to a temporary l_head so that
9341 * they can be replayed. Otherwise when trying to re-add the same
9342 * filter, the function will return already exists
9344 LIST_REPLACE_INIT(list_head, &l_head);
9346 /* Mark the given list_head empty by reinitializing it so filters
9347 * could be added again by *handler
9349 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
9351 struct ice_fltr_list_entry f_entry;
9354 f_entry.fltr_info = itr->fltr_info;
9355 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
9356 status = ice_add_rule_internal(hw, recp_list, lport,
9358 if (status != ICE_SUCCESS)
9363 /* Add a filter per VSI separately */
9364 ice_for_each_set_bit(vsi_handle, itr->vsi_list_info->vsi_map,
9366 if (!ice_is_vsi_valid(hw, vsi_handle))
9369 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
9370 f_entry.fltr_info.vsi_handle = vsi_handle;
9371 f_entry.fltr_info.fwd_id.hw_vsi_id =
9372 ice_get_hw_vsi_num(hw, vsi_handle);
9373 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
9374 if (recp_id == ICE_SW_LKUP_VLAN)
9375 status = ice_add_vlan_internal(hw, recp_list,
9378 status = ice_add_rule_internal(hw, recp_list,
9381 if (status != ICE_SUCCESS)
9386 /* Clear the filter management list */
9387 ice_rem_sw_rule_info(hw, &l_head);
9392 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
9393 * @hw: pointer to the hardware structure
9395 * NOTE: This function does not clean up partially added filters on error.
9396 * It is up to caller of the function to issue a reset or fail early.
9398 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
9400 struct ice_switch_info *sw = hw->switch_info;
9401 enum ice_status status = ICE_SUCCESS;
9404 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9405 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
9407 status = ice_replay_fltr(hw, i, head);
9408 if (status != ICE_SUCCESS)
9415 * ice_replay_vsi_fltr - Replay filters for requested VSI
9416 * @hw: pointer to the hardware structure
9417 * @pi: pointer to port information structure
9418 * @sw: pointer to switch info struct for which function replays filters
9419 * @vsi_handle: driver VSI handle
9420 * @recp_id: Recipe ID for which rules need to be replayed
9421 * @list_head: list for which filters need to be replayed
9423 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
9424 * It is required to pass valid VSI handle.
9426 static enum ice_status
9427 ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
9428 struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
9429 struct LIST_HEAD_TYPE *list_head)
9431 struct ice_fltr_mgmt_list_entry *itr;
9432 enum ice_status status = ICE_SUCCESS;
9433 struct ice_sw_recipe *recp_list;
9436 if (LIST_EMPTY(list_head))
9438 recp_list = &sw->recp_list[recp_id];
9439 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
9441 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
9443 struct ice_fltr_list_entry f_entry;
9445 f_entry.fltr_info = itr->fltr_info;
9446 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
9447 itr->fltr_info.vsi_handle == vsi_handle) {
9448 /* update the src in case it is VSI num */
9449 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
9450 f_entry.fltr_info.src = hw_vsi_id;
9451 status = ice_add_rule_internal(hw, recp_list,
9454 if (status != ICE_SUCCESS)
9458 if (!itr->vsi_list_info ||
9459 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
9461 /* Clearing it so that the logic can add it back */
9462 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
9463 f_entry.fltr_info.vsi_handle = vsi_handle;
9464 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
9465 /* update the src in case it is VSI num */
9466 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
9467 f_entry.fltr_info.src = hw_vsi_id;
9468 if (recp_id == ICE_SW_LKUP_VLAN)
9469 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
9471 status = ice_add_rule_internal(hw, recp_list,
9474 if (status != ICE_SUCCESS)
9482 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
9483 * @hw: pointer to the hardware structure
9484 * @vsi_handle: driver VSI handle
9485 * @list_head: list for which filters need to be replayed
9487 * Replay the advanced rule for the given VSI.
9489 static enum ice_status
9490 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
9491 struct LIST_HEAD_TYPE *list_head)
9493 struct ice_rule_query_data added_entry = { 0 };
9494 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
9495 enum ice_status status = ICE_SUCCESS;
9497 if (LIST_EMPTY(list_head))
9499 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
9501 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
9502 u16 lk_cnt = adv_fltr->lkups_cnt;
9504 if (vsi_handle != rinfo->sw_act.vsi_handle)
9506 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
9515 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
9516 * @hw: pointer to the hardware structure
9517 * @pi: pointer to port information structure
9518 * @vsi_handle: driver VSI handle
9520 * Replays filters for requested VSI via vsi_handle.
9523 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
9526 struct ice_switch_info *sw = hw->switch_info;
9527 enum ice_status status;
9530 /* Update the recipes that were created */
9531 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9532 struct LIST_HEAD_TYPE *head;
9534 head = &sw->recp_list[i].filt_replay_rules;
9535 if (!sw->recp_list[i].adv_rule)
9536 status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
9539 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
9540 if (status != ICE_SUCCESS)
9548 * ice_rm_sw_replay_rule_info - helper function to delete filter replay rules
9549 * @hw: pointer to the HW struct
9550 * @sw: pointer to switch info struct for which function removes filters
9552 * Deletes the filter replay rules for given switch
9554 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
9561 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9562 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
9563 struct LIST_HEAD_TYPE *l_head;
9565 l_head = &sw->recp_list[i].filt_replay_rules;
9566 if (!sw->recp_list[i].adv_rule)
9567 ice_rem_sw_rule_info(hw, l_head);
9569 ice_rem_adv_rule_info(hw, l_head);
9575 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
9576 * @hw: pointer to the HW struct
9578 * Deletes the filter replay rules.
9580 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
9582 ice_rm_sw_replay_rule_info(hw, hw->switch_info);