1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2021 Intel Corporation
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
12 #define ICE_MAX_VLAN_ID 0xFFF
13 #define ICE_IPV4_NVGRE_PROTO_ID 0x002F
14 #define ICE_PPP_IPV6_PROTO_ID 0x0057
15 #define ICE_IPV6_ETHER_ID 0x86DD
16 #define ICE_TCP_PROTO_ID 0x06
17 #define ICE_ETH_P_8021Q 0x8100
19 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
20 * struct to configure any switch filter rules.
21 * {DA (6 bytes), SA(6 bytes),
22 * Ether type (2 bytes for header without VLAN tag) OR
23 * VLAN tag (4 bytes for header with VLAN tag) }
25 * Word on Hardcoded values
26 * byte 0 = 0x2: to identify it as locally administered DA MAC
27 * byte 6 = 0x2: to identify it as locally administered SA MAC
28 * byte 12 = 0x81 & byte 13 = 0x00:
29 * In case of VLAN filter first two bytes defines ether type (0x8100)
30 * and remaining two bytes are placeholder for programming a given VLAN ID
31 * In case of Ether type filter it is treated as header without VLAN tag
32 * and byte 12 and 13 is used to program a given Ether type instead
34 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
38 struct ice_dummy_pkt_offsets {
39 enum ice_protocol_type type;
40 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
43 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
46 { ICE_IPV4_OFOS, 14 },
51 { ICE_PROTOCOL_LAST, 0 },
54 static const u8 dummy_gre_tcp_packet[] = {
55 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
56 0x00, 0x00, 0x00, 0x00,
57 0x00, 0x00, 0x00, 0x00,
59 0x08, 0x00, /* ICE_ETYPE_OL 12 */
61 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
62 0x00, 0x00, 0x00, 0x00,
63 0x00, 0x2F, 0x00, 0x00,
64 0x00, 0x00, 0x00, 0x00,
65 0x00, 0x00, 0x00, 0x00,
67 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
68 0x00, 0x00, 0x00, 0x00,
70 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
71 0x00, 0x00, 0x00, 0x00,
72 0x00, 0x00, 0x00, 0x00,
75 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
76 0x00, 0x00, 0x00, 0x00,
77 0x00, 0x06, 0x00, 0x00,
78 0x00, 0x00, 0x00, 0x00,
79 0x00, 0x00, 0x00, 0x00,
81 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
82 0x00, 0x00, 0x00, 0x00,
83 0x00, 0x00, 0x00, 0x00,
84 0x50, 0x02, 0x20, 0x00,
85 0x00, 0x00, 0x00, 0x00
88 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
91 { ICE_IPV4_OFOS, 14 },
96 { ICE_PROTOCOL_LAST, 0 },
99 static const u8 dummy_gre_udp_packet[] = {
100 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
101 0x00, 0x00, 0x00, 0x00,
102 0x00, 0x00, 0x00, 0x00,
104 0x08, 0x00, /* ICE_ETYPE_OL 12 */
106 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
107 0x00, 0x00, 0x00, 0x00,
108 0x00, 0x2F, 0x00, 0x00,
109 0x00, 0x00, 0x00, 0x00,
110 0x00, 0x00, 0x00, 0x00,
112 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
113 0x00, 0x00, 0x00, 0x00,
115 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
116 0x00, 0x00, 0x00, 0x00,
117 0x00, 0x00, 0x00, 0x00,
120 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
121 0x00, 0x00, 0x00, 0x00,
122 0x00, 0x11, 0x00, 0x00,
123 0x00, 0x00, 0x00, 0x00,
124 0x00, 0x00, 0x00, 0x00,
126 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
127 0x00, 0x08, 0x00, 0x00,
130 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
132 { ICE_ETYPE_OL, 12 },
133 { ICE_IPV4_OFOS, 14 },
137 { ICE_VXLAN_GPE, 42 },
141 { ICE_PROTOCOL_LAST, 0 },
144 static const u8 dummy_udp_tun_tcp_packet[] = {
145 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
146 0x00, 0x00, 0x00, 0x00,
147 0x00, 0x00, 0x00, 0x00,
149 0x08, 0x00, /* ICE_ETYPE_OL 12 */
151 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
152 0x00, 0x01, 0x00, 0x00,
153 0x40, 0x11, 0x00, 0x00,
154 0x00, 0x00, 0x00, 0x00,
155 0x00, 0x00, 0x00, 0x00,
157 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
158 0x00, 0x46, 0x00, 0x00,
160 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
161 0x00, 0x00, 0x00, 0x00,
163 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
164 0x00, 0x00, 0x00, 0x00,
165 0x00, 0x00, 0x00, 0x00,
168 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
169 0x00, 0x01, 0x00, 0x00,
170 0x40, 0x06, 0x00, 0x00,
171 0x00, 0x00, 0x00, 0x00,
172 0x00, 0x00, 0x00, 0x00,
174 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
175 0x00, 0x00, 0x00, 0x00,
176 0x00, 0x00, 0x00, 0x00,
177 0x50, 0x02, 0x20, 0x00,
178 0x00, 0x00, 0x00, 0x00
181 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
183 { ICE_ETYPE_OL, 12 },
184 { ICE_IPV4_OFOS, 14 },
188 { ICE_VXLAN_GPE, 42 },
191 { ICE_UDP_ILOS, 84 },
192 { ICE_PROTOCOL_LAST, 0 },
195 static const u8 dummy_udp_tun_udp_packet[] = {
196 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
197 0x00, 0x00, 0x00, 0x00,
198 0x00, 0x00, 0x00, 0x00,
200 0x08, 0x00, /* ICE_ETYPE_OL 12 */
202 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
203 0x00, 0x01, 0x00, 0x00,
204 0x00, 0x11, 0x00, 0x00,
205 0x00, 0x00, 0x00, 0x00,
206 0x00, 0x00, 0x00, 0x00,
208 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
209 0x00, 0x3a, 0x00, 0x00,
211 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
212 0x00, 0x00, 0x00, 0x00,
214 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
215 0x00, 0x00, 0x00, 0x00,
216 0x00, 0x00, 0x00, 0x00,
219 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
220 0x00, 0x01, 0x00, 0x00,
221 0x00, 0x11, 0x00, 0x00,
222 0x00, 0x00, 0x00, 0x00,
223 0x00, 0x00, 0x00, 0x00,
225 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
226 0x00, 0x08, 0x00, 0x00,
229 /* offset info for MAC + IPv4 + UDP dummy packet */
230 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
232 { ICE_ETYPE_OL, 12 },
233 { ICE_IPV4_OFOS, 14 },
234 { ICE_UDP_ILOS, 34 },
235 { ICE_PROTOCOL_LAST, 0 },
238 /* Dummy packet for MAC + IPv4 + UDP */
239 static const u8 dummy_udp_packet[] = {
240 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
241 0x00, 0x00, 0x00, 0x00,
242 0x00, 0x00, 0x00, 0x00,
244 0x08, 0x00, /* ICE_ETYPE_OL 12 */
246 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
247 0x00, 0x01, 0x00, 0x00,
248 0x00, 0x11, 0x00, 0x00,
249 0x00, 0x00, 0x00, 0x00,
250 0x00, 0x00, 0x00, 0x00,
252 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
253 0x00, 0x08, 0x00, 0x00,
255 0x00, 0x00, /* 2 bytes for 4 byte alignment */
258 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
259 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
261 { ICE_ETYPE_OL, 12 },
262 { ICE_VLAN_OFOS, 14 },
263 { ICE_IPV4_OFOS, 18 },
264 { ICE_UDP_ILOS, 38 },
265 { ICE_PROTOCOL_LAST, 0 },
268 /* C-tag (802.1Q), IPv4:UDP dummy packet */
269 static const u8 dummy_vlan_udp_packet[] = {
270 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
271 0x00, 0x00, 0x00, 0x00,
272 0x00, 0x00, 0x00, 0x00,
274 0x81, 0x00, /* ICE_ETYPE_OL 12 */
276 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
278 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
279 0x00, 0x01, 0x00, 0x00,
280 0x00, 0x11, 0x00, 0x00,
281 0x00, 0x00, 0x00, 0x00,
282 0x00, 0x00, 0x00, 0x00,
284 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
285 0x00, 0x08, 0x00, 0x00,
287 0x00, 0x00, /* 2 bytes for 4 byte alignment */
290 /* offset info for MAC + IPv4 + TCP dummy packet */
291 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
293 { ICE_ETYPE_OL, 12 },
294 { ICE_IPV4_OFOS, 14 },
296 { ICE_PROTOCOL_LAST, 0 },
299 /* Dummy packet for MAC + IPv4 + TCP */
300 static const u8 dummy_tcp_packet[] = {
301 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
302 0x00, 0x00, 0x00, 0x00,
303 0x00, 0x00, 0x00, 0x00,
305 0x08, 0x00, /* ICE_ETYPE_OL 12 */
307 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
308 0x00, 0x01, 0x00, 0x00,
309 0x00, 0x06, 0x00, 0x00,
310 0x00, 0x00, 0x00, 0x00,
311 0x00, 0x00, 0x00, 0x00,
313 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
314 0x00, 0x00, 0x00, 0x00,
315 0x00, 0x00, 0x00, 0x00,
316 0x50, 0x00, 0x00, 0x00,
317 0x00, 0x00, 0x00, 0x00,
319 0x00, 0x00, /* 2 bytes for 4 byte alignment */
322 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
323 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
325 { ICE_ETYPE_OL, 12 },
326 { ICE_VLAN_OFOS, 14 },
327 { ICE_IPV4_OFOS, 18 },
329 { ICE_PROTOCOL_LAST, 0 },
332 /* C-tag (802.1Q), IPv4:TCP dummy packet */
333 static const u8 dummy_vlan_tcp_packet[] = {
334 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
335 0x00, 0x00, 0x00, 0x00,
336 0x00, 0x00, 0x00, 0x00,
338 0x81, 0x00, /* ICE_ETYPE_OL 12 */
340 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
342 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
343 0x00, 0x01, 0x00, 0x00,
344 0x00, 0x06, 0x00, 0x00,
345 0x00, 0x00, 0x00, 0x00,
346 0x00, 0x00, 0x00, 0x00,
348 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
349 0x00, 0x00, 0x00, 0x00,
350 0x00, 0x00, 0x00, 0x00,
351 0x50, 0x00, 0x00, 0x00,
352 0x00, 0x00, 0x00, 0x00,
354 0x00, 0x00, /* 2 bytes for 4 byte alignment */
357 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
359 { ICE_ETYPE_OL, 12 },
360 { ICE_IPV6_OFOS, 14 },
362 { ICE_PROTOCOL_LAST, 0 },
365 static const u8 dummy_tcp_ipv6_packet[] = {
366 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
367 0x00, 0x00, 0x00, 0x00,
368 0x00, 0x00, 0x00, 0x00,
370 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
372 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
373 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00,
379 0x00, 0x00, 0x00, 0x00,
380 0x00, 0x00, 0x00, 0x00,
381 0x00, 0x00, 0x00, 0x00,
383 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
384 0x00, 0x00, 0x00, 0x00,
385 0x00, 0x00, 0x00, 0x00,
386 0x50, 0x00, 0x00, 0x00,
387 0x00, 0x00, 0x00, 0x00,
389 0x00, 0x00, /* 2 bytes for 4 byte alignment */
392 /* C-tag (802.1Q): IPv6 + TCP */
393 static const struct ice_dummy_pkt_offsets
394 dummy_vlan_tcp_ipv6_packet_offsets[] = {
396 { ICE_ETYPE_OL, 12 },
397 { ICE_VLAN_OFOS, 14 },
398 { ICE_IPV6_OFOS, 18 },
400 { ICE_PROTOCOL_LAST, 0 },
403 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
404 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
405 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
406 0x00, 0x00, 0x00, 0x00,
407 0x00, 0x00, 0x00, 0x00,
409 0x81, 0x00, /* ICE_ETYPE_OL 12 */
411 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
413 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
414 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
415 0x00, 0x00, 0x00, 0x00,
416 0x00, 0x00, 0x00, 0x00,
417 0x00, 0x00, 0x00, 0x00,
418 0x00, 0x00, 0x00, 0x00,
419 0x00, 0x00, 0x00, 0x00,
420 0x00, 0x00, 0x00, 0x00,
421 0x00, 0x00, 0x00, 0x00,
422 0x00, 0x00, 0x00, 0x00,
424 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
425 0x00, 0x00, 0x00, 0x00,
426 0x00, 0x00, 0x00, 0x00,
427 0x50, 0x00, 0x00, 0x00,
428 0x00, 0x00, 0x00, 0x00,
430 0x00, 0x00, /* 2 bytes for 4 byte alignment */
434 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
436 { ICE_ETYPE_OL, 12 },
437 { ICE_IPV6_OFOS, 14 },
438 { ICE_UDP_ILOS, 54 },
439 { ICE_PROTOCOL_LAST, 0 },
442 /* IPv6 + UDP dummy packet */
443 static const u8 dummy_udp_ipv6_packet[] = {
444 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
445 0x00, 0x00, 0x00, 0x00,
446 0x00, 0x00, 0x00, 0x00,
448 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
450 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
451 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
452 0x00, 0x00, 0x00, 0x00,
453 0x00, 0x00, 0x00, 0x00,
454 0x00, 0x00, 0x00, 0x00,
455 0x00, 0x00, 0x00, 0x00,
456 0x00, 0x00, 0x00, 0x00,
457 0x00, 0x00, 0x00, 0x00,
458 0x00, 0x00, 0x00, 0x00,
459 0x00, 0x00, 0x00, 0x00,
461 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
462 0x00, 0x10, 0x00, 0x00,
464 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
465 0x00, 0x00, 0x00, 0x00,
467 0x00, 0x00, /* 2 bytes for 4 byte alignment */
470 /* C-tag (802.1Q): IPv6 + UDP */
471 static const struct ice_dummy_pkt_offsets
472 dummy_vlan_udp_ipv6_packet_offsets[] = {
474 { ICE_ETYPE_OL, 12 },
475 { ICE_VLAN_OFOS, 14 },
476 { ICE_IPV6_OFOS, 18 },
477 { ICE_UDP_ILOS, 58 },
478 { ICE_PROTOCOL_LAST, 0 },
481 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
482 static const u8 dummy_vlan_udp_ipv6_packet[] = {
483 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
484 0x00, 0x00, 0x00, 0x00,
485 0x00, 0x00, 0x00, 0x00,
487 0x81, 0x00, /* ICE_ETYPE_OL 12 */
489 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
491 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
492 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
493 0x00, 0x00, 0x00, 0x00,
494 0x00, 0x00, 0x00, 0x00,
495 0x00, 0x00, 0x00, 0x00,
496 0x00, 0x00, 0x00, 0x00,
497 0x00, 0x00, 0x00, 0x00,
498 0x00, 0x00, 0x00, 0x00,
499 0x00, 0x00, 0x00, 0x00,
500 0x00, 0x00, 0x00, 0x00,
502 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
503 0x00, 0x08, 0x00, 0x00,
505 0x00, 0x00, /* 2 bytes for 4 byte alignment */
508 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
509 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_tcp_packet_offsets[] = {
511 { ICE_IPV4_OFOS, 14 },
516 { ICE_PROTOCOL_LAST, 0 },
519 static const u8 dummy_ipv4_gtpu_ipv4_tcp_packet[] = {
520 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
521 0x00, 0x00, 0x00, 0x00,
522 0x00, 0x00, 0x00, 0x00,
525 0x45, 0x00, 0x00, 0x58, /* IP 14 */
526 0x00, 0x00, 0x00, 0x00,
527 0x00, 0x11, 0x00, 0x00,
528 0x00, 0x00, 0x00, 0x00,
529 0x00, 0x00, 0x00, 0x00,
531 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
532 0x00, 0x44, 0x00, 0x00,
534 0x34, 0xff, 0x00, 0x34, /* GTP-U Header 42 */
535 0x00, 0x00, 0x00, 0x00,
536 0x00, 0x00, 0x00, 0x85,
538 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
539 0x00, 0x00, 0x00, 0x00,
541 0x45, 0x00, 0x00, 0x28, /* IP 62 */
542 0x00, 0x00, 0x00, 0x00,
543 0x00, 0x06, 0x00, 0x00,
544 0x00, 0x00, 0x00, 0x00,
545 0x00, 0x00, 0x00, 0x00,
547 0x00, 0x00, 0x00, 0x00, /* TCP 82 */
548 0x00, 0x00, 0x00, 0x00,
549 0x00, 0x00, 0x00, 0x00,
550 0x50, 0x00, 0x00, 0x00,
551 0x00, 0x00, 0x00, 0x00,
553 0x00, 0x00, /* 2 bytes for 4 byte alignment */
556 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
557 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_udp_packet_offsets[] = {
559 { ICE_IPV4_OFOS, 14 },
563 { ICE_UDP_ILOS, 82 },
564 { ICE_PROTOCOL_LAST, 0 },
567 static const u8 dummy_ipv4_gtpu_ipv4_udp_packet[] = {
568 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
569 0x00, 0x00, 0x00, 0x00,
570 0x00, 0x00, 0x00, 0x00,
573 0x45, 0x00, 0x00, 0x4c, /* IP 14 */
574 0x00, 0x00, 0x00, 0x00,
575 0x00, 0x11, 0x00, 0x00,
576 0x00, 0x00, 0x00, 0x00,
577 0x00, 0x00, 0x00, 0x00,
579 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
580 0x00, 0x38, 0x00, 0x00,
582 0x34, 0xff, 0x00, 0x28, /* GTP-U Header 42 */
583 0x00, 0x00, 0x00, 0x00,
584 0x00, 0x00, 0x00, 0x85,
586 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
587 0x00, 0x00, 0x00, 0x00,
589 0x45, 0x00, 0x00, 0x1c, /* IP 62 */
590 0x00, 0x00, 0x00, 0x00,
591 0x00, 0x11, 0x00, 0x00,
592 0x00, 0x00, 0x00, 0x00,
593 0x00, 0x00, 0x00, 0x00,
595 0x00, 0x00, 0x00, 0x00, /* UDP 82 */
596 0x00, 0x08, 0x00, 0x00,
598 0x00, 0x00, /* 2 bytes for 4 byte alignment */
601 /* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
602 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_tcp_packet_offsets[] = {
604 { ICE_IPV4_OFOS, 14 },
609 { ICE_PROTOCOL_LAST, 0 },
612 static const u8 dummy_ipv4_gtpu_ipv6_tcp_packet[] = {
613 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
614 0x00, 0x00, 0x00, 0x00,
615 0x00, 0x00, 0x00, 0x00,
618 0x45, 0x00, 0x00, 0x6c, /* IP 14 */
619 0x00, 0x00, 0x00, 0x00,
620 0x00, 0x11, 0x00, 0x00,
621 0x00, 0x00, 0x00, 0x00,
622 0x00, 0x00, 0x00, 0x00,
624 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
625 0x00, 0x58, 0x00, 0x00,
627 0x34, 0xff, 0x00, 0x48, /* GTP-U Header 42 */
628 0x00, 0x00, 0x00, 0x00,
629 0x00, 0x00, 0x00, 0x85,
631 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
632 0x00, 0x00, 0x00, 0x00,
634 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
635 0x00, 0x14, 0x06, 0x00,
636 0x00, 0x00, 0x00, 0x00,
637 0x00, 0x00, 0x00, 0x00,
638 0x00, 0x00, 0x00, 0x00,
639 0x00, 0x00, 0x00, 0x00,
640 0x00, 0x00, 0x00, 0x00,
641 0x00, 0x00, 0x00, 0x00,
642 0x00, 0x00, 0x00, 0x00,
643 0x00, 0x00, 0x00, 0x00,
645 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
646 0x00, 0x00, 0x00, 0x00,
647 0x00, 0x00, 0x00, 0x00,
648 0x50, 0x00, 0x00, 0x00,
649 0x00, 0x00, 0x00, 0x00,
651 0x00, 0x00, /* 2 bytes for 4 byte alignment */
654 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_udp_packet_offsets[] = {
656 { ICE_IPV4_OFOS, 14 },
660 { ICE_UDP_ILOS, 102 },
661 { ICE_PROTOCOL_LAST, 0 },
664 static const u8 dummy_ipv4_gtpu_ipv6_udp_packet[] = {
665 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
666 0x00, 0x00, 0x00, 0x00,
667 0x00, 0x00, 0x00, 0x00,
670 0x45, 0x00, 0x00, 0x60, /* IP 14 */
671 0x00, 0x00, 0x00, 0x00,
672 0x00, 0x11, 0x00, 0x00,
673 0x00, 0x00, 0x00, 0x00,
674 0x00, 0x00, 0x00, 0x00,
676 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
677 0x00, 0x4c, 0x00, 0x00,
679 0x34, 0xff, 0x00, 0x3c, /* GTP-U Header 42 */
680 0x00, 0x00, 0x00, 0x00,
681 0x00, 0x00, 0x00, 0x85,
683 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
684 0x00, 0x00, 0x00, 0x00,
686 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
687 0x00, 0x08, 0x11, 0x00,
688 0x00, 0x00, 0x00, 0x00,
689 0x00, 0x00, 0x00, 0x00,
690 0x00, 0x00, 0x00, 0x00,
691 0x00, 0x00, 0x00, 0x00,
692 0x00, 0x00, 0x00, 0x00,
693 0x00, 0x00, 0x00, 0x00,
694 0x00, 0x00, 0x00, 0x00,
695 0x00, 0x00, 0x00, 0x00,
697 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
698 0x00, 0x08, 0x00, 0x00,
700 0x00, 0x00, /* 2 bytes for 4 byte alignment */
703 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_tcp_packet_offsets[] = {
705 { ICE_IPV6_OFOS, 14 },
710 { ICE_PROTOCOL_LAST, 0 },
713 static const u8 dummy_ipv6_gtpu_ipv4_tcp_packet[] = {
714 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
715 0x00, 0x00, 0x00, 0x00,
716 0x00, 0x00, 0x00, 0x00,
719 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
720 0x00, 0x44, 0x11, 0x00,
721 0x00, 0x00, 0x00, 0x00,
722 0x00, 0x00, 0x00, 0x00,
723 0x00, 0x00, 0x00, 0x00,
724 0x00, 0x00, 0x00, 0x00,
725 0x00, 0x00, 0x00, 0x00,
726 0x00, 0x00, 0x00, 0x00,
727 0x00, 0x00, 0x00, 0x00,
728 0x00, 0x00, 0x00, 0x00,
730 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
731 0x00, 0x44, 0x00, 0x00,
733 0x34, 0xff, 0x00, 0x34, /* GTP-U Header 62 */
734 0x00, 0x00, 0x00, 0x00,
735 0x00, 0x00, 0x00, 0x85,
737 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
738 0x00, 0x00, 0x00, 0x00,
740 0x45, 0x00, 0x00, 0x28, /* IP 82 */
741 0x00, 0x00, 0x00, 0x00,
742 0x00, 0x06, 0x00, 0x00,
743 0x00, 0x00, 0x00, 0x00,
744 0x00, 0x00, 0x00, 0x00,
746 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
747 0x00, 0x00, 0x00, 0x00,
748 0x00, 0x00, 0x00, 0x00,
749 0x50, 0x00, 0x00, 0x00,
750 0x00, 0x00, 0x00, 0x00,
752 0x00, 0x00, /* 2 bytes for 4 byte alignment */
755 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_udp_packet_offsets[] = {
757 { ICE_IPV6_OFOS, 14 },
761 { ICE_UDP_ILOS, 102 },
762 { ICE_PROTOCOL_LAST, 0 },
765 static const u8 dummy_ipv6_gtpu_ipv4_udp_packet[] = {
766 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
767 0x00, 0x00, 0x00, 0x00,
768 0x00, 0x00, 0x00, 0x00,
771 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
772 0x00, 0x38, 0x11, 0x00,
773 0x00, 0x00, 0x00, 0x00,
774 0x00, 0x00, 0x00, 0x00,
775 0x00, 0x00, 0x00, 0x00,
776 0x00, 0x00, 0x00, 0x00,
777 0x00, 0x00, 0x00, 0x00,
778 0x00, 0x00, 0x00, 0x00,
779 0x00, 0x00, 0x00, 0x00,
780 0x00, 0x00, 0x00, 0x00,
782 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
783 0x00, 0x38, 0x00, 0x00,
785 0x34, 0xff, 0x00, 0x28, /* GTP-U Header 62 */
786 0x00, 0x00, 0x00, 0x00,
787 0x00, 0x00, 0x00, 0x85,
789 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
790 0x00, 0x00, 0x00, 0x00,
792 0x45, 0x00, 0x00, 0x1c, /* IP 82 */
793 0x00, 0x00, 0x00, 0x00,
794 0x00, 0x11, 0x00, 0x00,
795 0x00, 0x00, 0x00, 0x00,
796 0x00, 0x00, 0x00, 0x00,
798 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
799 0x00, 0x08, 0x00, 0x00,
801 0x00, 0x00, /* 2 bytes for 4 byte alignment */
804 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_tcp_packet_offsets[] = {
806 { ICE_IPV6_OFOS, 14 },
811 { ICE_PROTOCOL_LAST, 0 },
814 static const u8 dummy_ipv6_gtpu_ipv6_tcp_packet[] = {
815 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
816 0x00, 0x00, 0x00, 0x00,
817 0x00, 0x00, 0x00, 0x00,
820 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
821 0x00, 0x58, 0x11, 0x00,
822 0x00, 0x00, 0x00, 0x00,
823 0x00, 0x00, 0x00, 0x00,
824 0x00, 0x00, 0x00, 0x00,
825 0x00, 0x00, 0x00, 0x00,
826 0x00, 0x00, 0x00, 0x00,
827 0x00, 0x00, 0x00, 0x00,
828 0x00, 0x00, 0x00, 0x00,
829 0x00, 0x00, 0x00, 0x00,
831 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
832 0x00, 0x58, 0x00, 0x00,
834 0x34, 0xff, 0x00, 0x48, /* GTP-U Header 62 */
835 0x00, 0x00, 0x00, 0x00,
836 0x00, 0x00, 0x00, 0x85,
838 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
839 0x00, 0x00, 0x00, 0x00,
841 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
842 0x00, 0x14, 0x06, 0x00,
843 0x00, 0x00, 0x00, 0x00,
844 0x00, 0x00, 0x00, 0x00,
845 0x00, 0x00, 0x00, 0x00,
846 0x00, 0x00, 0x00, 0x00,
847 0x00, 0x00, 0x00, 0x00,
848 0x00, 0x00, 0x00, 0x00,
849 0x00, 0x00, 0x00, 0x00,
850 0x00, 0x00, 0x00, 0x00,
852 0x00, 0x00, 0x00, 0x00, /* TCP 122 */
853 0x00, 0x00, 0x00, 0x00,
854 0x00, 0x00, 0x00, 0x00,
855 0x50, 0x00, 0x00, 0x00,
856 0x00, 0x00, 0x00, 0x00,
858 0x00, 0x00, /* 2 bytes for 4 byte alignment */
861 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_udp_packet_offsets[] = {
863 { ICE_IPV6_OFOS, 14 },
867 { ICE_UDP_ILOS, 102 },
868 { ICE_PROTOCOL_LAST, 0 },
871 static const u8 dummy_ipv6_gtpu_ipv6_udp_packet[] = {
872 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
873 0x00, 0x00, 0x00, 0x00,
874 0x00, 0x00, 0x00, 0x00,
877 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
878 0x00, 0x4c, 0x11, 0x00,
879 0x00, 0x00, 0x00, 0x00,
880 0x00, 0x00, 0x00, 0x00,
881 0x00, 0x00, 0x00, 0x00,
882 0x00, 0x00, 0x00, 0x00,
883 0x00, 0x00, 0x00, 0x00,
884 0x00, 0x00, 0x00, 0x00,
885 0x00, 0x00, 0x00, 0x00,
886 0x00, 0x00, 0x00, 0x00,
888 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
889 0x00, 0x4c, 0x00, 0x00,
891 0x34, 0xff, 0x00, 0x3c, /* GTP-U Header 62 */
892 0x00, 0x00, 0x00, 0x00,
893 0x00, 0x00, 0x00, 0x85,
895 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
896 0x00, 0x00, 0x00, 0x00,
898 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
899 0x00, 0x08, 0x11, 0x00,
900 0x00, 0x00, 0x00, 0x00,
901 0x00, 0x00, 0x00, 0x00,
902 0x00, 0x00, 0x00, 0x00,
903 0x00, 0x00, 0x00, 0x00,
904 0x00, 0x00, 0x00, 0x00,
905 0x00, 0x00, 0x00, 0x00,
906 0x00, 0x00, 0x00, 0x00,
907 0x00, 0x00, 0x00, 0x00,
909 0x00, 0x00, 0x00, 0x00, /* UDP 122 */
910 0x00, 0x08, 0x00, 0x00,
912 0x00, 0x00, /* 2 bytes for 4 byte alignment */
915 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
917 { ICE_IPV4_OFOS, 14 },
920 { ICE_PROTOCOL_LAST, 0 },
923 static const u8 dummy_udp_gtp_packet[] = {
924 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
925 0x00, 0x00, 0x00, 0x00,
926 0x00, 0x00, 0x00, 0x00,
929 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
930 0x00, 0x00, 0x00, 0x00,
931 0x00, 0x11, 0x00, 0x00,
932 0x00, 0x00, 0x00, 0x00,
933 0x00, 0x00, 0x00, 0x00,
935 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
936 0x00, 0x1c, 0x00, 0x00,
938 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
939 0x00, 0x00, 0x00, 0x00,
940 0x00, 0x00, 0x00, 0x85,
942 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
943 0x00, 0x00, 0x00, 0x00,
947 struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_packet_offsets[] = {
949 { ICE_IPV4_OFOS, 14 },
953 { ICE_PROTOCOL_LAST, 0 },
956 static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
957 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
958 0x00, 0x00, 0x00, 0x00,
959 0x00, 0x00, 0x00, 0x00,
962 0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
963 0x00, 0x00, 0x40, 0x00,
964 0x40, 0x11, 0x00, 0x00,
965 0x00, 0x00, 0x00, 0x00,
966 0x00, 0x00, 0x00, 0x00,
968 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
969 0x00, 0x00, 0x00, 0x00,
971 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
972 0x00, 0x00, 0x00, 0x00,
973 0x00, 0x00, 0x00, 0x85,
975 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
976 0x00, 0x00, 0x00, 0x00,
978 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
979 0x00, 0x00, 0x40, 0x00,
980 0x40, 0x00, 0x00, 0x00,
981 0x00, 0x00, 0x00, 0x00,
982 0x00, 0x00, 0x00, 0x00,
987 struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_packet_offsets[] = {
989 { ICE_IPV4_OFOS, 14 },
993 { ICE_PROTOCOL_LAST, 0 },
996 static const u8 dummy_ipv4_gtpu_ipv6_packet[] = {
997 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
998 0x00, 0x00, 0x00, 0x00,
999 0x00, 0x00, 0x00, 0x00,
1002 0x45, 0x00, 0x00, 0x58, /* ICE_IPV4_OFOS 14 */
1003 0x00, 0x00, 0x40, 0x00,
1004 0x40, 0x11, 0x00, 0x00,
1005 0x00, 0x00, 0x00, 0x00,
1006 0x00, 0x00, 0x00, 0x00,
1008 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
1009 0x00, 0x00, 0x00, 0x00,
1011 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
1012 0x00, 0x00, 0x00, 0x00,
1013 0x00, 0x00, 0x00, 0x85,
1015 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1016 0x00, 0x00, 0x00, 0x00,
1018 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 62 */
1019 0x00, 0x00, 0x3b, 0x00,
1020 0x00, 0x00, 0x00, 0x00,
1021 0x00, 0x00, 0x00, 0x00,
1022 0x00, 0x00, 0x00, 0x00,
1023 0x00, 0x00, 0x00, 0x00,
1024 0x00, 0x00, 0x00, 0x00,
1025 0x00, 0x00, 0x00, 0x00,
1026 0x00, 0x00, 0x00, 0x00,
1027 0x00, 0x00, 0x00, 0x00,
1033 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_packet_offsets[] = {
1034 { ICE_MAC_OFOS, 0 },
1035 { ICE_IPV6_OFOS, 14 },
1038 { ICE_IPV4_IL, 82 },
1039 { ICE_PROTOCOL_LAST, 0 },
1042 static const u8 dummy_ipv6_gtpu_ipv4_packet[] = {
1043 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1044 0x00, 0x00, 0x00, 0x00,
1045 0x00, 0x00, 0x00, 0x00,
1048 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1049 0x00, 0x58, 0x11, 0x00, /* Next header UDP */
1050 0x00, 0x00, 0x00, 0x00,
1051 0x00, 0x00, 0x00, 0x00,
1052 0x00, 0x00, 0x00, 0x00,
1053 0x00, 0x00, 0x00, 0x00,
1054 0x00, 0x00, 0x00, 0x00,
1055 0x00, 0x00, 0x00, 0x00,
1056 0x00, 0x00, 0x00, 0x00,
1057 0x00, 0x00, 0x00, 0x00,
1059 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1060 0x00, 0x00, 0x00, 0x00,
1062 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 62 */
1063 0x00, 0x00, 0x00, 0x00,
1064 0x00, 0x00, 0x00, 0x85,
1066 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1067 0x00, 0x00, 0x00, 0x00,
1069 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 82 */
1070 0x00, 0x00, 0x40, 0x00,
1071 0x40, 0x00, 0x00, 0x00,
1072 0x00, 0x00, 0x00, 0x00,
1073 0x00, 0x00, 0x00, 0x00,
1079 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_packet_offsets[] = {
1080 { ICE_MAC_OFOS, 0 },
1081 { ICE_IPV6_OFOS, 14 },
1084 { ICE_IPV6_IL, 82 },
1085 { ICE_PROTOCOL_LAST, 0 },
1088 static const u8 dummy_ipv6_gtpu_ipv6_packet[] = {
1089 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1090 0x00, 0x00, 0x00, 0x00,
1091 0x00, 0x00, 0x00, 0x00,
1094 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1095 0x00, 0x6c, 0x11, 0x00, /* Next header UDP */
1096 0x00, 0x00, 0x00, 0x00,
1097 0x00, 0x00, 0x00, 0x00,
1098 0x00, 0x00, 0x00, 0x00,
1099 0x00, 0x00, 0x00, 0x00,
1100 0x00, 0x00, 0x00, 0x00,
1101 0x00, 0x00, 0x00, 0x00,
1102 0x00, 0x00, 0x00, 0x00,
1103 0x00, 0x00, 0x00, 0x00,
1105 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1106 0x00, 0x00, 0x00, 0x00,
1108 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 62 */
1109 0x00, 0x00, 0x00, 0x00,
1110 0x00, 0x00, 0x00, 0x85,
1112 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1113 0x00, 0x00, 0x00, 0x00,
1115 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 82 */
1116 0x00, 0x00, 0x3b, 0x00,
1117 0x00, 0x00, 0x00, 0x00,
1118 0x00, 0x00, 0x00, 0x00,
1119 0x00, 0x00, 0x00, 0x00,
1120 0x00, 0x00, 0x00, 0x00,
1121 0x00, 0x00, 0x00, 0x00,
1122 0x00, 0x00, 0x00, 0x00,
1123 0x00, 0x00, 0x00, 0x00,
1124 0x00, 0x00, 0x00, 0x00,
1130 struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
1131 { ICE_MAC_OFOS, 0 },
1132 { ICE_IPV4_OFOS, 14 },
1134 { ICE_GTP_NO_PAY, 42 },
1135 { ICE_PROTOCOL_LAST, 0 },
1139 struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
1140 { ICE_MAC_OFOS, 0 },
1141 { ICE_IPV6_OFOS, 14 },
1143 { ICE_GTP_NO_PAY, 62 },
1144 { ICE_PROTOCOL_LAST, 0 },
1147 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
1148 { ICE_MAC_OFOS, 0 },
1149 { ICE_ETYPE_OL, 12 },
1150 { ICE_VLAN_OFOS, 14},
1152 { ICE_PROTOCOL_LAST, 0 },
1155 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv4_offsets[] = {
1156 { ICE_MAC_OFOS, 0 },
1157 { ICE_ETYPE_OL, 12 },
1158 { ICE_VLAN_OFOS, 14},
1160 { ICE_IPV4_OFOS, 26 },
1161 { ICE_PROTOCOL_LAST, 0 },
1164 static const u8 dummy_pppoe_ipv4_packet[] = {
1165 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1166 0x00, 0x00, 0x00, 0x00,
1167 0x00, 0x00, 0x00, 0x00,
1169 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1171 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1173 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1176 0x00, 0x21, /* PPP Link Layer 24 */
1178 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_OFOS 26 */
1179 0x00, 0x00, 0x00, 0x00,
1180 0x00, 0x00, 0x00, 0x00,
1181 0x00, 0x00, 0x00, 0x00,
1182 0x00, 0x00, 0x00, 0x00,
1184 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1188 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_tcp_packet_offsets[] = {
1189 { ICE_MAC_OFOS, 0 },
1190 { ICE_ETYPE_OL, 12 },
1191 { ICE_VLAN_OFOS, 14},
1193 { ICE_IPV4_OFOS, 26 },
1195 { ICE_PROTOCOL_LAST, 0 },
1198 static const u8 dummy_pppoe_ipv4_tcp_packet[] = {
1199 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1200 0x00, 0x00, 0x00, 0x00,
1201 0x00, 0x00, 0x00, 0x00,
1203 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1205 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1207 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1210 0x00, 0x21, /* PPP Link Layer 24 */
1212 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 26 */
1213 0x00, 0x01, 0x00, 0x00,
1214 0x00, 0x06, 0x00, 0x00,
1215 0x00, 0x00, 0x00, 0x00,
1216 0x00, 0x00, 0x00, 0x00,
1218 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 46 */
1219 0x00, 0x00, 0x00, 0x00,
1220 0x00, 0x00, 0x00, 0x00,
1221 0x50, 0x00, 0x00, 0x00,
1222 0x00, 0x00, 0x00, 0x00,
1224 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1228 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_udp_packet_offsets[] = {
1229 { ICE_MAC_OFOS, 0 },
1230 { ICE_ETYPE_OL, 12 },
1231 { ICE_VLAN_OFOS, 14},
1233 { ICE_IPV4_OFOS, 26 },
1234 { ICE_UDP_ILOS, 46 },
1235 { ICE_PROTOCOL_LAST, 0 },
1238 static const u8 dummy_pppoe_ipv4_udp_packet[] = {
1239 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1240 0x00, 0x00, 0x00, 0x00,
1241 0x00, 0x00, 0x00, 0x00,
1243 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1245 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1247 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1250 0x00, 0x21, /* PPP Link Layer 24 */
1252 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 26 */
1253 0x00, 0x01, 0x00, 0x00,
1254 0x00, 0x11, 0x00, 0x00,
1255 0x00, 0x00, 0x00, 0x00,
1256 0x00, 0x00, 0x00, 0x00,
1258 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 46 */
1259 0x00, 0x08, 0x00, 0x00,
1261 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1264 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_offsets[] = {
1265 { ICE_MAC_OFOS, 0 },
1266 { ICE_ETYPE_OL, 12 },
1267 { ICE_VLAN_OFOS, 14},
1269 { ICE_IPV6_OFOS, 26 },
1270 { ICE_PROTOCOL_LAST, 0 },
1273 static const u8 dummy_pppoe_ipv6_packet[] = {
1274 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1275 0x00, 0x00, 0x00, 0x00,
1276 0x00, 0x00, 0x00, 0x00,
1278 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1280 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1282 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1285 0x00, 0x57, /* PPP Link Layer 24 */
1287 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
1288 0x00, 0x00, 0x3b, 0x00,
1289 0x00, 0x00, 0x00, 0x00,
1290 0x00, 0x00, 0x00, 0x00,
1291 0x00, 0x00, 0x00, 0x00,
1292 0x00, 0x00, 0x00, 0x00,
1293 0x00, 0x00, 0x00, 0x00,
1294 0x00, 0x00, 0x00, 0x00,
1295 0x00, 0x00, 0x00, 0x00,
1296 0x00, 0x00, 0x00, 0x00,
1298 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Per-header byte offsets into dummy_pppoe_ipv6_tcp_packet below;
 * terminated by ICE_PROTOCOL_LAST.
 */
1302 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_tcp_offsets[] = {
1303 { ICE_MAC_OFOS, 0 },
1304 { ICE_ETYPE_OL, 12 },
1305 { ICE_VLAN_OFOS, 14},
1307 { ICE_IPV6_OFOS, 26 },
1309 { ICE_PROTOCOL_LAST, 0 },
/* Dummy PPPoE-session + IPv6 + TCP packet template for switch filter
 * rules. IPv6 next header 0x06 = TCP; TCP header byte 0x50 encodes a
 * 5-word (20-byte) data offset.
 */
1312 static const u8 dummy_pppoe_ipv6_tcp_packet[] = {
1313 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1314 0x00, 0x00, 0x00, 0x00,
1315 0x00, 0x00, 0x00, 0x00,
1317 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1319 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1321 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1324 0x00, 0x57, /* PPP Link Layer 24 */
1326 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
1327 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
1328 0x00, 0x00, 0x00, 0x00,
1329 0x00, 0x00, 0x00, 0x00,
1330 0x00, 0x00, 0x00, 0x00,
1331 0x00, 0x00, 0x00, 0x00,
1332 0x00, 0x00, 0x00, 0x00,
1333 0x00, 0x00, 0x00, 0x00,
1334 0x00, 0x00, 0x00, 0x00,
1335 0x00, 0x00, 0x00, 0x00,
1337 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 66 */
1338 0x00, 0x00, 0x00, 0x00,
1339 0x00, 0x00, 0x00, 0x00,
1340 0x50, 0x00, 0x00, 0x00,
1341 0x00, 0x00, 0x00, 0x00,
1343 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Per-header byte offsets into dummy_pppoe_ipv6_udp_packet below;
 * terminated by ICE_PROTOCOL_LAST.
 */
1347 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_udp_offsets[] = {
1348 { ICE_MAC_OFOS, 0 },
1349 { ICE_ETYPE_OL, 12 },
1350 { ICE_VLAN_OFOS, 14},
1352 { ICE_IPV6_OFOS, 26 },
1353 { ICE_UDP_ILOS, 66 },
1354 { ICE_PROTOCOL_LAST, 0 },
/* Dummy PPPoE-session + IPv6 + UDP packet template for switch filter
 * rules. IPv6 next header 0x11 = UDP.
 */
1357 static const u8 dummy_pppoe_ipv6_udp_packet[] = {
1358 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1359 0x00, 0x00, 0x00, 0x00,
1360 0x00, 0x00, 0x00, 0x00,
1362 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1364 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1366 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1369 0x00, 0x57, /* PPP Link Layer 24 */
1371 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
1372 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
1373 0x00, 0x00, 0x00, 0x00,
1374 0x00, 0x00, 0x00, 0x00,
1375 0x00, 0x00, 0x00, 0x00,
1376 0x00, 0x00, 0x00, 0x00,
1377 0x00, 0x00, 0x00, 0x00,
1378 0x00, 0x00, 0x00, 0x00,
1379 0x00, 0x00, 0x00, 0x00,
1380 0x00, 0x00, 0x00, 0x00,
1382 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 66 */
1383 0x00, 0x08, 0x00, 0x00,
1385 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Per-header byte offsets into dummy_ipv4_esp_pkt below;
 * terminated by ICE_PROTOCOL_LAST.
 */
1388 static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
1389 { ICE_MAC_OFOS, 0 },
1390 { ICE_IPV4_OFOS, 14 },
1392 { ICE_PROTOCOL_LAST, 0 },
/* Dummy IPv4 + ESP packet template for switch filter rules.
 * IPv4 protocol 0x32 = ESP.
 */
1395 static const u8 dummy_ipv4_esp_pkt[] = {
1396 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1397 0x00, 0x00, 0x00, 0x00,
1398 0x00, 0x00, 0x00, 0x00,
1401 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 14 */
1402 0x00, 0x00, 0x40, 0x00,
1403 0x40, 0x32, 0x00, 0x00,
1404 0x00, 0x00, 0x00, 0x00,
1405 0x00, 0x00, 0x00, 0x00,
1407 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
1408 0x00, 0x00, 0x00, 0x00,
1409 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Per-header byte offsets into dummy_ipv6_esp_pkt below;
 * terminated by ICE_PROTOCOL_LAST.
 */
1412 static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
1413 { ICE_MAC_OFOS, 0 },
1414 { ICE_IPV6_OFOS, 14 },
1416 { ICE_PROTOCOL_LAST, 0 },
/* Dummy IPv6 + ESP packet template for switch filter rules.
 * IPv6 next header 0x32 = ESP.
 */
1419 static const u8 dummy_ipv6_esp_pkt[] = {
1420 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1421 0x00, 0x00, 0x00, 0x00,
1422 0x00, 0x00, 0x00, 0x00,
1425 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1426 0x00, 0x08, 0x32, 0x00, /* Next header ESP */
1427 0x00, 0x00, 0x00, 0x00,
1428 0x00, 0x00, 0x00, 0x00,
1429 0x00, 0x00, 0x00, 0x00,
1430 0x00, 0x00, 0x00, 0x00,
1431 0x00, 0x00, 0x00, 0x00,
1432 0x00, 0x00, 0x00, 0x00,
1433 0x00, 0x00, 0x00, 0x00,
1434 0x00, 0x00, 0x00, 0x00,
1436 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
1437 0x00, 0x00, 0x00, 0x00,
1438 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Per-header byte offsets into dummy_ipv4_ah_pkt below;
 * terminated by ICE_PROTOCOL_LAST.
 */
1441 static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
1442 { ICE_MAC_OFOS, 0 },
1443 { ICE_IPV4_OFOS, 14 },
1445 { ICE_PROTOCOL_LAST, 0 },
/* Dummy IPv4 + AH packet template for switch filter rules.
 * IPv4 protocol 0x33 = AH (Authentication Header).
 */
1448 static const u8 dummy_ipv4_ah_pkt[] = {
1449 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1450 0x00, 0x00, 0x00, 0x00,
1451 0x00, 0x00, 0x00, 0x00,
1454 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1455 0x00, 0x00, 0x40, 0x00,
1456 0x40, 0x33, 0x00, 0x00,
1457 0x00, 0x00, 0x00, 0x00,
1458 0x00, 0x00, 0x00, 0x00,
1460 0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
1461 0x00, 0x00, 0x00, 0x00,
1462 0x00, 0x00, 0x00, 0x00,
1463 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Per-header byte offsets into dummy_ipv6_ah_pkt below;
 * terminated by ICE_PROTOCOL_LAST.
 */
1466 static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
1467 { ICE_MAC_OFOS, 0 },
1468 { ICE_IPV6_OFOS, 14 },
1470 { ICE_PROTOCOL_LAST, 0 },
/* Dummy IPv6 + AH packet template for switch filter rules.
 * IPv6 next header 0x33 = AH.
 */
1473 static const u8 dummy_ipv6_ah_pkt[] = {
1474 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1475 0x00, 0x00, 0x00, 0x00,
1476 0x00, 0x00, 0x00, 0x00,
1479 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1480 0x00, 0x0c, 0x33, 0x00, /* Next header AH */
1481 0x00, 0x00, 0x00, 0x00,
1482 0x00, 0x00, 0x00, 0x00,
1483 0x00, 0x00, 0x00, 0x00,
1484 0x00, 0x00, 0x00, 0x00,
1485 0x00, 0x00, 0x00, 0x00,
1486 0x00, 0x00, 0x00, 0x00,
1487 0x00, 0x00, 0x00, 0x00,
1488 0x00, 0x00, 0x00, 0x00,
1490 0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
1491 0x00, 0x00, 0x00, 0x00,
1492 0x00, 0x00, 0x00, 0x00,
1493 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Per-header byte offsets into dummy_ipv4_nat_pkt below;
 * terminated by ICE_PROTOCOL_LAST.
 */
1496 static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
1497 { ICE_MAC_OFOS, 0 },
1498 { ICE_IPV4_OFOS, 14 },
1499 { ICE_UDP_ILOS, 34 },
1501 { ICE_PROTOCOL_LAST, 0 },
/* Dummy IPv4 + UDP-encapsulated ESP (NAT-Traversal) packet template.
 * UDP destination port 0x1194 = 4500, the IPsec NAT-T port.
 */
1504 static const u8 dummy_ipv4_nat_pkt[] = {
1505 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1506 0x00, 0x00, 0x00, 0x00,
1507 0x00, 0x00, 0x00, 0x00,
1510 0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_IL 14 */
1511 0x00, 0x00, 0x40, 0x00,
1512 0x40, 0x11, 0x00, 0x00,
1513 0x00, 0x00, 0x00, 0x00,
1514 0x00, 0x00, 0x00, 0x00,
1516 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34 */
1517 0x00, 0x00, 0x00, 0x00,
1519 0x00, 0x00, 0x00, 0x00,
1520 0x00, 0x00, 0x00, 0x00,
1521 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Per-header byte offsets into dummy_ipv6_nat_pkt below;
 * terminated by ICE_PROTOCOL_LAST.
 */
1524 static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
1525 { ICE_MAC_OFOS, 0 },
1526 { ICE_IPV6_OFOS, 14 },
1527 { ICE_UDP_ILOS, 54 },
1529 { ICE_PROTOCOL_LAST, 0 },
/* Dummy IPv6 + UDP-encapsulated ESP (NAT-Traversal) packet template.
 * IPv6 next header 0x11 = UDP; dest port 0x1194 = 4500 (IPsec NAT-T).
 */
1532 static const u8 dummy_ipv6_nat_pkt[] = {
1533 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1534 0x00, 0x00, 0x00, 0x00,
1535 0x00, 0x00, 0x00, 0x00,
1538 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1539 0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */
1540 0x00, 0x00, 0x00, 0x00,
1541 0x00, 0x00, 0x00, 0x00,
1542 0x00, 0x00, 0x00, 0x00,
1543 0x00, 0x00, 0x00, 0x00,
1544 0x00, 0x00, 0x00, 0x00,
1545 0x00, 0x00, 0x00, 0x00,
1546 0x00, 0x00, 0x00, 0x00,
1547 0x00, 0x00, 0x00, 0x00,
1549 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54 */
1550 0x00, 0x00, 0x00, 0x00,
1552 0x00, 0x00, 0x00, 0x00,
1553 0x00, 0x00, 0x00, 0x00,
1554 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Per-header byte offsets into dummy_ipv4_l2tpv3_pkt below;
 * terminated by ICE_PROTOCOL_LAST.
 */
1558 static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
1559 { ICE_MAC_OFOS, 0 },
1560 { ICE_IPV4_OFOS, 14 },
1562 { ICE_PROTOCOL_LAST, 0 },
/* Dummy IPv4 + L2TPv3-over-IP packet template.
 * IPv4 protocol 0x73 = L2TPv3.
 */
1565 static const u8 dummy_ipv4_l2tpv3_pkt[] = {
1566 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1567 0x00, 0x00, 0x00, 0x00,
1568 0x00, 0x00, 0x00, 0x00,
1571 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1572 0x00, 0x00, 0x40, 0x00,
1573 0x40, 0x73, 0x00, 0x00,
1574 0x00, 0x00, 0x00, 0x00,
1575 0x00, 0x00, 0x00, 0x00,
1577 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
1578 0x00, 0x00, 0x00, 0x00,
1579 0x00, 0x00, 0x00, 0x00,
1580 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Per-header byte offsets into dummy_ipv6_l2tpv3_pkt below;
 * terminated by ICE_PROTOCOL_LAST.
 */
1583 static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
1584 { ICE_MAC_OFOS, 0 },
1585 { ICE_IPV6_OFOS, 14 },
1587 { ICE_PROTOCOL_LAST, 0 },
/* Dummy IPv6 + L2TPv3-over-IP packet template.
 * IPv6 next header 0x73 = L2TPv3.
 */
1590 static const u8 dummy_ipv6_l2tpv3_pkt[] = {
1591 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1592 0x00, 0x00, 0x00, 0x00,
1593 0x00, 0x00, 0x00, 0x00,
1596 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
1597 0x00, 0x0c, 0x73, 0x40,
1598 0x00, 0x00, 0x00, 0x00,
1599 0x00, 0x00, 0x00, 0x00,
1600 0x00, 0x00, 0x00, 0x00,
1601 0x00, 0x00, 0x00, 0x00,
1602 0x00, 0x00, 0x00, 0x00,
1603 0x00, 0x00, 0x00, 0x00,
1604 0x00, 0x00, 0x00, 0x00,
1605 0x00, 0x00, 0x00, 0x00,
1607 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
1608 0x00, 0x00, 0x00, 0x00,
1609 0x00, 0x00, 0x00, 0x00,
1610 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Per-header byte offsets into dummy_qinq_ipv4_pkt below;
 * terminated by ICE_PROTOCOL_LAST.
 */
1613 static const struct ice_dummy_pkt_offsets dummy_qinq_ipv4_packet_offsets[] = {
1614 { ICE_MAC_OFOS, 0 },
1615 { ICE_VLAN_EX, 14 },
1616 { ICE_VLAN_OFOS, 18 },
1617 { ICE_IPV4_OFOS, 22 },
1618 { ICE_PROTOCOL_LAST, 0 },
/* Dummy double-VLAN (QinQ) + IPv4 packet template. The inner tag's
 * EtherType 0x0800 = IPv4.
 */
1621 static const u8 dummy_qinq_ipv4_pkt[] = {
1622 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1623 0x00, 0x00, 0x00, 0x00,
1624 0x00, 0x00, 0x00, 0x00,
1627 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1628 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 18 */
1630 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
1631 0x00, 0x01, 0x00, 0x00,
1632 0x00, 0x11, 0x00, 0x00,
1633 0x00, 0x00, 0x00, 0x00,
1634 0x00, 0x00, 0x00, 0x00,
1636 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
1637 0x00, 0x08, 0x00, 0x00,
1639 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Per-header byte offsets into dummy_qinq_ipv6_pkt below;
 * terminated by ICE_PROTOCOL_LAST.
 */
1642 static const struct ice_dummy_pkt_offsets dummy_qinq_ipv6_packet_offsets[] = {
1643 { ICE_MAC_OFOS, 0 },
1644 { ICE_VLAN_EX, 14 },
1645 { ICE_VLAN_OFOS, 18 },
1646 { ICE_IPV6_OFOS, 22 },
1647 { ICE_PROTOCOL_LAST, 0 },
/* Dummy double-VLAN (QinQ) + IPv6 + UDP packet template. The inner
 * tag's EtherType 0x86DD = IPv6 (see ICE_IPV6_ETHER_ID).
 */
1650 static const u8 dummy_qinq_ipv6_pkt[] = {
1651 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1652 0x00, 0x00, 0x00, 0x00,
1653 0x00, 0x00, 0x00, 0x00,
1656 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1657 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 18 */
1659 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
1660 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
1661 0x00, 0x00, 0x00, 0x00,
1662 0x00, 0x00, 0x00, 0x00,
1663 0x00, 0x00, 0x00, 0x00,
1664 0x00, 0x00, 0x00, 0x00,
1665 0x00, 0x00, 0x00, 0x00,
1666 0x00, 0x00, 0x00, 0x00,
1667 0x00, 0x00, 0x00, 0x00,
1668 0x00, 0x00, 0x00, 0x00,
1670 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
1671 0x00, 0x10, 0x00, 0x00,
1673 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
1674 0x00, 0x00, 0x00, 0x00,
1676 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Per-header byte offsets for the QinQ + PPPoE dummy packet;
 * terminated by ICE_PROTOCOL_LAST.
 */
1679 static const struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_offsets[] = {
1680 { ICE_MAC_OFOS, 0 },
1681 { ICE_VLAN_EX, 14 },
1682 { ICE_VLAN_OFOS, 18 },
1684 { ICE_PROTOCOL_LAST, 0 },
/* Per-header byte offsets into dummy_qinq_pppoe_ipv4_pkt below;
 * terminated by ICE_PROTOCOL_LAST.
 */
1688 struct ice_dummy_pkt_offsets dummy_qinq_pppoe_ipv4_packet_offsets[] = {
1689 { ICE_MAC_OFOS, 0 },
1690 { ICE_VLAN_EX, 14 },
1691 { ICE_VLAN_OFOS, 18 },
1693 { ICE_IPV4_OFOS, 30 },
1694 { ICE_PROTOCOL_LAST, 0 },
/* Dummy double-VLAN (QinQ) + PPPoE-session + IPv4 packet template.
 * Inner tag EtherType 0x8864 = PPPoE session; PPP protocol 0x0021 = IPv4.
 */
1697 static const u8 dummy_qinq_pppoe_ipv4_pkt[] = {
1698 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1699 0x00, 0x00, 0x00, 0x00,
1700 0x00, 0x00, 0x00, 0x00,
1703 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1704 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 18 */
1706 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
1709 0x00, 0x21, /* PPP Link Layer 28 */
1711 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 30 */
1712 0x00, 0x00, 0x00, 0x00,
1713 0x00, 0x00, 0x00, 0x00,
1714 0x00, 0x00, 0x00, 0x00,
1715 0x00, 0x00, 0x00, 0x00,
1717 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Per-header byte offsets into dummy_qinq_pppoe_ipv6_packet below;
 * terminated by ICE_PROTOCOL_LAST.
 */
1721 struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_ipv6_offsets[] = {
1722 { ICE_MAC_OFOS, 0 },
1723 { ICE_ETYPE_OL, 12 },
1725 { ICE_VLAN_OFOS, 18 },
1727 { ICE_IPV6_OFOS, 30 },
1728 { ICE_PROTOCOL_LAST, 0 },
/* Dummy double-VLAN (QinQ) + PPPoE-session + IPv6 packet template.
 * Outer EtherType 0x9100 here (other QinQ templates in this file use
 * 0x8100 for the outer tag — NOTE(review): confirm this difference is
 * intentional for this profile). PPP protocol 0x0057 = IPv6.
 */
1731 static const u8 dummy_qinq_pppoe_ipv6_packet[] = {
1732 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1733 0x00, 0x00, 0x00, 0x00,
1734 0x00, 0x00, 0x00, 0x00,
1736 0x91, 0x00, /* ICE_ETYPE_OL 12 */
1738 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1739 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 18 */
1741 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
1744 0x00, 0x57, /* PPP Link Layer 28*/
1746 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 30 */
1747 0x00, 0x00, 0x3b, 0x00,
1748 0x00, 0x00, 0x00, 0x00,
1749 0x00, 0x00, 0x00, 0x00,
1750 0x00, 0x00, 0x00, 0x00,
1751 0x00, 0x00, 0x00, 0x00,
1752 0x00, 0x00, 0x00, 0x00,
1753 0x00, 0x00, 0x00, 0x00,
1754 0x00, 0x00, 0x00, 0x00,
1755 0x00, 0x00, 0x00, 0x00,
1757 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Cached recipe<->profile association bitmaps. Both are refreshed from
 * firmware by ice_get_recp_to_prof_map() (forward-declared below, since
 * it is used before its definition).
 */
1760 /* this is a recipe to profile association bitmap */
1761 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
1762 ICE_MAX_NUM_PROFILES);
1764 /* this is a profile to recipe association bitmap */
1765 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
1766 ICE_MAX_NUM_RECIPES);
1768 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
1771 * ice_collect_result_idx - copy result index values
1772 * @buf: buffer that contains the result index
1773 * @recp: the recipe struct to copy data into
 *
 * If the recipe entry's result-index-valid flag (ICE_AQ_RECIPE_RESULT_EN)
 * is set, record the index value (flag bit masked off) in the recipe's
 * result-index bitmap (recp->res_idxs).
1775 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
1776 struct ice_sw_recipe *recp)
1778 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1779 ice_set_bit(buf->content.result_indx &
1780 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
1784 * ice_get_tun_type_for_recipe - get tunnel type for the recipe
1785 * @rid: recipe ID that we are populating
 * @vlan: true if the recipe also matches an outer (QinQ) VLAN; remaps the
 *        resolved type to its *_QINQ variant at the end
 *
 * Infers the switch tunnel type of a firmware recipe from the set of
 * profiles associated with it (recipe_to_profile[rid]). The hard-coded
 * profile-ID tables below classify each associated profile as VXLAN,
 * GRE, PPPoE, or non-tunnel; GTP and flag-style profiles are detected
 * by profile-ID range checks.
1787 static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid, bool vlan)
 /* Known profile IDs per tunnel class (hardware profile numbering). */
1789 u8 vxlan_profile[12] = {10, 11, 12, 16, 17, 18, 22, 23, 24, 25, 26, 27};
1790 u8 gre_profile[12] = {13, 14, 15, 19, 20, 21, 28, 29, 30, 31, 32, 33};
1791 u8 pppoe_profile[7] = {34, 35, 36, 37, 38, 39, 40};
1792 u8 non_tun_profile[6] = {4, 5, 6, 7, 8, 9};
1793 enum ice_sw_tunnel_type tun_type;
1794 u16 i, j, profile_num = 0;
1795 bool non_tun_valid = false;
1796 bool pppoe_valid = false;
1797 bool vxlan_valid = false;
1798 bool gre_valid = false;
1799 bool gtp_valid = false;
1800 bool flag_valid = false;
 /* Pass 1: classify every profile associated with this recipe. */
1802 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1803 if (!ice_is_bit_set(recipe_to_profile[rid], j))
1808 for (i = 0; i < 12; i++) {
1809 if (gre_profile[i] == j)
1813 for (i = 0; i < 12; i++) {
1814 if (vxlan_profile[i] == j)
1818 for (i = 0; i < 7; i++) {
1819 if (pppoe_profile[i] == j)
1823 for (i = 0; i < 6; i++) {
1824 if (non_tun_profile[i] == j)
1825 non_tun_valid = true;
 /* GTP-U profiles are detected by ID range rather than a table. */
1828 if (j >= ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER &&
1829 j <= ICE_PROFID_IPV6_GTPU_IPV6_TCP)
 /* ESP/AH/NAT-T/PFCP and GTP TEID profiles count as "flag" profiles. */
1832 if ((j >= ICE_PROFID_IPV4_ESP &&
1833 j <= ICE_PROFID_IPV6_PFCP_SESSION) ||
1834 (j >= ICE_PROFID_IPV4_GTPC_TEID &&
1835 j <= ICE_PROFID_IPV6_GTPU_TEID))
 /* Pass 2: collapse the classification flags into one tunnel type.
  * A mix of tunnel and non-tunnel profiles yields TUN_AND_NON_TUN.
  */
1839 if (!non_tun_valid && vxlan_valid)
1840 tun_type = ICE_SW_TUN_VXLAN;
1841 else if (!non_tun_valid && gre_valid)
1842 tun_type = ICE_SW_TUN_NVGRE;
1843 else if (!non_tun_valid && pppoe_valid)
1844 tun_type = ICE_SW_TUN_PPPOE;
1845 else if (!non_tun_valid && gtp_valid)
1846 tun_type = ICE_SW_TUN_GTP;
1847 else if (non_tun_valid &&
1848 (vxlan_valid || gre_valid || gtp_valid || pppoe_valid))
1849 tun_type = ICE_SW_TUN_AND_NON_TUN;
1850 else if (non_tun_valid && !vxlan_valid && !gre_valid && !gtp_valid &&
1852 tun_type = ICE_NON_TUN;
1854 tun_type = ICE_NON_TUN;
 /* Multi-profile PPPoE: narrow to the IPv4 or IPv6 flavor if exactly
  * one of the two "other" profiles is present. i/j are reused here as
  * booleans.
  */
1856 if (profile_num > 1 && tun_type == ICE_SW_TUN_PPPOE) {
1857 i = ice_is_bit_set(recipe_to_profile[rid],
1858 ICE_PROFID_PPPOE_IPV4_OTHER);
1859 j = ice_is_bit_set(recipe_to_profile[rid],
1860 ICE_PROFID_PPPOE_IPV6_OTHER);
1862 tun_type = ICE_SW_TUN_PPPOE_IPV4;
1864 tun_type = ICE_SW_TUN_PPPOE_IPV6;
 /* Narrow generic GTP to the specific inner/outer IP combination. */
1867 if (tun_type == ICE_SW_TUN_GTP) {
1868 if (ice_is_bit_set(recipe_to_profile[rid],
1869 ICE_PROFID_IPV4_GTPU_IPV4_OTHER))
1870 tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4;
1871 else if (ice_is_bit_set(recipe_to_profile[rid],
1872 ICE_PROFID_IPV4_GTPU_IPV6_OTHER))
1873 tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6;
1874 else if (ice_is_bit_set(recipe_to_profile[rid],
1875 ICE_PROFID_IPV6_GTPU_IPV4_OTHER))
1876 tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4;
1877 else if (ice_is_bit_set(recipe_to_profile[rid],
1878 ICE_PROFID_IPV6_GTPU_IPV6_OTHER))
1879 tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6;
 /* Single-profile recipes map 1:1 from profile ID to tunnel type. */
1882 if (profile_num == 1 && (flag_valid || non_tun_valid || pppoe_valid)) {
1883 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1884 if (ice_is_bit_set(recipe_to_profile[rid], j)) {
1886 case ICE_PROFID_IPV4_TCP:
1887 tun_type = ICE_SW_IPV4_TCP;
1889 case ICE_PROFID_IPV4_UDP:
1890 tun_type = ICE_SW_IPV4_UDP;
1892 case ICE_PROFID_IPV6_TCP:
1893 tun_type = ICE_SW_IPV6_TCP;
1895 case ICE_PROFID_IPV6_UDP:
1896 tun_type = ICE_SW_IPV6_UDP;
1898 case ICE_PROFID_PPPOE_PAY:
1899 tun_type = ICE_SW_TUN_PPPOE_PAY;
1901 case ICE_PROFID_PPPOE_IPV4_TCP:
1902 tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1904 case ICE_PROFID_PPPOE_IPV4_UDP:
1905 tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1907 case ICE_PROFID_PPPOE_IPV4_OTHER:
1908 tun_type = ICE_SW_TUN_PPPOE_IPV4;
1910 case ICE_PROFID_PPPOE_IPV6_TCP:
1911 tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1913 case ICE_PROFID_PPPOE_IPV6_UDP:
1914 tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1916 case ICE_PROFID_PPPOE_IPV6_OTHER:
1917 tun_type = ICE_SW_TUN_PPPOE_IPV6;
1919 case ICE_PROFID_IPV4_ESP:
1920 tun_type = ICE_SW_TUN_IPV4_ESP;
1922 case ICE_PROFID_IPV6_ESP:
1923 tun_type = ICE_SW_TUN_IPV6_ESP;
1925 case ICE_PROFID_IPV4_AH:
1926 tun_type = ICE_SW_TUN_IPV4_AH;
1928 case ICE_PROFID_IPV6_AH:
1929 tun_type = ICE_SW_TUN_IPV6_AH;
1931 case ICE_PROFID_IPV4_NAT_T:
1932 tun_type = ICE_SW_TUN_IPV4_NAT_T;
1934 case ICE_PROFID_IPV6_NAT_T:
1935 tun_type = ICE_SW_TUN_IPV6_NAT_T;
1937 case ICE_PROFID_IPV4_PFCP_NODE:
1939 ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1941 case ICE_PROFID_IPV6_PFCP_NODE:
1943 ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1945 case ICE_PROFID_IPV4_PFCP_SESSION:
1947 ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1949 case ICE_PROFID_IPV6_PFCP_SESSION:
1951 ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1953 case ICE_PROFID_MAC_IPV4_L2TPV3:
1954 tun_type = ICE_SW_TUN_IPV4_L2TPV3;
1956 case ICE_PROFID_MAC_IPV6_L2TPV3:
1957 tun_type = ICE_SW_TUN_IPV6_L2TPV3;
1959 case ICE_PROFID_IPV4_GTPU_TEID:
1960 tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1962 case ICE_PROFID_IPV6_GTPU_TEID:
1963 tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
 /* Finally, remap to the QinQ variant when an outer VLAN is matched. */
1974 if (vlan && tun_type == ICE_SW_TUN_PPPOE)
1975 tun_type = ICE_SW_TUN_PPPOE_QINQ;
1976 else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV6)
1977 tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
1978 else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV4)
1979 tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
1980 else if (vlan && tun_type == ICE_SW_TUN_PPPOE_PAY)
1981 tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
1982 else if (vlan && tun_type == ICE_SW_TUN_AND_NON_TUN)
1983 tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1984 else if (vlan && tun_type == ICE_NON_TUN)
1985 tun_type = ICE_NON_TUN_QINQ;
1991 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
1992 * @hw: pointer to hardware structure
1993 * @recps: struct that we need to populate
1994 * @rid: recipe ID that we are populating
1995 * @refresh_required: true if we should get recipe to profile mapping from FW
1997 * This function is used to populate all the necessary entries into our
1998 * bookkeeping so that we have a current list of all the recipes that are
1999 * programmed in the firmware.
 *
 * Returns ICE_SUCCESS on success; ICE_ERR_NO_MEMORY on allocation failure,
 * or the error from the get-recipe AQ command.
2001 static enum ice_status
2002 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
2003 bool *refresh_required)
2005 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
2006 struct ice_aqc_recipe_data_elem *tmp;
2007 u16 num_recps = ICE_MAX_NUM_RECIPES;
2008 struct ice_prot_lkup_ext *lkup_exts;
2009 enum ice_status status;
2014 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
2016 /* we need a buffer big enough to accommodate all the recipes */
2017 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
2018 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
2020 return ICE_ERR_NO_MEMORY;
2022 tmp[0].recipe_indx = rid;
2023 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
2024 /* non-zero status meaning recipe doesn't exist */
2028 /* Get recipe to profile map so that we can get the fv from lkups that
2029 * we read for a recipe from FW. Since we want to minimize the number of
2030 * times we make this FW call, just make one call and cache the copy
2031 * until a new recipe is added. This operation is only required the
2032 * first time to get the changes from FW. Then to search existing
2033 * entries we don't need to update the cache again until another recipe
2036 if (*refresh_required) {
2037 ice_get_recp_to_prof_map(hw);
2038 *refresh_required = false;
2041 /* Start populating all the entries for recps[rid] based on lkups from
2042 * firmware. Note that we are only creating the root recipe in our
2045 lkup_exts = &recps[rid].lkup_exts;
 /* Walk every sub-recipe FW returned for this recipe chain. */
2047 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
2048 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
2049 struct ice_recp_grp_entry *rg_entry;
2050 u8 i, prof, idx, prot = 0;
2054 rg_entry = (struct ice_recp_grp_entry *)
2055 ice_malloc(hw, sizeof(*rg_entry));
2057 status = ICE_ERR_NO_MEMORY;
2061 idx = root_bufs.recipe_indx;
2062 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
2064 /* Mark all result indices in this chain */
2065 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
2066 ice_set_bit(root_bufs.content.result_indx &
2067 ~ICE_AQ_RECIPE_RESULT_EN, result_bm)
2069 /* get the first profile that is associated with rid */
2070 prof = ice_find_first_bit(recipe_to_profile[idx],
2071 ICE_MAX_NUM_PROFILES);
 /* Copy the recipe's lookup indexes/masks (entry 0 is skipped;
  * NOTE(review): presumably it holds the chain/root linkage —
  * confirm against the AQ recipe layout).
  */
2072 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
2073 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
2075 rg_entry->fv_idx[i] = lkup_indx;
2076 rg_entry->fv_mask[i] =
2077 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
2079 /* If the recipe is a chained recipe then all its
2080 * child recipe's result will have a result index.
2081 * To fill fv_words we should not use those result
2082 * index, we only need the protocol ids and offsets.
2083 * We will skip all the fv_idx which stores result
2084 * index in them. We also need to skip any fv_idx which
2085 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
2086 * valid offset value.
2088 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
2089 rg_entry->fv_idx[i]) ||
2090 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
2091 rg_entry->fv_idx[i] == 0)
 /* Resolve field-vector index to protocol ID + offset. */
2094 ice_find_prot_off(hw, ICE_BLK_SW, prof,
2095 rg_entry->fv_idx[i], &prot, &off);
2096 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
2097 lkup_exts->fv_words[fv_word_idx].off = off;
2098 lkup_exts->field_mask[fv_word_idx] =
2099 rg_entry->fv_mask[i];
 /* Tunnel-flag metadata word implies an outer-VLAN match. */
2100 if (prot == ICE_META_DATA_ID_HW &&
2101 off == ICE_TUN_FLAG_MDID_OFF)
2105 /* populate rg_list with the data from the child entry of this
2108 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
2110 /* Propagate some data to the recipe database */
2111 recps[idx].is_root = !!is_root;
2112 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2113 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
2114 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
2115 recps[idx].chain_idx = root_bufs.content.result_indx &
2116 ~ICE_AQ_RECIPE_RESULT_EN;
2117 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
2119 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
2125 /* Only do the following for root recipes entries */
2126 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
2127 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
2128 recps[idx].root_rid = root_bufs.content.rid &
2129 ~ICE_AQ_RECIPE_ID_IS_ROOT;
2130 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2133 /* Complete initialization of the root recipe entry */
2134 lkup_exts->n_val_words = fv_word_idx;
2135 recps[rid].big_recp = (num_recps > 1);
2136 recps[rid].n_grp_count = (u8)num_recps;
2137 recps[rid].tun_type = ice_get_tun_type_for_recipe(rid, vlan);
 /* Keep a private copy of the raw FW buffer for later re-programming. */
2138 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
2139 ice_memdup(hw, tmp, recps[rid].n_grp_count *
2140 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
2141 if (!recps[rid].root_buf)
2144 /* Copy result indexes */
2145 ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
2146 recps[rid].recp_created = true;
2154 * ice_get_recp_to_prof_map - updates recipe to profile mapping
2155 * @hw: pointer to hardware structure
2157 * This function is used to populate recipe_to_profile matrix where index to
2158 * this array is the recipe ID and the element is the mapping of which profiles
2159 * is this recipe mapped to.
 *
 * Also rebuilds the inverse profile_to_recipe mapping. Profiles whose
 * AQ query fails are skipped (their cached bitmaps stay zeroed).
2161 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
2163 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
 /* Query FW once per profile in use, up to max_used_prof_index. */
2166 for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
2169 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
2170 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
2171 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
2173 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
2174 ICE_MAX_NUM_RECIPES);
 /* Mirror each recipe bit into the recipe->profile direction. */
2175 ice_for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
2176 ice_set_bit(i, recipe_to_profile[j]);
2181 * ice_init_def_sw_recp - initialize the recipe book keeping tables
2182 * @hw: pointer to the HW struct
2183 * @recp_list: pointer to sw recipe list
2185 * Allocate memory for the entire recipe table and initialize the structures/
2186 * entries corresponding to basic recipes.
 *
 * Returns ICE_ERR_NO_MEMORY if the table cannot be allocated. On success
 * the caller owns the table (stored via @recp_list).
2189 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
2191 struct ice_sw_recipe *recps;
2194 recps = (struct ice_sw_recipe *)
2195 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
2197 return ICE_ERR_NO_MEMORY;
 /* Each slot starts as its own root recipe with empty rule lists. */
2199 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
2200 recps[i].root_rid = i;
2201 INIT_LIST_HEAD(&recps[i].filt_rules);
2202 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
2203 INIT_LIST_HEAD(&recps[i].rg_list);
2204 ice_init_lock(&recps[i].filt_rule_lock);
2213 * ice_aq_get_sw_cfg - get switch configuration
2214 * @hw: pointer to the hardware structure
2215 * @buf: pointer to the result buffer
2216 * @buf_size: length of the buffer available for response
2217 * @req_desc: pointer to requested descriptor
2218 * @num_elems: pointer to number of elements
2219 * @cd: pointer to command details structure or NULL
2221 * Get switch configuration (0x0200) to be placed in buf.
2222 * This admin command returns information such as initial VSI/port number
2223 * and switch ID it belongs to.
2225 * NOTE: *req_desc is both an input/output parameter.
2226 * The caller of this function first calls this function with *request_desc set
2227 * to 0. If the response from f/w has *req_desc set to 0, all the switch
2228 * configuration information has been returned; if non-zero (meaning not all
2229 * the information was returned), the caller should call this function again
2230 * with *req_desc set to the previous value returned by f/w to get the
2231 * next block of switch configuration information.
2233 * *num_elems is output only parameter. This reflects the number of elements
2234 * in response buffer. The caller of this function to use *num_elems while
2235 * parsing the response buffer.
2237 static enum ice_status
2238 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
2239 u16 buf_size, u16 *req_desc, u16 *num_elems,
2240 struct ice_sq_cd *cd)
2242 struct ice_aqc_get_sw_cfg *cmd;
2243 struct ice_aq_desc desc;
2244 enum ice_status status;
2246 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
2247 cmd = &desc.params.get_sw_conf;
 /* Continuation token from a previous call (0 on the first call). */
2248 cmd->element = CPU_TO_LE16(*req_desc);
2250 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
 /* On success, hand back FW's continuation token and element count. */
2252 *req_desc = LE16_TO_CPU(cmd->element);
2253 *num_elems = LE16_TO_CPU(cmd->num_elems);
2260 * ice_alloc_rss_global_lut - allocate a RSS global LUT
2261 * @hw: pointer to the HW struct
2262 * @shared_res: true to allocate as a shared resource and false to allocate as a dedicated resource
2263 * @global_lut_id: output parameter for the RSS global LUT's ID
 *
 * Returns ICE_ERR_NO_MEMORY if the request buffer cannot be allocated,
 * or the status of the alloc-resource AQ command. On success
 * *global_lut_id holds the ID returned by firmware.
2265 enum ice_status ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id)
2267 struct ice_aqc_alloc_free_res_elem *sw_buf;
2268 enum ice_status status;
2271 buf_len = ice_struct_size(sw_buf, elem, 1);
2272 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2274 return ICE_ERR_NO_MEMORY;
 /* One element: a global RSS hash LUT, shared or dedicated. */
2276 sw_buf->num_elems = CPU_TO_LE16(1);
2277 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH |
2278 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
2279 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
2281 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, ice_aqc_opc_alloc_res, NULL);
2283 ice_debug(hw, ICE_DBG_RES, "Failed to allocate %s RSS global LUT, status %d\n",
2284 shared_res ? "shared" : "dedicated", status);
2285 goto ice_alloc_global_lut_exit;
2288 *global_lut_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2290 ice_alloc_global_lut_exit:
2291 ice_free(hw, sw_buf);
2296 * ice_free_global_lut - free a RSS global LUT
2297 * @hw: pointer to the HW struct
2298 * @global_lut_id: ID of the RSS global LUT to free
 *
 * Returns ICE_ERR_NO_MEMORY if the request buffer cannot be allocated,
 * or the status of the free-resource AQ command.
2300 enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id)
2302 struct ice_aqc_alloc_free_res_elem *sw_buf;
2303 u16 buf_len, num_elems = 1;
2304 enum ice_status status;
2306 buf_len = ice_struct_size(sw_buf, elem, num_elems);
2307 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2309 return ICE_ERR_NO_MEMORY;
 /* One element: the LUT ID being released back to firmware. */
2311 sw_buf->num_elems = CPU_TO_LE16(num_elems);
2312 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH);
2313 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(global_lut_id);
2315 status = ice_aq_alloc_free_res(hw, num_elems, sw_buf, buf_len, ice_aqc_opc_free_res, NULL);
2317 ice_debug(hw, ICE_DBG_RES, "Failed to free RSS global LUT %d, status %d\n",
2318 global_lut_id, status);
2320 ice_free(hw, sw_buf);
2325 * ice_alloc_sw - allocate resources specific to switch
2326 * @hw: pointer to the HW struct
2327 * @ena_stats: true to turn on VEB stats
2328 * @shared_res: true for shared resource, false for dedicated resource
2329 * @sw_id: switch ID returned
2330 * @counter_id: VEB counter ID returned
2332 * allocates switch resources (SWID and VEB counter) (0x0208)
 *
 * Returns ICE_ERR_NO_MEMORY on buffer allocation failure, or the status
 * of the alloc-resource AQ commands. The VEB counter is only requested
 * when @ena_stats is set (NOTE(review): the conditional line is not
 * visible in this view — confirm against the full source).
2335 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
2338 struct ice_aqc_alloc_free_res_elem *sw_buf;
2339 struct ice_aqc_res_elem *sw_ele;
2340 enum ice_status status;
2343 buf_len = ice_struct_size(sw_buf, elem, 1);
2344 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2346 return ICE_ERR_NO_MEMORY;
2348 /* Prepare buffer for switch ID.
2349 * The number of resource entries in buffer is passed as 1 since only a
2350 * single switch/VEB instance is allocated, and hence a single sw_id
2353 sw_buf->num_elems = CPU_TO_LE16(1);
2355 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
2356 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
2357 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
2359 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2360 ice_aqc_opc_alloc_res, NULL);
2363 goto ice_alloc_sw_exit;
 /* Firmware responds with the allocated switch ID. */
2365 sw_ele = &sw_buf->elem[0];
2366 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
2369 /* Prepare buffer for VEB Counter */
2370 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
2371 struct ice_aqc_alloc_free_res_elem *counter_buf;
2372 struct ice_aqc_res_elem *counter_ele;
2374 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
2375 ice_malloc(hw, buf_len);
2377 status = ICE_ERR_NO_MEMORY;
2378 goto ice_alloc_sw_exit;
2381 /* The number of resource entries in buffer is passed as 1 since
2382 * only a single switch/VEB instance is allocated, and hence a
2383 * single VEB counter is requested.
2385 counter_buf->num_elems = CPU_TO_LE16(1);
2386 counter_buf->res_type =
2387 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
2388 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
2389 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
 /* Counter buffer is freed on both the error and success paths. */
2393 ice_free(hw, counter_buf);
2394 goto ice_alloc_sw_exit;
2396 counter_ele = &counter_buf->elem[0];
2397 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
2398 ice_free(hw, counter_buf);
2402 ice_free(hw, sw_buf);
2407 * ice_free_sw - free resources specific to switch
2408 * @hw: pointer to the HW struct
2409 * @sw_id: switch ID returned
2410 * @counter_id: VEB counter ID returned
2412 * free switch resources (SWID and VEB counter) (0x0209)
2414 * NOTE: This function frees multiple resources. It continues
2415 * releasing other resources even after it encounters error.
2416 * The error code returned is the last error it encountered.
2418 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
2420 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
2421 enum ice_status status, ret_status;
2424 buf_len = ice_struct_size(sw_buf, elem, 1);
2425 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2427 return ICE_ERR_NO_MEMORY;
2429 /* Prepare buffer to free for switch ID res.
2430 * The number of resource entries in buffer is passed as 1 since only a
2431 * single switch/VEB instance is freed, and hence a single sw_id
2434 sw_buf->num_elems = CPU_TO_LE16(1);
2435 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
2436 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
 /* Remember the SWID-free status; the VEB counter is freed regardless. */
2438 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2439 ice_aqc_opc_free_res, NULL);
2442 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
2444 /* Prepare buffer to free for VEB Counter resource */
2445 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
2446 ice_malloc(hw, buf_len);
2448 ice_free(hw, sw_buf);
2449 return ICE_ERR_NO_MEMORY;
2452 /* The number of resource entries in buffer is passed as 1 since only a
2453 * single switch/VEB instance is freed, and hence a single VEB counter
2456 counter_buf->num_elems = CPU_TO_LE16(1);
2457 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
2458 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
2460 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
2461 ice_aqc_opc_free_res, NULL);
 /* Per the NOTE above, report the last error encountered. */
2463 ice_debug(hw, ICE_DBG_SW, "VEB counter resource could not be freed\n");
2464 ret_status = status;
2467 ice_free(hw, counter_buf);
2468 ice_free(hw, sw_buf);
2474 * @hw: pointer to the HW struct
2475 * @vsi_ctx: pointer to a VSI context struct
2476 * @cd: pointer to command details structure or NULL
2478 * Add a VSI context to the hardware (0x0210)
2481 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2482 struct ice_sq_cd *cd)
2484 struct ice_aqc_add_update_free_vsi_resp *res;
2485 struct ice_aqc_add_get_update_free_vsi *cmd;
2486 struct ice_aq_desc desc;
2487 enum ice_status status;
2489 cmd = &desc.params.vsi_cmd;
2490 res = &desc.params.add_update_free_vsi_res;
2492 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
2494 if (!vsi_ctx->alloc_from_pool)
2495 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
2496 ICE_AQ_VSI_IS_VALID);
2498 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
2500 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2502 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2503 sizeof(vsi_ctx->info), cd);
2506 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
2507 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
2508 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
2516 * @hw: pointer to the HW struct
2517 * @vsi_ctx: pointer to a VSI context struct
2518 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2519 * @cd: pointer to command details structure or NULL
2521 * Free VSI context info from hardware (0x0213)
2524 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2525 bool keep_vsi_alloc, struct ice_sq_cd *cd)
2527 struct ice_aqc_add_update_free_vsi_resp *resp;
2528 struct ice_aqc_add_get_update_free_vsi *cmd;
2529 struct ice_aq_desc desc;
2530 enum ice_status status;
2532 cmd = &desc.params.vsi_cmd;
2533 resp = &desc.params.add_update_free_vsi_res;
2535 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
2537 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2539 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
2541 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2543 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2544 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2552 * @hw: pointer to the HW struct
2553 * @vsi_ctx: pointer to a VSI context struct
2554 * @cd: pointer to command details structure or NULL
2556 * Update VSI context in the hardware (0x0211)
2559 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2560 struct ice_sq_cd *cd)
2562 struct ice_aqc_add_update_free_vsi_resp *resp;
2563 struct ice_aqc_add_get_update_free_vsi *cmd;
2564 struct ice_aq_desc desc;
2565 enum ice_status status;
2567 cmd = &desc.params.vsi_cmd;
2568 resp = &desc.params.add_update_free_vsi_res;
2570 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
2572 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2574 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2576 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2577 sizeof(vsi_ctx->info), cd);
2580 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2581 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2588 * ice_is_vsi_valid - check whether the VSI is valid or not
2589 * @hw: pointer to the HW struct
2590 * @vsi_handle: VSI handle
2592 * check whether the VSI is valid or not
2594 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
2596 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
2600 * ice_get_hw_vsi_num - return the HW VSI number
2601 * @hw: pointer to the HW struct
2602 * @vsi_handle: VSI handle
2604 * return the HW VSI number
2605 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
2607 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
2609 return hw->vsi_ctx[vsi_handle]->vsi_num;
2613 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
2614 * @hw: pointer to the HW struct
2615 * @vsi_handle: VSI handle
2617 * return the VSI context entry for a given VSI handle
2619 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2621 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
2625 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
2626 * @hw: pointer to the HW struct
2627 * @vsi_handle: VSI handle
2628 * @vsi: VSI context pointer
2630 * save the VSI context entry for a given VSI handle
2633 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
2635 hw->vsi_ctx[vsi_handle] = vsi;
2639 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
2640 * @hw: pointer to the HW struct
2641 * @vsi_handle: VSI handle
2643 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
2645 struct ice_vsi_ctx *vsi;
2648 vsi = ice_get_vsi_ctx(hw, vsi_handle);
2651 ice_for_each_traffic_class(i) {
2652 if (vsi->lan_q_ctx[i]) {
2653 ice_free(hw, vsi->lan_q_ctx[i]);
2654 vsi->lan_q_ctx[i] = NULL;
2660 * ice_clear_vsi_ctx - clear the VSI context entry
2661 * @hw: pointer to the HW struct
2662 * @vsi_handle: VSI handle
2664 * clear the VSI context entry
2666 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2668 struct ice_vsi_ctx *vsi;
2670 vsi = ice_get_vsi_ctx(hw, vsi_handle);
2672 ice_clear_vsi_q_ctx(hw, vsi_handle);
2674 hw->vsi_ctx[vsi_handle] = NULL;
2679 * ice_clear_all_vsi_ctx - clear all the VSI context entries
2680 * @hw: pointer to the HW struct
2682 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
2686 for (i = 0; i < ICE_MAX_VSI; i++)
2687 ice_clear_vsi_ctx(hw, i);
2691 * ice_add_vsi - add VSI context to the hardware and VSI handle list
2692 * @hw: pointer to the HW struct
2693 * @vsi_handle: unique VSI handle provided by drivers
2694 * @vsi_ctx: pointer to a VSI context struct
2695 * @cd: pointer to command details structure or NULL
2697 * Add a VSI context to the hardware also add it into the VSI handle list.
2698 * If this function gets called after reset for existing VSIs then update
2699 * with the new HW VSI number in the corresponding VSI handle list entry.
2702 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2703 struct ice_sq_cd *cd)
2705 struct ice_vsi_ctx *tmp_vsi_ctx;
2706 enum ice_status status;
2708 if (vsi_handle >= ICE_MAX_VSI)
2709 return ICE_ERR_PARAM;
2710 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
2713 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
2715 /* Create a new VSI context */
2716 tmp_vsi_ctx = (struct ice_vsi_ctx *)
2717 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
2719 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
2720 return ICE_ERR_NO_MEMORY;
2722 *tmp_vsi_ctx = *vsi_ctx;
2724 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
2726 /* update with new HW VSI num */
2727 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
2734 * ice_free_vsi- free VSI context from hardware and VSI handle list
2735 * @hw: pointer to the HW struct
2736 * @vsi_handle: unique VSI handle
2737 * @vsi_ctx: pointer to a VSI context struct
2738 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2739 * @cd: pointer to command details structure or NULL
2741 * Free VSI context info from hardware as well as from VSI handle list
2744 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2745 bool keep_vsi_alloc, struct ice_sq_cd *cd)
2747 enum ice_status status;
2749 if (!ice_is_vsi_valid(hw, vsi_handle))
2750 return ICE_ERR_PARAM;
2751 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2752 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
2754 ice_clear_vsi_ctx(hw, vsi_handle);
2760 * @hw: pointer to the HW struct
2761 * @vsi_handle: unique VSI handle
2762 * @vsi_ctx: pointer to a VSI context struct
2763 * @cd: pointer to command details structure or NULL
2765 * Update VSI context in the hardware
2768 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2769 struct ice_sq_cd *cd)
2771 if (!ice_is_vsi_valid(hw, vsi_handle))
2772 return ICE_ERR_PARAM;
2773 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2774 return ice_aq_update_vsi(hw, vsi_ctx, cd);
2778 * ice_aq_get_vsi_params
2779 * @hw: pointer to the HW struct
2780 * @vsi_ctx: pointer to a VSI context struct
2781 * @cd: pointer to command details structure or NULL
2783 * Get VSI context info from hardware (0x0212)
2786 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2787 struct ice_sq_cd *cd)
2789 struct ice_aqc_add_get_update_free_vsi *cmd;
2790 struct ice_aqc_get_vsi_resp *resp;
2791 struct ice_aq_desc desc;
2792 enum ice_status status;
2794 cmd = &desc.params.vsi_cmd;
2795 resp = &desc.params.get_vsi_resp;
2797 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
2799 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2801 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2802 sizeof(vsi_ctx->info), cd);
2804 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
2806 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2807 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2814 * ice_aq_add_update_mir_rule - add/update a mirror rule
2815 * @hw: pointer to the HW struct
2816 * @rule_type: Rule Type
2817 * @dest_vsi: VSI number to which packets will be mirrored
2818 * @count: length of the list
2819 * @mr_buf: buffer for list of mirrored VSI numbers
2820 * @cd: pointer to command details structure or NULL
2823 * Add/Update Mirror Rule (0x260).
2826 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
2827 u16 count, struct ice_mir_rule_buf *mr_buf,
2828 struct ice_sq_cd *cd, u16 *rule_id)
2830 struct ice_aqc_add_update_mir_rule *cmd;
2831 struct ice_aq_desc desc;
2832 enum ice_status status;
2833 __le16 *mr_list = NULL;
2836 switch (rule_type) {
2837 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
2838 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
2839 /* Make sure count and mr_buf are set for these rule_types */
2840 if (!(count && mr_buf))
2841 return ICE_ERR_PARAM;
2843 buf_size = count * sizeof(__le16);
2844 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
2846 return ICE_ERR_NO_MEMORY;
2848 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
2849 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
2850 /* Make sure count and mr_buf are not set for these
2853 if (count || mr_buf)
2854 return ICE_ERR_PARAM;
2857 ice_debug(hw, ICE_DBG_SW, "Error due to unsupported rule_type %u\n", rule_type);
2858 return ICE_ERR_OUT_OF_RANGE;
2861 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
2863 /* Pre-process 'mr_buf' items for add/update of virtual port
2864 * ingress/egress mirroring (but not physical port ingress/egress
2870 for (i = 0; i < count; i++) {
2873 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
2875 /* Validate specified VSI number, make sure it is less
2876 * than ICE_MAX_VSI, if not return with error.
2878 if (id >= ICE_MAX_VSI) {
2879 ice_debug(hw, ICE_DBG_SW, "Error VSI index (%u) out-of-range\n",
2881 ice_free(hw, mr_list);
2882 return ICE_ERR_OUT_OF_RANGE;
2885 /* add VSI to mirror rule */
2888 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
2889 else /* remove VSI from mirror rule */
2890 mr_list[i] = CPU_TO_LE16(id);
2894 cmd = &desc.params.add_update_rule;
2895 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
2896 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
2897 ICE_AQC_RULE_ID_VALID_M);
2898 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
2899 cmd->num_entries = CPU_TO_LE16(count);
2900 cmd->dest = CPU_TO_LE16(dest_vsi);
2902 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
2904 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
2906 ice_free(hw, mr_list);
2912 * ice_aq_delete_mir_rule - delete a mirror rule
2913 * @hw: pointer to the HW struct
2914 * @rule_id: Mirror rule ID (to be deleted)
2915 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
2916 * otherwise it is returned to the shared pool
2917 * @cd: pointer to command details structure or NULL
2919 * Delete Mirror Rule (0x261).
2922 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
2923 struct ice_sq_cd *cd)
2925 struct ice_aqc_delete_mir_rule *cmd;
2926 struct ice_aq_desc desc;
2928 /* rule_id should be in the range 0...63 */
2929 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
2930 return ICE_ERR_OUT_OF_RANGE;
2932 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
2934 cmd = &desc.params.del_rule;
2935 rule_id |= ICE_AQC_RULE_ID_VALID_M;
2936 cmd->rule_id = CPU_TO_LE16(rule_id);
2939 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
2941 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2945 * ice_aq_alloc_free_vsi_list
2946 * @hw: pointer to the HW struct
2947 * @vsi_list_id: VSI list ID returned or used for lookup
2948 * @lkup_type: switch rule filter lookup type
2949 * @opc: switch rules population command type - pass in the command opcode
2951 * allocates or free a VSI list resource
2953 static enum ice_status
2954 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
2955 enum ice_sw_lkup_type lkup_type,
2956 enum ice_adminq_opc opc)
2958 struct ice_aqc_alloc_free_res_elem *sw_buf;
2959 struct ice_aqc_res_elem *vsi_ele;
2960 enum ice_status status;
2963 buf_len = ice_struct_size(sw_buf, elem, 1);
2964 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2966 return ICE_ERR_NO_MEMORY;
2967 sw_buf->num_elems = CPU_TO_LE16(1);
2969 if (lkup_type == ICE_SW_LKUP_MAC ||
2970 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2971 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2972 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2973 lkup_type == ICE_SW_LKUP_PROMISC ||
2974 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2975 lkup_type == ICE_SW_LKUP_LAST) {
2976 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
2977 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
2979 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
2981 status = ICE_ERR_PARAM;
2982 goto ice_aq_alloc_free_vsi_list_exit;
2985 if (opc == ice_aqc_opc_free_res)
2986 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
2988 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
2990 goto ice_aq_alloc_free_vsi_list_exit;
2992 if (opc == ice_aqc_opc_alloc_res) {
2993 vsi_ele = &sw_buf->elem[0];
2994 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
2997 ice_aq_alloc_free_vsi_list_exit:
2998 ice_free(hw, sw_buf);
3003 * ice_aq_set_storm_ctrl - Sets storm control configuration
3004 * @hw: pointer to the HW struct
3005 * @bcast_thresh: represents the upper threshold for broadcast storm control
3006 * @mcast_thresh: represents the upper threshold for multicast storm control
3007 * @ctl_bitmask: storm control knobs
3009 * Sets the storm control configuration (0x0280)
3012 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
3015 struct ice_aqc_storm_cfg *cmd;
3016 struct ice_aq_desc desc;
3018 cmd = &desc.params.storm_conf;
3020 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
3022 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
3023 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
3024 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
3026 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3030 * ice_aq_get_storm_ctrl - gets storm control configuration
3031 * @hw: pointer to the HW struct
3032 * @bcast_thresh: represents the upper threshold for broadcast storm control
3033 * @mcast_thresh: represents the upper threshold for multicast storm control
3034 * @ctl_bitmask: storm control knobs
3036 * Gets the storm control configuration (0x0281)
3039 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
3042 enum ice_status status;
3043 struct ice_aq_desc desc;
3045 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
3047 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3049 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
3052 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
3055 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
3058 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
3065 * ice_aq_sw_rules - add/update/remove switch rules
3066 * @hw: pointer to the HW struct
3067 * @rule_list: pointer to switch rule population list
3068 * @rule_list_sz: total size of the rule list in bytes
3069 * @num_rules: number of switch rules in the rule_list
3070 * @opc: switch rules population command type - pass in the command opcode
3071 * @cd: pointer to command details structure or NULL
3073 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
3075 static enum ice_status
3076 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
3077 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
3079 struct ice_aq_desc desc;
3080 enum ice_status status;
3082 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3084 if (opc != ice_aqc_opc_add_sw_rules &&
3085 opc != ice_aqc_opc_update_sw_rules &&
3086 opc != ice_aqc_opc_remove_sw_rules)
3087 return ICE_ERR_PARAM;
3089 ice_fill_dflt_direct_cmd_desc(&desc, opc);
3091 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3092 desc.params.sw_rules.num_rules_fltr_entry_index =
3093 CPU_TO_LE16(num_rules);
3094 status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
3095 if (opc != ice_aqc_opc_add_sw_rules &&
3096 hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
3097 status = ICE_ERR_DOES_NOT_EXIST;
3103 * ice_aq_add_recipe - add switch recipe
3104 * @hw: pointer to the HW struct
3105 * @s_recipe_list: pointer to switch rule population list
3106 * @num_recipes: number of switch recipes in the list
3107 * @cd: pointer to command details structure or NULL
3112 ice_aq_add_recipe(struct ice_hw *hw,
3113 struct ice_aqc_recipe_data_elem *s_recipe_list,
3114 u16 num_recipes, struct ice_sq_cd *cd)
3116 struct ice_aqc_add_get_recipe *cmd;
3117 struct ice_aq_desc desc;
3120 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3121 cmd = &desc.params.add_get_recipe;
3122 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
3124 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
3125 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3127 buf_size = num_recipes * sizeof(*s_recipe_list);
3129 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
3133 * ice_aq_get_recipe - get switch recipe
3134 * @hw: pointer to the HW struct
3135 * @s_recipe_list: pointer to switch rule population list
3136 * @num_recipes: pointer to the number of recipes (input and output)
3137 * @recipe_root: root recipe number of recipe(s) to retrieve
3138 * @cd: pointer to command details structure or NULL
3142 * On input, *num_recipes should equal the number of entries in s_recipe_list.
3143 * On output, *num_recipes will equal the number of entries returned in
3146 * The caller must supply enough space in s_recipe_list to hold all possible
3147 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
3150 ice_aq_get_recipe(struct ice_hw *hw,
3151 struct ice_aqc_recipe_data_elem *s_recipe_list,
3152 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
3154 struct ice_aqc_add_get_recipe *cmd;
3155 struct ice_aq_desc desc;
3156 enum ice_status status;
3159 if (*num_recipes != ICE_MAX_NUM_RECIPES)
3160 return ICE_ERR_PARAM;
3162 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3163 cmd = &desc.params.add_get_recipe;
3164 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
3166 cmd->return_index = CPU_TO_LE16(recipe_root);
3167 cmd->num_sub_recipes = 0;
3169 buf_size = *num_recipes * sizeof(*s_recipe_list);
3171 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
3172 /* cppcheck-suppress constArgument */
3173 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
3179 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
3180 * @hw: pointer to the HW struct
3181 * @profile_id: package profile ID to associate the recipe with
3182 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
3183 * @cd: pointer to command details structure or NULL
3184 * Recipe to profile association (0x0291)
3187 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
3188 struct ice_sq_cd *cd)
3190 struct ice_aqc_recipe_to_profile *cmd;
3191 struct ice_aq_desc desc;
3193 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3194 cmd = &desc.params.recipe_to_profile;
3195 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
3196 cmd->profile_id = CPU_TO_LE16(profile_id);
3197 /* Set the recipe ID bit in the bitmask to let the device know which
3198 * profile we are associating the recipe to
3200 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
3201 ICE_NONDMA_TO_NONDMA);
3203 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3207 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
3208 * @hw: pointer to the HW struct
3209 * @profile_id: package profile ID to associate the recipe with
3210 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
3211 * @cd: pointer to command details structure or NULL
3212 * Associate profile ID with given recipe (0x0293)
3215 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
3216 struct ice_sq_cd *cd)
3218 struct ice_aqc_recipe_to_profile *cmd;
3219 struct ice_aq_desc desc;
3220 enum ice_status status;
3222 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3223 cmd = &desc.params.recipe_to_profile;
3224 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
3225 cmd->profile_id = CPU_TO_LE16(profile_id);
3227 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3229 ice_memcpy(r_bitmap, cmd->recipe_assoc,
3230 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
3236 * ice_alloc_recipe - add recipe resource
3237 * @hw: pointer to the hardware structure
3238 * @rid: recipe ID returned as response to AQ call
3240 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
3242 struct ice_aqc_alloc_free_res_elem *sw_buf;
3243 enum ice_status status;
3246 buf_len = ice_struct_size(sw_buf, elem, 1);
3247 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
3249 return ICE_ERR_NO_MEMORY;
3251 sw_buf->num_elems = CPU_TO_LE16(1);
3252 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
3253 ICE_AQC_RES_TYPE_S) |
3254 ICE_AQC_RES_TYPE_FLAG_SHARED);
3255 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
3256 ice_aqc_opc_alloc_res, NULL);
3258 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
3259 ice_free(hw, sw_buf);
3264 /* ice_init_port_info - Initialize port_info with switch configuration data
3265 * @pi: pointer to port_info
3266 * @vsi_port_num: VSI number or port number
3267 * @type: Type of switch element (port or VSI)
3268 * @swid: switch ID of the switch the element is attached to
3269 * @pf_vf_num: PF or VF number
3270 * @is_vf: true if the element is a VF, false otherwise
3273 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
3274 u16 swid, u16 pf_vf_num, bool is_vf)
3277 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
3278 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
3280 pi->pf_vf_num = pf_vf_num;
3282 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3283 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3286 ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
3291 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
3292 * @hw: pointer to the hardware structure
3294 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
3296 struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
3297 enum ice_status status;
3304 num_total_ports = 1;
3306 rbuf = (struct ice_aqc_get_sw_cfg_resp_elem *)
3307 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
3310 return ICE_ERR_NO_MEMORY;
3312 /* Multiple calls to ice_aq_get_sw_cfg may be required
3313 * to get all the switch configuration information. The need
3314 * for additional calls is indicated by ice_aq_get_sw_cfg
3315 * writing a non-zero value in req_desc
3318 struct ice_aqc_get_sw_cfg_resp_elem *ele;
3320 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
3321 &req_desc, &num_elems, NULL);
3326 for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
3327 u16 pf_vf_num, swid, vsi_port_num;
3331 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
3332 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
3334 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
3335 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
3337 swid = LE16_TO_CPU(ele->swid);
3339 if (LE16_TO_CPU(ele->pf_vf_num) &
3340 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
3343 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
3344 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
3347 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
3348 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
3349 if (j == num_total_ports) {
3350 ice_debug(hw, ICE_DBG_SW, "more ports than expected\n");
3351 status = ICE_ERR_CFG;
3354 ice_init_port_info(hw->port_info,
3355 vsi_port_num, res_type, swid,
3363 } while (req_desc && !status);
3371 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
3372 * @hw: pointer to the hardware structure
3373 * @fi: filter info structure to fill/update
3375 * This helper function populates the lb_en and lan_en elements of the provided
3376 * ice_fltr_info struct using the switch's type and characteristics of the
3377 * switch rule being configured.
3379 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
3381 if ((fi->flag & ICE_FLTR_RX) &&
3382 (fi->fltr_act == ICE_FWD_TO_VSI ||
3383 fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
3384 fi->lkup_type == ICE_SW_LKUP_LAST)
3388 if ((fi->flag & ICE_FLTR_TX) &&
3389 (fi->fltr_act == ICE_FWD_TO_VSI ||
3390 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
3391 fi->fltr_act == ICE_FWD_TO_Q ||
3392 fi->fltr_act == ICE_FWD_TO_QGRP)) {
3393 /* Setting LB for prune actions will result in replicated
3394 * packets to the internal switch that will be dropped.
3396 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
3399 /* Set lan_en to TRUE if
3400 * 1. The switch is a VEB AND
3402 * 2.1 The lookup is a directional lookup like ethertype,
3403 * promiscuous, ethertype-MAC, promiscuous-VLAN
3404 * and default-port OR
3405 * 2.2 The lookup is VLAN, OR
3406 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
3407 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
3411 * The switch is a VEPA.
3413 * In all other cases, the LAN enable has to be set to false.
3416 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3417 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
3418 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3419 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3420 fi->lkup_type == ICE_SW_LKUP_DFLT ||
3421 fi->lkup_type == ICE_SW_LKUP_VLAN ||
3422 (fi->lkup_type == ICE_SW_LKUP_MAC &&
3423 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
3424 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
3425 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
3434 * ice_fill_sw_rule - Helper function to fill switch rule structure
3435 * @hw: pointer to the hardware structure
3436 * @f_info: entry containing packet forwarding information
3437 * @s_rule: switch rule structure to be filled in based on mac_entry
3438 * @opc: switch rules population command type - pass in the command opcode
3441 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
3442 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
3444 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
3445 u16 vlan_tpid = ICE_ETH_P_8021Q;
3453 if (opc == ice_aqc_opc_remove_sw_rules) {
3454 s_rule->pdata.lkup_tx_rx.act = 0;
3455 s_rule->pdata.lkup_tx_rx.index =
3456 CPU_TO_LE16(f_info->fltr_rule_id);
3457 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
3461 eth_hdr_sz = sizeof(dummy_eth_header);
3462 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
3464 /* initialize the ether header with a dummy header */
3465 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
3466 ice_fill_sw_info(hw, f_info);
3468 switch (f_info->fltr_act) {
3469 case ICE_FWD_TO_VSI:
3470 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
3471 ICE_SINGLE_ACT_VSI_ID_M;
3472 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
3473 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3474 ICE_SINGLE_ACT_VALID_BIT;
3476 case ICE_FWD_TO_VSI_LIST:
3477 act |= ICE_SINGLE_ACT_VSI_LIST;
3478 act |= (f_info->fwd_id.vsi_list_id <<
3479 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
3480 ICE_SINGLE_ACT_VSI_LIST_ID_M;
3481 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
3482 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3483 ICE_SINGLE_ACT_VALID_BIT;
3486 act |= ICE_SINGLE_ACT_TO_Q;
3487 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3488 ICE_SINGLE_ACT_Q_INDEX_M;
3490 case ICE_DROP_PACKET:
3491 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
3492 ICE_SINGLE_ACT_VALID_BIT;
3494 case ICE_FWD_TO_QGRP:
3495 q_rgn = f_info->qgrp_size > 0 ?
3496 (u8)ice_ilog2(f_info->qgrp_size) : 0;
3497 act |= ICE_SINGLE_ACT_TO_Q;
3498 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3499 ICE_SINGLE_ACT_Q_INDEX_M;
3500 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
3501 ICE_SINGLE_ACT_Q_REGION_M;
3508 act |= ICE_SINGLE_ACT_LB_ENABLE;
3510 act |= ICE_SINGLE_ACT_LAN_ENABLE;
3512 switch (f_info->lkup_type) {
3513 case ICE_SW_LKUP_MAC:
3514 daddr = f_info->l_data.mac.mac_addr;
3516 case ICE_SW_LKUP_VLAN:
3517 vlan_id = f_info->l_data.vlan.vlan_id;
3518 if (f_info->l_data.vlan.tpid_valid)
3519 vlan_tpid = f_info->l_data.vlan.tpid;
3520 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
3521 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
3522 act |= ICE_SINGLE_ACT_PRUNE;
3523 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
3526 case ICE_SW_LKUP_ETHERTYPE_MAC:
3527 daddr = f_info->l_data.ethertype_mac.mac_addr;
3529 case ICE_SW_LKUP_ETHERTYPE:
3530 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
3531 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
3533 case ICE_SW_LKUP_MAC_VLAN:
3534 daddr = f_info->l_data.mac_vlan.mac_addr;
3535 vlan_id = f_info->l_data.mac_vlan.vlan_id;
3537 case ICE_SW_LKUP_PROMISC_VLAN:
3538 vlan_id = f_info->l_data.mac_vlan.vlan_id;
3540 case ICE_SW_LKUP_PROMISC:
3541 daddr = f_info->l_data.mac_vlan.mac_addr;
3547 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
3548 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
3549 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
3551 /* Recipe set depending on lookup type */
3552 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
3553 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
3554 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
3557 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
3558 ICE_NONDMA_TO_NONDMA);
3560 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
3561 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
3562 *off = CPU_TO_BE16(vlan_id);
3563 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
3564 *off = CPU_TO_BE16(vlan_tpid);
3567 /* Create the switch rule with the final dummy Ethernet header */
3568 if (opc != ice_aqc_opc_update_sw_rules)
3569 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
3573 * ice_add_marker_act
3574 * @hw: pointer to the hardware structure
3575 * @m_ent: the management entry for which sw marker needs to be added
3576 * @sw_marker: sw marker to tag the Rx descriptor with
3577 * @l_id: large action resource ID
3579 * Create a large action to hold software marker and update the switch rule
3580 * entry pointed by m_ent with newly created large action
3582 static enum ice_status
3583 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3584 u16 sw_marker, u16 l_id)
/* NOTE(review): gaps in the embedded numbering (3585, 3591, 3594-3598, ...)
 * show lines elided from this excerpt (braces, locals such as
 * lg_act_size/rules_size/act/id, the NULL check after ice_malloc) --
 * verify against the upstream ice_switch.c before relying on this text.
 */
3586 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
3587 /* For software marker we need 3 large actions
3588 * 1. FWD action: FWD TO VSI or VSI LIST
3589 * 2. GENERIC VALUE action to hold the profile ID
3590 * 3. GENERIC VALUE action to hold the software marker ID
3592 const u16 num_lg_acts = 3;
3593 enum ice_status status;
/* Marker large actions are only attached to MAC lookup rules */
3599 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3600 return ICE_ERR_PARAM;
3602 /* Create two back-to-back switch rules and submit them to the HW using
3603 * one memory buffer:
3607 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
3608 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3609 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3611 return ICE_ERR_NO_MEMORY;
/* The lookup Tx/Rx rule sits right after the large action in the buffer */
3613 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3615 /* Fill in the first switch rule i.e. large action */
3616 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3617 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3618 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
3620 /* First action VSI forwarding or VSI list forwarding depending on how
3623 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
3624 m_ent->fltr_info.fwd_id.hw_vsi_id;
3626 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3627 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
3628 if (m_ent->vsi_count > 1)
3629 act |= ICE_LG_ACT_VSI_LIST;
3630 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3632 /* Second action descriptor type */
3633 act = ICE_LG_ACT_GENERIC;
3635 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
3636 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3638 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
3639 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
3641 /* Third action Marker value */
3642 act |= ICE_LG_ACT_GENERIC;
3643 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
3644 ICE_LG_ACT_GENERIC_VALUE_M;
3646 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
3648 /* call the fill switch rule to fill the lookup Tx Rx structure */
3649 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3650 ice_aqc_opc_update_sw_rules);
3652 /* Update the action to point to the large action ID */
3653 rx_tx->pdata.lkup_tx_rx.act =
3654 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
3655 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
3656 ICE_SINGLE_ACT_PTR_VAL_M));
3658 /* Use the filter rule ID of the previously created rule with single
3659 * act. Once the update happens, hardware will treat this as large
3662 rx_tx->pdata.lkup_tx_rx.index =
3663 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
/* Submit both rules (large action + updated lookup) in one AQ call */
3665 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3666 ice_aqc_opc_update_sw_rules, NULL);
3668 m_ent->lg_act_idx = l_id;
3669 m_ent->sw_marker_id = sw_marker;
3672 ice_free(hw, lg_act);
3677 * ice_add_counter_act - add/update filter rule with counter action
3678 * @hw: pointer to the hardware structure
3679 * @m_ent: the management entry for which counter needs to be added
3680 * @counter_id: VLAN counter ID returned as part of allocate resource
3681 * @l_id: large action resource ID
3683 static enum ice_status
3684 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3685 u16 counter_id, u16 l_id)
/* NOTE(review): numbering gaps (3686, 3692-3697, ...) show elided lines
 * (braces, locals lg_act_size/rules_size/f_rule_id/act/id, NULL check) --
 * verify against upstream ice_switch.c.
 */
3687 struct ice_aqc_sw_rules_elem *lg_act;
3688 struct ice_aqc_sw_rules_elem *rx_tx;
3689 enum ice_status status;
3690 /* 2 actions will be added while adding a large action counter */
3691 const int num_acts = 2;
/* Counter large actions are only attached to MAC lookup rules */
3698 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3699 return ICE_ERR_PARAM;
3701 /* Create two back-to-back switch rules and submit them to the HW using
3702 * one memory buffer:
3706 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
3707 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3708 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3710 return ICE_ERR_NO_MEMORY;
/* Lookup Tx/Rx rule follows the large action in the same buffer */
3712 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3714 /* Fill in the first switch rule i.e. large action */
3715 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3716 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3717 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
3719 /* First action VSI forwarding or VSI list forwarding depending on how
3722 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
3723 m_ent->fltr_info.fwd_id.hw_vsi_id;
3725 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3726 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
3727 ICE_LG_ACT_VSI_LIST_ID_M;
3728 if (m_ent->vsi_count > 1)
3729 act |= ICE_LG_ACT_VSI_LIST;
3730 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3732 /* Second action counter ID */
3733 act = ICE_LG_ACT_STAT_COUNT;
3734 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
3735 ICE_LG_ACT_STAT_COUNT_M;
3736 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3738 /* call the fill switch rule to fill the lookup Tx Rx structure */
3739 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3740 ice_aqc_opc_update_sw_rules);
/* Point the existing lookup rule's action at the large action index */
3742 act = ICE_SINGLE_ACT_PTR;
3743 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
3744 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
3746 /* Use the filter rule ID of the previously created rule with single
3747 * act. Once the update happens, hardware will treat this as large
3750 f_rule_id = m_ent->fltr_info.fltr_rule_id;
3751 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
/* Submit both rules in a single AQ update call */
3753 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3754 ice_aqc_opc_update_sw_rules, NULL);
3756 m_ent->lg_act_idx = l_id;
3757 m_ent->counter_index = counter_id;
3760 ice_free(hw, lg_act);
3765 * ice_create_vsi_list_map
3766 * @hw: pointer to the hardware structure
3767 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
3768 * @num_vsi: number of VSI handles in the array
3769 * @vsi_list_id: VSI list ID generated as part of allocate resource
3771 * Helper function to create a new entry of VSI list ID to VSI mapping
3772 * using the given VSI list ID
3774 static struct ice_vsi_list_map_info *
3775 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
/* NOTE(review): elided lines here (3776-3777, 3780-3781, 3783-3785, ...)
 * include the vsi_list_id parameter, braces, loop index declaration, the
 * NULL check after ice_malloc, and the final return -- verify upstream.
 */
3778 struct ice_switch_info *sw = hw->switch_info;
3779 struct ice_vsi_list_map_info *v_map;
3782 v_map = (struct ice_vsi_list_map_info *)ice_malloc(hw, sizeof(*v_map));
3786 v_map->vsi_list_id = vsi_list_id;
/* Record each VSI handle in the map's bitmap */
3788 for (i = 0; i < num_vsi; i++)
3789 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
/* Track the new map on the switch-wide list so it can be found later */
3791 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
3796 * ice_update_vsi_list_rule
3797 * @hw: pointer to the hardware structure
3798 * @vsi_handle_arr: array of VSI handles to form a VSI list
3799 * @num_vsi: number of VSI handles in the array
3800 * @vsi_list_id: VSI list ID generated as part of allocate resource
3801 * @remove: Boolean value to indicate if this is a remove action
3802 * @opc: switch rules population command type - pass in the command opcode
3803 * @lkup_type: lookup type of the filter
3805 * Call AQ command to add a new switch rule or update existing switch rule
3806 * using the given VSI list ID
3808 static enum ice_status
3809 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3810 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
3811 enum ice_sw_lkup_type lkup_type)
/* NOTE(review): elided lines (3812, 3815-3819, 3834, 3836, 3839, ...)
 * include braces, locals (rule_type, s_rule_size, loop index), the
 * num_vsi sanity check, the else-branch keyword before 3835, and the
 * NULL check after ice_malloc -- verify against upstream ice_switch.c.
 */
3813 struct ice_aqc_sw_rules_elem *s_rule;
3814 enum ice_status status;
3820 return ICE_ERR_PARAM;
/* Pick the rule type: a VSI list set/clear for most lookup types, a
 * prune list set/clear for VLAN lookups; other types are invalid.
 */
3822 if (lkup_type == ICE_SW_LKUP_MAC ||
3823 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
3824 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3825 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3826 lkup_type == ICE_SW_LKUP_PROMISC ||
3827 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3828 lkup_type == ICE_SW_LKUP_LAST)
3829 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
3830 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
3831 else if (lkup_type == ICE_SW_LKUP_VLAN)
3832 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
3833 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
3835 return ICE_ERR_PARAM;
3837 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
3838 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3840 return ICE_ERR_NO_MEMORY;
3841 for (i = 0; i < num_vsi; i++) {
3842 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
3843 status = ICE_ERR_PARAM;
3846 /* AQ call requires hw_vsi_id(s) */
3847 s_rule->pdata.vsi_list.vsi[i] =
3848 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
3851 s_rule->type = CPU_TO_LE16(rule_type);
3852 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
3853 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
3855 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
3858 ice_free(hw, s_rule);
3863 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
3864 * @hw: pointer to the HW struct
3865 * @vsi_handle_arr: array of VSI handles to form a VSI list
3866 * @num_vsi: number of VSI handles in the array
3867 * @vsi_list_id: stores the ID of the VSI list to be created
3868 * @lkup_type: switch rule filter's lookup type
3870 static enum ice_status
3871 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3872 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
/* NOTE(review): elided lines (3873, 3875, 3878-3880) include braces and the
 * error check between the two calls -- verify against upstream.
 */
3874 enum ice_status status;
/* First allocate a VSI list resource ID from firmware */
3876 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
3877 ice_aqc_opc_alloc_res);
3881 /* Update the newly created VSI list to include the specified VSIs */
3882 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
3883 *vsi_list_id, false,
3884 ice_aqc_opc_add_sw_rules, lkup_type);
3888 * ice_create_pkt_fwd_rule
3889 * @hw: pointer to the hardware structure
3890 * @recp_list: corresponding filter management list
3891 * @f_entry: entry containing packet forwarding information
3893 * Create switch rule with given filter information and add an entry
3894 * to the corresponding filter management list to track this switch rule
3897 static enum ice_status
3898 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3899 struct ice_fltr_list_entry *f_entry)
/* NOTE(review): elided lines (3900, 3904, 3907, 3911, 3914-3915, 3917,
 * 3923, 3926, 3929, 3932-3933, ...) include braces and the NULL/status
 * checks after each allocation and AQ call -- verify upstream.
 */
3901 struct ice_fltr_mgmt_list_entry *fm_entry;
3902 struct ice_aqc_sw_rules_elem *s_rule;
3903 enum ice_status status;
3905 s_rule = (struct ice_aqc_sw_rules_elem *)
3906 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
3908 return ICE_ERR_NO_MEMORY;
3909 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3910 ice_malloc(hw, sizeof(*fm_entry));
3912 status = ICE_ERR_NO_MEMORY;
3913 goto ice_create_pkt_fwd_rule_exit;
3916 fm_entry->fltr_info = f_entry->fltr_info;
3918 /* Initialize all the fields for the management entry */
3919 fm_entry->vsi_count = 1;
3920 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
3921 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
3922 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
/* Build the switch rule buffer and program it via the admin queue */
3924 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
3925 ice_aqc_opc_add_sw_rules);
3927 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3928 ice_aqc_opc_add_sw_rules, NULL);
3930 ice_free(hw, fm_entry);
3931 goto ice_create_pkt_fwd_rule_exit;
/* Propagate the firmware-assigned rule ID to both caller and tracker */
3934 f_entry->fltr_info.fltr_rule_id =
3935 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3936 fm_entry->fltr_info.fltr_rule_id =
3937 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3939 /* The book keeping entries will get removed when base driver
3940 * calls remove filter AQ command
3942 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
3944 ice_create_pkt_fwd_rule_exit:
3945 ice_free(hw, s_rule);
3950 * ice_update_pkt_fwd_rule
3951 * @hw: pointer to the hardware structure
3952 * @f_info: filter information for switch rule
3954 * Call AQ command to update a previously created switch rule with a
3957 static enum ice_status
3958 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
/* NOTE(review): elided lines (3959, 3962, 3965, 3967, 3969, 3971, 3975,
 * 3977-...) include braces, the NULL check after ice_malloc, and the final
 * return of status -- verify upstream.
 */
3960 struct ice_aqc_sw_rules_elem *s_rule;
3961 enum ice_status status;
3963 s_rule = (struct ice_aqc_sw_rules_elem *)
3964 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
3966 return ICE_ERR_NO_MEMORY;
/* Rebuild the rule content, then target the existing rule index */
3968 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
3970 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
3972 /* Update switch rule with new rule set to forward VSI list */
3973 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3974 ice_aqc_opc_update_sw_rules, NULL);
3976 ice_free(hw, s_rule);
3981 * ice_update_sw_rule_bridge_mode
3982 * @hw: pointer to the HW struct
3984 * Updates unicast switch filter rules based on VEB/VEPA mode
3986 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
/* NOTE(review): elided lines (3987, 3993, 3996, 3999, 4002, 4004-4005,
 * 4012-4016, 4018-...) include braces, the LIST_FOR_EACH_ENTRY list_entry
 * argument continuation, and error-break handling -- verify upstream.
 */
3988 struct ice_switch_info *sw = hw->switch_info;
3989 struct ice_fltr_mgmt_list_entry *fm_entry;
3990 enum ice_status status = ICE_SUCCESS;
3991 struct LIST_HEAD_TYPE *rule_head;
3992 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
/* Walk the MAC recipe's tracked rules under its lock */
3994 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3995 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3997 ice_acquire_lock(rule_lock);
3998 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
4000 struct ice_fltr_info *fi = &fm_entry->fltr_info;
4001 u8 *addr = fi->l_data.mac.mac_addr;
4003 /* Update unicast Tx rules to reflect the selected
4006 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
4007 (fi->fltr_act == ICE_FWD_TO_VSI ||
4008 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
4009 fi->fltr_act == ICE_FWD_TO_Q ||
4010 fi->fltr_act == ICE_FWD_TO_QGRP)) {
4011 status = ice_update_pkt_fwd_rule(hw, fi);
4017 ice_release_lock(rule_lock);
4023 * ice_add_update_vsi_list
4024 * @hw: pointer to the hardware structure
4025 * @m_entry: pointer to current filter management list entry
4026 * @cur_fltr: filter information from the book keeping entry
4027 * @new_fltr: filter information with the new VSI to be added
4029 * Call AQ command to add or update previously created VSI list with new VSI.
4031 * Helper function to do book keeping associated with adding filter information
4032 * The algorithm to do the book keeping is described below :
4033 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
4034 * if only one VSI has been added till now
4035 * Allocate a new VSI list and add two VSIs
4036 * to this list using switch rule command
4037 * Update the previously created switch rule with the
4038 * newly created VSI list ID
4039 * if a VSI list was previously created
4040 * Add the new VSI to the previously created VSI list set
4041 * using the update switch rule command
4043 static enum ice_status
4044 ice_add_update_vsi_list(struct ice_hw *hw,
4045 struct ice_fltr_mgmt_list_entry *m_entry,
4046 struct ice_fltr_info *cur_fltr,
4047 struct ice_fltr_info *new_fltr)
/* NOTE(review): elided lines (4048, 4051, 4055, 4061, 4065-4066, 4069,
 * 4073, 4077, 4079-4081, 4088, 4090-4092, 4097-4098, 4101, 4104, 4106,
 * 4110, 4113, 4115-4116, 4119-4120, 4123, 4126, 4131, 4134-4135, ...)
 * include braces, status checks, the else keyword before 4111, and the
 * final return -- verify against upstream ice_switch.c.
 */
4049 enum ice_status status = ICE_SUCCESS;
4050 u16 vsi_list_id = 0;
/* Queue/queue-group forwarding cannot be merged into a VSI list */
4052 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
4053 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
4054 return ICE_ERR_NOT_IMPL;
4056 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
4057 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
4058 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
4059 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
4060 return ICE_ERR_NOT_IMPL;
4062 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
4063 /* Only one entry existed in the mapping and it was not already
4064 * a part of a VSI list. So, create a VSI list with the old and
4067 struct ice_fltr_info tmp_fltr;
4068 u16 vsi_handle_arr[2];
4070 /* A rule already exists with the new VSI being added */
4071 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
4072 return ICE_ERR_ALREADY_EXISTS;
4074 vsi_handle_arr[0] = cur_fltr->vsi_handle;
4075 vsi_handle_arr[1] = new_fltr->vsi_handle;
4076 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
4078 new_fltr->lkup_type);
4082 tmp_fltr = *new_fltr;
4083 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
4084 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4085 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
4086 /* Update the previous switch rule of "MAC forward to VSI" to
4087 * "MAC fwd to VSI list"
4089 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Book-keeping: the tracked rule now forwards to the new list */
4093 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
4094 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
4095 m_entry->vsi_list_info =
4096 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4099 if (!m_entry->vsi_list_info)
4100 return ICE_ERR_NO_MEMORY;
4102 /* If this entry was large action then the large action needs
4103 * to be updated to point to FWD to VSI list
4105 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
4107 ice_add_marker_act(hw, m_entry,
4108 m_entry->sw_marker_id,
4109 m_entry->lg_act_idx);
4111 u16 vsi_handle = new_fltr->vsi_handle;
4112 enum ice_adminq_opc opcode;
4114 if (!m_entry->vsi_list_info)
4117 /* A rule already exists with the new VSI being added */
4118 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
4121 /* Update the previously created VSI list set with
4122 * the new VSI ID passed in
4124 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
4125 opcode = ice_aqc_opc_update_sw_rules;
4127 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
4128 vsi_list_id, false, opcode,
4129 new_fltr->lkup_type);
4130 /* update VSI list mapping info with new VSI ID */
4132 ice_set_bit(vsi_handle,
4133 m_entry->vsi_list_info->vsi_map);
4136 m_entry->vsi_count++;
4141 * ice_find_rule_entry - Search a rule entry
4142 * @list_head: head of rule list
4143 * @f_info: rule information
4145 * Helper function to search for a given rule entry
4146 * Returns pointer to entry storing the rule if found
4148 static struct ice_fltr_mgmt_list_entry *
4149 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
4150 struct ice_fltr_info *f_info)
/* NOTE(review): elided lines (4151, 4153, 4155, 4159-4163) include braces,
 * the list_entry macro argument continuation, the ret assignment/break,
 * and the final return -- verify upstream.
 */
4152 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
/* Match on the full lookup data plus the Rx/Tx flag */
4154 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4156 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4157 sizeof(f_info->l_data)) &&
4158 f_info->flag == list_itr->fltr_info.flag) {
4167 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
4168 * @recp_list: VSI lists needs to be searched
4169 * @vsi_handle: VSI handle to be found in VSI list
4170 * @vsi_list_id: VSI list ID found containing vsi_handle
4172 * Helper function to search a VSI list with single entry containing given VSI
4173 * handle element. This can be extended further to search VSI list with more
4174 * than 1 vsi_count. Returns pointer to VSI list entry if found.
4176 static struct ice_vsi_list_map_info *
4177 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
/* NOTE(review): elided lines (4178-4179, 4182, 4186, 4189, 4193,
 * 4195-4199, 4201, 4204, 4209, 4211-...) include the vsi_list_id
 * parameter, braces, goto/return-on-found handling, and the final
 * return -- verify upstream.
 */
4180 struct ice_vsi_list_map_info *map_info = NULL;
4181 struct LIST_HEAD_TYPE *list_head;
4183 list_head = &recp_list->filt_rules;
/* Advanced recipes track rules in a different entry type */
4184 if (recp_list->adv_rule) {
4185 struct ice_adv_fltr_mgmt_list_entry *list_itr;
4187 LIST_FOR_EACH_ENTRY(list_itr, list_head,
4188 ice_adv_fltr_mgmt_list_entry,
4190 if (list_itr->vsi_list_info) {
4191 map_info = list_itr->vsi_list_info;
4192 if (ice_is_bit_set(map_info->vsi_map,
4194 *vsi_list_id = map_info->vsi_list_id;
4200 struct ice_fltr_mgmt_list_entry *list_itr;
/* Non-advanced path: only single-VSI entries qualify, per the
 * function description above.
 */
4202 LIST_FOR_EACH_ENTRY(list_itr, list_head,
4203 ice_fltr_mgmt_list_entry,
4205 if (list_itr->vsi_count == 1 &&
4206 list_itr->vsi_list_info) {
4207 map_info = list_itr->vsi_list_info;
4208 if (ice_is_bit_set(map_info->vsi_map,
4210 *vsi_list_id = map_info->vsi_list_id;
4220 * ice_add_rule_internal - add rule for a given lookup type
4221 * @hw: pointer to the hardware structure
4222 * @recp_list: recipe list for which rule has to be added
4223 * @lport: logic port number on which function add rule
4224 * @f_entry: structure containing MAC forwarding information
4226 * Adds or updates the rule lists for a given recipe
4228 static enum ice_status
4229 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4230 u8 lport, struct ice_fltr_list_entry *f_entry)
/* NOTE(review): elided lines (4231, 4236, 4239, 4244, 4246, 4252, 4254,
 * 4256, 4259-4260, 4263, 4266-...) include braces, the Tx-flag src
 * assignment target on 4253, the !m_entry check before 4257, and the
 * final return -- verify upstream.
 */
4232 struct ice_fltr_info *new_fltr, *cur_fltr;
4233 struct ice_fltr_mgmt_list_entry *m_entry;
4234 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4235 enum ice_status status = ICE_SUCCESS;
4237 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4238 return ICE_ERR_PARAM;
4240 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
4241 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
4242 f_entry->fltr_info.fwd_id.hw_vsi_id =
4243 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4245 rule_lock = &recp_list->filt_rule_lock;
4247 ice_acquire_lock(rule_lock);
4248 new_fltr = &f_entry->fltr_info;
/* Rule source: the logical port for Rx rules, the HW VSI for Tx rules */
4249 if (new_fltr->flag & ICE_FLTR_RX)
4250 new_fltr->src = lport;
4251 else if (new_fltr->flag & ICE_FLTR_TX)
4253 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
/* New rule: create it; existing rule: merge the VSI into its list */
4255 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
4257 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
4258 goto exit_add_rule_internal;
4261 cur_fltr = &m_entry->fltr_info;
4262 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
4264 exit_add_rule_internal:
4265 ice_release_lock(rule_lock);
4270 * ice_remove_vsi_list_rule
4271 * @hw: pointer to the hardware structure
4272 * @vsi_list_id: VSI list ID generated as part of allocate resource
4273 * @lkup_type: switch rule filter lookup type
4275 * The VSI list should be emptied before this function is called to remove the
4278 static enum ice_status
4279 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
4280 enum ice_sw_lkup_type lkup_type)
/* NOTE(review): elided lines 4281 and 4287 are the function's braces --
 * verify upstream.
 */
4282 /* Free the vsi_list resource that we allocated. It is assumed that the
4283 * list is empty at this point.
4285 return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
4286 ice_aqc_opc_free_res);
4290 * ice_rem_update_vsi_list
4291 * @hw: pointer to the hardware structure
4292 * @vsi_handle: VSI handle of the VSI to remove
4293 * @fm_list: filter management entry for which the VSI list management needs to
4296 static enum ice_status
4297 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
4298 struct ice_fltr_mgmt_list_entry *fm_list)
/* NOTE(review): elided lines (4299, 4302-4303, 4307, 4311, 4316-4319,
 * 4322, 4327-4328, 4330, 4333, 4336, 4338-4341, 4347, 4350-4352,
 * 4354-4355, 4360, 4363, 4366-4368, 4372-...) include braces, the
 * vsi_list_id/rem_vsi_handle declarations, status checks, and the final
 * return -- verify against upstream ice_switch.c.
 */
4300 enum ice_sw_lkup_type lkup_type;
4301 enum ice_status status = ICE_SUCCESS;
4304 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
4305 fm_list->vsi_count == 0)
4306 return ICE_ERR_PARAM;
4308 /* A rule with the VSI being removed does not exist */
4309 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
4310 return ICE_ERR_DOES_NOT_EXIST;
/* Clear the VSI from the firmware VSI list, then from our bitmap */
4312 lkup_type = fm_list->fltr_info.lkup_type;
4313 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
4314 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
4315 ice_aqc_opc_update_sw_rules,
4320 fm_list->vsi_count--;
4321 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
/* If exactly one VSI remains (non-VLAN), collapse the list rule back
 * into a direct forward-to-VSI rule for the surviving VSI.
 */
4323 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
4324 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
4325 struct ice_vsi_list_map_info *vsi_list_info =
4326 fm_list->vsi_list_info;
4329 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
4331 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
4332 return ICE_ERR_OUT_OF_RANGE;
4334 /* Make sure VSI list is empty before removing it below */
4335 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
4337 ice_aqc_opc_update_sw_rules,
4342 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
4343 tmp_fltr_info.fwd_id.hw_vsi_id =
4344 ice_get_hw_vsi_num(hw, rem_vsi_handle);
4345 tmp_fltr_info.vsi_handle = rem_vsi_handle;
4346 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
4348 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
4349 tmp_fltr_info.fwd_id.hw_vsi_id, status);
4353 fm_list->fltr_info = tmp_fltr_info;
/* Tear down the now-unused VSI list resource and its bookkeeping */
4356 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
4357 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
4358 struct ice_vsi_list_map_info *vsi_list_info =
4359 fm_list->vsi_list_info;
4361 /* Remove the VSI list since it is no longer used */
4362 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
4364 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
4365 vsi_list_id, status);
4369 LIST_DEL(&vsi_list_info->list_entry);
4370 ice_free(hw, vsi_list_info);
4371 fm_list->vsi_list_info = NULL;
4378 * ice_remove_rule_internal - Remove a filter rule of a given type
4380 * @hw: pointer to the hardware structure
4381 * @recp_list: recipe list for which the rule needs to removed
4382 * @f_entry: rule entry containing filter information
4384 static enum ice_status
4385 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4386 struct ice_fltr_list_entry *f_entry)
/* NOTE(review): elided lines (4387, 4392-4393, 4398, 4403, 4405-4407,
 * 4409, 4412, 4418, 4420-4421, 4426, 4429-4430, 4433-4436, 4439, 4442,
 * 4444-4446, 4449, 4453, 4456-4459, 4462-4463, 4465-...) include braces,
 * vsi_handle declaration, remove_rule assignments, goto targets, and the
 * final return -- verify against upstream ice_switch.c.
 */
4388 struct ice_fltr_mgmt_list_entry *list_elem;
4389 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4390 enum ice_status status = ICE_SUCCESS;
4391 bool remove_rule = false;
4394 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4395 return ICE_ERR_PARAM;
4396 f_entry->fltr_info.fwd_id.hw_vsi_id =
4397 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4399 rule_lock = &recp_list->filt_rule_lock;
4400 ice_acquire_lock(rule_lock);
4401 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
4402 &f_entry->fltr_info);
4404 status = ICE_ERR_DOES_NOT_EXIST;
/* Decide whether to remove the HW rule outright or just update the
 * VSI list it forwards to.
 */
4408 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
4410 } else if (!list_elem->vsi_list_info) {
4411 status = ICE_ERR_DOES_NOT_EXIST;
4413 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
4414 /* a ref_cnt > 1 indicates that the vsi_list is being
4415 * shared by multiple rules. Decrement the ref_cnt and
4416 * remove this rule, but do not modify the list, as it
4417 * is in-use by other rules.
4419 list_elem->vsi_list_info->ref_cnt--;
4422 /* a ref_cnt of 1 indicates the vsi_list is only used
4423 * by one rule. However, the original removal request is only
4424 * for a single VSI. Update the vsi_list first, and only
4425 * remove the rule if there are no further VSIs in this list.
4427 vsi_handle = f_entry->fltr_info.vsi_handle;
4428 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
4431 /* if VSI count goes to zero after updating the VSI list */
4432 if (list_elem->vsi_count == 0)
4437 /* Remove the lookup rule */
4438 struct ice_aqc_sw_rules_elem *s_rule;
4440 s_rule = (struct ice_aqc_sw_rules_elem *)
4441 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
4443 status = ICE_ERR_NO_MEMORY;
4447 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
4448 ice_aqc_opc_remove_sw_rules);
4450 status = ice_aq_sw_rules(hw, s_rule,
4451 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
4452 ice_aqc_opc_remove_sw_rules, NULL);
4454 /* Remove a book keeping from the list */
4455 ice_free(hw, s_rule);
/* Drop the tracking entry once the HW rule is gone */
4460 LIST_DEL(&list_elem->list_entry);
4461 ice_free(hw, list_elem);
4464 ice_release_lock(rule_lock);
4469 * ice_aq_get_res_alloc - get allocated resources
4470 * @hw: pointer to the HW struct
4471 * @num_entries: pointer to u16 to store the number of resource entries returned
4472 * @buf: pointer to buffer
4473 * @buf_size: size of buf
4474 * @cd: pointer to command details structure or NULL
4476 * The caller-supplied buffer must be large enough to store the resource
4477 * information for all resource types. Each resource type is an
4478 * ice_aqc_get_res_resp_elem structure.
4481 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
4482 struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
4483 struct ice_sq_cd *cd)
/* NOTE(review): elided lines (4480, 4484, 4488-4489, 4491, 4494, 4496,
 * 4499, 4502-...) include the return type line, braces, the !buf check
 * before 4490, and the final return of status -- verify upstream.
 */
4485 struct ice_aqc_get_res_alloc *resp;
4486 enum ice_status status;
4487 struct ice_aq_desc desc;
4490 return ICE_ERR_BAD_PTR;
4492 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
4493 return ICE_ERR_INVAL_SIZE;
4495 resp = &desc.params.get_res;
4497 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
4498 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* Only report the entry count if the caller asked for it */
4500 if (!status && num_entries)
4501 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
4507 * ice_aq_get_res_descs - get allocated resource descriptors
4508 * @hw: pointer to the hardware structure
4509 * @num_entries: number of resource entries in buffer
4510 * @buf: structure to hold response data buffer
4511 * @buf_size: size of buffer
4512 * @res_type: resource type
4513 * @res_shared: is resource shared
4514 * @desc_id: input - first desc ID to start; output - next desc ID
4515 * @cd: pointer to command details structure or NULL
4518 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
4519 struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
4520 bool res_shared, u16 *desc_id, struct ice_sq_cd *cd)
/* NOTE(review): elided lines (4516-4517, 4521, 4525, 4527, 4529-4530,
 * 4532, 4535, 4537, 4542, 4544, 4546-...) include the return type line,
 * braces, the !buf check before 4531, a status check before 4545, and
 * the final return -- verify upstream.
 */
4522 struct ice_aqc_get_allocd_res_desc *cmd;
4523 struct ice_aq_desc desc;
4524 enum ice_status status;
4526 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4528 cmd = &desc.params.get_res_desc;
4531 return ICE_ERR_PARAM;
/* Buffer must be sized for exactly num_entries descriptors */
4533 if (buf_size != (num_entries * sizeof(*buf)))
4534 return ICE_ERR_PARAM;
4536 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
4538 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
4539 ICE_AQC_RES_TYPE_M) | (res_shared ?
4540 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
4541 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
4543 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* Hand back the continuation cursor for the next query */
4545 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
4551 * ice_add_mac_rule - Add a MAC address based filter rule
4552 * @hw: pointer to the hardware structure
4553 * @m_list: list of MAC addresses and forwarding information
4554 * @sw: pointer to switch info struct for which function add rule
4555 * @lport: logic port number on which function add rule
4557 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
4558 * multiple unicast addresses, the function assumes that all the
4559 * addresses are unique in a given add_mac call. It doesn't
4560 * check for duplicates in this case, removing duplicates from a given
4561 * list should be taken care of in the caller of this function.
4563 static enum ice_status
4564 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4565 struct ice_switch_info *sw, u8 lport)
4567 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
4568 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
4569 struct ice_fltr_list_entry *m_list_itr;
4570 struct LIST_HEAD_TYPE *rule_head;
4571 u16 total_elem_left, s_rule_size;
4572 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4573 enum ice_status status = ICE_SUCCESS;
4574 u16 num_unicast = 0;
4578 rule_lock = &recp_list->filt_rule_lock;
4579 rule_head = &recp_list->filt_rules;
4581 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4583 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
4587 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
4588 vsi_handle = m_list_itr->fltr_info.vsi_handle;
4589 if (!ice_is_vsi_valid(hw, vsi_handle))
4590 return ICE_ERR_PARAM;
4591 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4592 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
4593 /* update the src in case it is VSI num */
4594 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
4595 return ICE_ERR_PARAM;
4596 m_list_itr->fltr_info.src = hw_vsi_id;
4597 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
4598 IS_ZERO_ETHER_ADDR(add))
4599 return ICE_ERR_PARAM;
4600 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4601 /* Don't overwrite the unicast address */
4602 ice_acquire_lock(rule_lock);
4603 if (ice_find_rule_entry(rule_head,
4604 &m_list_itr->fltr_info)) {
4605 ice_release_lock(rule_lock);
4606 return ICE_ERR_ALREADY_EXISTS;
4608 ice_release_lock(rule_lock);
4610 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
4611 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
4612 m_list_itr->status =
4613 ice_add_rule_internal(hw, recp_list, lport,
4615 if (m_list_itr->status)
4616 return m_list_itr->status;
4620 ice_acquire_lock(rule_lock);
4621 /* Exit if no suitable entries were found for adding bulk switch rule */
4623 status = ICE_SUCCESS;
4624 goto ice_add_mac_exit;
4627 /* Allocate switch rule buffer for the bulk update for unicast */
4628 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
4629 s_rule = (struct ice_aqc_sw_rules_elem *)
4630 ice_calloc(hw, num_unicast, s_rule_size);
4632 status = ICE_ERR_NO_MEMORY;
4633 goto ice_add_mac_exit;
4637 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4639 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4640 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4642 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4643 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
4644 ice_aqc_opc_add_sw_rules);
4645 r_iter = (struct ice_aqc_sw_rules_elem *)
4646 ((u8 *)r_iter + s_rule_size);
4650 /* Call AQ bulk switch rule update for all unicast addresses */
4652 /* Call AQ switch rule in AQ_MAX chunk */
4653 for (total_elem_left = num_unicast; total_elem_left > 0;
4654 total_elem_left -= elem_sent) {
4655 struct ice_aqc_sw_rules_elem *entry = r_iter;
4657 elem_sent = MIN_T(u8, total_elem_left,
4658 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
4659 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
4660 elem_sent, ice_aqc_opc_add_sw_rules,
4663 goto ice_add_mac_exit;
4664 r_iter = (struct ice_aqc_sw_rules_elem *)
4665 ((u8 *)r_iter + (elem_sent * s_rule_size));
4668 /* Fill up rule ID based on the value returned from FW */
4670 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4672 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4673 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4674 struct ice_fltr_mgmt_list_entry *fm_entry;
4676 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4677 f_info->fltr_rule_id =
4678 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
4679 f_info->fltr_act = ICE_FWD_TO_VSI;
4680 /* Create an entry to track this MAC address */
4681 fm_entry = (struct ice_fltr_mgmt_list_entry *)
4682 ice_malloc(hw, sizeof(*fm_entry));
4684 status = ICE_ERR_NO_MEMORY;
4685 goto ice_add_mac_exit;
4687 fm_entry->fltr_info = *f_info;
4688 fm_entry->vsi_count = 1;
4689 /* The book keeping entries will get removed when
4690 * base driver calls remove filter AQ command
4693 LIST_ADD(&fm_entry->list_entry, rule_head);
4694 r_iter = (struct ice_aqc_sw_rules_elem *)
4695 ((u8 *)r_iter + s_rule_size);
4700 ice_release_lock(rule_lock);
4702 ice_free(hw, s_rule);
4707 * ice_add_mac - Add a MAC address based filter rule
4708 * @hw: pointer to the hardware structure
4709 * @m_list: list of MAC addresses and forwarding information
4711 * Function add MAC rule for logical port from HW struct
4713 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4716 return ICE_ERR_PARAM;
4718 return ice_add_mac_rule(hw, m_list, hw->switch_info,
4719 hw->port_info->lport);
/**
 * ice_add_vlan_internal - Add one VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @recp_list: recipe list for which rule has to be added
 * @f_entry: filter entry containing one VLAN information
 *
 * NOTE(review): several body lines of this function appear to have been
 * lost in extraction (unbalanced braces, truncated call argument lists
 * below) — restore them from the upstream ice shared code before building.
 */
static enum ice_status
ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
		      struct ice_fltr_list_entry *f_entry)
	struct ice_fltr_mgmt_list_entry *v_list_itr;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id = 0, vsi_handle;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;

	/* caller's VSI handle must map to a real HW VSI */
	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;

	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
	new_fltr = &f_entry->fltr_info;

	/* VLAN ID should only be 12 bits */
	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
		return ICE_ERR_PARAM;

	if (new_fltr->src_id != ICE_SRC_ID_VSI)
		return ICE_ERR_PARAM;

	/* Tx-direction rule: source is the HW VSI itself */
	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
	lkup_type = new_fltr->lkup_type;
	vsi_handle = new_fltr->vsi_handle;
	rule_lock = &recp_list->filt_rule_lock;
	ice_acquire_lock(rule_lock);
	v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
		struct ice_vsi_list_map_info *map_info = NULL;

		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
			/* All VLAN pruning rules use a VSI list. Check if
			 * there is already a VSI list containing VSI that we
			 * want to add. If found, use the same vsi_list_id for
			 * this new VLAN rule or else create a new list.
			 */
			map_info = ice_find_vsi_list_entry(recp_list,
				status = ice_create_vsi_list_rule(hw,
			/* Convert the action to forwarding to a VSI list. */
			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
		status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
			v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
				status = ICE_ERR_DOES_NOT_EXIST;
			/* reuse VSI list for new rule and increment ref_cnt */
				v_list_itr->vsi_list_info = map_info;
				map_info->ref_cnt++;
				v_list_itr->vsi_list_info =
					ice_create_vsi_list_map(hw, &vsi_handle,
	} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
		/* Update existing VSI list to add new VSI ID only if it is
		 * used by exactly one rule.
		 */
		cur_fltr = &v_list_itr->fltr_info;
		status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
		/* If VLAN rule exists and VSI list being used by this rule is
		 * referenced by more than 1 VLAN rule. Then create a new VSI
		 * list appending previous VSI with new VSI and update existing
		 * VLAN rule to point to new VSI list ID
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* Current implementation only supports reusing VSI list with
		 * one VSI count. We should never hit below condition
		 */
		if (v_list_itr->vsi_count > 1 &&
		    v_list_itr->vsi_list_info->ref_cnt > 1) {
			ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
			status = ICE_ERR_CFG;
			ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
		/* A rule already exists with the new VSI being added */
		if (cur_handle == vsi_handle) {
			status = ICE_ERR_ALREADY_EXISTS;
		vsi_handle_arr[0] = cur_handle;
		vsi_handle_arr[1] = vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id, lkup_type);
		tmp_fltr = v_list_itr->fltr_info;
		tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		/* Update the previous switch rule to a new VSI list which
		 * includes current VSI that is requested
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		/* before overriding VSI list map info. decrement ref_cnt of
		 * the previous list
		 */
		v_list_itr->vsi_list_info->ref_cnt--;

		/* now update to newly created list */
		v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
		v_list_itr->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
		v_list_itr->vsi_count++;
	ice_release_lock(rule_lock);
/**
 * ice_add_vlan_rule - Add VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @v_list: list of VLAN entries and forwarding information
 * @sw: pointer to switch info struct for which function add rule
 *
 * Walks @v_list, validates each entry is a VLAN lookup, forces the Tx
 * flag and adds each rule via ice_add_vlan_internal(); stops at the
 * first failure. NOTE(review): loop-open brace and some continuation
 * lines appear lost in extraction.
 */
static enum ice_status
ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
		  struct ice_switch_info *sw)
	struct ice_fltr_list_entry *v_list_itr;
	struct ice_sw_recipe *recp_list;

	recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
	LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
		/* only VLAN lookups are valid on this path */
		if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
			return ICE_ERR_PARAM;
		v_list_itr->fltr_info.flag = ICE_FLTR_TX;
		v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
		if (v_list_itr->status)
			return v_list_itr->status;
4904 * ice_add_vlan - Add a VLAN based filter rule
4905 * @hw: pointer to the hardware structure
4906 * @v_list: list of VLAN and forwarding information
4908 * Function add VLAN rule for logical port from HW struct
4910 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4913 return ICE_ERR_PARAM;
4915 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
/**
 * ice_add_mac_vlan_rule - Add MAC and VLAN pair based filter rule
 * @hw: pointer to the hardware structure
 * @mv_list: list of MAC and VLAN filters
 * @sw: pointer to switch info struct for which function add rule
 * @lport: logic port number on which function add rule
 *
 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
 * pruning bits enabled, then it is the responsibility of the caller to make
 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
 * VLAN won't be received on that VSI otherwise.
 *
 * NOTE(review): loop-open brace and call continuation lines appear lost
 * in extraction.
 */
static enum ice_status
ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
		      struct ice_switch_info *sw, u8 lport)
	struct ice_fltr_list_entry *mv_list_itr;
	struct ice_sw_recipe *recp_list;

	if (!mv_list || !hw)
		return ICE_ERR_PARAM;

	recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
	LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
		enum ice_sw_lkup_type l_type =
			mv_list_itr->fltr_info.lkup_type;

		/* only MAC+VLAN lookups are valid on this path */
		if (l_type != ICE_SW_LKUP_MAC_VLAN)
			return ICE_ERR_PARAM;
		mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
		mv_list_itr->status =
			ice_add_rule_internal(hw, recp_list, lport,
		if (mv_list_itr->status)
			return mv_list_itr->status;
4959 * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
4960 * @hw: pointer to the hardware structure
4961 * @mv_list: list of MAC VLAN addresses and forwarding information
4963 * Function add MAC VLAN rule for logical port from HW struct
4966 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4968 if (!mv_list || !hw)
4969 return ICE_ERR_PARAM;
4971 return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
4972 hw->port_info->lport);
4976 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
4977 * @hw: pointer to the hardware structure
4978 * @em_list: list of ether type MAC filter, MAC is optional
4979 * @sw: pointer to switch info struct for which function add rule
4980 * @lport: logic port number on which function add rule
4982 * This function requires the caller to populate the entries in
4983 * the filter list with the necessary fields (including flags to
4984 * indicate Tx or Rx rules).
4986 static enum ice_status
4987 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
4988 struct ice_switch_info *sw, u8 lport)
4990 struct ice_fltr_list_entry *em_list_itr;
4992 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
4994 struct ice_sw_recipe *recp_list;
4995 enum ice_sw_lkup_type l_type;
4997 l_type = em_list_itr->fltr_info.lkup_type;
4998 recp_list = &sw->recp_list[l_type];
5000 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
5001 l_type != ICE_SW_LKUP_ETHERTYPE)
5002 return ICE_ERR_PARAM;
5004 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
5007 if (em_list_itr->status)
5008 return em_list_itr->status;
5014 * ice_add_eth_mac - Add a ethertype based filter rule
5015 * @hw: pointer to the hardware structure
5016 * @em_list: list of ethertype and forwarding information
5018 * Function add ethertype rule for logical port from HW struct
5021 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
5023 if (!em_list || !hw)
5024 return ICE_ERR_PARAM;
5026 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
5027 hw->port_info->lport);
/**
 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
 * @hw: pointer to the hardware structure
 * @em_list: list of ethertype or ethertype MAC entries
 * @sw: pointer to switch info struct for which function add rule
 *
 * NOTE(review): loop-open brace and call continuation lines appear lost
 * in extraction.
 */
static enum ice_status
ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
			struct ice_switch_info *sw)
	struct ice_fltr_list_entry *em_list_itr, *tmp;

	LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
		struct ice_sw_recipe *recp_list;
		enum ice_sw_lkup_type l_type;

		l_type = em_list_itr->fltr_info.lkup_type;

		/* lookup type is validated before indexing sw->recp_list */
		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
		    l_type != ICE_SW_LKUP_ETHERTYPE)
			return ICE_ERR_PARAM;

		recp_list = &sw->recp_list[l_type];
		em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
		if (em_list_itr->status)
			return em_list_itr->status;
5063 * ice_remove_eth_mac - remove a ethertype based filter rule
5064 * @hw: pointer to the hardware structure
5065 * @em_list: list of ethertype and forwarding information
5069 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
5071 if (!em_list || !hw)
5072 return ICE_ERR_PARAM;
5074 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
/**
 * ice_rem_sw_rule_info - free all bookkeeping entries on a filter rule list
 * @hw: pointer to the hardware structure
 * @rule_head: pointer to the switch list structure that we want to delete
 *
 * Unlinks and frees every ice_fltr_mgmt_list_entry on @rule_head. This
 * only releases driver bookkeeping; it does not touch HW rules.
 * NOTE(review): the return-type line and closing braces appear lost in
 * extraction.
 */
ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
	if (!LIST_EMPTY(rule_head)) {
		struct ice_fltr_mgmt_list_entry *entry;
		struct ice_fltr_mgmt_list_entry *tmp;

		/* _SAFE variant: entries are freed while iterating */
		LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
					 ice_fltr_mgmt_list_entry, list_entry) {
			LIST_DEL(&entry->list_entry);
			ice_free(hw, entry);
/**
 * ice_rem_adv_rule_info - free all advanced-rule bookkeeping entries
 * @hw: pointer to the hardware structure
 * @rule_head: pointer to the switch list structure that we want to delete
 *
 * Unlinks each ice_adv_fltr_mgmt_list_entry, freeing its lookup array
 * before the entry itself. NOTE(review): return-type line and closing
 * braces appear lost in extraction.
 */
ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
	struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
	struct ice_adv_fltr_mgmt_list_entry *lst_itr;

	if (LIST_EMPTY(rule_head))

	/* _SAFE variant: entries are freed while iterating */
	LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
				 ice_adv_fltr_mgmt_list_entry, list_entry) {
		LIST_DEL(&lst_itr->list_entry);
		/* free the owned lookups array first, then the entry */
		ice_free(hw, lst_itr->lkups);
		ice_free(hw, lst_itr);
/**
 * ice_rem_all_sw_rules_info - free bookkeeping for every recipe's rules
 * @hw: pointer to the hardware structure
 *
 * Iterates all recipes, freeing basic or advanced rule bookkeeping as
 * appropriate, and clears the adv_rule flag once a recipe's list is
 * empty. NOTE(review): the `else` joining the two ice_rem_* calls and
 * the closing braces appear lost in extraction.
 */
void ice_rem_all_sw_rules_info(struct ice_hw *hw)
	struct ice_switch_info *sw = hw->switch_info;

	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct LIST_HEAD_TYPE *rule_head;

		rule_head = &sw->recp_list[i].filt_rules;
		if (!sw->recp_list[i].adv_rule)
			ice_rem_sw_rule_info(hw, rule_head);
			ice_rem_adv_rule_info(hw, rule_head);
		if (sw->recp_list[i].adv_rule &&
		    LIST_EMPTY(&sw->recp_list[i].filt_rules))
			sw->recp_list[i].adv_rule = false;
/**
 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
 * @pi: pointer to the port_info structure
 * @vsi_handle: VSI handle to set as default
 * @set: true to add the above mentioned switch rule, false to remove it
 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
 *
 * add filter rule to set/unset given VSI as default VSI for the switch
 * (represented by swid)
 *
 * NOTE(review): conditional frames (if/else braces) and the function's
 * return appear partially lost in extraction.
 */
ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
	struct ice_aqc_sw_rules_elem *s_rule;
	struct ice_fltr_info f_info;
	struct ice_hw *hw = pi->hw;
	enum ice_adminq_opc opcode;
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	/* adding needs the dummy eth header; removing needs only the rule */
	s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
		ICE_SW_RULE_RX_TX_NO_HDR_SIZE;

	s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
		return ICE_ERR_NO_MEMORY;

	ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);

	f_info.lkup_type = ICE_SW_LKUP_DFLT;
	f_info.flag = direction;
	f_info.fltr_act = ICE_FWD_TO_VSI;
	f_info.fwd_id.hw_vsi_id = hw_vsi_id;

	if (f_info.flag & ICE_FLTR_RX) {
		/* Rx default rule: source is the logical port */
		f_info.src = pi->lport;
		f_info.src_id = ICE_SRC_ID_LPORT;
			f_info.fltr_rule_id =
				pi->dflt_rx_vsi_rule_id;
	} else if (f_info.flag & ICE_FLTR_TX) {
		/* Tx default rule: source is the HW VSI itself */
		f_info.src_id = ICE_SRC_ID_VSI;
		f_info.src = hw_vsi_id;
			f_info.fltr_rule_id =
				pi->dflt_tx_vsi_rule_id;

		opcode = ice_aqc_opc_add_sw_rules;
		opcode = ice_aqc_opc_remove_sw_rules;

	ice_fill_sw_rule(hw, &f_info, s_rule, opcode);

	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
	if (status || !(f_info.flag & ICE_FLTR_TX_RX))
		/* rule index returned by FW identifies the installed rule */
		u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);

		if (f_info.flag & ICE_FLTR_TX) {
			pi->dflt_tx_vsi_num = hw_vsi_id;
			pi->dflt_tx_vsi_rule_id = index;
		} else if (f_info.flag & ICE_FLTR_RX) {
			pi->dflt_rx_vsi_num = hw_vsi_id;
			pi->dflt_rx_vsi_rule_id = index;
	if (f_info.flag & ICE_FLTR_TX) {
		/* clear cached default-VSI state on removal */
		pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
		pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
	} else if (f_info.flag & ICE_FLTR_RX) {
		pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
		pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;

	ice_free(hw, s_rule);
/**
 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
 * @list_head: head of rule list
 * @f_info: rule information
 *
 * Helper function to search for a unicast rule entry - this is to be used
 * to remove unicast MAC filter that is not shared with other VSIs on the
 * switch.
 *
 * Returns pointer to entry storing the rule if found
 * NOTE(review): the returning statements and closing braces appear lost
 * in extraction.
 */
static struct ice_fltr_mgmt_list_entry *
ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
			  struct ice_fltr_info *f_info)
	struct ice_fltr_mgmt_list_entry *list_itr;

	LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
		/* match on lookup data, destination HW VSI and direction */
		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
			    sizeof(f_info->l_data)) &&
		    f_info->fwd_id.hw_vsi_id ==
		    list_itr->fltr_info.fwd_id.hw_vsi_id &&
		    f_info->flag == list_itr->fltr_info.flag)
/**
 * ice_remove_mac_rule - remove a MAC based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 * @recp_list: list from which function remove MAC address
 *
 * This function removes either a MAC filter rule or a specific VSI from a
 * VSI list for a multicast MAC address.
 *
 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
 * ice_add_mac. Caller should be aware that this call will only work if all
 * the entries passed into m_list were added previously. It will not attempt to
 * do a partial remove of entries that were found.
 *
 * NOTE(review): loop-open brace and some continuation lines appear lost
 * in extraction.
 */
static enum ice_status
ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
		    struct ice_sw_recipe *recp_list)
	struct ice_fltr_list_entry *list_itr, *tmp;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */

		return ICE_ERR_PARAM;

	rule_lock = &recp_list->filt_rule_lock;
	LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
		enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
		u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];

		if (l_type != ICE_SW_LKUP_MAC)
			return ICE_ERR_PARAM;

		vsi_handle = list_itr->fltr_info.vsi_handle;
		if (!ice_is_vsi_valid(hw, vsi_handle))
			return ICE_ERR_PARAM;

		list_itr->fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, vsi_handle);
		if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
			/* Don't remove the unicast address that belongs to
			 * another VSI on the switch, since it is not being
			 * shared
			 */
			ice_acquire_lock(rule_lock);
			if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
						       &list_itr->fltr_info)) {
				ice_release_lock(rule_lock);
				return ICE_ERR_DOES_NOT_EXIST;
			ice_release_lock(rule_lock);
		list_itr->status = ice_remove_rule_internal(hw, recp_list,
		if (list_itr->status)
			return list_itr->status;
5322 * ice_remove_mac - remove a MAC address based filter rule
5323 * @hw: pointer to the hardware structure
5324 * @m_list: list of MAC addresses and forwarding information
5327 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
5329 struct ice_sw_recipe *recp_list;
5331 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5332 return ice_remove_mac_rule(hw, m_list, recp_list);
/**
 * ice_remove_vlan_rule - Remove VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @v_list: list of VLAN entries and forwarding information
 * @recp_list: list from which function remove VLAN
 *
 * NOTE(review): loop-open brace and some continuation lines appear lost
 * in extraction.
 */
static enum ice_status
ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
		     struct ice_sw_recipe *recp_list)
	struct ice_fltr_list_entry *v_list_itr, *tmp;

	LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
		enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;

		/* only VLAN lookups are valid on this path */
		if (l_type != ICE_SW_LKUP_VLAN)
			return ICE_ERR_PARAM;
		v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
		if (v_list_itr->status)
			return v_list_itr->status;
5362 * ice_remove_vlan - remove a VLAN address based filter rule
5363 * @hw: pointer to the hardware structure
5364 * @v_list: list of VLAN and forwarding information
5368 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
5370 struct ice_sw_recipe *recp_list;
5373 return ICE_ERR_PARAM;
5375 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
5376 return ice_remove_vlan_rule(hw, v_list, recp_list);
5380 * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
5381 * @hw: pointer to the hardware structure
5382 * @v_list: list of MAC VLAN entries and forwarding information
5383 * @recp_list: list from which function remove MAC VLAN
5385 static enum ice_status
5386 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5387 struct ice_sw_recipe *recp_list)
5389 struct ice_fltr_list_entry *v_list_itr, *tmp;
5391 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5392 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5394 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
5396 if (l_type != ICE_SW_LKUP_MAC_VLAN)
5397 return ICE_ERR_PARAM;
5398 v_list_itr->status =
5399 ice_remove_rule_internal(hw, recp_list,
5401 if (v_list_itr->status)
5402 return v_list_itr->status;
5408 * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
5409 * @hw: pointer to the hardware structure
5410 * @mv_list: list of MAC VLAN and forwarding information
5413 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
5415 struct ice_sw_recipe *recp_list;
5417 if (!mv_list || !hw)
5418 return ICE_ERR_PARAM;
5420 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5421 return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
5425 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
5426 * @fm_entry: filter entry to inspect
5427 * @vsi_handle: VSI handle to compare with filter info
5430 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
5432 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
5433 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
5434 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
5435 fm_entry->vsi_list_info &&
5436 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
/**
 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @vsi_list_head: pointer to the list to add entry to
 * @fi: pointer to fltr_info of filter entry to copy & add
 *
 * Helper function, used when creating a list of filters to remove from
 * a specific VSI. The entry added to vsi_list_head is a COPY of the
 * original filter entry, with the exception of fltr_info.fltr_act and
 * fltr_info.fwd_id fields. These are set such that later logic can
 * extract which VSI to remove the fltr from, and pass on that information.
 *
 * NOTE(review): the allocation-failure guard and return statements appear
 * lost in extraction.
 */
static enum ice_status
ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
			       struct LIST_HEAD_TYPE *vsi_list_head,
			       struct ice_fltr_info *fi)
	struct ice_fltr_list_entry *tmp;

	/* this memory is freed up in the caller function
	 * once filters for this VSI are removed
	 */
	tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
		return ICE_ERR_NO_MEMORY;

	tmp->fltr_info = *fi;

	/* Overwrite these fields to indicate which VSI to remove filter from,
	 * so find and remove logic can extract the information from the
	 * list entries. Note that original entries will still have proper
	 * values.
	 */
	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	tmp->fltr_info.vsi_handle = vsi_handle;
	tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	LIST_ADD(&tmp->list_entry, vsi_list_head);
/**
 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @lkup_list_head: pointer to the list that has certain lookup type filters
 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
 *
 * Locates all filters in lkup_list_head that are used by the given VSI,
 * and adds COPIES of those entries to vsi_list_head (intended to be used
 * to remove the listed filters).
 * Note that this means all entries in vsi_list_head must be explicitly
 * deallocated by the caller when done with list.
 *
 * NOTE(review): the continue statement, one call argument line and the
 * closing return appear lost in extraction.
 */
static enum ice_status
ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
			 struct LIST_HEAD_TYPE *lkup_list_head,
			 struct LIST_HEAD_TYPE *vsi_list_head)
	struct ice_fltr_mgmt_list_entry *fm_entry;
	enum ice_status status = ICE_SUCCESS;

	/* check to make sure VSI ID is valid and within boundary */
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
			    ice_fltr_mgmt_list_entry, list_entry) {
		/* skip filters not used by this VSI */
		if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
							&fm_entry->fltr_info);
/**
 * ice_determine_promisc_mask - map a filter to its ICE_PROMISC_ bits
 * @fi: filter info to parse
 *
 * Helper function to determine which ICE_PROMISC_ mask corresponds
 * to given filter into.
 *
 * NOTE(review): the `is_tx_fltr = true;` body of the first if and the
 * VLAN guard before the final |= appear lost in extraction — presumably
 * the trailing branch applied only when vid != 0; confirm against the
 * upstream ice shared code.
 */
static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
	u16 vid = fi->l_data.mac_vlan.vlan_id;
	u8 *macaddr = fi->l_data.mac.mac_addr;
	bool is_tx_fltr = false;
	u8 promisc_mask = 0;

	/* direction selects the _TX vs _RX variant of each bit */
	if (fi->flag == ICE_FLTR_TX)

	if (IS_BROADCAST_ETHER_ADDR(macaddr))
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
	else if (IS_MULTICAST_ETHER_ADDR(macaddr))
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
	else if (IS_UNICAST_ETHER_ADDR(macaddr))
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;

	return promisc_mask;
/**
 * _ice_get_vsi_promisc - get promiscuous mode of given VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to retrieve info from
 * @promisc_mask: pointer to mask to be filled in
 * @vid: VLAN ID of promisc VLAN VSI
 * @sw: pointer to switch info struct for which function add rule
 *
 * ORs the promisc bits of every PROMISC rule used by @vsi_handle into
 * *promisc_mask. NOTE(review): mask/vid initialization lines and the
 * continue/return appear lost in extraction.
 */
static enum ice_status
_ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
		     u16 *vid, struct ice_switch_info *sw)
	struct ice_fltr_mgmt_list_entry *itr;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;

	ice_acquire_lock(rule_lock);
	LIST_FOR_EACH_ENTRY(itr, rule_head,
			    ice_fltr_mgmt_list_entry, list_entry) {
		/* Continue if this filter doesn't apply to this VSI or the
		 * VSI ID is not in the VSI map for this filter
		 */
		if (!ice_vsi_uses_fltr(itr, vsi_handle))

		*promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
	ice_release_lock(rule_lock);
5596 * ice_get_vsi_promisc - get promiscuous mode of given VSI
5597 * @hw: pointer to the hardware structure
5598 * @vsi_handle: VSI handle to retrieve info from
5599 * @promisc_mask: pointer to mask to be filled in
5600 * @vid: VLAN ID of promisc VLAN VSI
5603 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5606 return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
5607 vid, hw->switch_info);
/**
 * _ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to retrieve info from
 * @promisc_mask: pointer to mask to be filled in
 * @vid: VLAN ID of promisc VLAN VSI
 * @sw: pointer to switch info struct for which function add rule
 *
 * Like _ice_get_vsi_promisc() but scans the PROMISC_VLAN recipe.
 * NOTE(review): mask/vid initialization lines and the continue/return
 * appear lost in extraction.
 */
static enum ice_status
_ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
			  u16 *vid, struct ice_switch_info *sw)
	struct ice_fltr_mgmt_list_entry *itr;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;

	ice_acquire_lock(rule_lock);
	LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
		/* Continue if this filter doesn't apply to this VSI or the
		 * VSI ID is not in the VSI map for this filter
		 */
		if (!ice_vsi_uses_fltr(itr, vsi_handle))

		*promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
	ice_release_lock(rule_lock);
5651 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5652 * @hw: pointer to the hardware structure
5653 * @vsi_handle: VSI handle to retrieve info from
5654 * @promisc_mask: pointer to mask to be filled in
5655 * @vid: VLAN ID of promisc VLAN VSI
5658 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5661 return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
5662 vid, hw->switch_info);
/**
 * ice_remove_promisc - Remove promisc based filter rules
 * @hw: pointer to the hardware structure
 * @recp_id: recipe ID for which the rule needs to removed
 * @v_list: list of promisc entries
 *
 * NOTE(review): loop-open brace and closing return appear lost in
 * extraction.
 */
static enum ice_status
ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
		   struct LIST_HEAD_TYPE *v_list)
	struct ice_fltr_list_entry *v_list_itr, *tmp;
	struct ice_sw_recipe *recp_list;

	recp_list = &hw->switch_info->recp_list[recp_id];
	LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
		v_list_itr->status =
			ice_remove_rule_internal(hw, recp_list, v_list_itr);
		if (v_list_itr->status)
			return v_list_itr->status;
/**
 * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to clear mode
 * @promisc_mask: mask of promiscuous config bits to clear
 * @vid: VLAN ID to clear VLAN promiscuous
 * @sw: pointer to switch info struct for which function add rule
 *
 * Builds a temporary remove-list of all promisc rules on this VSI that
 * are fully covered by @promisc_mask, removes them, then frees the
 * temporary copies. NOTE(review): the `free_fltr_list:` label, several
 * continue statements and closing braces appear lost in extraction.
 */
static enum ice_status
_ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
		       u16 vid, struct ice_switch_info *sw)
	struct ice_fltr_list_entry *fm_entry, *tmp;
	struct LIST_HEAD_TYPE remove_list_head;
	struct ice_fltr_mgmt_list_entry *itr;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* VLAN promisc bits live under the PROMISC_VLAN recipe */
	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
		recipe_id = ICE_SW_LKUP_PROMISC;

	rule_head = &sw->recp_list[recipe_id].filt_rules;
	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;

	INIT_LIST_HEAD(&remove_list_head);

	ice_acquire_lock(rule_lock);
	LIST_FOR_EACH_ENTRY(itr, rule_head,
			    ice_fltr_mgmt_list_entry, list_entry) {
		struct ice_fltr_info *fltr_info;
		u8 fltr_promisc_mask = 0;

		if (!ice_vsi_uses_fltr(itr, vsi_handle))
		fltr_info = &itr->fltr_info;

		/* for VLAN promisc, only clear rules matching this VLAN ID */
		if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
		    vid != fltr_info->l_data.mac_vlan.vlan_id)

		fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);

		/* Skip if filter is not completely specified by given mask */
		if (fltr_promisc_mask & ~promisc_mask)

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
			ice_release_lock(rule_lock);
			goto free_fltr_list;
	ice_release_lock(rule_lock);

	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);

	LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
				 ice_fltr_list_entry, list_entry) {
		LIST_DEL(&fm_entry->list_entry);
		ice_free(hw, fm_entry);
5765 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
5766 * @hw: pointer to the hardware structure
5767 * @vsi_handle: VSI handle to clear mode
5768 * @promisc_mask: mask of promiscuous config bits to clear
5769 * @vid: VLAN ID to clear VLAN promiscuous
 *
 * Public wrapper: delegates to _ice_clear_vsi_promisc() using the
 * primary switch info (hw->switch_info).
5772 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
5773 u8 promisc_mask, u16 vid)
5775 return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
5776 vid, hw->switch_info);
5780 * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5781 * @hw: pointer to the hardware structure
5782 * @vsi_handle: VSI handle to configure
5783 * @promisc_mask: mask of promiscuous config bits
5784 * @vid: VLAN ID to set VLAN promiscuous
5785 * @lport: logical port number to configure promisc mode
5786 * @sw: pointer to switch info struct for which function add rule
 *
 * Adds one switch rule per direction/packet-type bit set in @promisc_mask
 * (unicast/multicast/broadcast x RX/TX, plus VLAN promisc variants).
5788 static enum ice_status
5789 _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5790 u16 vid, u8 lport, struct ice_switch_info *sw)
 /* local packet-type tags used to pick the destination MAC pattern */
5792 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
5793 struct ice_fltr_list_entry f_list_entry;
5794 struct ice_fltr_info new_fltr;
5795 enum ice_status status = ICE_SUCCESS;
5801 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
5803 if (!ice_is_vsi_valid(hw, vsi_handle))
5804 return ICE_ERR_PARAM;
5805 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5807 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
 /* VLAN promisc bits select the PROMISC_VLAN recipe and carry @vid */
5809 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
5810 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
5811 new_fltr.l_data.mac_vlan.vlan_id = vid;
5812 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
5814 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
5815 recipe_id = ICE_SW_LKUP_PROMISC;
5818 /* Separate filters must be set for each direction/packet type
5819 * combination, so we will loop over the mask value, store the
5820 * individual type, and clear it out in the input mask as it
5823 while (promisc_mask) {
5824 struct ice_sw_recipe *recp_list;
 /* consume exactly one direction/type bit per loop iteration */
5830 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
5831 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
5832 pkt_type = UCAST_FLTR;
5833 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
5834 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
5835 pkt_type = UCAST_FLTR;
5837 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
5838 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
5839 pkt_type = MCAST_FLTR;
5840 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
5841 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
5842 pkt_type = MCAST_FLTR;
5844 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
5845 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
5846 pkt_type = BCAST_FLTR;
5847 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
5848 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
5849 pkt_type = BCAST_FLTR;
5853 /* Check for VLAN promiscuous flag */
5854 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
5855 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
5856 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
5857 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
5861 /* Set filter DA based on packet type */
5862 mac_addr = new_fltr.l_data.mac.mac_addr;
5863 if (pkt_type == BCAST_FLTR) {
 /* broadcast: all-ones DA */
5864 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
5865 } else if (pkt_type == MCAST_FLTR ||
5866 pkt_type == UCAST_FLTR) {
5867 /* Use the dummy ether header DA */
5868 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
5869 ICE_NONDMA_TO_NONDMA);
5870 if (pkt_type == MCAST_FLTR)
5871 mac_addr[0] |= 0x1; /* Set multicast bit */
5874 /* Need to reset this to zero for all iterations */
 /* TX filters source from the HW VSI; RX filters source from the port */
5877 new_fltr.flag |= ICE_FLTR_TX;
5878 new_fltr.src = hw_vsi_id;
5880 new_fltr.flag |= ICE_FLTR_RX;
5881 new_fltr.src = lport;
5884 new_fltr.fltr_act = ICE_FWD_TO_VSI;
5885 new_fltr.vsi_handle = vsi_handle;
5886 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
5887 f_list_entry.fltr_info = new_fltr;
5888 recp_list = &sw->recp_list[recipe_id];
5890 status = ice_add_rule_internal(hw, recp_list, lport,
5892 if (status != ICE_SUCCESS)
5893 goto set_promisc_exit;
5901 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5902 * @hw: pointer to the hardware structure
5903 * @vsi_handle: VSI handle to configure
5904 * @promisc_mask: mask of promiscuous config bits
5905 * @vid: VLAN ID to set VLAN promiscuous
 *
 * Public wrapper: delegates to _ice_set_vsi_promisc() using the primary
 * switch info and the port's logical port number.
5908 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5911 return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
5912 hw->port_info->lport,
5917 * _ice_set_vlan_vsi_promisc
5918 * @hw: pointer to the hardware structure
5919 * @vsi_handle: VSI handle to configure
5920 * @promisc_mask: mask of promiscuous config bits
5921 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5922 * @lport: logical port number to configure promisc mode
5923 * @sw: pointer to switch info struct for which function add rule
5925 * Configure VSI with all associated VLANs to given promiscuous mode(s)
 *
 * Snapshots the VSI's VLAN filters (under the VLAN rule lock), then for
 * each VLAN either clears or sets promisc mode per @rm_vlan_promisc.
5927 static enum ice_status
5928 _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5929 bool rm_vlan_promisc, u8 lport,
5930 struct ice_switch_info *sw)
5932 struct ice_fltr_list_entry *list_itr, *tmp;
5933 struct LIST_HEAD_TYPE vsi_list_head;
5934 struct LIST_HEAD_TYPE *vlan_head;
5935 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
5936 enum ice_status status;
5939 INIT_LIST_HEAD(&vsi_list_head);
5940 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
5941 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
 /* copy this VSI's VLAN filters into a private list under the lock */
5942 ice_acquire_lock(vlan_lock);
5943 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
5945 ice_release_lock(vlan_lock);
5947 goto free_fltr_list;
5949 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
5951 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
 /* clear or set promisc per-VLAN depending on the caller's request */
5952 if (rm_vlan_promisc)
5953 status = _ice_clear_vsi_promisc(hw, vsi_handle,
5957 status = _ice_set_vsi_promisc(hw, vsi_handle,
5958 promisc_mask, vlan_id,
 /* free the private VLAN snapshot list */
5965 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
5966 ice_fltr_list_entry, list_entry) {
5967 LIST_DEL(&list_itr->list_entry);
5968 ice_free(hw, list_itr);
5974 * ice_set_vlan_vsi_promisc
5975 * @hw: pointer to the hardware structure
5976 * @vsi_handle: VSI handle to configure
5977 * @promisc_mask: mask of promiscuous config bits
5978 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5980 * Configure VSI with all associated VLANs to given promiscuous mode(s)
 *
 * Public wrapper: delegates to _ice_set_vlan_vsi_promisc() using the
 * primary switch info and the port's logical port number.
5983 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5984 bool rm_vlan_promisc)
5986 return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
5987 rm_vlan_promisc, hw->port_info->lport,
5992 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
5993 * @hw: pointer to the hardware structure
5994 * @vsi_handle: VSI handle to remove filters from
5995 * @recp_list: recipe list from which function remove fltr
5996 * @lkup: switch rule filter lookup type
 *
 * Snapshots the VSI's filters of type @lkup into a private list (under the
 * recipe's rule lock), dispatches the removal by lookup type, then frees
 * the snapshot entries.
5999 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
6000 struct ice_sw_recipe *recp_list,
6001 enum ice_sw_lkup_type lkup)
6003 struct ice_fltr_list_entry *fm_entry;
6004 struct LIST_HEAD_TYPE remove_list_head;
6005 struct LIST_HEAD_TYPE *rule_head;
6006 struct ice_fltr_list_entry *tmp;
6007 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6008 enum ice_status status;
6010 INIT_LIST_HEAD(&remove_list_head);
6011 rule_lock = &recp_list[lkup].filt_rule_lock;
6012 rule_head = &recp_list[lkup].filt_rules;
 /* copy matching filters under the lock, remove them after release */
6013 ice_acquire_lock(rule_lock);
6014 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
6016 ice_release_lock(rule_lock);
 /* each lookup type has its own removal routine */
6021 case ICE_SW_LKUP_MAC:
6022 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
6024 case ICE_SW_LKUP_VLAN:
6025 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
6027 case ICE_SW_LKUP_PROMISC:
6028 case ICE_SW_LKUP_PROMISC_VLAN:
6029 ice_remove_promisc(hw, lkup, &remove_list_head);
6031 case ICE_SW_LKUP_MAC_VLAN:
6032 ice_remove_mac_vlan(hw, &remove_list_head);
6034 case ICE_SW_LKUP_ETHERTYPE:
6035 case ICE_SW_LKUP_ETHERTYPE_MAC:
6036 ice_remove_eth_mac(hw, &remove_list_head);
6038 case ICE_SW_LKUP_DFLT:
6039 ice_debug(hw, ICE_DBG_SW, "Remove filters for this lookup type hasn't been implemented yet\n")
6041 case ICE_SW_LKUP_LAST:
6042 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
 /* free the snapshot list entries */
6046 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
6047 ice_fltr_list_entry, list_entry) {
6048 LIST_DEL(&fm_entry->list_entry);
6049 ice_free(hw, fm_entry);
6054 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
6055 * @hw: pointer to the hardware structure
6056 * @vsi_handle: VSI handle to remove filters from
6057 * @sw: pointer to switch info struct
 *
 * Removes the VSI's filters for every supported lookup type, one
 * ice_remove_vsi_lkup_fltr() call per type.
6060 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
6061 struct ice_switch_info *sw)
6063 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
6065 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6066 sw->recp_list, ICE_SW_LKUP_MAC);
6067 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6068 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
6069 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6070 sw->recp_list, ICE_SW_LKUP_PROMISC);
6071 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6072 sw->recp_list, ICE_SW_LKUP_VLAN);
6073 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6074 sw->recp_list, ICE_SW_LKUP_DFLT);
6075 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6076 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
6077 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6078 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
6079 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6080 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
6084 * ice_remove_vsi_fltr - Remove all filters for a VSI
6085 * @hw: pointer to the hardware structure
6086 * @vsi_handle: VSI handle to remove filters from
 *
 * Public wrapper: delegates to ice_remove_vsi_fltr_rule() using the
 * primary switch info (hw->switch_info).
6088 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
6090 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
6094 * ice_alloc_res_cntr - allocating resource counter
6095 * @hw: pointer to the hardware structure
6096 * @type: type of resource
6097 * @alloc_shared: if set it is shared else dedicated
6098 * @num_items: number of entries requested for FD resource type
6099 * @counter_id: counter index returned by AQ call
 *
 * Sends an admin-queue "allocate resource" command for one element and
 * returns the allocated counter index via @counter_id on success.
6102 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
6105 struct ice_aqc_alloc_free_res_elem *buf;
6106 enum ice_status status;
6109 /* Allocate resource */
6110 buf_len = ice_struct_size(buf, elem, 1);
6111 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6113 return ICE_ERR_NO_MEMORY;
6115 buf->num_elems = CPU_TO_LE16(num_items);
 /* res_type field: type in its bit-field position, OR'd with shared flag */
6116 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
6117 ICE_AQC_RES_TYPE_M) | alloc_shared);
6119 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
6120 ice_aqc_opc_alloc_res, NULL);
 /* FW returns the allocated index in the first element's response */
6124 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
6132 * ice_free_res_cntr - free resource counter
6133 * @hw: pointer to the hardware structure
6134 * @type: type of resource
6135 * @alloc_shared: if set it is shared else dedicated
6136 * @num_items: number of entries to be freed for FD resource type
6137 * @counter_id: counter ID resource which needs to be freed
 *
 * Sends an admin-queue "free resource" command for the given counter ID.
 * Mirror of ice_alloc_res_cntr().
6140 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
6143 struct ice_aqc_alloc_free_res_elem *buf;
6144 enum ice_status status;
6148 buf_len = ice_struct_size(buf, elem, 1);
6149 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6151 return ICE_ERR_NO_MEMORY;
6153 buf->num_elems = CPU_TO_LE16(num_items);
6154 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
6155 ICE_AQC_RES_TYPE_M) | alloc_shared);
 /* the counter to free is passed in the element's sw_resp field */
6156 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
6158 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
6159 ice_aqc_opc_free_res, NULL);
6161 ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
6168 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
6169 * @hw: pointer to the hardware structure
6170 * @counter_id: returns counter index
 *
 * Convenience wrapper: allocates one dedicated VLAN-type counter via
 * ice_alloc_res_cntr().
6172 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
6174 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
6175 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
6180 * ice_free_vlan_res_counter - Free counter resource for VLAN type
6181 * @hw: pointer to the hardware structure
6182 * @counter_id: counter index to be freed
 *
 * Convenience wrapper: frees one dedicated VLAN-type counter via
 * ice_free_res_cntr().
6184 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
6186 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
6187 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
6192 * ice_alloc_res_lg_act - add large action resource
6193 * @hw: pointer to the hardware structure
6194 * @l_id: large action ID to fill it in
6195 * @num_acts: number of actions to hold with a large action entry
 *
 * Allocates a wide-table entry sized for @num_acts actions and returns its
 * index via @l_id.
6197 static enum ice_status
6198 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
6200 struct ice_aqc_alloc_free_res_elem *sw_buf;
6201 enum ice_status status;
6204 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
6205 return ICE_ERR_PARAM;
6207 /* Allocate resource for large action */
6208 buf_len = ice_struct_size(sw_buf, elem, 1);
6209 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6211 return ICE_ERR_NO_MEMORY;
6213 sw_buf->num_elems = CPU_TO_LE16(1);
6215 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
6216 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
6217 * If num_acts is greater than 2, then use
6218 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
6219 * The num_acts cannot exceed 4. This was ensured at the
6220 * beginning of the function.
6223 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
6224 else if (num_acts == 2)
6225 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
6227 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
6229 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
6230 ice_aqc_opc_alloc_res, NULL);
 /* FW hands back the wide-table index in sw_resp */
6232 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
6234 ice_free(hw, sw_buf);
6239 * ice_add_mac_with_sw_marker - add filter with sw marker
6240 * @hw: pointer to the hardware structure
6241 * @f_info: filter info structure containing the MAC filter information
6242 * @sw_marker: sw marker to tag the Rx descriptor with
 *
 * Adds the MAC filter if not already present, then upgrades it with a
 * SW-marker large action. If the large-action step fails and the filter
 * did not exist before this call, the newly added filter is removed.
6245 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
6248 struct ice_fltr_mgmt_list_entry *m_entry;
6249 struct ice_fltr_list_entry fl_info;
6250 struct ice_sw_recipe *recp_list;
6251 struct LIST_HEAD_TYPE l_head;
6252 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6253 enum ice_status ret;
 /* only FWD_TO_VSI MAC filters with a real marker are eligible */
6257 if (f_info->fltr_act != ICE_FWD_TO_VSI)
6258 return ICE_ERR_PARAM;
6260 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
6261 return ICE_ERR_PARAM;
6263 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
6264 return ICE_ERR_PARAM;
6266 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
6267 return ICE_ERR_PARAM;
6268 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
6270 /* Add filter if it doesn't exist so then the adding of large
6271 * action always results in update
6274 INIT_LIST_HEAD(&l_head);
6275 fl_info.fltr_info = *f_info;
6276 LIST_ADD(&fl_info.list_entry, &l_head);
6278 entry_exists = false;
6279 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
6280 hw->port_info->lport);
 /* ALREADY_EXISTS is fine; remember it to avoid removal on error below */
6281 if (ret == ICE_ERR_ALREADY_EXISTS)
6282 entry_exists = true;
6286 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
6287 rule_lock = &recp_list->filt_rule_lock;
6288 ice_acquire_lock(rule_lock);
6289 /* Get the book keeping entry for the filter */
6290 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
6294 /* If counter action was enabled for this rule then don't enable
6295 * sw marker large action
6297 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
6298 ret = ICE_ERR_PARAM;
6302 /* if same marker was added before */
6303 if (m_entry->sw_marker_id == sw_marker) {
6304 ret = ICE_ERR_ALREADY_EXISTS;
6308 /* Allocate a hardware table entry to hold large act. Three actions
6309 * for marker based large action
6311 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
6315 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
6318 /* Update the switch rule to add the marker action */
6319 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
6321 ice_release_lock(rule_lock);
6326 ice_release_lock(rule_lock);
6327 /* only remove entry if it did not exist previously */
6329 ret = ice_remove_mac(hw, &l_head);
6335 * ice_add_mac_with_counter - add filter with counter enabled
6336 * @hw: pointer to the hardware structure
6337 * @f_info: pointer to filter info structure containing the MAC filter
 *
 * Adds the MAC filter if not already present, then attaches a VLAN counter
 * via a large action. Counter and SW-marker actions are mutually exclusive
 * on a rule. On failure, a filter added by this call is removed again.
6341 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
6343 struct ice_fltr_mgmt_list_entry *m_entry;
6344 struct ice_fltr_list_entry fl_info;
6345 struct ice_sw_recipe *recp_list;
6346 struct LIST_HEAD_TYPE l_head;
6347 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6348 enum ice_status ret;
6353 if (f_info->fltr_act != ICE_FWD_TO_VSI)
6354 return ICE_ERR_PARAM;
6356 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
6357 return ICE_ERR_PARAM;
6359 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
6360 return ICE_ERR_PARAM;
6361 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
6362 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
6364 entry_exist = false;
6366 rule_lock = &recp_list->filt_rule_lock;
6368 /* Add filter if it doesn't exist so then the adding of large
6369 * action always results in update
6371 INIT_LIST_HEAD(&l_head);
6373 fl_info.fltr_info = *f_info;
6374 LIST_ADD(&fl_info.list_entry, &l_head);
6376 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
6377 hw->port_info->lport);
 /* pre-existing filter is acceptable; skip removal on error below */
6378 if (ret == ICE_ERR_ALREADY_EXISTS)
6383 ice_acquire_lock(rule_lock);
6384 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
6386 ret = ICE_ERR_BAD_PTR;
6390 /* Don't enable counter for a filter for which sw marker was enabled */
6391 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
6392 ret = ICE_ERR_PARAM;
6396 /* If a counter was already enabled then don't need to add again */
6397 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
6398 ret = ICE_ERR_ALREADY_EXISTS;
6402 /* Allocate a hardware table entry to VLAN counter */
6403 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
6407 /* Allocate a hardware table entry to hold large act. Two actions for
6408 * counter based large action
6410 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
6414 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
6417 /* Update the switch rule to add the counter action */
6418 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
6420 ice_release_lock(rule_lock);
6425 ice_release_lock(rule_lock);
6426 /* only remove entry if it did not exist previously */
6428 ret = ice_remove_mac(hw, &l_head);
6433 /* This is mapping table entry that maps every word within a given protocol
6434 * structure to the real byte offset as per the specification of that
6436 * for example dst address is 3 words in ethertype header and corresponding
6437 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
6438 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
6439 * matching entry describing its field. This needs to be updated if new
6440 * structure is added to that union.
 *
 * Each row: { protocol type, { byte offsets of each 16-bit word } }.
 * Tunnel-header rows (VXLAN/GENEVE/GTP/PFCP/NAT_T) start at offset 8,
 * i.e. past the preceding UDP header words.
6442 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
6443 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
6444 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
6445 { ICE_ETYPE_OL, { 0 } },
6446 { ICE_VLAN_OFOS, { 0, 2 } },
6447 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
6448 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
6449 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
6450 26, 28, 30, 32, 34, 36, 38 } },
6451 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
6452 26, 28, 30, 32, 34, 36, 38 } },
6453 { ICE_TCP_IL, { 0, 2 } },
6454 { ICE_UDP_OF, { 0, 2 } },
6455 { ICE_UDP_ILOS, { 0, 2 } },
6456 { ICE_SCTP_IL, { 0, 2 } },
6457 { ICE_VXLAN, { 8, 10, 12, 14 } },
6458 { ICE_GENEVE, { 8, 10, 12, 14 } },
6459 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
6460 { ICE_NVGRE, { 0, 2, 4, 6 } },
6461 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
6462 { ICE_PPPOE, { 0, 2, 4, 6 } },
6463 { ICE_PFCP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
6464 { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
6465 { ICE_ESP, { 0, 2, 4, 6 } },
6466 { ICE_AH, { 0, 2, 4, 6, 8, 10 } },
6467 { ICE_NAT_T, { 8, 10, 12, 14 } },
6468 { ICE_GTP_NO_PAY, { 8, 10, 12, 14 } },
6469 { ICE_VLAN_EX, { 0, 2 } },
6472 /* The following table describes preferred grouping of recipes.
6473 * If a recipe that needs to be programmed is a superset or matches one of the
6474 * following combinations, then the recipe needs to be chained as per the
 *
 * Maps each software protocol type to the hardware protocol ID used in
 * field-vector extraction. Note several tunnel types share the same HW ID
 * (e.g. VXLAN/GENEVE/GTP all map to ICE_UDP_OF_HW).
6478 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
6479 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
6480 { ICE_MAC_IL, ICE_MAC_IL_HW },
6481 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
6482 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
6483 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
6484 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
6485 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
6486 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
6487 { ICE_TCP_IL, ICE_TCP_IL_HW },
6488 { ICE_UDP_OF, ICE_UDP_OF_HW },
6489 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
6490 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
6491 { ICE_VXLAN, ICE_UDP_OF_HW },
6492 { ICE_GENEVE, ICE_UDP_OF_HW },
6493 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
6494 { ICE_NVGRE, ICE_GRE_OF_HW },
6495 { ICE_GTP, ICE_UDP_OF_HW },
6496 { ICE_PPPOE, ICE_PPPOE_HW },
6497 { ICE_PFCP, ICE_UDP_ILOS_HW },
6498 { ICE_L2TPV3, ICE_L2TPV3_HW },
6499 { ICE_ESP, ICE_ESP_HW },
6500 { ICE_AH, ICE_AH_HW },
6501 { ICE_NAT_T, ICE_UDP_ILOS_HW },
6502 { ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
6503 { ICE_VLAN_EX, ICE_VLAN_OF_HW },
6507 * ice_find_recp - find a recipe
6508 * @hw: pointer to the hardware structure
6509 * @lkup_exts: extension sequence to match
 * @tun_type: tunnel type the recipe must match
6511 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
 *
 * A recipe matches when it has the same number of valid words, every lookup
 * word (prot_id/offset) appears in the recipe's words, and the tunnel type
 * is identical.
6513 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
6514 enum ice_sw_tunnel_type tun_type)
6516 bool refresh_required = true;
6517 struct ice_sw_recipe *recp;
6520 /* Walk through existing recipes to find a match */
6521 recp = hw->switch_info->recp_list;
6522 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6523 /* If recipe was not created for this ID, in SW bookkeeping,
6524 * check if FW has an entry for this recipe. If the FW has an
6525 * entry update it in our SW bookkeeping and continue with the
6528 if (!recp[i].recp_created)
6529 if (ice_get_recp_frm_fw(hw,
6530 hw->switch_info->recp_list, i,
6534 /* Skip inverse action recipes */
6535 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
6536 ICE_AQ_RECIPE_ACT_INV_ACT)
6539 /* if number of words we are looking for match */
6540 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
6541 struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
6542 struct ice_fv_word *be = lkup_exts->fv_words;
6543 u16 *cr = recp[i].lkup_exts.field_mask;
6544 u16 *de = lkup_exts->field_mask;
6548 /* ar, cr, and qr are related to the recipe words, while
6549 * be, de, and pe are related to the lookup words
6551 for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
 /* search the recipe's words for this lookup word */
6552 for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
6554 if (ar[qr].off == be[pe].off &&
6555 ar[qr].prot_id == be[pe].prot_id &&
6557 /* Found the "pe"th word in the
6562 /* After walking through all the words in the
6563 * "i"th recipe if "p"th word was not found then
6564 * this recipe is not what we are looking for.
6565 * So break out from this loop and try the next
6568 if (qr >= recp[i].lkup_exts.n_val_words) {
6573 /* If for "i"th recipe the found was never set to false
6574 * then it means we found our match
6576 if (tun_type == recp[i].tun_type && found)
6577 return i; /* Return the recipe ID */
6580 return ICE_MAX_NUM_RECIPES;
6584 * ice_prot_type_to_id - get protocol ID from protocol type
6585 * @type: protocol type
6586 * @id: pointer to variable that will receive the ID
6588 * Returns true if found, false otherwise
 *
 * Linear scan of ice_prot_id_tbl; on a hit, writes the HW protocol ID
 * through @id.
6590 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
6594 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
6595 if (ice_prot_id_tbl[i].type == type) {
6596 *id = ice_prot_id_tbl[i].protocol_id;
6603 * ice_fill_valid_words - count valid words
6604 * @rule: advanced rule with lookup information
6605 * @lkup_exts: byte offset extractions of the words that are valid
6607 * calculate valid words in a lookup rule using mask value
 *
 * Appends one fv_word entry per non-zero 16-bit mask word of @rule to
 * @lkup_exts and returns the number of words added.
6610 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
6611 struct ice_prot_lkup_ext *lkup_exts)
6613 u8 j, word, prot_id, ret_val;
6615 if (!ice_prot_type_to_id(rule->type, &prot_id))
 /* continue appending after any words already recorded */
6618 word = lkup_exts->n_val_words;
 /* scan the rule mask one 16-bit word at a time */
6620 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
6621 if (((u16 *)&rule->m_u)[j] &&
6622 rule->type < ARRAY_SIZE(ice_prot_ext)) {
6623 /* No more space to accommodate */
6624 if (word >= ICE_MAX_CHAIN_WORDS)
6626 lkup_exts->fv_words[word].off =
6627 ice_prot_ext[rule->type].offs[j];
6628 lkup_exts->fv_words[word].prot_id =
6629 ice_prot_id_tbl[rule->type].protocol_id;
6630 lkup_exts->field_mask[word] =
6631 BE16_TO_CPU(((_FORCE_ __be16 *)&rule->m_u)[j]);
 /* return the count of words added by this call */
6635 ret_val = word - lkup_exts->n_val_words;
6636 lkup_exts->n_val_words = word;
6642 * ice_create_first_fit_recp_def - Create a recipe grouping
6643 * @hw: pointer to the hardware structure
6644 * @lkup_exts: an array of protocol header extractions
6645 * @rg_list: pointer to a list that stores new recipe groups
6646 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
6648 * Using first fit algorithm, take all the words that are still not done
6649 * and start grouping them in 4-word groups. Each group makes up one
6652 static enum ice_status
6653 ice_create_first_fit_recp_def(struct ice_hw *hw,
6654 struct ice_prot_lkup_ext *lkup_exts,
6655 struct LIST_HEAD_TYPE *rg_list,
6658 struct ice_pref_recipe_group *grp = NULL;
 /* zero valid words: still allocate one empty group entry */
6663 if (!lkup_exts->n_val_words) {
6664 struct ice_recp_grp_entry *entry;
6666 entry = (struct ice_recp_grp_entry *)
6667 ice_malloc(hw, sizeof(*entry));
6669 return ICE_ERR_NO_MEMORY;
6670 LIST_ADD(&entry->l_entry, rg_list);
6671 grp = &entry->r_group;
6673 grp->n_val_pairs = 0;
6676 /* Walk through every word in the rule to check if it is not done. If so
6677 * then this word needs to be part of a new recipe.
6679 for (j = 0; j < lkup_exts->n_val_words; j++)
6680 if (!ice_is_bit_set(lkup_exts->done, j)) {
 /* start a fresh group when the current one is full */
6682 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
6683 struct ice_recp_grp_entry *entry;
6685 entry = (struct ice_recp_grp_entry *)
6686 ice_malloc(hw, sizeof(*entry));
6688 return ICE_ERR_NO_MEMORY;
6689 LIST_ADD(&entry->l_entry, rg_list);
6690 grp = &entry->r_group;
 /* copy the word's prot_id/offset/mask into the group */
6694 grp->pairs[grp->n_val_pairs].prot_id =
6695 lkup_exts->fv_words[j].prot_id;
6696 grp->pairs[grp->n_val_pairs].off =
6697 lkup_exts->fv_words[j].off;
6698 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
6706 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
6707 * @hw: pointer to the hardware structure
6708 * @fv_list: field vector with the extraction sequence information
6709 * @rg_list: recipe groupings with protocol-offset pairs
6711 * Helper function to fill in the field vector indices for protocol-offset
6712 * pairs. These indexes are then ultimately programmed into a recipe.
6714 static enum ice_status
6715 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
6716 struct LIST_HEAD_TYPE *rg_list)
6718 struct ice_sw_fv_list_entry *fv;
6719 struct ice_recp_grp_entry *rg;
6720 struct ice_fv_word *fv_ext;
6722 if (LIST_EMPTY(fv_list))
 /* only the first FV in the list is consulted for word indices */
6725 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
6726 fv_ext = fv->fv_ptr->ew;
6728 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
6731 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
6732 struct ice_fv_word *pr;
6737 pr = &rg->r_group.pairs[i];
6738 mask = rg->r_group.mask[i];
 /* locate this prot_id/offset pair in the FV's extraction words */
6740 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
6741 if (fv_ext[j].prot_id == pr->prot_id &&
6742 fv_ext[j].off == pr->off) {
6745 /* Store index of field vector */
6747 rg->fv_mask[i] = mask;
6751 /* Protocol/offset could not be found, caller gave an
6755 return ICE_ERR_PARAM;
6763 * ice_find_free_recp_res_idx - find free result indexes for recipe
6764 * @hw: pointer to hardware structure
6765 * @profiles: bitmap of profiles that will be associated with the new recipe
6766 * @free_idx: pointer to variable to receive the free index bitmap
6768 * The algorithm used here is:
6769 * 1. When creating a new recipe, create a set P which contains all
6770 * Profiles that will be associated with our new recipe
6772 * 2. For each Profile p in set P:
6773 * a. Add all recipes associated with Profile p into set R
6774 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
6775 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
6776 * i. Or just assume they all have the same possible indexes:
6778 * i.e., PossibleIndexes = 0x0000F00000000000
6780 * 3. For each Recipe r in set R:
6781 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
6782 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
6784 * FreeIndexes will contain the bits indicating the indexes free for use,
6785 * then the code needs to update the recipe[r].used_result_idx_bits to
6786 * indicate which indexes were selected for use by this recipe.
 *
 * Returns the number of free result indexes (popcount of @free_idx).
6789 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
6790 ice_bitmap_t *free_idx)
6792 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6793 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6794 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
6797 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6798 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6799 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
6800 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
 /* start from "all indexes possible" and narrow per profile below */
6802 ice_bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
6804 /* For each profile we are going to associate the recipe with, add the
6805 * recipes that are associated with that profile. This will give us
6806 * the set of recipes that our recipe may collide with. Also, determine
6807 * what possible result indexes are usable given this set of profiles.
6809 ice_for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
6810 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
6811 ICE_MAX_NUM_RECIPES);
6812 ice_and_bitmap(possible_idx, possible_idx,
6813 hw->switch_info->prof_res_bm[bit],
6817 /* For each recipe that our new recipe may collide with, determine
6818 * which indexes have been used.
6820 ice_for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
6821 ice_or_bitmap(used_idx, used_idx,
6822 hw->switch_info->recp_list[bit].res_idxs,
 /* free = possible XOR used (used is always a subset of possible) */
6825 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
6827 /* return number of free indexes */
6828 return (u16)ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS);
6832 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
6833 * @hw: pointer to hardware structure
6834 * @rm: recipe management list entry
6835 * @profiles: bitmap of profiles that will be associated.
6837 static enum ice_status
6838 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
6839 ice_bitmap_t *profiles)
6841 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6842 struct ice_aqc_recipe_data_elem *tmp;
6843 struct ice_aqc_recipe_data_elem *buf;
6844 struct ice_recp_grp_entry *entry;
6845 enum ice_status status;
6851 /* When more than one recipe are required, another recipe is needed to
6852 * chain them together. Matching a tunnel metadata ID takes up one of
6853 * the match fields in the chaining recipe reducing the number of
6854 * chained recipes by one.
6856 /* check number of free result indices */
6857 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6858 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
6860 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
6861 free_res_idx, rm->n_grp_count);
/* A chained (multi-group) recipe needs one free result index per group
 * so the sub-recipes' outcomes can be matched by the root recipe.
 */
6863 if (rm->n_grp_count > 1) {
6864 if (rm->n_grp_count > free_res_idx)
6865 return ICE_ERR_MAX_LIMIT;
6870 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
6871 return ICE_ERR_MAX_LIMIT;
/* tmp holds the recipes currently programmed in HW (template source);
 * buf is the add-recipe command buffer being built, one elem per group.
 */
6873 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
6874 ICE_MAX_NUM_RECIPES,
6877 return ICE_ERR_NO_MEMORY;
6879 buf = (struct ice_aqc_recipe_data_elem *)
6880 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
6882 status = ICE_ERR_NO_MEMORY;
6886 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
6887 recipe_count = ICE_MAX_NUM_RECIPES;
6888 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
6890 if (status || recipe_count == 0)
6893 /* Allocate the recipe resources, and configure them according to the
6894 * match fields from protocol headers and extracted field vectors.
6896 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
6897 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
6900 status = ice_alloc_recipe(hw, &entry->rid);
6904 /* Clear the result index of the located recipe, as this will be
6905 * updated, if needed, later in the recipe creation process.
6907 tmp[0].content.result_indx = 0;
6909 buf[recps] = tmp[0];
6910 buf[recps].recipe_indx = (u8)entry->rid;
6911 /* if the recipe is a non-root recipe RID should be programmed
6912 * as 0 for the rules to be applied correctly.
6914 buf[recps].content.rid = 0;
6915 ice_memset(&buf[recps].content.lkup_indx, 0,
6916 sizeof(buf[recps].content.lkup_indx),
6919 /* All recipes use look-up index 0 to match switch ID. */
6920 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
6921 buf[recps].content.mask[0] =
6922 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
6923 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
6926 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
6927 buf[recps].content.lkup_indx[i] = 0x80;
6928 buf[recps].content.mask[i] = 0;
/* Program the real extraction-sequence words for this group
 * (index 0 is reserved for the switch-ID match above).
 */
6931 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
6932 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
6933 buf[recps].content.mask[i + 1] =
6934 CPU_TO_LE16(entry->fv_mask[i]);
6937 if (rm->n_grp_count > 1) {
6938 /* Checks to see if there really is a valid result index
6941 if (chain_idx >= ICE_MAX_FV_WORDS) {
6942 ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
6943 status = ICE_ERR_MAX_LIMIT;
6947 entry->chain_idx = chain_idx;
6948 buf[recps].content.result_indx =
6949 ICE_AQ_RECIPE_RESULT_EN |
6950 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
6951 ICE_AQ_RECIPE_RESULT_DATA_M);
/* Consume this result index and pick the next free one for
 * the following group.
 */
6952 ice_clear_bit(chain_idx, result_idx_bm);
6953 chain_idx = ice_find_first_bit(result_idx_bm,
6957 /* fill recipe dependencies */
6958 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
6959 ICE_MAX_NUM_RECIPES);
6960 ice_set_bit(buf[recps].recipe_indx,
6961 (ice_bitmap_t *)buf[recps].recipe_bitmap);
6962 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
/* Single-group recipe: the one recipe is also the root. */
6966 if (rm->n_grp_count == 1) {
6967 rm->root_rid = buf[0].recipe_indx;
6968 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
6969 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
6970 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
6971 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
6972 sizeof(buf[0].recipe_bitmap),
6973 ICE_NONDMA_TO_NONDMA);
6975 status = ICE_ERR_BAD_PTR;
6978 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
6979 * the recipe which is getting created if specified
6980 * by user. Usually any advanced switch filter, which results
6981 * into new extraction sequence, ended up creating a new recipe
6982 * of type ROOT and usually recipes are associated with profiles
6983 * Switch rule referring newly created recipe, needs to have
6984 * either/or 'fwd' or 'join' priority, otherwise switch rule
6985 * evaluation will not happen correctly. In other words, if
6986 * switch rule to be evaluated on priority basis, then recipe
6987 * needs to have priority, otherwise it will be evaluated last.
6989 buf[0].content.act_ctrl_fwd_priority = rm->priority;
/* Multi-group recipe: build one extra root recipe whose lookup
 * words are the chain result indexes of all the sub-recipes.
 */
6991 struct ice_recp_grp_entry *last_chain_entry;
6994 /* Allocate the last recipe that will chain the outcomes of the
6995 * other recipes together
6997 status = ice_alloc_recipe(hw, &rid);
7001 buf[recps].recipe_indx = (u8)rid;
7002 buf[recps].content.rid = (u8)rid;
7003 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
7004 /* the new entry created should also be part of rg_list to
7005 * make sure we have complete recipe
7007 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
7008 sizeof(*last_chain_entry));
7009 if (!last_chain_entry) {
7010 status = ICE_ERR_NO_MEMORY;
7013 last_chain_entry->rid = rid;
7014 ice_memset(&buf[recps].content.lkup_indx, 0,
7015 sizeof(buf[recps].content.lkup_indx),
7017 /* All recipes use look-up index 0 to match switch ID. */
7018 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
7019 buf[recps].content.mask[0] =
7020 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
7021 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
7022 buf[recps].content.lkup_indx[i] =
7023 ICE_AQ_RECIPE_LKUP_IGNORE;
7024 buf[recps].content.mask[i] = 0;
7028 /* update r_bitmap with the recp that is used for chaining */
7029 ice_set_bit(rid, rm->r_bitmap);
7030 /* this is the recipe that chains all the other recipes so it
7031 * should not have a chaining ID to indicate the same
7033 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
7034 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
7036 last_chain_entry->fv_idx[i] = entry->chain_idx;
7037 buf[recps].content.lkup_indx[i] = entry->chain_idx;
7038 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
7039 ice_set_bit(entry->rid, rm->r_bitmap);
7041 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
7042 if (sizeof(buf[recps].recipe_bitmap) >=
7043 sizeof(rm->r_bitmap)) {
7044 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
7045 sizeof(buf[recps].recipe_bitmap),
7046 ICE_NONDMA_TO_NONDMA);
7048 status = ICE_ERR_BAD_PTR;
7051 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
7054 rm->root_rid = (u8)rid;
/* Program all built recipes into HW under the change lock. */
7056 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7060 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
7061 ice_release_change_lock(hw);
7065 /* Every recipe that just got created add it to the recipe
7068 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
7069 struct ice_switch_info *sw = hw->switch_info;
7070 bool is_root, idx_found = false;
7071 struct ice_sw_recipe *recp;
7072 u16 idx, buf_idx = 0;
7074 /* find buffer index for copying some data */
7075 for (idx = 0; idx < rm->n_grp_count; idx++)
7076 if (buf[idx].recipe_indx == entry->rid) {
7082 status = ICE_ERR_OUT_OF_RANGE;
/* Mirror what was programmed into the SW recipe cache so later
 * lookups (ice_find_recp) can match against it.
 */
7086 recp = &sw->recp_list[entry->rid];
7087 is_root = (rm->root_rid == entry->rid);
7088 recp->is_root = is_root;
7090 recp->root_rid = entry->rid;
7091 recp->big_recp = (is_root && rm->n_grp_count > 1);
7093 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
7094 entry->r_group.n_val_pairs *
7095 sizeof(struct ice_fv_word),
7096 ICE_NONDMA_TO_NONDMA);
7098 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
7099 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
7101 /* Copy non-result fv index values and masks to recipe. This
7102 * call will also update the result recipe bitmask.
7104 ice_collect_result_idx(&buf[buf_idx], recp);
7106 /* for non-root recipes, also copy to the root, this allows
7107 * easier matching of a complete chained recipe
7110 ice_collect_result_idx(&buf[buf_idx],
7111 &sw->recp_list[rm->root_rid]);
7113 recp->n_ext_words = entry->r_group.n_val_pairs;
7114 recp->chain_idx = entry->chain_idx;
7115 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
7116 recp->n_grp_count = rm->n_grp_count;
7117 recp->tun_type = rm->tun_type;
7118 recp->recp_created = true;
7132 * ice_create_recipe_group - creates recipe group
7133 * @hw: pointer to hardware structure
7134 * @rm: recipe management list entry
7135 * @lkup_exts: lookup elements
7137 static enum ice_status
7138 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
7139 struct ice_prot_lkup_ext *lkup_exts)
7141 enum ice_status status;
7144 rm->n_grp_count = 0;
7146 /* Create recipes for words that are marked not done by packing them
7149 status = ice_create_first_fit_recp_def(hw, lkup_exts,
7150 &rm->rg_list, &recp_count);
/* Cache the full extraction word/mask set on the recipe so it can be
 * compared against existing recipes later (recipe reuse lookup).
 */
7152 rm->n_grp_count += recp_count;
7153 rm->n_ext_words = lkup_exts->n_val_words;
7154 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
7155 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
7156 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
7157 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
7164 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
7165 * @hw: pointer to hardware structure
7166 * @lkups: lookup elements or match criteria for the advanced recipe, one
7167 * structure per protocol header
7168 * @lkups_cnt: number of protocols
7169 * @bm: bitmap of field vectors to consider
7170 * @fv_list: pointer to a list that holds the returned field vectors
7172 static enum ice_status
7173 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7174 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
7176 enum ice_status status;
/* Temporary array of HW protocol IDs, one per lookup element. */
7183 prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
7185 return ICE_ERR_NO_MEMORY;
/* Translate each software protocol type to its HW protocol ID;
 * an untranslatable type is a configuration error.
 */
7187 for (i = 0; i < lkups_cnt; i++)
7188 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
7189 status = ICE_ERR_CFG;
7193 /* Find field vectors that include all specified protocol types */
7194 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
/* prot_ids is freed on both success and error paths. */
7197 ice_free(hw, prot_ids);
7202 * ice_tun_type_match_word - determine if tun type needs a match mask
7203 * @tun_type: tunnel type
7204 * @mask: mask to be used for the tunnel
/* Returns true and fills *mask for tunnel types whose rules must also
 * match the tunnel flag in the packet metadata; other types (handled by
 * the default case, not visible in this view) need no metadata match.
 */
7206 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
7209 case ICE_SW_TUN_VXLAN_GPE:
7210 case ICE_SW_TUN_GENEVE:
7211 case ICE_SW_TUN_VXLAN:
7212 case ICE_SW_TUN_NVGRE:
7213 case ICE_SW_TUN_UDP:
7214 case ICE_ALL_TUNNELS:
7215 case ICE_SW_TUN_AND_NON_TUN_QINQ:
7216 case ICE_NON_TUN_QINQ:
7217 case ICE_SW_TUN_PPPOE_QINQ:
7218 case ICE_SW_TUN_PPPOE_PAY_QINQ:
7219 case ICE_SW_TUN_PPPOE_IPV4_QINQ:
7220 case ICE_SW_TUN_PPPOE_IPV6_QINQ:
7221 *mask = ICE_TUN_FLAG_MASK;
/* VLAN-tagged tunnel variants: match the tunnel flag but exclude
 * the VLAN bit from the metadata mask.
 */
7224 case ICE_SW_TUN_GENEVE_VLAN:
7225 case ICE_SW_TUN_VXLAN_VLAN:
7226 *mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
7236 * ice_add_special_words - Add words that are not protocols, such as metadata
7237 * @rinfo: other information regarding the rule e.g. priority and action info
7238 * @lkup_exts: lookup word structure
7240 static enum ice_status
7241 ice_add_special_words(struct ice_adv_rule_info *rinfo,
7242 struct ice_prot_lkup_ext *lkup_exts)
7246 /* If this is a tunneled packet, then add recipe index to match the
7247 * tunnel bit in the packet metadata flags.
7249 if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
/* The metadata match consumes one of the limited chain words;
 * fail with MAX_LIMIT if the lookup is already full.
 */
7250 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
7251 u8 word = lkup_exts->n_val_words++;
7253 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
7254 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
7255 lkup_exts->field_mask[word] = mask;
7257 return ICE_ERR_MAX_LIMIT;
7264 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
7265 * @hw: pointer to hardware structure
7266 * @rinfo: other information regarding the rule e.g. priority and action info
7267 * @bm: pointer to memory for returning the bitmap of field vectors
7270 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
7273 enum ice_prof_type prof_type;
7275 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
/* Two styles of case below: tunnel classes set prof_type and fall
 * through to the ice_get_sw_fv_bitmap() lookup at the end, while the
 * explicit-profile cases set specific ICE_PROFID_* bits directly.
 * NOTE(review): the explicit-profile cases appear to return before the
 * final lookup -- confirm, as flow-control lines are not visible here.
 */
7277 switch (rinfo->tun_type) {
7279 case ICE_NON_TUN_QINQ:
7280 prof_type = ICE_PROF_NON_TUN;
7282 case ICE_ALL_TUNNELS:
7283 prof_type = ICE_PROF_TUN_ALL;
7285 case ICE_SW_TUN_VXLAN_GPE:
7286 case ICE_SW_TUN_GENEVE:
7287 case ICE_SW_TUN_GENEVE_VLAN:
7288 case ICE_SW_TUN_VXLAN:
7289 case ICE_SW_TUN_VXLAN_VLAN:
7290 case ICE_SW_TUN_UDP:
7291 case ICE_SW_TUN_GTP:
7292 prof_type = ICE_PROF_TUN_UDP;
7294 case ICE_SW_TUN_NVGRE:
7295 prof_type = ICE_PROF_TUN_GRE;
7297 case ICE_SW_TUN_PPPOE:
7298 case ICE_SW_TUN_PPPOE_QINQ:
7299 prof_type = ICE_PROF_TUN_PPPOE;
7301 case ICE_SW_TUN_PPPOE_PAY:
7302 case ICE_SW_TUN_PPPOE_PAY_QINQ:
7303 ice_set_bit(ICE_PROFID_PPPOE_PAY, bm);
7305 case ICE_SW_TUN_PPPOE_IPV4:
7306 case ICE_SW_TUN_PPPOE_IPV4_QINQ:
7307 ice_set_bit(ICE_PROFID_PPPOE_IPV4_OTHER, bm);
7308 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
7309 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
7311 case ICE_SW_TUN_PPPOE_IPV4_TCP:
7312 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
7314 case ICE_SW_TUN_PPPOE_IPV4_UDP:
7315 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
7317 case ICE_SW_TUN_PPPOE_IPV6:
7318 case ICE_SW_TUN_PPPOE_IPV6_QINQ:
7319 ice_set_bit(ICE_PROFID_PPPOE_IPV6_OTHER, bm);
7320 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
7321 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
7323 case ICE_SW_TUN_PPPOE_IPV6_TCP:
7324 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
7326 case ICE_SW_TUN_PPPOE_IPV6_UDP:
7327 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
7329 case ICE_SW_TUN_PROFID_IPV6_ESP:
7330 case ICE_SW_TUN_IPV6_ESP:
7331 ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
7333 case ICE_SW_TUN_PROFID_IPV6_AH:
7334 case ICE_SW_TUN_IPV6_AH:
7335 ice_set_bit(ICE_PROFID_IPV6_AH, bm);
7337 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
7338 case ICE_SW_TUN_IPV6_L2TPV3:
7339 ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
7341 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
7342 case ICE_SW_TUN_IPV6_NAT_T:
7343 ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
7345 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
7346 ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
7348 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
7349 ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
7351 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
7352 ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
7354 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
7355 ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
7357 case ICE_SW_TUN_IPV4_NAT_T:
7358 ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
7360 case ICE_SW_TUN_IPV4_L2TPV3:
7361 ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
7363 case ICE_SW_TUN_IPV4_ESP:
7364 ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
7366 case ICE_SW_TUN_IPV4_AH:
7367 ice_set_bit(ICE_PROFID_IPV4_AH, bm);
7369 case ICE_SW_IPV4_TCP:
7370 ice_set_bit(ICE_PROFID_IPV4_TCP, bm);
7372 case ICE_SW_IPV4_UDP:
7373 ice_set_bit(ICE_PROFID_IPV4_UDP, bm);
7375 case ICE_SW_IPV6_TCP:
7376 ice_set_bit(ICE_PROFID_IPV6_TCP, bm);
7378 case ICE_SW_IPV6_UDP:
7379 ice_set_bit(ICE_PROFID_IPV6_UDP, bm);
/* GTP-U inner-IP cases cover both the EH (extension header) and
 * non-EH profile variants for OTHER/UDP/TCP payloads.
 */
7381 case ICE_SW_TUN_IPV4_GTPU_IPV4:
7382 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, bm);
7383 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_OTHER, bm);
7384 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
7385 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
7386 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
7387 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
7389 case ICE_SW_TUN_IPV6_GTPU_IPV4:
7390 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, bm);
7391 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_OTHER, bm);
7392 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
7393 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
7394 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
7395 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
7397 case ICE_SW_TUN_IPV4_GTPU_IPV6:
7398 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, bm);
7399 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_OTHER, bm);
7400 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
7401 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
7402 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
7403 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
7405 case ICE_SW_TUN_IPV6_GTPU_IPV6:
7406 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, bm);
7407 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_OTHER, bm);
7408 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
7409 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
7410 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
7411 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
7413 case ICE_SW_TUN_AND_NON_TUN:
7414 case ICE_SW_TUN_AND_NON_TUN_QINQ:
7416 prof_type = ICE_PROF_ALL;
/* Translate the chosen profile class into the actual bitmap. */
7420 ice_get_sw_fv_bitmap(hw, prof_type, bm);
7424 * ice_is_prof_rule - determine if rule type is a profile rule
7425 * @type: the rule type
7427 * if the rule type is a profile rule, that means that there is no field value
7428 * match required, in this case just a profile hit is required.
/* A "profile rule" matches on profile hit alone -- no per-field lookup
 * values are required, so callers may pass lkups_cnt == 0 for these.
 */
7430 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
7433 case ICE_SW_TUN_PROFID_IPV6_ESP:
7434 case ICE_SW_TUN_PROFID_IPV6_AH:
7435 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
7436 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
7437 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
7438 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
7439 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
7440 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
7450 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
7451 * @hw: pointer to hardware structure
7452 * @lkups: lookup elements or match criteria for the advanced recipe, one
7453 * structure per protocol header
7454 * @lkups_cnt: number of protocols
7455 * @rinfo: other information regarding the rule e.g. priority and action info
7456 * @rid: return the recipe ID of the recipe created
7458 static enum ice_status
7459 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7460 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
7462 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
7463 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7464 struct ice_prot_lkup_ext *lkup_exts;
7465 struct ice_recp_grp_entry *r_entry;
7466 struct ice_sw_fv_list_entry *fvit;
7467 struct ice_recp_grp_entry *r_tmp;
7468 struct ice_sw_fv_list_entry *tmp;
7469 enum ice_status status = ICE_SUCCESS;
7470 struct ice_sw_recipe *rm;
/* Non-profile rules must supply at least one lookup element. */
7473 if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
7474 return ICE_ERR_PARAM;
7476 lkup_exts = (struct ice_prot_lkup_ext *)
7477 ice_malloc(hw, sizeof(*lkup_exts));
7479 return ICE_ERR_NO_MEMORY;
7481 /* Determine the number of words to be matched and if it exceeds a
7482 * recipe's restrictions
7484 for (i = 0; i < lkups_cnt; i++) {
7487 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
7488 status = ICE_ERR_CFG;
7489 goto err_free_lkup_exts;
7492 count = ice_fill_valid_words(&lkups[i], lkup_exts);
7494 status = ICE_ERR_CFG;
7495 goto err_free_lkup_exts;
7499 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
7501 status = ICE_ERR_NO_MEMORY;
7502 goto err_free_lkup_exts;
7505 /* Get field vectors that contain fields extracted from all the protocol
7506 * headers being programmed.
7508 INIT_LIST_HEAD(&rm->fv_list);
7509 INIT_LIST_HEAD(&rm->rg_list);
7511 /* Get bitmap of field vectors (profiles) that are compatible with the
7512 * rule request; only these will be searched in the subsequent call to
7515 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
7517 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
7521 /* Create any special protocol/offset pairs, such as looking at tunnel
7522 * bits by extracting metadata
7524 status = ice_add_special_words(rinfo, lkup_exts);
7526 goto err_free_lkup_exts;
7528 /* Group match words into recipes using preferred recipe grouping
7531 status = ice_create_recipe_group(hw, rm, lkup_exts);
7535 /* set the recipe priority if specified */
7536 rm->priority = (u8)rinfo->priority;
7538 /* Find offsets from the field vector. Pick the first one for all the
7541 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
7545 /* An empty FV list means to use all the profiles returned in the
7548 if (LIST_EMPTY(&rm->fv_list)) {
7551 ice_for_each_set_bit(j, fv_bitmap, ICE_MAX_NUM_PROFILES) {
7552 struct ice_sw_fv_list_entry *fvl;
7554 fvl = (struct ice_sw_fv_list_entry *)
7555 ice_malloc(hw, sizeof(*fvl));
7559 fvl->profile_id = j;
7560 LIST_ADD(&fvl->list_entry, &rm->fv_list);
7564 /* get bitmap of all profiles the recipe will be associated with */
7565 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7566 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7568 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
7569 ice_set_bit((u16)fvit->profile_id, profiles);
7572 /* Look for a recipe which matches our requested fv / mask list */
7573 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
7574 if (*rid < ICE_MAX_NUM_RECIPES)
7575 /* Success if found a recipe that matches the existing criteria */
7578 rm->tun_type = rinfo->tun_type;
7579 /* Recipe we need does not exist, add a recipe */
7580 status = ice_add_sw_recipe(hw, rm, profiles);
7584 /* Associate all the recipes created with all the profiles in the
7585 * common field vector.
7587 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7589 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Read-modify-write the profile's recipe association so existing
 * associations are preserved; the write is done under the change
 * lock.
 */
7592 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
7593 (u8 *)r_bitmap, NULL);
7597 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
7598 ICE_MAX_NUM_RECIPES);
7599 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7603 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
7606 ice_release_change_lock(hw);
7611 /* Update profile to recipe bitmap array */
7612 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
7613 ICE_MAX_NUM_RECIPES);
7615 /* Update recipe to profile bitmap array */
7616 ice_for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
7617 ice_set_bit((u16)fvit->profile_id,
7618 recipe_to_profile[j]);
7621 *rid = rm->root_rid;
7622 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
7623 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
/* Cleanup: release the recipe-group and FV lists built above. */
7625 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
7626 ice_recp_grp_entry, l_entry) {
7627 LIST_DEL(&r_entry->l_entry);
7628 ice_free(hw, r_entry);
7631 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
7633 LIST_DEL(&fvit->list_entry);
7638 ice_free(hw, rm->root_buf);
7643 ice_free(hw, lkup_exts);
7649 * ice_find_dummy_packet - find dummy packet by tunnel type
7651 * @lkups: lookup elements or match criteria for the advanced recipe, one
7652 * structure per protocol header
7653 * @lkups_cnt: number of protocols
7654 * @tun_type: tunnel type from the match criteria
7655 * @pkt: dummy packet to fill according to filter match criteria
7656 * @pkt_len: packet length of dummy packet
7657 * @offsets: pointer to receive the pointer to the offsets for the packet
7660 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7661 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
7663 const struct ice_dummy_pkt_offsets **offsets)
7665 bool tcp = false, udp = false, ipv6 = false, vlan = false;
/* First pass over the lookups: derive protocol hints (TCP/UDP, IPv4 vs
 * IPv6, VLAN, NVGRE, PPPoE-carrying-IPv6) that select which dummy
 * packet template to return. Header fields are only honored when the
 * corresponding mask is set.
 */
7669 for (i = 0; i < lkups_cnt; i++) {
7670 if (lkups[i].type == ICE_UDP_ILOS)
7672 else if (lkups[i].type == ICE_TCP_IL)
7674 else if (lkups[i].type == ICE_IPV6_OFOS)
7676 else if (lkups[i].type == ICE_VLAN_OFOS)
7678 else if (lkups[i].type == ICE_IPV4_OFOS &&
7679 lkups[i].h_u.ipv4_hdr.protocol ==
7680 ICE_IPV4_NVGRE_PROTO_ID &&
7681 lkups[i].m_u.ipv4_hdr.protocol ==
7684 else if (lkups[i].type == ICE_PPPOE &&
7685 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
7686 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
7687 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
7690 else if (lkups[i].type == ICE_ETYPE_OL &&
7691 lkups[i].h_u.ethertype.ethtype_id ==
7692 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
7693 lkups[i].m_u.ethertype.ethtype_id ==
7696 else if (lkups[i].type == ICE_IPV4_IL &&
7697 lkups[i].h_u.ipv4_hdr.protocol ==
7699 lkups[i].m_u.ipv4_hdr.protocol ==
/* From here on: one early-return branch per tunnel type, ordered from
 * most specific (QinQ, GTP-U, IPsec, L2TPv3, PPPoE) to the generic
 * TCP/UDP/IPv6/VLAN fallbacks at the end.
 */
7704 if ((tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7705 tun_type == ICE_NON_TUN_QINQ) && ipv6) {
7706 *pkt = dummy_qinq_ipv6_pkt;
7707 *pkt_len = sizeof(dummy_qinq_ipv6_pkt);
7708 *offsets = dummy_qinq_ipv6_packet_offsets;
7710 } else if (tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7711 tun_type == ICE_NON_TUN_QINQ) {
7712 *pkt = dummy_qinq_ipv4_pkt;
7713 *pkt_len = sizeof(dummy_qinq_ipv4_pkt);
7714 *offsets = dummy_qinq_ipv4_packet_offsets;
7718 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_QINQ) {
7719 *pkt = dummy_qinq_pppoe_ipv6_packet;
7720 *pkt_len = sizeof(dummy_qinq_pppoe_ipv6_packet);
7721 *offsets = dummy_qinq_pppoe_packet_ipv6_offsets;
7723 } else if (tun_type == ICE_SW_TUN_PPPOE_IPV4_QINQ) {
7724 *pkt = dummy_qinq_pppoe_ipv4_pkt;
7725 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7726 *offsets = dummy_qinq_pppoe_ipv4_packet_offsets;
7728 } else if (tun_type == ICE_SW_TUN_PPPOE_QINQ ||
7729 tun_type == ICE_SW_TUN_PPPOE_PAY_QINQ) {
7730 *pkt = dummy_qinq_pppoe_ipv4_pkt;
7731 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7732 *offsets = dummy_qinq_pppoe_packet_offsets;
7736 if (tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY) {
7737 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7738 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7739 *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
7741 } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
7742 *pkt = dummy_ipv6_gtpu_ipv6_packet;
7743 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
7744 *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
7746 } else if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4) {
7747 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7748 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7749 *offsets = dummy_ipv4_gtpu_ipv4_packet_offsets;
7751 } else if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6) {
7752 *pkt = dummy_ipv4_gtpu_ipv6_packet;
7753 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_packet);
7754 *offsets = dummy_ipv4_gtpu_ipv6_packet_offsets;
7756 } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4) {
7757 *pkt = dummy_ipv6_gtpu_ipv4_packet;
7758 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_packet);
7759 *offsets = dummy_ipv6_gtpu_ipv4_packet_offsets;
7761 } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6) {
7762 *pkt = dummy_ipv6_gtpu_ipv6_packet;
7763 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
7764 *offsets = dummy_ipv6_gtpu_ipv6_packet_offsets;
7768 if (tun_type == ICE_SW_TUN_IPV4_ESP) {
7769 *pkt = dummy_ipv4_esp_pkt;
7770 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
7771 *offsets = dummy_ipv4_esp_packet_offsets;
7775 if (tun_type == ICE_SW_TUN_IPV6_ESP) {
7776 *pkt = dummy_ipv6_esp_pkt;
7777 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
7778 *offsets = dummy_ipv6_esp_packet_offsets;
7782 if (tun_type == ICE_SW_TUN_IPV4_AH) {
7783 *pkt = dummy_ipv4_ah_pkt;
7784 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
7785 *offsets = dummy_ipv4_ah_packet_offsets;
7789 if (tun_type == ICE_SW_TUN_IPV6_AH) {
7790 *pkt = dummy_ipv6_ah_pkt;
7791 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
7792 *offsets = dummy_ipv6_ah_packet_offsets;
7796 if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
7797 *pkt = dummy_ipv4_nat_pkt;
7798 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
7799 *offsets = dummy_ipv4_nat_packet_offsets;
7803 if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
7804 *pkt = dummy_ipv6_nat_pkt;
7805 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
7806 *offsets = dummy_ipv6_nat_packet_offsets;
7810 if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
7811 *pkt = dummy_ipv4_l2tpv3_pkt;
7812 *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
7813 *offsets = dummy_ipv4_l2tpv3_packet_offsets;
7817 if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
7818 *pkt = dummy_ipv6_l2tpv3_pkt;
7819 *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
7820 *offsets = dummy_ipv6_l2tpv3_packet_offsets;
7824 if (tun_type == ICE_SW_TUN_GTP) {
7825 *pkt = dummy_udp_gtp_packet;
7826 *pkt_len = sizeof(dummy_udp_gtp_packet);
7827 *offsets = dummy_udp_gtp_packet_offsets;
7831 if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
7832 *pkt = dummy_pppoe_ipv6_packet;
7833 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
7834 *offsets = dummy_pppoe_packet_offsets;
7836 } else if (tun_type == ICE_SW_TUN_PPPOE ||
7837 tun_type == ICE_SW_TUN_PPPOE_PAY) {
7838 *pkt = dummy_pppoe_ipv4_packet;
7839 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
7840 *offsets = dummy_pppoe_packet_offsets;
7844 if (tun_type == ICE_SW_TUN_PPPOE_IPV4) {
7845 *pkt = dummy_pppoe_ipv4_packet;
7846 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
7847 *offsets = dummy_pppoe_packet_ipv4_offsets;
7851 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_TCP) {
7852 *pkt = dummy_pppoe_ipv4_tcp_packet;
7853 *pkt_len = sizeof(dummy_pppoe_ipv4_tcp_packet);
7854 *offsets = dummy_pppoe_ipv4_tcp_packet_offsets;
7858 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_UDP) {
7859 *pkt = dummy_pppoe_ipv4_udp_packet;
7860 *pkt_len = sizeof(dummy_pppoe_ipv4_udp_packet);
7861 *offsets = dummy_pppoe_ipv4_udp_packet_offsets;
7865 if (tun_type == ICE_SW_TUN_PPPOE_IPV6) {
7866 *pkt = dummy_pppoe_ipv6_packet;
7867 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
7868 *offsets = dummy_pppoe_packet_ipv6_offsets;
7872 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_TCP) {
7873 *pkt = dummy_pppoe_ipv6_tcp_packet;
7874 *pkt_len = sizeof(dummy_pppoe_ipv6_tcp_packet);
7875 *offsets = dummy_pppoe_packet_ipv6_tcp_offsets;
7879 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_UDP) {
7880 *pkt = dummy_pppoe_ipv6_udp_packet;
7881 *pkt_len = sizeof(dummy_pppoe_ipv6_udp_packet);
7882 *offsets = dummy_pppoe_packet_ipv6_udp_offsets;
7886 if (tun_type == ICE_SW_IPV4_TCP) {
7887 *pkt = dummy_tcp_packet;
7888 *pkt_len = sizeof(dummy_tcp_packet);
7889 *offsets = dummy_tcp_packet_offsets;
7893 if (tun_type == ICE_SW_IPV4_UDP) {
7894 *pkt = dummy_udp_packet;
7895 *pkt_len = sizeof(dummy_udp_packet);
7896 *offsets = dummy_udp_packet_offsets;
7900 if (tun_type == ICE_SW_IPV6_TCP) {
7901 *pkt = dummy_tcp_ipv6_packet;
7902 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
7903 *offsets = dummy_tcp_ipv6_packet_offsets;
7907 if (tun_type == ICE_SW_IPV6_UDP) {
7908 *pkt = dummy_udp_ipv6_packet;
7909 *pkt_len = sizeof(dummy_udp_ipv6_packet);
7910 *offsets = dummy_udp_ipv6_packet_offsets;
7914 if (tun_type == ICE_ALL_TUNNELS) {
7915 *pkt = dummy_gre_udp_packet;
7916 *pkt_len = sizeof(dummy_gre_udp_packet);
7917 *offsets = dummy_gre_udp_packet_offsets;
/* GRE/NVGRE: pick TCP or UDP inner template based on the flags
 * gathered in the first pass.
 */
7921 if (tun_type == ICE_SW_TUN_NVGRE || gre) {
7923 *pkt = dummy_gre_tcp_packet;
7924 *pkt_len = sizeof(dummy_gre_tcp_packet);
7925 *offsets = dummy_gre_tcp_packet_offsets;
7929 *pkt = dummy_gre_udp_packet;
7930 *pkt_len = sizeof(dummy_gre_udp_packet);
7931 *offsets = dummy_gre_udp_packet_offsets;
7935 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
7936 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
7937 tun_type == ICE_SW_TUN_GENEVE_VLAN ||
7938 tun_type == ICE_SW_TUN_VXLAN_VLAN) {
7940 *pkt = dummy_udp_tun_tcp_packet;
7941 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
7942 *offsets = dummy_udp_tun_tcp_packet_offsets;
7946 *pkt = dummy_udp_tun_udp_packet;
7947 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
7948 *offsets = dummy_udp_tun_udp_packet_offsets;
/* Non-tunnel fallbacks: choose by udp/tcp, ipv6 and vlan hints. */
7954 *pkt = dummy_vlan_udp_packet;
7955 *pkt_len = sizeof(dummy_vlan_udp_packet);
7956 *offsets = dummy_vlan_udp_packet_offsets;
7959 *pkt = dummy_udp_packet;
7960 *pkt_len = sizeof(dummy_udp_packet);
7961 *offsets = dummy_udp_packet_offsets;
7963 } else if (udp && ipv6) {
7965 *pkt = dummy_vlan_udp_ipv6_packet;
7966 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
7967 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
7970 *pkt = dummy_udp_ipv6_packet;
7971 *pkt_len = sizeof(dummy_udp_ipv6_packet);
7972 *offsets = dummy_udp_ipv6_packet_offsets;
7974 } else if ((tcp && ipv6) || ipv6) {
7976 *pkt = dummy_vlan_tcp_ipv6_packet;
7977 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
7978 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
7981 *pkt = dummy_tcp_ipv6_packet;
7982 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
7983 *offsets = dummy_tcp_ipv6_packet_offsets;
/* Final default: plain IPv4 TCP template (with/without VLAN). */
7988 *pkt = dummy_vlan_tcp_packet;
7989 *pkt_len = sizeof(dummy_vlan_tcp_packet);
7990 *offsets = dummy_vlan_tcp_packet_offsets;
7992 *pkt = dummy_tcp_packet;
7993 *pkt_len = sizeof(dummy_tcp_packet);
7994 *offsets = dummy_tcp_packet_offsets;
7999 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
8001 * @lkups: lookup elements or match criteria for the advanced recipe, one
8002 * structure per protocol header
8003 * @lkups_cnt: number of protocols
8004 * @s_rule: stores rule information from the match criteria
8005 * @dummy_pkt: dummy packet to fill according to filter match criteria
8006 * @pkt_len: packet length of dummy packet
8007 * @offsets: offset info for the dummy packet
8009 static enum ice_status
8010 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
8011 struct ice_aqc_sw_rules_elem *s_rule,
8012 const u8 *dummy_pkt, u16 pkt_len,
8013 const struct ice_dummy_pkt_offsets *offsets)
8018 /* Start with a packet with a pre-defined/dummy content. Then, fill
8019 * in the header values to be looked up or matched.
8021 pkt = s_rule->pdata.lkup_tx_rx.hdr;
/* copy the template into the rule's header buffer; the matched fields
 * are merged over it below
 */
8023 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
8025 for (i = 0; i < lkups_cnt; i++) {
8026 enum ice_protocol_type type;
8027 u16 offset = 0, len = 0, j;
8030 /* find the start of this layer; it should be found since this
8031 * was already checked when searching for the dummy packet
8033 type = lkups[i].type;
8034 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
8035 if (type == offsets[j].type) {
8036 offset = offsets[j].offset;
8041 /* this should never happen in a correct calling sequence */
8043 return ICE_ERR_PARAM;
/* the header length to merge depends on the protocol of this lookup */
8045 switch (lkups[i].type) {
8048 len = sizeof(struct ice_ether_hdr);
8051 len = sizeof(struct ice_ethtype_hdr);
8055 len = sizeof(struct ice_vlan_hdr);
8059 len = sizeof(struct ice_ipv4_hdr);
8063 len = sizeof(struct ice_ipv6_hdr);
8068 len = sizeof(struct ice_l4_hdr);
8071 len = sizeof(struct ice_sctp_hdr);
8074 len = sizeof(struct ice_nvgre);
8079 len = sizeof(struct ice_udp_tnl_hdr);
8083 case ICE_GTP_NO_PAY:
8084 len = sizeof(struct ice_udp_gtp_hdr);
8087 len = sizeof(struct ice_pppoe_hdr);
8090 len = sizeof(struct ice_esp_hdr);
8093 len = sizeof(struct ice_nat_t_hdr);
8096 len = sizeof(struct ice_ah_hdr);
8099 len = sizeof(struct ice_l2tpv3_sess_hdr);
/* unknown protocol type in the lookup list */
8102 return ICE_ERR_PARAM;
8105 /* the length should be a word multiple */
8106 if (len % ICE_BYTES_PER_WORD)
8109 /* We have the offset to the header start, the length, the
8110 * caller's header values and mask. Use this information to
8111 * copy the data into the dummy packet appropriately based on
8112 * the mask. Note that we need to only write the bits as
8113 * indicated by the mask to make sure we don't improperly write
8114 * over any significant packet data.
/* word-wise merge: keep dummy bytes where mask is 0, take caller's
 * header value where mask bits are set; words with an all-zero mask
 * are skipped entirely
 */
8116 for (j = 0; j < len / sizeof(u16); j++)
8117 if (((u16 *)&lkups[i].m_u)[j])
8118 ((u16 *)(pkt + offset))[j] =
8119 (((u16 *)(pkt + offset))[j] &
8120 ~((u16 *)&lkups[i].m_u)[j]) |
8121 (((u16 *)&lkups[i].h_u)[j] &
8122 ((u16 *)&lkups[i].m_u)[j]);
/* record how many bytes of training packet follow the rule header */
8125 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
8131 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
8132 * @hw: pointer to the hardware structure
8133 * @tun_type: tunnel type
8134 * @pkt: dummy packet to fill in
8135 * @offsets: offset info for the dummy packet
8137 static enum ice_status
8138 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
8139 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
/* pick the currently-open tunnel port that matches the tunnel family */
8144 case ICE_SW_TUN_AND_NON_TUN:
8145 case ICE_SW_TUN_VXLAN_GPE:
8146 case ICE_SW_TUN_VXLAN:
8147 case ICE_SW_TUN_VXLAN_VLAN:
8148 case ICE_SW_TUN_UDP:
8149 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
8153 case ICE_SW_TUN_GENEVE:
8154 case ICE_SW_TUN_GENEVE_VLAN:
8155 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
8160 /* Nothing needs to be done for this tunnel type */
8164 /* Find the outer UDP protocol header and insert the port number */
8165 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
8166 if (offsets[i].type == ICE_UDP_OF) {
8167 struct ice_l4_hdr *hdr;
8170 offset = offsets[i].offset;
/* overlay the L4 header on the raw packet bytes and patch the
 * destination port (network byte order)
 */
8171 hdr = (struct ice_l4_hdr *)&pkt[offset];
8172 hdr->dst_port = CPU_TO_BE16(open_port);
8182 * ice_find_adv_rule_entry - Search a rule entry
8183 * @hw: pointer to the hardware structure
8184 * @lkups: lookup elements or match criteria for the advanced recipe, one
8185 * structure per protocol header
8186 * @lkups_cnt: number of protocols
8187 * @recp_id: recipe ID for which we are finding the rule
8188 * @rinfo: other information regarding the rule e.g. priority and action info
8190 * Helper function to search for a given advanced rule entry
8191 * Returns pointer to entry storing the rule if found
8193 static struct ice_adv_fltr_mgmt_list_entry *
8194 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8195 u16 lkups_cnt, u16 recp_id,
8196 struct ice_adv_rule_info *rinfo)
8198 struct ice_adv_fltr_mgmt_list_entry *list_itr;
8199 struct ice_switch_info *sw = hw->switch_info;
/* walk the bookkeeping list of rules programmed for this recipe */
8202 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
8203 ice_adv_fltr_mgmt_list_entry, list_entry) {
8204 bool lkups_matched = true;
/* a candidate must have the identical number of lookups ... */
8206 if (lkups_cnt != list_itr->lkups_cnt)
/* ... and every lookup element must compare equal byte-for-byte */
8208 for (i = 0; i < list_itr->lkups_cnt; i++)
8209 if (memcmp(&list_itr->lkups[i], &lkups[i],
8211 lkups_matched = false;
/* lookups alone are not enough: the rule's flag and tunnel type
 * must also agree with the caller's rule info
 */
8214 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
8215 rinfo->tun_type == list_itr->rule_info.tun_type &&
8223 * ice_adv_add_update_vsi_list
8224 * @hw: pointer to the hardware structure
8225 * @m_entry: pointer to current adv filter management list entry
8226 * @cur_fltr: filter information from the book keeping entry
8227 * @new_fltr: filter information with the new VSI to be added
8229 * Call AQ command to add or update previously created VSI list with new VSI.
8231 * Helper function to do book keeping associated with adding filter information
8232 * The algorithm to do the book keeping is described below :
8233 * When a VSI needs to subscribe to a given advanced filter
8234 * if only one VSI has been added till now
8235 * Allocate a new VSI list and add two VSIs
8236 * to this list using switch rule command
8237 * Update the previously created switch rule with the
8238 * newly created VSI list ID
8239 * if a VSI list was previously created
8240 * Add the new VSI to the previously created VSI list set
8241 * using the update switch rule command
8243 static enum ice_status
8244 ice_adv_add_update_vsi_list(struct ice_hw *hw,
8245 struct ice_adv_fltr_mgmt_list_entry *m_entry,
8246 struct ice_adv_rule_info *cur_fltr,
8247 struct ice_adv_rule_info *new_fltr)
8249 enum ice_status status;
8250 u16 vsi_list_id = 0;
/* only VSI-forwarding rules can be aggregated into a VSI list */
8252 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
8253 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
8254 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
8255 return ICE_ERR_NOT_IMPL;
/* mixing queue/queue-group forwarding with VSI forwarding on the same
 * rule is not supported
 */
8257 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
8258 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
8259 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
8260 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
8261 return ICE_ERR_NOT_IMPL;
8263 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
8264 /* Only one entry existed in the mapping and it was not already
8265 * a part of a VSI list. So, create a VSI list with the old and
8268 struct ice_fltr_info tmp_fltr;
8269 u16 vsi_handle_arr[2];
8271 /* A rule already exists with the new VSI being added */
8272 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
8273 new_fltr->sw_act.fwd_id.hw_vsi_id)
8274 return ICE_ERR_ALREADY_EXISTS;
8276 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
8277 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
8278 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
8284 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
8285 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
8286 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
8287 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
8288 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
8289 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
8291 /* Update the previous switch rule of "forward to VSI" to
8294 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* keep bookkeeping in sync with what was just programmed */
8298 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
8299 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
8300 m_entry->vsi_list_info =
8301 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
8304 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
8306 if (!m_entry->vsi_list_info)
8309 /* A rule already exists with the new VSI being added */
8310 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
8313 /* Update the previously created VSI list set with
8314 * the new VSI ID passed in
8316 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
8318 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
8320 ice_aqc_opc_update_sw_rules,
8322 /* update VSI list mapping info with new VSI ID */
8324 ice_set_bit(vsi_handle,
8325 m_entry->vsi_list_info->vsi_map);
/* one more VSI now subscribes to this rule */
8328 m_entry->vsi_count++;
8333 * ice_add_adv_rule - helper function to create an advanced switch rule
8334 * @hw: pointer to the hardware structure
8335 * @lkups: information on the words that needs to be looked up. All words
8336 * together makes one recipe
8337 * @lkups_cnt: num of entries in the lkups array
8338 * @rinfo: other information related to the rule that needs to be programmed
8339 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
8340 * ignored in case of error.
8342 * This function can program only 1 rule at a time. The lkups is used to
8343 * describe all the words that form the "lookup" portion of the recipe.
8344 * These words can span multiple protocols. Callers to this function need to
8345 * pass in a list of protocol headers with lookup information along and mask
8346 * that determines which words are valid from the given protocol header.
8347 * rinfo describes other information related to this rule such as forwarding
8348 * IDs, priority of this rule, etc.
8351 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8352 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
8353 struct ice_rule_query_data *added_entry)
8355 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
8356 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
8357 const struct ice_dummy_pkt_offsets *pkt_offsets;
8358 struct ice_aqc_sw_rules_elem *s_rule = NULL;
8359 struct LIST_HEAD_TYPE *rule_head;
8360 struct ice_switch_info *sw;
8361 enum ice_status status;
8362 const u8 *pkt = NULL;
8368 /* Initialize profile to result index bitmap */
8369 if (!hw->switch_info->prof_res_bm_init) {
8370 hw->switch_info->prof_res_bm_init = 1;
8371 ice_init_prof_result_bm(hw);
/* a profile rule matches on tunnel type alone and may have no lookups */
8374 prof_rule = ice_is_prof_rule(rinfo->tun_type);
8375 if (!prof_rule && !lkups_cnt)
8376 return ICE_ERR_PARAM;
8378 /* get # of words we need to match */
8380 for (i = 0; i < lkups_cnt; i++) {
8383 ptr = (u16 *)&lkups[i].m_u;
8384 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
8390 if (word_cnt > ICE_MAX_CHAIN_WORDS)
8391 return ICE_ERR_PARAM;
8393 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
8394 return ICE_ERR_PARAM;
8397 /* make sure that we can locate a dummy packet */
8398 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
8401 status = ICE_ERR_PARAM;
8402 goto err_ice_add_adv_rule;
/* only these four forwarding actions are supported for advanced rules */
8405 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
8406 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
8407 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
8408 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
8411 vsi_handle = rinfo->sw_act.vsi_handle;
8412 if (!ice_is_vsi_valid(hw, vsi_handle))
8413 return ICE_ERR_PARAM;
8415 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
8416 rinfo->sw_act.fwd_id.hw_vsi_id =
8417 ice_get_hw_vsi_num(hw, vsi_handle);
8418 if (rinfo->sw_act.flag & ICE_FLTR_TX)
8419 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
/* create (or reuse) the recipe and get its ID for this set of lookups */
8421 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
8424 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
8426 /* we have to add VSI to VSI_LIST and increment vsi_count.
8427 * Also Update VSI list so that we can change forwarding rule
8428 * if the rule already exists, we will check if it exists with
8429 * same vsi_id, if not then add it to the VSI list if it already
8430 * exists if not then create a VSI list and add the existing VSI
8431 * ID and the new VSI ID to the list
8432 * We will add that VSI to the list
8434 status = ice_adv_add_update_vsi_list(hw, m_entry,
8435 &m_entry->rule_info,
8438 added_entry->rid = rid;
8439 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
8440 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* no matching rule existed: build a new switch rule from scratch.
 * The buffer holds the rule header plus the dummy/training packet.
 */
8444 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
8445 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
8447 return ICE_ERR_NO_MEMORY;
8448 act |= ICE_SINGLE_ACT_LAN_ENABLE;
/* encode the forwarding action into the rule's 32-bit act field */
8449 switch (rinfo->sw_act.fltr_act) {
8450 case ICE_FWD_TO_VSI:
8451 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
8452 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
8453 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
8456 act |= ICE_SINGLE_ACT_TO_Q;
8457 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
8458 ICE_SINGLE_ACT_Q_INDEX_M;
8460 case ICE_FWD_TO_QGRP:
/* queue region size is encoded as log2 of the group size */
8461 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
8462 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
8463 act |= ICE_SINGLE_ACT_TO_Q;
8464 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
8465 ICE_SINGLE_ACT_Q_INDEX_M;
8466 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
8467 ICE_SINGLE_ACT_Q_REGION_M;
8469 case ICE_DROP_PACKET:
8470 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
8471 ICE_SINGLE_ACT_VALID_BIT;
8474 status = ICE_ERR_CFG;
8475 goto err_ice_add_adv_rule;
8478 /* set the rule LOOKUP type based on caller specified 'RX'
8479 * instead of hardcoding it to be either LOOKUP_TX/RX
8481 * for 'RX' set the source to be the port number
8482 * for 'TX' set the source to be the source HW VSI number (determined
8486 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
8487 s_rule->pdata.lkup_tx_rx.src =
8488 CPU_TO_LE16(hw->port_info->lport);
8490 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
8491 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
8494 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
8495 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* merge the caller's match values/masks over the dummy packet */
8497 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
8498 pkt_len, pkt_offsets);
8500 goto err_ice_add_adv_rule;
/* for tunnel rules, patch the open tunnel UDP port into the packet */
8502 if (rinfo->tun_type != ICE_NON_TUN &&
8503 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
8504 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
8505 s_rule->pdata.lkup_tx_rx.hdr,
8508 goto err_ice_add_adv_rule;
/* program the rule into hardware via the admin queue */
8511 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
8512 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
8515 goto err_ice_add_adv_rule;
8516 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
8517 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
8519 status = ICE_ERR_NO_MEMORY;
8520 goto err_ice_add_adv_rule;
/* keep a private copy of the lookups for later match/replay/removal */
8523 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
8524 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
8525 ICE_NONDMA_TO_NONDMA);
8526 if (!adv_fltr->lkups && !prof_rule) {
8527 status = ICE_ERR_NO_MEMORY;
8528 goto err_ice_add_adv_rule;
8531 adv_fltr->lkups_cnt = lkups_cnt;
8532 adv_fltr->rule_info = *rinfo;
/* the firmware-assigned rule index becomes the rule ID */
8533 adv_fltr->rule_info.fltr_rule_id =
8534 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
8535 sw = hw->switch_info;
8536 sw->recp_list[rid].adv_rule = true;
8537 rule_head = &sw->recp_list[rid].filt_rules;
8539 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
8540 adv_fltr->vsi_count = 1;
8542 /* Add rule entry to book keeping list */
8543 LIST_ADD(&adv_fltr->list_entry, rule_head);
8545 added_entry->rid = rid;
8546 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
8547 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
8549 err_ice_add_adv_rule:
/* on failure, release the partially built bookkeeping entry */
8550 if (status && adv_fltr) {
8551 ice_free(hw, adv_fltr->lkups);
8552 ice_free(hw, adv_fltr);
8555 ice_free(hw, s_rule);
8561 * ice_adv_rem_update_vsi_list
8562 * @hw: pointer to the hardware structure
8563 * @vsi_handle: VSI handle of the VSI to remove
8564 * @fm_list: filter management entry for which the VSI list management needs to
8567 static enum ice_status
8568 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
8569 struct ice_adv_fltr_mgmt_list_entry *fm_list)
8571 struct ice_vsi_list_map_info *vsi_list_info;
8572 enum ice_sw_lkup_type lkup_type;
8573 enum ice_status status;
/* only meaningful for rules currently forwarding to a VSI list */
8576 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
8577 fm_list->vsi_count == 0)
8578 return ICE_ERR_PARAM;
8580 /* A rule with the VSI being removed does not exist */
8581 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
8582 return ICE_ERR_DOES_NOT_EXIST;
8584 lkup_type = ICE_SW_LKUP_LAST;
8585 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
/* remove (last arg true) this VSI from the hardware VSI list */
8586 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
8587 ice_aqc_opc_update_sw_rules,
8592 fm_list->vsi_count--;
8593 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
8594 vsi_list_info = fm_list->vsi_list_info;
/* with one subscriber left, collapse the list rule back into a plain
 * forward-to-VSI rule and free the now-unneeded VSI list
 */
8595 if (fm_list->vsi_count == 1) {
8596 struct ice_fltr_info tmp_fltr;
8599 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
8601 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
8602 return ICE_ERR_OUT_OF_RANGE;
8604 /* Make sure VSI list is empty before removing it below */
8605 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
8607 ice_aqc_opc_update_sw_rules,
8612 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
8613 tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
8614 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
8615 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
8616 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
8617 tmp_fltr.fwd_id.hw_vsi_id =
8618 ice_get_hw_vsi_num(hw, rem_vsi_handle);
8619 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
8620 ice_get_hw_vsi_num(hw, rem_vsi_handle);
8621 fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
8623 /* Update the previous "forward to VSI list" switch rule to
8624 * forward directly to the one remaining VSI
8626 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
8628 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
8629 tmp_fltr.fwd_id.hw_vsi_id, status);
8632 fm_list->vsi_list_info->ref_cnt--;
8634 /* Remove the VSI list since it is no longer used */
8635 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
8637 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
8638 vsi_list_id, status);
8642 LIST_DEL(&vsi_list_info->list_entry);
8643 ice_free(hw, vsi_list_info);
8644 fm_list->vsi_list_info = NULL;
8651 * ice_rem_adv_rule - removes existing advanced switch rule
8652 * @hw: pointer to the hardware structure
8653 * @lkups: information on the words that needs to be looked up. All words
8654 * together makes one recipe
8655 * @lkups_cnt: num of entries in the lkups array
8656 * @rinfo: pointer to the rule information for the rule
8658 * This function can be used to remove 1 rule at a time. The lkups is
8659 * used to describe all the words that forms the "lookup" portion of the
8660 * rule. These words can span multiple protocols. Callers to this function
8661 * need to pass in a list of protocol headers with lookup information along
8662 * and mask that determines which words are valid from the given protocol
8663 * header. rinfo describes other information related to this rule such as
8664 * forwarding IDs, priority of this rule, etc.
8667 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8668 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
8670 struct ice_adv_fltr_mgmt_list_entry *list_elem;
8671 struct ice_prot_lkup_ext lkup_exts;
8672 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
8673 enum ice_status status = ICE_SUCCESS;
8674 bool remove_rule = false;
8675 u16 i, rid, vsi_handle;
/* rebuild the protocol/offset word list so the recipe can be located */
8677 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
8678 for (i = 0; i < lkups_cnt; i++) {
8681 if (lkups[i].type >= ICE_PROTOCOL_LAST)
8684 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
8689 /* Create any special protocol/offset pairs, such as looking at tunnel
8690 * bits by extracting metadata
8692 status = ice_add_special_words(rinfo, &lkup_exts);
8696 rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
8697 /* If did not find a recipe that match the existing criteria */
8698 if (rid == ICE_MAX_NUM_RECIPES)
8699 return ICE_ERR_PARAM;
8701 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
8702 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
8703 /* the rule is already removed */
/* decide under the lock whether the rule itself must be deleted or
 * only this VSI's subscription dropped from the VSI list
 */
8706 ice_acquire_lock(rule_lock);
8707 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
8709 } else if (list_elem->vsi_count > 1) {
/* other VSIs still subscribe: just remove ours from the list */
8710 remove_rule = false;
8711 vsi_handle = rinfo->sw_act.vsi_handle;
8712 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
8714 vsi_handle = rinfo->sw_act.vsi_handle;
8715 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
8717 ice_release_lock(rule_lock);
8720 if (list_elem->vsi_count == 0)
8723 ice_release_lock(rule_lock);
8725 struct ice_aqc_sw_rules_elem *s_rule;
/* build a minimal rule element: only the rule index is needed for
 * the remove opcode, no training packet
 */
8728 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
8729 s_rule = (struct ice_aqc_sw_rules_elem *)
8730 ice_malloc(hw, rule_buf_sz);
8732 return ICE_ERR_NO_MEMORY;
8733 s_rule->pdata.lkup_tx_rx.act = 0;
8734 s_rule->pdata.lkup_tx_rx.index =
8735 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
8736 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
8737 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
8739 ice_aqc_opc_remove_sw_rules, NULL);
/* treat DOES_NOT_EXIST as success: the rule is gone either way */
8740 if (status == ICE_SUCCESS || status == ICE_ERR_DOES_NOT_EXIST) {
8741 struct ice_switch_info *sw = hw->switch_info;
8743 ice_acquire_lock(rule_lock);
8744 LIST_DEL(&list_elem->list_entry);
8745 ice_free(hw, list_elem->lkups);
8746 ice_free(hw, list_elem);
8747 ice_release_lock(rule_lock);
8748 if (LIST_EMPTY(&sw->recp_list[rid].filt_rules))
8749 sw->recp_list[rid].adv_rule = false;
8751 ice_free(hw, s_rule);
8757 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
8758 * @hw: pointer to the hardware structure
8759 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
8761 * This function is used to remove 1 rule at a time. The removal is based on
8762 * the remove_entry parameter. This function will remove rule for a given
8763 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
8766 ice_rem_adv_rule_by_id(struct ice_hw *hw,
8767 struct ice_rule_query_data *remove_entry)
8769 struct ice_adv_fltr_mgmt_list_entry *list_itr;
8770 struct LIST_HEAD_TYPE *list_head;
8771 struct ice_adv_rule_info rinfo;
8772 struct ice_switch_info *sw;
8774 sw = hw->switch_info;
/* recipe must exist before its rule list can be searched */
8775 if (!sw->recp_list[remove_entry->rid].recp_created)
8776 return ICE_ERR_PARAM;
8777 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
8778 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
8780 if (list_itr->rule_info.fltr_rule_id ==
8781 remove_entry->rule_id) {
/* found by ID: delegate to the lookup-based removal, using a
 * copy of the stored rule info with the caller's VSI handle
 */
8782 rinfo = list_itr->rule_info;
8783 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
8784 return ice_rem_adv_rule(hw, list_itr->lkups,
8785 list_itr->lkups_cnt, &rinfo);
8788 /* either list is empty or unable to find rule */
8789 return ICE_ERR_DOES_NOT_EXIST;
8793 * ice_rem_adv_for_vsi - removes existing advanced switch rules for a
8795 * @hw: pointer to the hardware structure
8796 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
8798 * This function is used to remove all the rules for a given VSI and as soon
8799 * as removing a rule fails, it will return immediately with the error code,
8800 * else it will return ICE_SUCCESS
8802 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
8804 struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
8805 struct ice_vsi_list_map_info *map_info;
8806 struct LIST_HEAD_TYPE *list_head;
8807 struct ice_adv_rule_info rinfo;
8808 struct ice_switch_info *sw;
8809 enum ice_status status;
8812 sw = hw->switch_info;
/* scan every recipe; only those carrying advanced rules are relevant */
8813 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
8814 if (!sw->recp_list[rid].recp_created)
8816 if (!sw->recp_list[rid].adv_rule)
8819 list_head = &sw->recp_list[rid].filt_rules;
/* SAFE variant: ice_rem_adv_rule() below unlinks entries while
 * we iterate
 */
8820 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp_entry, list_head,
8821 ice_adv_fltr_mgmt_list_entry,
8823 rinfo = list_itr->rule_info;
/* for VSI-list rules, check list membership; for single-VSI
 * rules, compare the handle directly
 */
8825 if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
8826 map_info = list_itr->vsi_list_info;
8830 if (!ice_is_bit_set(map_info->vsi_map,
8833 } else if (rinfo.sw_act.vsi_handle != vsi_handle) {
8837 rinfo.sw_act.vsi_handle = vsi_handle;
8838 status = ice_rem_adv_rule(hw, list_itr->lkups,
8839 list_itr->lkups_cnt, &rinfo);
8849 * ice_replay_fltr - Replay all the filters stored by a specific list head
8850 * @hw: pointer to the hardware structure
8851 * @list_head: list for which filters needs to be replayed
8852 * @recp_id: Recipe ID for which rules need to be replayed
8854 static enum ice_status
8855 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
8857 struct ice_fltr_mgmt_list_entry *itr;
8858 enum ice_status status = ICE_SUCCESS;
8859 struct ice_sw_recipe *recp_list;
8860 u8 lport = hw->port_info->lport;
8861 struct LIST_HEAD_TYPE l_head;
8863 if (LIST_EMPTY(list_head))
8866 recp_list = &hw->switch_info->recp_list[recp_id];
8867 /* Move entries from the given list_head to a temporary l_head so that
8868 * they can be replayed. Otherwise when trying to re-add the same
8869 * filter, the function will return already exists
8871 LIST_REPLACE_INIT(list_head, &l_head);
8873 /* Mark the given list_head empty by reinitializing it so filters
8874 * could be added again by *handler
8876 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
8878 struct ice_fltr_list_entry f_entry;
8881 f_entry.fltr_info = itr->fltr_info;
/* single-VSI (non-VLAN) filters are re-added directly */
8882 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
8883 status = ice_add_rule_internal(hw, recp_list, lport,
8885 if (status != ICE_SUCCESS)
8890 /* Add a filter per VSI separately */
8891 ice_for_each_set_bit(vsi_handle, itr->vsi_list_info->vsi_map,
8893 if (!ice_is_vsi_valid(hw, vsi_handle))
/* clear the bit first so the re-add path rebuilds the mapping */
8896 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
8897 f_entry.fltr_info.vsi_handle = vsi_handle;
8898 f_entry.fltr_info.fwd_id.hw_vsi_id =
8899 ice_get_hw_vsi_num(hw, vsi_handle);
8900 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
/* VLAN filters have a dedicated add path */
8901 if (recp_id == ICE_SW_LKUP_VLAN)
8902 status = ice_add_vlan_internal(hw, recp_list,
8905 status = ice_add_rule_internal(hw, recp_list,
8908 if (status != ICE_SUCCESS)
8913 /* Clear the filter management list */
8914 ice_rem_sw_rule_info(hw, &l_head);
8919 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
8920 * @hw: pointer to the hardware structure
8922 * NOTE: This function does not clean up partially added filters on error.
8923 * It is up to caller of the function to issue a reset or fail early.
8925 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
8927 struct ice_switch_info *sw = hw->switch_info;
8928 enum ice_status status = ICE_SUCCESS;
/* replay each recipe's filter list in turn, stopping on first error */
8931 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8932 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
8934 status = ice_replay_fltr(hw, i, head);
8935 if (status != ICE_SUCCESS)
8942 * ice_replay_vsi_fltr - Replay filters for requested VSI
8943 * @hw: pointer to the hardware structure
8944 * @pi: pointer to port information structure
8945 * @sw: pointer to switch info struct for which function replays filters
8946 * @vsi_handle: driver VSI handle
8947 * @recp_id: Recipe ID for which rules need to be replayed
8948 * @list_head: list for which filters need to be replayed
8950 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
8951 * It is required to pass valid VSI handle.
8953 static enum ice_status
8954 ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
8955 struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
8956 struct LIST_HEAD_TYPE *list_head)
8958 struct ice_fltr_mgmt_list_entry *itr;
8959 enum ice_status status = ICE_SUCCESS;
8960 struct ice_sw_recipe *recp_list;
8963 if (LIST_EMPTY(list_head))
8965 recp_list = &sw->recp_list[recp_id];
8966 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
8968 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
8970 struct ice_fltr_list_entry f_entry;
8972 f_entry.fltr_info = itr->fltr_info;
/* single-VSI (non-VLAN) filter owned by this VSI: re-add as-is */
8973 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
8974 itr->fltr_info.vsi_handle == vsi_handle) {
8975 /* update the src in case it is VSI num */
8976 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
8977 f_entry.fltr_info.src = hw_vsi_id;
8978 status = ice_add_rule_internal(hw, recp_list,
8981 if (status != ICE_SUCCESS)
/* VSI-list filter: only replay if this VSI is in the list */
8985 if (!itr->vsi_list_info ||
8986 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
8988 /* Clearing it so that the logic can add it back */
8989 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map)
8990 f_entry.fltr_info.vsi_handle = vsi_handle;
8991 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
8992 /* update the src in case it is VSI num */
8993 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
8994 f_entry.fltr_info.src = hw_vsi_id;
8995 if (recp_id == ICE_SW_LKUP_VLAN)
8996 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
8998 status = ice_add_rule_internal(hw, recp_list,
9001 if (status != ICE_SUCCESS)
9009 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
9010 * @hw: pointer to the hardware structure
9011 * @vsi_handle: driver VSI handle
9012 * @list_head: list for which filters need to be replayed
9014 * Replay the advanced rule for the given VSI.
9016 static enum ice_status
9017 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
9018 struct LIST_HEAD_TYPE *list_head)
9020 struct ice_rule_query_data added_entry = { 0 };
9021 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
9022 enum ice_status status = ICE_SUCCESS;
9024 if (LIST_EMPTY(list_head))
9026 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
9028 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
9029 u16 lk_cnt = adv_fltr->lkups_cnt;
/* replay only rules that belong to the requested VSI */
9031 if (vsi_handle != rinfo->sw_act.vsi_handle)
/* re-program the rule from the saved lookups and rule info */
9033 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
9042 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
9043 * @hw: pointer to the hardware structure
9044 * @pi: pointer to port information structure
9045 * @vsi_handle: driver VSI handle
9047 * Replays filters for requested VSI via vsi_handle.
9050 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
9053 struct ice_switch_info *sw = hw->switch_info;
9054 enum ice_status status;
9057 /* Update the recipes that were created */
9058 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9059 struct LIST_HEAD_TYPE *head;
9061 head = &sw->recp_list[i].filt_replay_rules;
/* advanced-rule recipes take the advanced replay path */
9062 if (!sw->recp_list[i].adv_rule)
9063 status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
9066 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
9067 if (status != ICE_SUCCESS)
9075 * ice_rm_sw_replay_rule_info - helper function to delete filter replay rules
9076 * @hw: pointer to the HW struct
9077 * @sw: pointer to switch info struct for which function removes filters
9079 * Deletes the filter replay rules for given switch
9081 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
9088 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9089 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
9090 struct LIST_HEAD_TYPE *l_head;
9092 l_head = &sw->recp_list[i].filt_replay_rules;
/* advanced rules carry extra per-entry state and need the
 * dedicated teardown helper
 */
9093 if (!sw->recp_list[i].adv_rule)
9094 ice_rem_sw_rule_info(hw, l_head);
9096 ice_rem_adv_rule_info(hw, l_head);
9102 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
9103 * @hw: pointer to the HW struct
9105 * Deletes the filter replay rules.
9107 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
9109 ice_rm_sw_replay_rule_info(hw, hw->switch_info);