1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020 Intel Corporation
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
12 #define ICE_MAX_VLAN_ID 0xFFF
13 #define ICE_IPV4_NVGRE_PROTO_ID 0x002F
14 #define ICE_PPP_IPV6_PROTO_ID 0x0057
15 #define ICE_IPV6_ETHER_ID 0x86DD
16 #define ICE_TCP_PROTO_ID 0x06
18 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
19 * struct to configure any switch filter rules.
20 * {DA (6 bytes), SA(6 bytes),
21 * Ether type (2 bytes for header without VLAN tag) OR
22 * VLAN tag (4 bytes for header with VLAN tag) }
24 * Note on the hardcoded values below:
25 * byte 0 = 0x2: to identify it as locally administered DA MAC
26 * byte 6 = 0x2: to identify it as locally administered SA MAC
27 * byte 12 = 0x81 & byte 13 = 0x00:
28 * In case of VLAN filter first two bytes defines ether type (0x8100)
29 * and remaining two bytes are placeholder for programming a given VLAN ID
30 * In case of Ether type filter it is treated as header without VLAN tag
31 * and bytes 12 and 13 are used to program a given Ether type instead
33 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
37 struct ice_dummy_pkt_offsets {
38 enum ice_protocol_type type;
39 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
42 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
45 { ICE_IPV4_OFOS, 14 },
50 { ICE_PROTOCOL_LAST, 0 },
53 static const u8 dummy_gre_tcp_packet[] = {
54 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
55 0x00, 0x00, 0x00, 0x00,
56 0x00, 0x00, 0x00, 0x00,
58 0x08, 0x00, /* ICE_ETYPE_OL 12 */
60 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
61 0x00, 0x00, 0x00, 0x00,
62 0x00, 0x2F, 0x00, 0x00,
63 0x00, 0x00, 0x00, 0x00,
64 0x00, 0x00, 0x00, 0x00,
66 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
67 0x00, 0x00, 0x00, 0x00,
69 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
70 0x00, 0x00, 0x00, 0x00,
71 0x00, 0x00, 0x00, 0x00,
74 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
75 0x00, 0x00, 0x00, 0x00,
76 0x00, 0x06, 0x00, 0x00,
77 0x00, 0x00, 0x00, 0x00,
78 0x00, 0x00, 0x00, 0x00,
80 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
81 0x00, 0x00, 0x00, 0x00,
82 0x00, 0x00, 0x00, 0x00,
83 0x50, 0x02, 0x20, 0x00,
84 0x00, 0x00, 0x00, 0x00
87 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
90 { ICE_IPV4_OFOS, 14 },
95 { ICE_PROTOCOL_LAST, 0 },
98 static const u8 dummy_gre_udp_packet[] = {
99 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
100 0x00, 0x00, 0x00, 0x00,
101 0x00, 0x00, 0x00, 0x00,
103 0x08, 0x00, /* ICE_ETYPE_OL 12 */
105 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
106 0x00, 0x00, 0x00, 0x00,
107 0x00, 0x2F, 0x00, 0x00,
108 0x00, 0x00, 0x00, 0x00,
109 0x00, 0x00, 0x00, 0x00,
111 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
112 0x00, 0x00, 0x00, 0x00,
114 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
115 0x00, 0x00, 0x00, 0x00,
116 0x00, 0x00, 0x00, 0x00,
119 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
120 0x00, 0x00, 0x00, 0x00,
121 0x00, 0x11, 0x00, 0x00,
122 0x00, 0x00, 0x00, 0x00,
123 0x00, 0x00, 0x00, 0x00,
125 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
126 0x00, 0x08, 0x00, 0x00,
129 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
131 { ICE_ETYPE_OL, 12 },
132 { ICE_IPV4_OFOS, 14 },
136 { ICE_VXLAN_GPE, 42 },
140 { ICE_PROTOCOL_LAST, 0 },
143 static const u8 dummy_udp_tun_tcp_packet[] = {
144 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
145 0x00, 0x00, 0x00, 0x00,
146 0x00, 0x00, 0x00, 0x00,
148 0x08, 0x00, /* ICE_ETYPE_OL 12 */
150 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
151 0x00, 0x01, 0x00, 0x00,
152 0x40, 0x11, 0x00, 0x00,
153 0x00, 0x00, 0x00, 0x00,
154 0x00, 0x00, 0x00, 0x00,
156 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
157 0x00, 0x46, 0x00, 0x00,
159 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
160 0x00, 0x00, 0x00, 0x00,
162 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
163 0x00, 0x00, 0x00, 0x00,
164 0x00, 0x00, 0x00, 0x00,
167 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
168 0x00, 0x01, 0x00, 0x00,
169 0x40, 0x06, 0x00, 0x00,
170 0x00, 0x00, 0x00, 0x00,
171 0x00, 0x00, 0x00, 0x00,
173 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
174 0x00, 0x00, 0x00, 0x00,
175 0x00, 0x00, 0x00, 0x00,
176 0x50, 0x02, 0x20, 0x00,
177 0x00, 0x00, 0x00, 0x00
180 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
182 { ICE_ETYPE_OL, 12 },
183 { ICE_IPV4_OFOS, 14 },
187 { ICE_VXLAN_GPE, 42 },
190 { ICE_UDP_ILOS, 84 },
191 { ICE_PROTOCOL_LAST, 0 },
194 static const u8 dummy_udp_tun_udp_packet[] = {
195 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
196 0x00, 0x00, 0x00, 0x00,
197 0x00, 0x00, 0x00, 0x00,
199 0x08, 0x00, /* ICE_ETYPE_OL 12 */
201 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
202 0x00, 0x01, 0x00, 0x00,
203 0x00, 0x11, 0x00, 0x00,
204 0x00, 0x00, 0x00, 0x00,
205 0x00, 0x00, 0x00, 0x00,
207 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
208 0x00, 0x3a, 0x00, 0x00,
210 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
211 0x00, 0x00, 0x00, 0x00,
213 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
214 0x00, 0x00, 0x00, 0x00,
215 0x00, 0x00, 0x00, 0x00,
218 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
219 0x00, 0x01, 0x00, 0x00,
220 0x00, 0x11, 0x00, 0x00,
221 0x00, 0x00, 0x00, 0x00,
222 0x00, 0x00, 0x00, 0x00,
224 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
225 0x00, 0x08, 0x00, 0x00,
228 /* offset info for MAC + IPv4 + UDP dummy packet */
229 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
231 { ICE_ETYPE_OL, 12 },
232 { ICE_IPV4_OFOS, 14 },
233 { ICE_UDP_ILOS, 34 },
234 { ICE_PROTOCOL_LAST, 0 },
237 /* Dummy packet for MAC + IPv4 + UDP */
238 static const u8 dummy_udp_packet[] = {
239 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
240 0x00, 0x00, 0x00, 0x00,
241 0x00, 0x00, 0x00, 0x00,
243 0x08, 0x00, /* ICE_ETYPE_OL 12 */
245 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
246 0x00, 0x01, 0x00, 0x00,
247 0x00, 0x11, 0x00, 0x00,
248 0x00, 0x00, 0x00, 0x00,
249 0x00, 0x00, 0x00, 0x00,
251 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
252 0x00, 0x08, 0x00, 0x00,
254 0x00, 0x00, /* 2 bytes for 4 byte alignment */
257 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
258 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
260 { ICE_ETYPE_OL, 12 },
261 { ICE_VLAN_OFOS, 14 },
262 { ICE_IPV4_OFOS, 18 },
263 { ICE_UDP_ILOS, 38 },
264 { ICE_PROTOCOL_LAST, 0 },
267 /* C-tag (802.1Q), IPv4:UDP dummy packet */
268 static const u8 dummy_vlan_udp_packet[] = {
269 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
270 0x00, 0x00, 0x00, 0x00,
271 0x00, 0x00, 0x00, 0x00,
273 0x81, 0x00, /* ICE_ETYPE_OL 12 */
275 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
277 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
278 0x00, 0x01, 0x00, 0x00,
279 0x00, 0x11, 0x00, 0x00,
280 0x00, 0x00, 0x00, 0x00,
281 0x00, 0x00, 0x00, 0x00,
283 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
284 0x00, 0x08, 0x00, 0x00,
286 0x00, 0x00, /* 2 bytes for 4 byte alignment */
289 /* offset info for MAC + IPv4 + TCP dummy packet */
290 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
292 { ICE_ETYPE_OL, 12 },
293 { ICE_IPV4_OFOS, 14 },
295 { ICE_PROTOCOL_LAST, 0 },
298 /* Dummy packet for MAC + IPv4 + TCP */
299 static const u8 dummy_tcp_packet[] = {
300 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
301 0x00, 0x00, 0x00, 0x00,
302 0x00, 0x00, 0x00, 0x00,
304 0x08, 0x00, /* ICE_ETYPE_OL 12 */
306 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
307 0x00, 0x01, 0x00, 0x00,
308 0x00, 0x06, 0x00, 0x00,
309 0x00, 0x00, 0x00, 0x00,
310 0x00, 0x00, 0x00, 0x00,
312 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
313 0x00, 0x00, 0x00, 0x00,
314 0x00, 0x00, 0x00, 0x00,
315 0x50, 0x00, 0x00, 0x00,
316 0x00, 0x00, 0x00, 0x00,
318 0x00, 0x00, /* 2 bytes for 4 byte alignment */
321 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
322 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
324 { ICE_ETYPE_OL, 12 },
325 { ICE_VLAN_OFOS, 14 },
326 { ICE_IPV4_OFOS, 18 },
328 { ICE_PROTOCOL_LAST, 0 },
331 /* C-tag (802.1Q), IPv4:TCP dummy packet */
332 static const u8 dummy_vlan_tcp_packet[] = {
333 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
334 0x00, 0x00, 0x00, 0x00,
335 0x00, 0x00, 0x00, 0x00,
337 0x81, 0x00, /* ICE_ETYPE_OL 12 */
339 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
341 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
342 0x00, 0x01, 0x00, 0x00,
343 0x00, 0x06, 0x00, 0x00,
344 0x00, 0x00, 0x00, 0x00,
345 0x00, 0x00, 0x00, 0x00,
347 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
348 0x00, 0x00, 0x00, 0x00,
349 0x00, 0x00, 0x00, 0x00,
350 0x50, 0x00, 0x00, 0x00,
351 0x00, 0x00, 0x00, 0x00,
353 0x00, 0x00, /* 2 bytes for 4 byte alignment */
356 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
358 { ICE_ETYPE_OL, 12 },
359 { ICE_IPV6_OFOS, 14 },
361 { ICE_PROTOCOL_LAST, 0 },
364 static const u8 dummy_tcp_ipv6_packet[] = {
365 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
366 0x00, 0x00, 0x00, 0x00,
367 0x00, 0x00, 0x00, 0x00,
369 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
371 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
372 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
373 0x00, 0x00, 0x00, 0x00,
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00,
379 0x00, 0x00, 0x00, 0x00,
380 0x00, 0x00, 0x00, 0x00,
382 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
383 0x00, 0x00, 0x00, 0x00,
384 0x00, 0x00, 0x00, 0x00,
385 0x50, 0x00, 0x00, 0x00,
386 0x00, 0x00, 0x00, 0x00,
388 0x00, 0x00, /* 2 bytes for 4 byte alignment */
391 /* C-tag (802.1Q): IPv6 + TCP */
392 static const struct ice_dummy_pkt_offsets
393 dummy_vlan_tcp_ipv6_packet_offsets[] = {
395 { ICE_ETYPE_OL, 12 },
396 { ICE_VLAN_OFOS, 14 },
397 { ICE_IPV6_OFOS, 18 },
399 { ICE_PROTOCOL_LAST, 0 },
402 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
403 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
404 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
405 0x00, 0x00, 0x00, 0x00,
406 0x00, 0x00, 0x00, 0x00,
408 0x81, 0x00, /* ICE_ETYPE_OL 12 */
410 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
412 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
413 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
414 0x00, 0x00, 0x00, 0x00,
415 0x00, 0x00, 0x00, 0x00,
416 0x00, 0x00, 0x00, 0x00,
417 0x00, 0x00, 0x00, 0x00,
418 0x00, 0x00, 0x00, 0x00,
419 0x00, 0x00, 0x00, 0x00,
420 0x00, 0x00, 0x00, 0x00,
421 0x00, 0x00, 0x00, 0x00,
423 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
424 0x00, 0x00, 0x00, 0x00,
425 0x00, 0x00, 0x00, 0x00,
426 0x50, 0x00, 0x00, 0x00,
427 0x00, 0x00, 0x00, 0x00,
429 0x00, 0x00, /* 2 bytes for 4 byte alignment */
433 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
435 { ICE_ETYPE_OL, 12 },
436 { ICE_IPV6_OFOS, 14 },
437 { ICE_UDP_ILOS, 54 },
438 { ICE_PROTOCOL_LAST, 0 },
441 /* IPv6 + UDP dummy packet */
442 static const u8 dummy_udp_ipv6_packet[] = {
443 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
444 0x00, 0x00, 0x00, 0x00,
445 0x00, 0x00, 0x00, 0x00,
447 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
449 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
450 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
451 0x00, 0x00, 0x00, 0x00,
452 0x00, 0x00, 0x00, 0x00,
453 0x00, 0x00, 0x00, 0x00,
454 0x00, 0x00, 0x00, 0x00,
455 0x00, 0x00, 0x00, 0x00,
456 0x00, 0x00, 0x00, 0x00,
457 0x00, 0x00, 0x00, 0x00,
458 0x00, 0x00, 0x00, 0x00,
460 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
461 0x00, 0x10, 0x00, 0x00,
463 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
464 0x00, 0x00, 0x00, 0x00,
466 0x00, 0x00, /* 2 bytes for 4 byte alignment */
469 /* C-tag (802.1Q): IPv6 + UDP */
470 static const struct ice_dummy_pkt_offsets
471 dummy_vlan_udp_ipv6_packet_offsets[] = {
473 { ICE_ETYPE_OL, 12 },
474 { ICE_VLAN_OFOS, 14 },
475 { ICE_IPV6_OFOS, 18 },
476 { ICE_UDP_ILOS, 58 },
477 { ICE_PROTOCOL_LAST, 0 },
480 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
481 static const u8 dummy_vlan_udp_ipv6_packet[] = {
482 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
483 0x00, 0x00, 0x00, 0x00,
484 0x00, 0x00, 0x00, 0x00,
486 0x81, 0x00, /* ICE_ETYPE_OL 12 */
488 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
490 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
491 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
492 0x00, 0x00, 0x00, 0x00,
493 0x00, 0x00, 0x00, 0x00,
494 0x00, 0x00, 0x00, 0x00,
495 0x00, 0x00, 0x00, 0x00,
496 0x00, 0x00, 0x00, 0x00,
497 0x00, 0x00, 0x00, 0x00,
498 0x00, 0x00, 0x00, 0x00,
499 0x00, 0x00, 0x00, 0x00,
501 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
502 0x00, 0x08, 0x00, 0x00,
504 0x00, 0x00, /* 2 bytes for 4 byte alignment */
507 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
509 { ICE_IPV4_OFOS, 14 },
512 { ICE_PROTOCOL_LAST, 0 },
515 static const u8 dummy_udp_gtp_packet[] = {
516 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
517 0x00, 0x00, 0x00, 0x00,
518 0x00, 0x00, 0x00, 0x00,
521 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
522 0x00, 0x00, 0x00, 0x00,
523 0x00, 0x11, 0x00, 0x00,
524 0x00, 0x00, 0x00, 0x00,
525 0x00, 0x00, 0x00, 0x00,
527 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
528 0x00, 0x1c, 0x00, 0x00,
530 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
531 0x00, 0x00, 0x00, 0x00,
532 0x00, 0x00, 0x00, 0x85,
534 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
535 0x00, 0x00, 0x00, 0x00,
539 struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_packet_offsets[] = {
541 { ICE_IPV4_OFOS, 14 },
545 { ICE_PROTOCOL_LAST, 0 },
548 static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
549 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
550 0x00, 0x00, 0x00, 0x00,
551 0x00, 0x00, 0x00, 0x00,
554 0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
555 0x00, 0x00, 0x40, 0x00,
556 0x40, 0x11, 0x00, 0x00,
557 0x00, 0x00, 0x00, 0x00,
558 0x00, 0x00, 0x00, 0x00,
560 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
561 0x00, 0x00, 0x00, 0x00,
563 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
564 0x00, 0x00, 0x00, 0x00,
565 0x00, 0x00, 0x00, 0x85,
567 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
568 0x00, 0x00, 0x00, 0x00,
570 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
571 0x00, 0x00, 0x40, 0x00,
572 0x40, 0x00, 0x00, 0x00,
573 0x00, 0x00, 0x00, 0x00,
574 0x00, 0x00, 0x00, 0x00,
579 struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_packet_offsets[] = {
581 { ICE_IPV4_OFOS, 14 },
585 { ICE_PROTOCOL_LAST, 0 },
588 static const u8 dummy_ipv4_gtpu_ipv6_packet[] = {
589 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
590 0x00, 0x00, 0x00, 0x00,
591 0x00, 0x00, 0x00, 0x00,
594 0x45, 0x00, 0x00, 0x58, /* ICE_IPV4_OFOS 14 */
595 0x00, 0x00, 0x40, 0x00,
596 0x40, 0x11, 0x00, 0x00,
597 0x00, 0x00, 0x00, 0x00,
598 0x00, 0x00, 0x00, 0x00,
600 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
601 0x00, 0x00, 0x00, 0x00,
603 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
604 0x00, 0x00, 0x00, 0x00,
605 0x00, 0x00, 0x00, 0x85,
607 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
608 0x00, 0x00, 0x00, 0x00,
610 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 62 */
611 0x00, 0x00, 0x3b, 0x00,
612 0x00, 0x00, 0x00, 0x00,
613 0x00, 0x00, 0x00, 0x00,
614 0x00, 0x00, 0x00, 0x00,
615 0x00, 0x00, 0x00, 0x00,
616 0x00, 0x00, 0x00, 0x00,
617 0x00, 0x00, 0x00, 0x00,
618 0x00, 0x00, 0x00, 0x00,
619 0x00, 0x00, 0x00, 0x00,
625 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_packet_offsets[] = {
627 { ICE_IPV6_OFOS, 14 },
631 { ICE_PROTOCOL_LAST, 0 },
634 static const u8 dummy_ipv6_gtpu_ipv4_packet[] = {
635 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
636 0x00, 0x00, 0x00, 0x00,
637 0x00, 0x00, 0x00, 0x00,
640 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
641 0x00, 0x58, 0x11, 0x00, /* Next header UDP*/
642 0x00, 0x00, 0x00, 0x00,
643 0x00, 0x00, 0x00, 0x00,
644 0x00, 0x00, 0x00, 0x00,
645 0x00, 0x00, 0x00, 0x00,
646 0x00, 0x00, 0x00, 0x00,
647 0x00, 0x00, 0x00, 0x00,
648 0x00, 0x00, 0x00, 0x00,
649 0x00, 0x00, 0x00, 0x00,
651 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
652 0x00, 0x00, 0x00, 0x00,
654 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 62 */
655 0x00, 0x00, 0x00, 0x00,
656 0x00, 0x00, 0x00, 0x85,
658 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
659 0x00, 0x00, 0x00, 0x00,
661 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 82 */
662 0x00, 0x00, 0x40, 0x00,
663 0x40, 0x00, 0x00, 0x00,
664 0x00, 0x00, 0x00, 0x00,
665 0x00, 0x00, 0x00, 0x00,
671 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_packet_offsets[] = {
673 { ICE_IPV6_OFOS, 14 },
677 { ICE_PROTOCOL_LAST, 0 },
680 static const u8 dummy_ipv6_gtpu_ipv6_packet[] = {
681 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
682 0x00, 0x00, 0x00, 0x00,
683 0x00, 0x00, 0x00, 0x00,
686 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
687 0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
688 0x00, 0x00, 0x00, 0x00,
689 0x00, 0x00, 0x00, 0x00,
690 0x00, 0x00, 0x00, 0x00,
691 0x00, 0x00, 0x00, 0x00,
692 0x00, 0x00, 0x00, 0x00,
693 0x00, 0x00, 0x00, 0x00,
694 0x00, 0x00, 0x00, 0x00,
695 0x00, 0x00, 0x00, 0x00,
697 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
698 0x00, 0x00, 0x00, 0x00,
700 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 62 */
701 0x00, 0x00, 0x00, 0x00,
702 0x00, 0x00, 0x00, 0x85,
704 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
705 0x00, 0x00, 0x00, 0x00,
707 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFIL 82 */
708 0x00, 0x00, 0x3b, 0x00,
709 0x00, 0x00, 0x00, 0x00,
710 0x00, 0x00, 0x00, 0x00,
711 0x00, 0x00, 0x00, 0x00,
712 0x00, 0x00, 0x00, 0x00,
713 0x00, 0x00, 0x00, 0x00,
714 0x00, 0x00, 0x00, 0x00,
715 0x00, 0x00, 0x00, 0x00,
716 0x00, 0x00, 0x00, 0x00,
722 struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
724 { ICE_IPV4_OFOS, 14 },
726 { ICE_GTP_NO_PAY, 42 },
727 { ICE_PROTOCOL_LAST, 0 },
731 struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
733 { ICE_IPV6_OFOS, 14 },
735 { ICE_GTP_NO_PAY, 62 },
736 { ICE_PROTOCOL_LAST, 0 },
739 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
741 { ICE_ETYPE_OL, 12 },
742 { ICE_VLAN_OFOS, 14},
744 { ICE_PROTOCOL_LAST, 0 },
747 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv4_offsets[] = {
749 { ICE_ETYPE_OL, 12 },
750 { ICE_VLAN_OFOS, 14},
752 { ICE_IPV4_OFOS, 26 },
753 { ICE_PROTOCOL_LAST, 0 },
756 static const u8 dummy_pppoe_ipv4_packet[] = {
757 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
758 0x00, 0x00, 0x00, 0x00,
759 0x00, 0x00, 0x00, 0x00,
761 0x81, 0x00, /* ICE_ETYPE_OL 12 */
763 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
765 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
768 0x00, 0x21, /* PPP Link Layer 24 */
770 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
771 0x00, 0x00, 0x00, 0x00,
772 0x00, 0x00, 0x00, 0x00,
773 0x00, 0x00, 0x00, 0x00,
774 0x00, 0x00, 0x00, 0x00,
776 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
780 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_tcp_packet_offsets[] = {
782 { ICE_ETYPE_OL, 12 },
783 { ICE_VLAN_OFOS, 14},
785 { ICE_IPV4_OFOS, 26 },
787 { ICE_PROTOCOL_LAST, 0 },
790 static const u8 dummy_pppoe_ipv4_tcp_packet[] = {
791 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
792 0x00, 0x00, 0x00, 0x00,
793 0x00, 0x00, 0x00, 0x00,
795 0x81, 0x00, /* ICE_ETYPE_OL 12 */
797 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
799 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
802 0x00, 0x21, /* PPP Link Layer 24 */
804 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 26 */
805 0x00, 0x01, 0x00, 0x00,
806 0x00, 0x06, 0x00, 0x00,
807 0x00, 0x00, 0x00, 0x00,
808 0x00, 0x00, 0x00, 0x00,
810 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 46 */
811 0x00, 0x00, 0x00, 0x00,
812 0x00, 0x00, 0x00, 0x00,
813 0x50, 0x00, 0x00, 0x00,
814 0x00, 0x00, 0x00, 0x00,
816 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
820 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_udp_packet_offsets[] = {
822 { ICE_ETYPE_OL, 12 },
823 { ICE_VLAN_OFOS, 14},
825 { ICE_IPV4_OFOS, 26 },
826 { ICE_UDP_ILOS, 46 },
827 { ICE_PROTOCOL_LAST, 0 },
830 static const u8 dummy_pppoe_ipv4_udp_packet[] = {
831 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
832 0x00, 0x00, 0x00, 0x00,
833 0x00, 0x00, 0x00, 0x00,
835 0x81, 0x00, /* ICE_ETYPE_OL 12 */
837 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
839 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
842 0x00, 0x21, /* PPP Link Layer 24 */
844 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 26 */
845 0x00, 0x01, 0x00, 0x00,
846 0x00, 0x11, 0x00, 0x00,
847 0x00, 0x00, 0x00, 0x00,
848 0x00, 0x00, 0x00, 0x00,
850 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 46 */
851 0x00, 0x08, 0x00, 0x00,
853 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
856 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_offsets[] = {
858 { ICE_ETYPE_OL, 12 },
859 { ICE_VLAN_OFOS, 14},
861 { ICE_IPV6_OFOS, 26 },
862 { ICE_PROTOCOL_LAST, 0 },
865 static const u8 dummy_pppoe_ipv6_packet[] = {
866 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
867 0x00, 0x00, 0x00, 0x00,
868 0x00, 0x00, 0x00, 0x00,
870 0x81, 0x00, /* ICE_ETYPE_OL 12 */
872 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
874 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
877 0x00, 0x57, /* PPP Link Layer 24 */
879 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
880 0x00, 0x00, 0x3b, 0x00,
881 0x00, 0x00, 0x00, 0x00,
882 0x00, 0x00, 0x00, 0x00,
883 0x00, 0x00, 0x00, 0x00,
884 0x00, 0x00, 0x00, 0x00,
885 0x00, 0x00, 0x00, 0x00,
886 0x00, 0x00, 0x00, 0x00,
887 0x00, 0x00, 0x00, 0x00,
888 0x00, 0x00, 0x00, 0x00,
890 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
894 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_tcp_offsets[] = {
896 { ICE_ETYPE_OL, 12 },
897 { ICE_VLAN_OFOS, 14},
899 { ICE_IPV6_OFOS, 26 },
901 { ICE_PROTOCOL_LAST, 0 },
904 static const u8 dummy_pppoe_ipv6_tcp_packet[] = {
905 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
906 0x00, 0x00, 0x00, 0x00,
907 0x00, 0x00, 0x00, 0x00,
909 0x81, 0x00, /* ICE_ETYPE_OL 12 */
911 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
913 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
916 0x00, 0x57, /* PPP Link Layer 24 */
918 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
919 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
920 0x00, 0x00, 0x00, 0x00,
921 0x00, 0x00, 0x00, 0x00,
922 0x00, 0x00, 0x00, 0x00,
923 0x00, 0x00, 0x00, 0x00,
924 0x00, 0x00, 0x00, 0x00,
925 0x00, 0x00, 0x00, 0x00,
926 0x00, 0x00, 0x00, 0x00,
927 0x00, 0x00, 0x00, 0x00,
929 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 66 */
930 0x00, 0x00, 0x00, 0x00,
931 0x00, 0x00, 0x00, 0x00,
932 0x50, 0x00, 0x00, 0x00,
933 0x00, 0x00, 0x00, 0x00,
935 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
939 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_udp_offsets[] = {
941 { ICE_ETYPE_OL, 12 },
942 { ICE_VLAN_OFOS, 14},
944 { ICE_IPV6_OFOS, 26 },
945 { ICE_UDP_ILOS, 66 },
946 { ICE_PROTOCOL_LAST, 0 },
949 static const u8 dummy_pppoe_ipv6_udp_packet[] = {
950 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
951 0x00, 0x00, 0x00, 0x00,
952 0x00, 0x00, 0x00, 0x00,
954 0x81, 0x00, /* ICE_ETYPE_OL 12 */
956 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
958 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
961 0x00, 0x57, /* PPP Link Layer 24 */
963 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
964 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
965 0x00, 0x00, 0x00, 0x00,
966 0x00, 0x00, 0x00, 0x00,
967 0x00, 0x00, 0x00, 0x00,
968 0x00, 0x00, 0x00, 0x00,
969 0x00, 0x00, 0x00, 0x00,
970 0x00, 0x00, 0x00, 0x00,
971 0x00, 0x00, 0x00, 0x00,
972 0x00, 0x00, 0x00, 0x00,
974 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 66 */
975 0x00, 0x08, 0x00, 0x00,
977 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
980 static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
982 { ICE_IPV4_OFOS, 14 },
984 { ICE_PROTOCOL_LAST, 0 },
987 static const u8 dummy_ipv4_esp_pkt[] = {
988 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
989 0x00, 0x00, 0x00, 0x00,
990 0x00, 0x00, 0x00, 0x00,
993 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 14 */
994 0x00, 0x00, 0x40, 0x00,
995 0x40, 0x32, 0x00, 0x00,
996 0x00, 0x00, 0x00, 0x00,
997 0x00, 0x00, 0x00, 0x00,
999 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
1000 0x00, 0x00, 0x00, 0x00,
1001 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1004 static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
1005 { ICE_MAC_OFOS, 0 },
1006 { ICE_IPV6_OFOS, 14 },
1008 { ICE_PROTOCOL_LAST, 0 },
1011 static const u8 dummy_ipv6_esp_pkt[] = {
1012 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1013 0x00, 0x00, 0x00, 0x00,
1014 0x00, 0x00, 0x00, 0x00,
1017 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1018 0x00, 0x08, 0x32, 0x00, /* Next header ESP */
1019 0x00, 0x00, 0x00, 0x00,
1020 0x00, 0x00, 0x00, 0x00,
1021 0x00, 0x00, 0x00, 0x00,
1022 0x00, 0x00, 0x00, 0x00,
1023 0x00, 0x00, 0x00, 0x00,
1024 0x00, 0x00, 0x00, 0x00,
1025 0x00, 0x00, 0x00, 0x00,
1026 0x00, 0x00, 0x00, 0x00,
1028 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
1029 0x00, 0x00, 0x00, 0x00,
1030 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1033 static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
1034 { ICE_MAC_OFOS, 0 },
1035 { ICE_IPV4_OFOS, 14 },
1037 { ICE_PROTOCOL_LAST, 0 },
1040 static const u8 dummy_ipv4_ah_pkt[] = {
1041 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1042 0x00, 0x00, 0x00, 0x00,
1043 0x00, 0x00, 0x00, 0x00,
1046 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1047 0x00, 0x00, 0x40, 0x00,
1048 0x40, 0x33, 0x00, 0x00,
1049 0x00, 0x00, 0x00, 0x00,
1050 0x00, 0x00, 0x00, 0x00,
1052 0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
1053 0x00, 0x00, 0x00, 0x00,
1054 0x00, 0x00, 0x00, 0x00,
1055 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1058 static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
1059 { ICE_MAC_OFOS, 0 },
1060 { ICE_IPV6_OFOS, 14 },
1062 { ICE_PROTOCOL_LAST, 0 },
1065 static const u8 dummy_ipv6_ah_pkt[] = {
1066 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1067 0x00, 0x00, 0x00, 0x00,
1068 0x00, 0x00, 0x00, 0x00,
1071 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1072 0x00, 0x0c, 0x33, 0x00, /* Next header AH */
1073 0x00, 0x00, 0x00, 0x00,
1074 0x00, 0x00, 0x00, 0x00,
1075 0x00, 0x00, 0x00, 0x00,
1076 0x00, 0x00, 0x00, 0x00,
1077 0x00, 0x00, 0x00, 0x00,
1078 0x00, 0x00, 0x00, 0x00,
1079 0x00, 0x00, 0x00, 0x00,
1080 0x00, 0x00, 0x00, 0x00,
1082 0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
1083 0x00, 0x00, 0x00, 0x00,
1084 0x00, 0x00, 0x00, 0x00,
1085 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1088 static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
1089 { ICE_MAC_OFOS, 0 },
1090 { ICE_IPV4_OFOS, 14 },
1091 { ICE_UDP_ILOS, 34 },
1093 { ICE_PROTOCOL_LAST, 0 },
1096 static const u8 dummy_ipv4_nat_pkt[] = {
1097 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1098 0x00, 0x00, 0x00, 0x00,
1099 0x00, 0x00, 0x00, 0x00,
1102 0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_IL 14 */
1103 0x00, 0x00, 0x40, 0x00,
1104 0x40, 0x11, 0x00, 0x00,
1105 0x00, 0x00, 0x00, 0x00,
1106 0x00, 0x00, 0x00, 0x00,
1108 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34 */
1109 0x00, 0x00, 0x00, 0x00,
1111 0x00, 0x00, 0x00, 0x00,
1112 0x00, 0x00, 0x00, 0x00,
1113 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1116 static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
1117 { ICE_MAC_OFOS, 0 },
1118 { ICE_IPV6_OFOS, 14 },
1119 { ICE_UDP_ILOS, 54 },
1121 { ICE_PROTOCOL_LAST, 0 },
1124 static const u8 dummy_ipv6_nat_pkt[] = {
1125 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1126 0x00, 0x00, 0x00, 0x00,
1127 0x00, 0x00, 0x00, 0x00,
1130 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1131 0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */
1132 0x00, 0x00, 0x00, 0x00,
1133 0x00, 0x00, 0x00, 0x00,
1134 0x00, 0x00, 0x00, 0x00,
1135 0x00, 0x00, 0x00, 0x00,
1136 0x00, 0x00, 0x00, 0x00,
1137 0x00, 0x00, 0x00, 0x00,
1138 0x00, 0x00, 0x00, 0x00,
1139 0x00, 0x00, 0x00, 0x00,
1141 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54 */
1142 0x00, 0x00, 0x00, 0x00,
1144 0x00, 0x00, 0x00, 0x00,
1145 0x00, 0x00, 0x00, 0x00,
1146 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1150 static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
1151 { ICE_MAC_OFOS, 0 },
1152 { ICE_IPV4_OFOS, 14 },
1154 { ICE_PROTOCOL_LAST, 0 },
1157 static const u8 dummy_ipv4_l2tpv3_pkt[] = {
1158 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1159 0x00, 0x00, 0x00, 0x00,
1160 0x00, 0x00, 0x00, 0x00,
1163 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1164 0x00, 0x00, 0x40, 0x00,
1165 0x40, 0x73, 0x00, 0x00,
1166 0x00, 0x00, 0x00, 0x00,
1167 0x00, 0x00, 0x00, 0x00,
1169 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
1170 0x00, 0x00, 0x00, 0x00,
1171 0x00, 0x00, 0x00, 0x00,
1172 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1175 static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
1176 { ICE_MAC_OFOS, 0 },
1177 { ICE_IPV6_OFOS, 14 },
1179 { ICE_PROTOCOL_LAST, 0 },
1182 static const u8 dummy_ipv6_l2tpv3_pkt[] = {
1183 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1184 0x00, 0x00, 0x00, 0x00,
1185 0x00, 0x00, 0x00, 0x00,
1188 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
1189 0x00, 0x0c, 0x73, 0x40,
1190 0x00, 0x00, 0x00, 0x00,
1191 0x00, 0x00, 0x00, 0x00,
1192 0x00, 0x00, 0x00, 0x00,
1193 0x00, 0x00, 0x00, 0x00,
1194 0x00, 0x00, 0x00, 0x00,
1195 0x00, 0x00, 0x00, 0x00,
1196 0x00, 0x00, 0x00, 0x00,
1197 0x00, 0x00, 0x00, 0x00,
1199 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
1200 0x00, 0x00, 0x00, 0x00,
1201 0x00, 0x00, 0x00, 0x00,
1202 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1205 static const struct ice_dummy_pkt_offsets dummy_qinq_ipv4_packet_offsets[] = {
1206 { ICE_MAC_OFOS, 0 },
1207 { ICE_VLAN_EX, 14 },
1208 { ICE_VLAN_OFOS, 18 },
1209 { ICE_IPV4_OFOS, 22 },
1210 { ICE_PROTOCOL_LAST, 0 },
1213 static const u8 dummy_qinq_ipv4_pkt[] = {
1214 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1215 0x00, 0x00, 0x00, 0x00,
1216 0x00, 0x00, 0x00, 0x00,
1219 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1220 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 18 */
1222 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
1223 0x00, 0x01, 0x00, 0x00,
1224 0x00, 0x11, 0x00, 0x00,
1225 0x00, 0x00, 0x00, 0x00,
1226 0x00, 0x00, 0x00, 0x00,
1228 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
1229 0x00, 0x08, 0x00, 0x00,
1231 0x00, 0x00, /* 2 bytes for 4 byte alignment */
1234 static const struct ice_dummy_pkt_offsets dummy_qinq_ipv6_packet_offsets[] = {
1235 { ICE_MAC_OFOS, 0 },
1236 { ICE_VLAN_EX, 14 },
1237 { ICE_VLAN_OFOS, 18 },
1238 { ICE_IPV6_OFOS, 22 },
1239 { ICE_PROTOCOL_LAST, 0 },
1242 static const u8 dummy_qinq_ipv6_pkt[] = {
1243 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1244 0x00, 0x00, 0x00, 0x00,
1245 0x00, 0x00, 0x00, 0x00,
1248 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1249 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 18 */
1251 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
1252 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
1253 0x00, 0x00, 0x00, 0x00,
1254 0x00, 0x00, 0x00, 0x00,
1255 0x00, 0x00, 0x00, 0x00,
1256 0x00, 0x00, 0x00, 0x00,
1257 0x00, 0x00, 0x00, 0x00,
1258 0x00, 0x00, 0x00, 0x00,
1259 0x00, 0x00, 0x00, 0x00,
1260 0x00, 0x00, 0x00, 0x00,
1262 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
1263 0x00, 0x10, 0x00, 0x00,
1265 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
1266 0x00, 0x00, 0x00, 0x00,
1268 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* QinQ + PPPoE offset maps. The generic map below covers the bare PPPoE
 * case; the IPv4-specific map adds the IPv4 header at offset 30.
 * NOTE(review): the packet bytes reference a PPPoE header at offset 22, but
 * no ICE_PPPOE entry is visible in these tables — some original lines appear
 * to be missing from this extraction; confirm against the full source.
 */
1271 static const struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_offsets[] = {
1272 { ICE_MAC_OFOS, 0 },
1273 { ICE_VLAN_EX, 14 },
1274 { ICE_VLAN_OFOS, 18 },
1276 { ICE_PROTOCOL_LAST, 0 },

1280 struct ice_dummy_pkt_offsets dummy_qinq_pppoe_ipv4_packet_offsets[] = {
1281 { ICE_MAC_OFOS, 0 },
1282 { ICE_VLAN_EX, 14 },
1283 { ICE_VLAN_OFOS, 18 },
1285 { ICE_IPV4_OFOS, 30 },
1286 { ICE_PROTOCOL_LAST, 0 },

/* Training packet: outer VLAN (0x8100) at 14, PPPoE ethertype 0x8864 at 18,
 * PPP protocol 0x0021 (IPv4) at 28, minimal IPv4 header (total length 0x14)
 * at 30. NOTE(review): the byte comment says ICE_IPV4_IL while the offset
 * table says ICE_IPV4_OFOS for offset 30 — confirm which label is intended.
 */
1289 static const u8 dummy_qinq_pppoe_ipv4_pkt[] = {
1290 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1291 0x00, 0x00, 0x00, 0x00,
1292 0x00, 0x00, 0x00, 0x00,
1295 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1296 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 18 */
1298 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
1301 0x00, 0x21, /* PPP Link Layer 28 */
1303 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 30 */
1304 0x00, 0x00, 0x00, 0x00,
1305 0x00, 0x00, 0x00, 0x00,
1306 0x00, 0x00, 0x00, 0x00,
1307 0x00, 0x00, 0x00, 0x00,
1309 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* QinQ + PPPoE + IPv6 variant. The offset table here exposes the outer
 * ethertype (ICE_ETYPE_OL at 12, value 0x9100 in the packet) instead of the
 * outer VLAN entry the sibling tables use.
 * NOTE(review): the packet bytes also carry an ICE_VLAN_EX header at 14 and a
 * PPPoE header at 22 that have no table entries in this view — lines appear
 * dropped by the extraction; confirm against the full source.
 */
1313 struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_ipv6_offsets[] = {
1314 { ICE_MAC_OFOS, 0 },
1315 { ICE_ETYPE_OL, 12 },
1317 { ICE_VLAN_OFOS, 18 },
1319 { ICE_IPV6_OFOS, 30 },
1320 { ICE_PROTOCOL_LAST, 0 },

/* PPP protocol 0x0057 = IPv6 (matches ICE_PPP_IPV6_PROTO_ID); the IPv6
 * header at 30 carries next-header 0x3b (No Next Header) and zero payload.
 */
1323 static const u8 dummy_qinq_pppoe_ipv6_packet[] = {
1324 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1325 0x00, 0x00, 0x00, 0x00,
1326 0x00, 0x00, 0x00, 0x00,
1328 0x91, 0x00, /* ICE_ETYPE_OL 12 */
1330 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1331 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 18 */
1333 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
1336 0x00, 0x57, /* PPP Link Layer 28*/
1338 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 30 */
1339 0x00, 0x00, 0x3b, 0x00,
1340 0x00, 0x00, 0x00, 0x00,
1341 0x00, 0x00, 0x00, 0x00,
1342 0x00, 0x00, 0x00, 0x00,
1343 0x00, 0x00, 0x00, 0x00,
1344 0x00, 0x00, 0x00, 0x00,
1345 0x00, 0x00, 0x00, 0x00,
1346 0x00, 0x00, 0x00, 0x00,
1347 0x00, 0x00, 0x00, 0x00,
1349 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Bidirectional recipe<->profile association cache. Both bitmaps are
 * (re)filled from firmware by ice_get_recp_to_prof_map() below and consumed
 * by ice_get_recp_frm_fw()/ice_get_tun_type_for_recipe().
 */
1352 /* this is a recipe to profile association bitmap */
1353 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
1354 ICE_MAX_NUM_PROFILES);
1356 /* this is a profile to recipe association bitmap */
1357 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
1358 ICE_MAX_NUM_RECIPES);
/* forward declaration: defined after ice_get_recp_frm_fw, which calls it */
1360 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
1363 * ice_collect_result_idx - copy result index values
1364 * @buf: buffer that contains the result index
1365 * @recp: the recipe struct to copy data into
/* If the FW recipe entry has a valid result index (RESULT_EN flag set in
 * result_indx), record the index value (result_indx with the enable flag
 * masked off) in the recipe's res_idxs bitmap.
 */
1367 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
1368 struct ice_sw_recipe *recp)
1370 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1371 ice_set_bit(buf->content.result_indx &
1372 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs)
1376 * ice_get_tun_type_for_recipe - get tunnel type for the recipe
1377 * @rid: recipe ID that we are populating
/* Derives the SW tunnel type of a recipe from the set of profiles associated
 * with it in recipe_to_profile[rid]. The hard-coded profile-ID tables below
 * classify each associated profile as VXLAN / GRE / PPPoE / non-tunnel.
 * @vlan: when true, the result is remapped to its QinQ variant at the end.
 * NOTE(review): several original lines (break/continue statements, braces)
 * are missing from this extraction; comments describe the visible logic only.
 */
1379 static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid, bool vlan)
1381 u8 vxlan_profile[12] = {10, 11, 12, 16, 17, 18, 22, 23, 24, 25, 26, 27};
1382 u8 gre_profile[12] = {13, 14, 15, 19, 20, 21, 28, 29, 30, 31, 32, 33};
1383 u8 pppoe_profile[7] = {34, 35, 36, 37, 38, 39, 40};
1384 u8 non_tun_profile[6] = {4, 5, 6, 7, 8, 9};
1385 enum ice_sw_tunnel_type tun_type;
1386 u16 i, j, profile_num = 0;
1387 bool non_tun_valid = false;
1388 bool pppoe_valid = false;
1389 bool vxlan_valid = false;
1390 bool gre_valid = false;
1391 bool gtp_valid = false;
1392 bool flag_valid = false;
/* Scan every profile associated with this recipe and classify it. */
1394 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1395 if (!ice_is_bit_set(recipe_to_profile[rid], j))
1400 for (i = 0; i < 12; i++) {
1401 if (gre_profile[i] == j)
1405 for (i = 0; i < 12; i++) {
1406 if (vxlan_profile[i] == j)
1410 for (i = 0; i < 7; i++) {
1411 if (pppoe_profile[i] == j)
1415 for (i = 0; i < 6; i++) {
1416 if (non_tun_profile[i] == j)
1417 non_tun_valid = true;
/* GTPU profiles fall in a contiguous profile-ID range. */
1420 if (j >= ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER &&
1421 j <= ICE_PROFID_IPV6_GTPU_IPV6_TCP)
/* ESP/AH/NAT-T/PFCP/GTPC-TEID ranges mark "flag" profiles. */
1424 if ((j >= ICE_PROFID_IPV4_ESP &&
1425 j <= ICE_PROFID_IPV6_PFCP_SESSION) ||
1426 (j >= ICE_PROFID_IPV4_GTPC_TEID &&
1427 j <= ICE_PROFID_IPV6_GTPU_TEID))
/* Combine the classification flags into a single tunnel type. */
1431 if (!non_tun_valid && vxlan_valid)
1432 tun_type = ICE_SW_TUN_VXLAN;
1433 else if (!non_tun_valid && gre_valid)
1434 tun_type = ICE_SW_TUN_NVGRE;
1435 else if (!non_tun_valid && pppoe_valid)
1436 tun_type = ICE_SW_TUN_PPPOE;
1437 else if (!non_tun_valid && gtp_valid)
1438 tun_type = ICE_SW_TUN_GTP;
1439 else if (non_tun_valid &&
1440 (vxlan_valid || gre_valid || gtp_valid || pppoe_valid))
1441 tun_type = ICE_SW_TUN_AND_NON_TUN;
1442 else if (non_tun_valid && !vxlan_valid && !gre_valid && !gtp_valid &&
1444 tun_type = ICE_NON_TUN;
1446 tun_type = ICE_NON_TUN;
/* Multiple PPPoE profiles: disambiguate IPv4-only vs IPv6-only. */
1448 if (profile_num > 1 && tun_type == ICE_SW_TUN_PPPOE) {
1449 i = ice_is_bit_set(recipe_to_profile[rid],
1450 ICE_PROFID_PPPOE_IPV4_OTHER);
1451 j = ice_is_bit_set(recipe_to_profile[rid],
1452 ICE_PROFID_PPPOE_IPV6_OTHER);
1454 tun_type = ICE_SW_TUN_PPPOE_IPV4;
1456 tun_type = ICE_SW_TUN_PPPOE_IPV6;
/* Narrow a generic GTP result to the exact inner/outer IP combination. */
1459 if (tun_type == ICE_SW_TUN_GTP) {
1460 if (ice_is_bit_set(recipe_to_profile[rid],
1461 ICE_PROFID_IPV4_GTPU_IPV4_OTHER))
1462 tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4;
1463 else if (ice_is_bit_set(recipe_to_profile[rid],
1464 ICE_PROFID_IPV4_GTPU_IPV6_OTHER))
1465 tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6;
1466 else if (ice_is_bit_set(recipe_to_profile[rid],
1467 ICE_PROFID_IPV6_GTPU_IPV4_OTHER))
1468 tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4;
1469 else if (ice_is_bit_set(recipe_to_profile[rid],
1470 ICE_PROFID_IPV6_GTPU_IPV6_OTHER))
1471 tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6;
/* Exactly one profile: map the single profile ID directly to a type. */
1474 if (profile_num == 1 && (flag_valid || non_tun_valid || pppoe_valid)) {
1475 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1476 if (ice_is_bit_set(recipe_to_profile[rid], j)) {
1478 case ICE_PROFID_IPV4_TCP:
1479 tun_type = ICE_SW_IPV4_TCP;
1481 case ICE_PROFID_IPV4_UDP:
1482 tun_type = ICE_SW_IPV4_UDP;
1484 case ICE_PROFID_IPV6_TCP:
1485 tun_type = ICE_SW_IPV6_TCP;
1487 case ICE_PROFID_IPV6_UDP:
1488 tun_type = ICE_SW_IPV6_UDP;
1490 case ICE_PROFID_PPPOE_PAY:
1491 tun_type = ICE_SW_TUN_PPPOE_PAY;
1493 case ICE_PROFID_PPPOE_IPV4_TCP:
1494 tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1496 case ICE_PROFID_PPPOE_IPV4_UDP:
1497 tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1499 case ICE_PROFID_PPPOE_IPV4_OTHER:
1500 tun_type = ICE_SW_TUN_PPPOE_IPV4;
1502 case ICE_PROFID_PPPOE_IPV6_TCP:
1503 tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1505 case ICE_PROFID_PPPOE_IPV6_UDP:
1506 tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1508 case ICE_PROFID_PPPOE_IPV6_OTHER:
1509 tun_type = ICE_SW_TUN_PPPOE_IPV6;
1511 case ICE_PROFID_IPV4_ESP:
1512 tun_type = ICE_SW_TUN_IPV4_ESP;
1514 case ICE_PROFID_IPV6_ESP:
1515 tun_type = ICE_SW_TUN_IPV6_ESP;
1517 case ICE_PROFID_IPV4_AH:
1518 tun_type = ICE_SW_TUN_IPV4_AH;
1520 case ICE_PROFID_IPV6_AH:
1521 tun_type = ICE_SW_TUN_IPV6_AH;
1523 case ICE_PROFID_IPV4_NAT_T:
1524 tun_type = ICE_SW_TUN_IPV4_NAT_T;
1526 case ICE_PROFID_IPV6_NAT_T:
1527 tun_type = ICE_SW_TUN_IPV6_NAT_T;
1529 case ICE_PROFID_IPV4_PFCP_NODE:
1531 ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1533 case ICE_PROFID_IPV6_PFCP_NODE:
1535 ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1537 case ICE_PROFID_IPV4_PFCP_SESSION:
1539 ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1541 case ICE_PROFID_IPV6_PFCP_SESSION:
1543 ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1545 case ICE_PROFID_MAC_IPV4_L2TPV3:
1546 tun_type = ICE_SW_TUN_IPV4_L2TPV3;
1548 case ICE_PROFID_MAC_IPV6_L2TPV3:
1549 tun_type = ICE_SW_TUN_IPV6_L2TPV3;
1551 case ICE_PROFID_IPV4_GTPU_TEID:
1552 tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1554 case ICE_PROFID_IPV6_GTPU_TEID:
1555 tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
/* Finally, promote to the QinQ variant when the recipe matched a VLAN. */
1566 if (vlan && tun_type == ICE_SW_TUN_PPPOE)
1567 tun_type = ICE_SW_TUN_PPPOE_QINQ;
1568 else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV6)
1569 tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
1570 else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV4)
1571 tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
1572 else if (vlan && tun_type == ICE_SW_TUN_PPPOE_PAY)
1573 tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
1574 else if (vlan && tun_type == ICE_SW_TUN_AND_NON_TUN)
1575 tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1576 else if (vlan && tun_type == ICE_NON_TUN)
1577 tun_type = ICE_NON_TUN_QINQ;
1583 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
1584 * @hw: pointer to hardware structure
1585 * @recps: struct that we need to populate
1586 * @rid: recipe ID that we are populating
1587 * @refresh_required: true if we should get recipe to profile mapping from FW
1589 * This function is used to populate all the necessary entries into our
1590 * bookkeeping so that we have a current list of all the recipes that are
1591 * programmed in the firmware.
/* Returns ICE_SUCCESS or an error code (e.g. ICE_ERR_NO_MEMORY).
 * NOTE(review): several original lines (error-path gotos, closing braces,
 * some loop bookkeeping) are missing from this extraction; the comments
 * below describe only what is visible.
 */
1593 static enum ice_status
1594 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
1595 bool *refresh_required)
1597 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
1598 struct ice_aqc_recipe_data_elem *tmp;
1599 u16 num_recps = ICE_MAX_NUM_RECIPES;
1600 struct ice_prot_lkup_ext *lkup_exts;
1601 enum ice_status status;
1606 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
1608 /* we need a buffer big enough to accommodate all the recipes */
1609 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
1610 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
1612 return ICE_ERR_NO_MEMORY;
1614 tmp[0].recipe_indx = rid;
1615 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
1616 /* non-zero status meaning recipe doesn't exist */
1620 /* Get recipe to profile map so that we can get the fv from lkups that
1621 * we read for a recipe from FW. Since we want to minimize the number of
1622 * times we make this FW call, just make one call and cache the copy
1623 * until a new recipe is added. This operation is only required the
1624 * first time to get the changes from FW. Then to search existing
1625 * entries we don't need to update the cache again until another recipe
1628 if (*refresh_required) {
1629 ice_get_recp_to_prof_map(hw);
1630 *refresh_required = false;
1633 /* Start populating all the entries for recps[rid] based on lkups from
1634 * firmware. Note that we are only creating the root recipe in our
1637 lkup_exts = &recps[rid].lkup_exts;
/* Walk every sub-recipe returned by FW for this (possibly chained) recipe. */
1639 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
1640 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
1641 struct ice_recp_grp_entry *rg_entry;
1642 u8 i, prof, idx, prot = 0;
1646 rg_entry = (struct ice_recp_grp_entry *)
1647 ice_malloc(hw, sizeof(*rg_entry));
1649 status = ICE_ERR_NO_MEMORY;
1653 idx = root_bufs.recipe_indx;
1654 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
1656 /* Mark all result indices in this chain */
1657 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1658 ice_set_bit(root_bufs.content.result_indx &
1659 ~ICE_AQ_RECIPE_RESULT_EN, result_bm)
1661 /* get the first profile that is associated with rid */
1662 prof = ice_find_first_bit(recipe_to_profile[idx],
1663 ICE_MAX_NUM_PROFILES);
/* lkup_indx[0] is skipped (i + 1 below): entry 0 is not a lookup word. */
1664 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
1665 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
1667 rg_entry->fv_idx[i] = lkup_indx;
1668 rg_entry->fv_mask[i] =
1669 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
1671 /* If the recipe is a chained recipe then all its
1672 * child recipe's result will have a result index.
1673 * To fill fv_words we should not use those result
1674 * index, we only need the protocol ids and offsets.
1675 * We will skip all the fv_idx which stores result
1676 * index in them. We also need to skip any fv_idx which
1677 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
1678 * valid offset value.
1680 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
1681 rg_entry->fv_idx[i]) ||
1682 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
1683 rg_entry->fv_idx[i] == 0)
/* Translate the field-vector index to (protocol ID, offset). */
1686 ice_find_prot_off(hw, ICE_BLK_SW, prof,
1687 rg_entry->fv_idx[i], &prot, &off);
1688 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
1689 lkup_exts->fv_words[fv_word_idx].off = off;
1690 lkup_exts->field_mask[fv_word_idx] =
1691 rg_entry->fv_mask[i];
1692 if (prot == ICE_META_DATA_ID_HW &&
1693 off == ICE_TUN_FLAG_MDID_OFF)
1697 /* populate rg_list with the data from the child entry of this
1700 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
1702 /* Propagate some data to the recipe database */
1703 recps[idx].is_root = !!is_root;
1704 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1705 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
1706 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
1707 recps[idx].chain_idx = root_bufs.content.result_indx &
1708 ~ICE_AQ_RECIPE_RESULT_EN;
1709 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
1711 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
1717 /* Only do the following for root recipes entries */
1718 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
1719 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
1720 recps[idx].root_rid = root_bufs.content.rid &
1721 ~ICE_AQ_RECIPE_ID_IS_ROOT;
1722 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1725 /* Complete initialization of the root recipe entry */
1726 lkup_exts->n_val_words = fv_word_idx;
1727 recps[rid].big_recp = (num_recps > 1);
1728 recps[rid].n_grp_count = (u8)num_recps;
1729 recps[rid].tun_type = ice_get_tun_type_for_recipe(rid, vlan);
/* Keep a private copy of the raw FW buffers for later reuse. */
1730 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
1731 ice_memdup(hw, tmp, recps[rid].n_grp_count *
1732 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
1733 if (!recps[rid].root_buf)
1736 /* Copy result indexes */
1737 ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
1738 recps[rid].recp_created = true;
1746 * ice_get_recp_to_prof_map - updates recipe to profile mapping
1747 * @hw: pointer to hardware structure
1749 * This function is used to populate recipe_to_profile matrix where index to
1750 * this array is the recipe ID and the element is the mapping of which profiles
1751 * is this recipe mapped to.
/* Also fills the inverse profile_to_recipe map. On AQ failure for a profile
 * its entry is left zeroed (cleared above the call) and the loop moves on.
 */
1753 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
1755 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
1758 for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
1761 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
1762 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
1763 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
1765 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
1766 ICE_MAX_NUM_RECIPES);
/* Set the reverse mapping: profile i belongs to every recipe j found. */
1767 ice_for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
1768 ice_set_bit(i, recipe_to_profile[j]);
1773 * ice_init_def_sw_recp - initialize the recipe book keeping tables
1774 * @hw: pointer to the HW struct
1775 * @recp_list: pointer to sw recipe list
1777 * Allocate memory for the entire recipe table and initialize the structures/
1778 * entries corresponding to basic recipes.
/* Allocates ICE_MAX_NUM_RECIPES entries and initializes each entry's lists
 * and filter-rule lock; returns ICE_ERR_NO_MEMORY if the allocation fails.
 */
1781 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
1783 struct ice_sw_recipe *recps;
1786 recps = (struct ice_sw_recipe *)
1787 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
1789 return ICE_ERR_NO_MEMORY;
1791 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
1792 recps[i].root_rid = i;
1793 INIT_LIST_HEAD(&recps[i].filt_rules);
1794 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
1795 INIT_LIST_HEAD(&recps[i].rg_list);
1796 ice_init_lock(&recps[i].filt_rule_lock);
1805 * ice_aq_get_sw_cfg - get switch configuration
1806 * @hw: pointer to the hardware structure
1807 * @buf: pointer to the result buffer
1808 * @buf_size: length of the buffer available for response
1809 * @req_desc: pointer to requested descriptor
1810 * @num_elems: pointer to number of elements
1811 * @cd: pointer to command details structure or NULL
1813 * Get switch configuration (0x0200) to be placed in buf.
1814 * This admin command returns information such as initial VSI/port number
1815 * and switch ID it belongs to.
1817 * NOTE: *req_desc is both an input/output parameter.
1818 * The caller of this function first calls this function with *request_desc set
1819 * to 0. If the response from f/w has *req_desc set to 0, all the switch
1820 * configuration information has been returned; if non-zero (meaning not all
1821 * the information was returned), the caller should call this function again
1822 * with *req_desc set to the previous value returned by f/w to get the
1823 * next block of switch configuration information.
1825 * *num_elems is output only parameter. This reflects the number of elements
1826 * in response buffer. The caller of this function to use *num_elems while
1827 * parsing the response buffer.
1829 static enum ice_status
1830 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
1831 u16 buf_size, u16 *req_desc, u16 *num_elems,
1832 struct ice_sq_cd *cd)
1834 struct ice_aqc_get_sw_cfg *cmd;
1835 struct ice_aq_desc desc;
1836 enum ice_status status;
1838 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
1839 cmd = &desc.params.get_sw_conf;
1840 cmd->element = CPU_TO_LE16(*req_desc);
1842 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* NOTE(review): the success guard for these output updates is not visible in
 * this extraction — presumably they run only when status is zero; confirm.
 */
1844 *req_desc = LE16_TO_CPU(cmd->element);
1845 *num_elems = LE16_TO_CPU(cmd->num_elems);
1852 * ice_alloc_sw - allocate resources specific to switch
1853 * @hw: pointer to the HW struct
1854 * @ena_stats: true to turn on VEB stats
1855 * @shared_res: true for shared resource, false for dedicated resource
1856 * @sw_id: switch ID returned
1857 * @counter_id: VEB counter ID returned
1859 * allocates switch resources (SWID and VEB counter) (0x0208)
/* Two-step allocation: first the SWID, then (when VEB stats are enabled) a
 * dedicated VEB counter in a second alloc_free_res call. sw_buf is released
 * at the common exit label; counter_buf is freed locally on both paths.
 */
1862 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
1865 struct ice_aqc_alloc_free_res_elem *sw_buf;
1866 struct ice_aqc_res_elem *sw_ele;
1867 enum ice_status status;
1870 buf_len = ice_struct_size(sw_buf, elem, 1);
1871 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1873 return ICE_ERR_NO_MEMORY;
1875 /* Prepare buffer for switch ID.
1876 * The number of resource entries in buffer is passed as 1 since only a
1877 * single switch/VEB instance is allocated, and hence a single sw_id
1880 sw_buf->num_elems = CPU_TO_LE16(1);
1882 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
1883 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
1884 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
1886 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1887 ice_aqc_opc_alloc_res, NULL);
1890 goto ice_alloc_sw_exit;
1892 sw_ele = &sw_buf->elem[0];
1893 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
1896 /* Prepare buffer for VEB Counter */
1897 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
1898 struct ice_aqc_alloc_free_res_elem *counter_buf;
1899 struct ice_aqc_res_elem *counter_ele;
1901 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1902 ice_malloc(hw, buf_len);
1904 status = ICE_ERR_NO_MEMORY;
1905 goto ice_alloc_sw_exit;
1908 /* The number of resource entries in buffer is passed as 1 since
1909 * only a single switch/VEB instance is allocated, and hence a
1910 * single VEB counter is requested.
1912 counter_buf->num_elems = CPU_TO_LE16(1);
1913 counter_buf->res_type =
1914 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
1915 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
1916 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
/* Failure path: release the counter buffer before bailing out. */
1920 ice_free(hw, counter_buf);
1921 goto ice_alloc_sw_exit;
1923 counter_ele = &counter_buf->elem[0];
1924 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
1925 ice_free(hw, counter_buf);
1929 ice_free(hw, sw_buf);
1934 * ice_free_sw - free resources specific to switch
1935 * @hw: pointer to the HW struct
1936 * @sw_id: switch ID returned
1937 * @counter_id: VEB counter ID returned
1939 * free switch resources (SWID and VEB counter) (0x0209)
1941 * NOTE: This function frees multiple resources. It continues
1942 * releasing other resources even after it encounters error.
1943 * The error code returned is the last error it encountered.
1945 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
1947 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
1948 enum ice_status status, ret_status;
1951 buf_len = ice_struct_size(sw_buf, elem, 1);
1952 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1954 return ICE_ERR_NO_MEMORY;
1956 /* Prepare buffer to free for switch ID res.
1957 * The number of resource entries in buffer is passed as 1 since only a
1958 * single switch/VEB instance is freed, and hence a single sw_id
1961 sw_buf->num_elems = CPU_TO_LE16(1);
1962 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
1963 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
/* First free: the SWID. ret_status carries this result unless the
 * counter free below also fails (per the NOTE above).
 */
1965 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1966 ice_aqc_opc_free_res, NULL);
1969 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1971 /* Prepare buffer to free for VEB Counter resource */
1972 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1973 ice_malloc(hw, buf_len);
1975 ice_free(hw, sw_buf);
1976 return ICE_ERR_NO_MEMORY;
1979 /* The number of resource entries in buffer is passed as 1 since only a
1980 * single switch/VEB instance is freed, and hence a single VEB counter
1983 counter_buf->num_elems = CPU_TO_LE16(1);
1984 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
1985 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
/* Second free: the VEB counter; its error (if any) wins. */
1987 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1988 ice_aqc_opc_free_res, NULL);
1990 ice_debug(hw, ICE_DBG_SW, "VEB counter resource could not be freed\n");
1991 ret_status = status;
1994 ice_free(hw, counter_buf);
1995 ice_free(hw, sw_buf);
/* ice_aq_add_vsi - send the Add VSI admin command */
2001 * @hw: pointer to the HW struct
2002 * @vsi_ctx: pointer to a VSI context struct
2003 * @cd: pointer to command details structure or NULL
2005 * Add a VSI context to the hardware (0x0210)
/* On success, the FW-assigned VSI number and the used/free VSI counters are
 * copied back into @vsi_ctx.
 */
2008 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2009 struct ice_sq_cd *cd)
2011 struct ice_aqc_add_update_free_vsi_resp *res;
2012 struct ice_aqc_add_get_update_free_vsi *cmd;
2013 struct ice_aq_desc desc;
2014 enum ice_status status;
2016 cmd = &desc.params.vsi_cmd;
2017 res = &desc.params.add_update_free_vsi_res;
2019 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
/* Caller-provided VSI number is only meaningful when not pool-allocated. */
2021 if (!vsi_ctx->alloc_from_pool)
2022 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
2023 ICE_AQ_VSI_IS_VALID);
2025 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
2027 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2029 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2030 sizeof(vsi_ctx->info), cd);
2033 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
2034 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
2035 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
/* ice_aq_free_vsi - send the Free VSI admin command */
2043 * @hw: pointer to the HW struct
2044 * @vsi_ctx: pointer to a VSI context struct
2045 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2046 * @cd: pointer to command details structure or NULL
2048 * Free VSI context info from hardware (0x0213)
2051 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2052 bool keep_vsi_alloc, struct ice_sq_cd *cd)
2054 struct ice_aqc_add_update_free_vsi_resp *resp;
2055 struct ice_aqc_add_get_update_free_vsi *cmd;
2056 struct ice_aq_desc desc;
2057 enum ice_status status;
2059 cmd = &desc.params.vsi_cmd;
2060 resp = &desc.params.add_update_free_vsi_res;
2062 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
2064 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* NOTE(review): the conditional on keep_vsi_alloc that should guard this
 * flag assignment is not visible in this extraction — confirm upstream.
 */
2066 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
2068 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2070 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2071 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
/* ice_aq_update_vsi - send the Update VSI admin command */
2079 * @hw: pointer to the HW struct
2080 * @vsi_ctx: pointer to a VSI context struct
2081 * @cd: pointer to command details structure or NULL
2083 * Update VSI context in the hardware (0x0211)
2086 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2087 struct ice_sq_cd *cd)
2089 struct ice_aqc_add_update_free_vsi_resp *resp;
2090 struct ice_aqc_add_get_update_free_vsi *cmd;
2091 struct ice_aq_desc desc;
2092 enum ice_status status;
2094 cmd = &desc.params.vsi_cmd;
2095 resp = &desc.params.add_update_free_vsi_res;
2097 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
2099 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* The VSI info struct is sent as command data (RD flag set). */
2101 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2103 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2104 sizeof(vsi_ctx->info), cd);
2107 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2108 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2115 * ice_is_vsi_valid - check whether the VSI is valid or not
2116 * @hw: pointer to the HW struct
2117 * @vsi_handle: VSI handle
2119 * check whether the VSI is valid or not
2121 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
2123 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
2127 * ice_get_hw_vsi_num - return the HW VSI number
2128 * @hw: pointer to the HW struct
2129 * @vsi_handle: VSI handle
2131 * return the HW VSI number
2132 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
2134 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
2136 return hw->vsi_ctx[vsi_handle]->vsi_num;
2140 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
2141 * @hw: pointer to the HW struct
2142 * @vsi_handle: VSI handle
2144 * return the VSI context entry for a given VSI handle
2146 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2148 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
2152 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
2153 * @hw: pointer to the HW struct
2154 * @vsi_handle: VSI handle
2155 * @vsi: VSI context pointer
2157 * save the VSI context entry for a given VSI handle
/* Stores the pointer only; ownership of @vsi passes to the hw table and is
 * released later by ice_clear_vsi_ctx().
 */
2160 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
2162 hw->vsi_ctx[vsi_handle] = vsi;
2166 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
2167 * @hw: pointer to the HW struct
2168 * @vsi_handle: VSI handle
2170 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
2172 struct ice_vsi_ctx *vsi;
2175 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* Free the per-TC LAN queue context arrays and NULL the pointers so a later
 * clear/replay cannot double-free them.
 */
2178 ice_for_each_traffic_class(i) {
2179 if (vsi->lan_q_ctx[i]) {
2180 ice_free(hw, vsi->lan_q_ctx[i]);
2181 vsi->lan_q_ctx[i] = NULL;
2187 * ice_clear_vsi_ctx - clear the VSI context entry
2188 * @hw: pointer to the HW struct
2189 * @vsi_handle: VSI handle
2191 * clear the VSI context entry
/* Releases the queue contexts first, then drops the table entry so the
 * handle reads as invalid afterwards (ice_is_vsi_valid() -> false).
 */
2193 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2195 struct ice_vsi_ctx *vsi;
2197 vsi = ice_get_vsi_ctx(hw, vsi_handle);
2199 ice_clear_vsi_q_ctx(hw, vsi_handle);
2201 hw->vsi_ctx[vsi_handle] = NULL;
2206 * ice_clear_all_vsi_ctx - clear all the VSI context entries
2207 * @hw: pointer to the HW struct
/* Iterates every possible handle; ice_clear_vsi_ctx() is a no-op for
 * handles with no saved context.
 */
2209 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
2213 for (i = 0; i < ICE_MAX_VSI; i++)
2214 ice_clear_vsi_ctx(hw, i);
2218 * ice_add_vsi - add VSI context to the hardware and VSI handle list
2219 * @hw: pointer to the HW struct
2220 * @vsi_handle: unique VSI handle provided by drivers
2221 * @vsi_ctx: pointer to a VSI context struct
2222 * @cd: pointer to command details structure or NULL
2224 * Add a VSI context to the hardware also add it into the VSI handle list.
2225 * If this function gets called after reset for existing VSIs then update
2226 * with the new HW VSI number in the corresponding VSI handle list entry.
2229 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2230 struct ice_sq_cd *cd)
2232 struct ice_vsi_ctx *tmp_vsi_ctx;
2233 enum ice_status status;
2235 if (vsi_handle >= ICE_MAX_VSI)
2236 return ICE_ERR_PARAM;
2237 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
2240 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
2242 /* Create a new VSI context */
2243 tmp_vsi_ctx = (struct ice_vsi_ctx *)
2244 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
/* Allocation failed: roll back the FW-side add before returning. */
2246 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
2247 return ICE_ERR_NO_MEMORY;
/* Save a private copy; the table owns tmp_vsi_ctx from here on. */
2249 *tmp_vsi_ctx = *vsi_ctx;
2251 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
2253 /* update with new HW VSI num */
2254 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
2261 * ice_free_vsi- free VSI context from hardware and VSI handle list
2262 * @hw: pointer to the HW struct
2263 * @vsi_handle: unique VSI handle
2264 * @vsi_ctx: pointer to a VSI context struct
2265 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2266 * @cd: pointer to command details structure or NULL
2268 * Free VSI context info from hardware as well as from VSI handle list
2271 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2272 bool keep_vsi_alloc, struct ice_sq_cd *cd)
2274 enum ice_status status;
2276 if (!ice_is_vsi_valid(hw, vsi_handle))
2277 return ICE_ERR_PARAM;
/* Resolve the handle to the HW VSI number before issuing the AQ free. */
2278 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2279 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
2281 ice_clear_vsi_ctx(hw, vsi_handle);
/* ice_update_vsi - update a VSI context in hardware by handle */
2287 * @hw: pointer to the HW struct
2288 * @vsi_handle: unique VSI handle
2289 * @vsi_ctx: pointer to a VSI context struct
2290 * @cd: pointer to command details structure or NULL
2292 * Update VSI context in the hardware
2295 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2296 struct ice_sq_cd *cd)
2298 if (!ice_is_vsi_valid(hw, vsi_handle))
2299 return ICE_ERR_PARAM;
/* Translate handle to HW VSI number, then forward to the AQ helper. */
2300 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2301 return ice_aq_update_vsi(hw, vsi_ctx, cd);
2305 * ice_aq_get_vsi_params
2306 * @hw: pointer to the HW struct
2307 * @vsi_ctx: pointer to a VSI context struct
2308 * @cd: pointer to command details structure or NULL
2310 * Get VSI context info from hardware (0x0212)
2313 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2314 struct ice_sq_cd *cd)
2316 struct ice_aqc_add_get_update_free_vsi *cmd;
2317 struct ice_aqc_get_vsi_resp *resp;
2318 struct ice_aq_desc desc;
2319 enum ice_status status;
2321 cmd = &desc.params.vsi_cmd;
2322 resp = &desc.params.get_vsi_resp;
2324 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
2326 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* FW writes the VSI properties directly into vsi_ctx->info. */
2328 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2329 sizeof(vsi_ctx->info), cd);
2331 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
2333 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2334 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2341 * ice_aq_add_update_mir_rule - add/update a mirror rule
2342 * @hw: pointer to the HW struct
2343 * @rule_type: Rule Type
2344 * @dest_vsi: VSI number to which packets will be mirrored
2345 * @count: length of the list
2346 * @mr_buf: buffer for list of mirrored VSI numbers
2347 * @cd: pointer to command details structure or NULL
2350 * Add/Update Mirror Rule (0x260).
/* @rule_id is in/out: pass ICE_INVAL_MIRROR_RULE_ID to add a new rule, or an
 * existing ID to update; on success the FW-assigned rule ID is written back.
 */
2353 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
2354 u16 count, struct ice_mir_rule_buf *mr_buf,
2355 struct ice_sq_cd *cd, u16 *rule_id)
2357 struct ice_aqc_add_update_mir_rule *cmd;
2358 struct ice_aq_desc desc;
2359 enum ice_status status;
2360 __le16 *mr_list = NULL;
/* Validate parameter combinations per rule type before any allocation. */
2363 switch (rule_type) {
2364 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
2365 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
2366 /* Make sure count and mr_buf are set for these rule_types */
2367 if (!(count && mr_buf))
2368 return ICE_ERR_PARAM;
2370 buf_size = count * sizeof(__le16);
2371 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
2373 return ICE_ERR_NO_MEMORY;
2375 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
2376 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
2377 /* Make sure count and mr_buf are not set for these
2380 if (count || mr_buf)
2381 return ICE_ERR_PARAM;
2384 ice_debug(hw, ICE_DBG_SW, "Error due to unsupported rule_type %u\n", rule_type);
2385 return ICE_ERR_OUT_OF_RANGE;
2388 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
2390 /* Pre-process 'mr_buf' items for add/update of virtual port
2391 * ingress/egress mirroring (but not physical port ingress/egress
2397 for (i = 0; i < count; i++) {
2400 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
2402 /* Validate specified VSI number, make sure it is less
2403 * than ICE_MAX_VSI, if not return with error.
2405 if (id >= ICE_MAX_VSI) {
2406 ice_debug(hw, ICE_DBG_SW, "Error VSI index (%u) out-of-range\n",
2408 ice_free(hw, mr_list);
2409 return ICE_ERR_OUT_OF_RANGE;
2412 /* add VSI to mirror rule */
2415 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
2416 else /* remove VSI from mirror rule */
2417 mr_list[i] = CPU_TO_LE16(id);
2421 cmd = &desc.params.add_update_rule;
2422 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
2423 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
2424 ICE_AQC_RULE_ID_VALID_M);
2425 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
2426 cmd->num_entries = CPU_TO_LE16(count);
2427 cmd->dest = CPU_TO_LE16(dest_vsi);
2429 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
2431 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
/* mr_list is NULL (and ice_free is a no-op) for the PPORT rule types. */
2433 ice_free(hw, mr_list);
2439 * ice_aq_delete_mir_rule - delete a mirror rule
2440 * @hw: pointer to the HW struct
2441 * @rule_id: Mirror rule ID (to be deleted)
2442 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
2443 * otherwise it is returned to the shared pool
2444 * @cd: pointer to command details structure or NULL
2446 * Delete Mirror Rule (0x261).
2449 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
2450 struct ice_sq_cd *cd)
2452 struct ice_aqc_delete_mir_rule *cmd;
2453 struct ice_aq_desc desc;
2455 /* rule_id should be in the range 0...63 */
2456 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
2457 return ICE_ERR_OUT_OF_RANGE;
2459 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
2461 cmd = &desc.params.del_rule;
2462 rule_id |= ICE_AQC_RULE_ID_VALID_M;
2463 cmd->rule_id = CPU_TO_LE16(rule_id);
/* NOTE(review): the conditional on keep_allocd that should guard this flag
 * assignment is not visible in this extraction — confirm upstream.
 */
2466 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
2468 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2472 * ice_aq_alloc_free_vsi_list
2473 * @hw: pointer to the HW struct
2474 * @vsi_list_id: VSI list ID returned or used for lookup
2475 * @lkup_type: switch rule filter lookup type
2476 * @opc: switch rules population command type - pass in the command opcode
2478 * allocates or free a VSI list resource
2480 static enum ice_status
2481 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
2482 enum ice_sw_lkup_type lkup_type,
2483 enum ice_adminq_opc opc)
2485 struct ice_aqc_alloc_free_res_elem *sw_buf;
2486 struct ice_aqc_res_elem *vsi_ele;
2487 enum ice_status status;
2490 buf_len = ice_struct_size(sw_buf, elem, 1);
2491 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2493 return ICE_ERR_NO_MEMORY;
2494 sw_buf->num_elems = CPU_TO_LE16(1);
2496 if (lkup_type == ICE_SW_LKUP_MAC ||
2497 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2498 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2499 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2500 lkup_type == ICE_SW_LKUP_PROMISC ||
2501 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2502 lkup_type == ICE_SW_LKUP_LAST) {
2503 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
2504 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
2506 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
2508 status = ICE_ERR_PARAM;
2509 goto ice_aq_alloc_free_vsi_list_exit;
2512 if (opc == ice_aqc_opc_free_res)
2513 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
2515 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
2517 goto ice_aq_alloc_free_vsi_list_exit;
2519 if (opc == ice_aqc_opc_alloc_res) {
2520 vsi_ele = &sw_buf->elem[0];
2521 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
2524 ice_aq_alloc_free_vsi_list_exit:
2525 ice_free(hw, sw_buf);
2530 * ice_aq_set_storm_ctrl - Sets storm control configuration
2531 * @hw: pointer to the HW struct
2532 * @bcast_thresh: represents the upper threshold for broadcast storm control
2533 * @mcast_thresh: represents the upper threshold for multicast storm control
2534 * @ctl_bitmask: storm control knobs
2536 * Sets the storm control configuration (0x0280)
2539 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
2542 struct ice_aqc_storm_cfg *cmd;
2543 struct ice_aq_desc desc;
2545 cmd = &desc.params.storm_conf;
2547 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
2549 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
2550 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
2551 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
2553 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2557 * ice_aq_get_storm_ctrl - gets storm control configuration
2558 * @hw: pointer to the HW struct
2559 * @bcast_thresh: represents the upper threshold for broadcast storm control
2560 * @mcast_thresh: represents the upper threshold for multicast storm control
2561 * @ctl_bitmask: storm control knobs
2563 * Gets the storm control configuration (0x0281)
2566 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
2569 enum ice_status status;
2570 struct ice_aq_desc desc;
2572 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
2574 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2576 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
2579 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
2582 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
2585 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
2592 * ice_aq_sw_rules - add/update/remove switch rules
2593 * @hw: pointer to the HW struct
2594 * @rule_list: pointer to switch rule population list
2595 * @rule_list_sz: total size of the rule list in bytes
2596 * @num_rules: number of switch rules in the rule_list
2597 * @opc: switch rules population command type - pass in the command opcode
2598 * @cd: pointer to command details structure or NULL
2600 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
2602 static enum ice_status
2603 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
2604 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2606 struct ice_aq_desc desc;
2607 enum ice_status status;
2609 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2611 if (opc != ice_aqc_opc_add_sw_rules &&
2612 opc != ice_aqc_opc_update_sw_rules &&
2613 opc != ice_aqc_opc_remove_sw_rules)
2614 return ICE_ERR_PARAM;
2616 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2618 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2619 desc.params.sw_rules.num_rules_fltr_entry_index =
2620 CPU_TO_LE16(num_rules);
2621 status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
2622 if (opc != ice_aqc_opc_add_sw_rules &&
2623 hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
2624 status = ICE_ERR_DOES_NOT_EXIST;
2630 * ice_aq_add_recipe - add switch recipe
2631 * @hw: pointer to the HW struct
2632 * @s_recipe_list: pointer to switch rule population list
2633 * @num_recipes: number of switch recipes in the list
2634 * @cd: pointer to command details structure or NULL
2639 ice_aq_add_recipe(struct ice_hw *hw,
2640 struct ice_aqc_recipe_data_elem *s_recipe_list,
2641 u16 num_recipes, struct ice_sq_cd *cd)
2643 struct ice_aqc_add_get_recipe *cmd;
2644 struct ice_aq_desc desc;
2647 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2648 cmd = &desc.params.add_get_recipe;
2649 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
2651 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
2652 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2654 buf_size = num_recipes * sizeof(*s_recipe_list);
2656 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
2660 * ice_aq_get_recipe - get switch recipe
2661 * @hw: pointer to the HW struct
2662 * @s_recipe_list: pointer to switch rule population list
2663 * @num_recipes: pointer to the number of recipes (input and output)
2664 * @recipe_root: root recipe number of recipe(s) to retrieve
2665 * @cd: pointer to command details structure or NULL
2669 * On input, *num_recipes should equal the number of entries in s_recipe_list.
2670 * On output, *num_recipes will equal the number of entries returned in
2673 * The caller must supply enough space in s_recipe_list to hold all possible
2674 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
2677 ice_aq_get_recipe(struct ice_hw *hw,
2678 struct ice_aqc_recipe_data_elem *s_recipe_list,
2679 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
2681 struct ice_aqc_add_get_recipe *cmd;
2682 struct ice_aq_desc desc;
2683 enum ice_status status;
2686 if (*num_recipes != ICE_MAX_NUM_RECIPES)
2687 return ICE_ERR_PARAM;
2689 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2690 cmd = &desc.params.add_get_recipe;
2691 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
2693 cmd->return_index = CPU_TO_LE16(recipe_root);
2694 cmd->num_sub_recipes = 0;
2696 buf_size = *num_recipes * sizeof(*s_recipe_list);
2698 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
2699 /* cppcheck-suppress constArgument */
2700 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
2706 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
2707 * @hw: pointer to the HW struct
2708 * @profile_id: package profile ID to associate the recipe with
2709 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2710 * @cd: pointer to command details structure or NULL
2711 * Recipe to profile association (0x0291)
2714 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2715 struct ice_sq_cd *cd)
2717 struct ice_aqc_recipe_to_profile *cmd;
2718 struct ice_aq_desc desc;
2720 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2721 cmd = &desc.params.recipe_to_profile;
2722 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
2723 cmd->profile_id = CPU_TO_LE16(profile_id);
2724 /* Set the recipe ID bit in the bitmask to let the device know which
2725 * profile we are associating the recipe to
2727 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
2728 ICE_NONDMA_TO_NONDMA);
2730 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2734 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
2735 * @hw: pointer to the HW struct
2736 * @profile_id: package profile ID to associate the recipe with
2737 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2738 * @cd: pointer to command details structure or NULL
2739 * Associate profile ID with given recipe (0x0293)
2742 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2743 struct ice_sq_cd *cd)
2745 struct ice_aqc_recipe_to_profile *cmd;
2746 struct ice_aq_desc desc;
2747 enum ice_status status;
2749 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2750 cmd = &desc.params.recipe_to_profile;
2751 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
2752 cmd->profile_id = CPU_TO_LE16(profile_id);
2754 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2756 ice_memcpy(r_bitmap, cmd->recipe_assoc,
2757 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
2763 * ice_alloc_recipe - add recipe resource
2764 * @hw: pointer to the hardware structure
2765 * @rid: recipe ID returned as response to AQ call
2767 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
2769 struct ice_aqc_alloc_free_res_elem *sw_buf;
2770 enum ice_status status;
2773 buf_len = ice_struct_size(sw_buf, elem, 1);
2774 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2776 return ICE_ERR_NO_MEMORY;
2778 sw_buf->num_elems = CPU_TO_LE16(1);
2779 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
2780 ICE_AQC_RES_TYPE_S) |
2781 ICE_AQC_RES_TYPE_FLAG_SHARED);
2782 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2783 ice_aqc_opc_alloc_res, NULL);
2785 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2786 ice_free(hw, sw_buf);
2791 /* ice_init_port_info - Initialize port_info with switch configuration data
2792 * @pi: pointer to port_info
2793 * @vsi_port_num: VSI number or port number
2794 * @type: Type of switch element (port or VSI)
2795 * @swid: switch ID of the switch the element is attached to
2796 * @pf_vf_num: PF or VF number
2797 * @is_vf: true if the element is a VF, false otherwise
2800 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2801 u16 swid, u16 pf_vf_num, bool is_vf)
2804 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2805 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2807 pi->pf_vf_num = pf_vf_num;
2809 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
2810 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
2813 ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
2818 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
2819 * @hw: pointer to the hardware structure
2821 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
2823 struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
2824 enum ice_status status;
2831 num_total_ports = 1;
2833 rbuf = (struct ice_aqc_get_sw_cfg_resp_elem *)
2834 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
2837 return ICE_ERR_NO_MEMORY;
2839 /* Multiple calls to ice_aq_get_sw_cfg may be required
2840 * to get all the switch configuration information. The need
2841 * for additional calls is indicated by ice_aq_get_sw_cfg
2842 * writing a non-zero value in req_desc
2845 struct ice_aqc_get_sw_cfg_resp_elem *ele;
2847 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
2848 &req_desc, &num_elems, NULL);
2853 for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
2854 u16 pf_vf_num, swid, vsi_port_num;
2858 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
2859 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
2861 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
2862 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
2864 swid = LE16_TO_CPU(ele->swid);
2866 if (LE16_TO_CPU(ele->pf_vf_num) &
2867 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
2870 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
2871 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
2874 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2875 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
2876 if (j == num_total_ports) {
2877 ice_debug(hw, ICE_DBG_SW, "more ports than expected\n");
2878 status = ICE_ERR_CFG;
2881 ice_init_port_info(hw->port_info,
2882 vsi_port_num, res_type, swid,
2890 } while (req_desc && !status);
2898 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2899 * @hw: pointer to the hardware structure
2900 * @fi: filter info structure to fill/update
2902 * This helper function populates the lb_en and lan_en elements of the provided
2903 * ice_fltr_info struct using the switch's type and characteristics of the
2904 * switch rule being configured.
2906 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
2908 if ((fi->flag & ICE_FLTR_RX) &&
2909 (fi->fltr_act == ICE_FWD_TO_VSI ||
2910 fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
2911 fi->lkup_type == ICE_SW_LKUP_LAST)
2915 if ((fi->flag & ICE_FLTR_TX) &&
2916 (fi->fltr_act == ICE_FWD_TO_VSI ||
2917 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2918 fi->fltr_act == ICE_FWD_TO_Q ||
2919 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2920 /* Setting LB for prune actions will result in replicated
2921 * packets to the internal switch that will be dropped.
2923 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2926 /* Set lan_en to TRUE if
2927 * 1. The switch is a VEB AND
2929 * 2.1 The lookup is a directional lookup like ethertype,
2930 * promiscuous, ethertype-MAC, promiscuous-VLAN
2931 * and default-port OR
2932 * 2.2 The lookup is VLAN, OR
2933 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2934 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
2938 * The switch is a VEPA.
2940 * In all other cases, the LAN enable has to be set to false.
2943 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2944 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
2945 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2946 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2947 fi->lkup_type == ICE_SW_LKUP_DFLT ||
2948 fi->lkup_type == ICE_SW_LKUP_VLAN ||
2949 (fi->lkup_type == ICE_SW_LKUP_MAC &&
2950 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
2951 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
2952 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
2961 * ice_fill_sw_rule - Helper function to fill switch rule structure
2962 * @hw: pointer to the hardware structure
2963 * @f_info: entry containing packet forwarding information
2964 * @s_rule: switch rule structure to be filled in based on mac_entry
2965 * @opc: switch rules population command type - pass in the command opcode
2968 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
2969 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
2971 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
2979 if (opc == ice_aqc_opc_remove_sw_rules) {
2980 s_rule->pdata.lkup_tx_rx.act = 0;
2981 s_rule->pdata.lkup_tx_rx.index =
2982 CPU_TO_LE16(f_info->fltr_rule_id);
2983 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
2987 eth_hdr_sz = sizeof(dummy_eth_header);
2988 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
2990 /* initialize the ether header with a dummy header */
2991 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
2992 ice_fill_sw_info(hw, f_info);
2994 switch (f_info->fltr_act) {
2995 case ICE_FWD_TO_VSI:
2996 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
2997 ICE_SINGLE_ACT_VSI_ID_M;
2998 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2999 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3000 ICE_SINGLE_ACT_VALID_BIT;
3002 case ICE_FWD_TO_VSI_LIST:
3003 act |= ICE_SINGLE_ACT_VSI_LIST;
3004 act |= (f_info->fwd_id.vsi_list_id <<
3005 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
3006 ICE_SINGLE_ACT_VSI_LIST_ID_M;
3007 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
3008 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3009 ICE_SINGLE_ACT_VALID_BIT;
3012 act |= ICE_SINGLE_ACT_TO_Q;
3013 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3014 ICE_SINGLE_ACT_Q_INDEX_M;
3016 case ICE_DROP_PACKET:
3017 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
3018 ICE_SINGLE_ACT_VALID_BIT;
3020 case ICE_FWD_TO_QGRP:
3021 q_rgn = f_info->qgrp_size > 0 ?
3022 (u8)ice_ilog2(f_info->qgrp_size) : 0;
3023 act |= ICE_SINGLE_ACT_TO_Q;
3024 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3025 ICE_SINGLE_ACT_Q_INDEX_M;
3026 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
3027 ICE_SINGLE_ACT_Q_REGION_M;
3034 act |= ICE_SINGLE_ACT_LB_ENABLE;
3036 act |= ICE_SINGLE_ACT_LAN_ENABLE;
3038 switch (f_info->lkup_type) {
3039 case ICE_SW_LKUP_MAC:
3040 daddr = f_info->l_data.mac.mac_addr;
3042 case ICE_SW_LKUP_VLAN:
3043 vlan_id = f_info->l_data.vlan.vlan_id;
3044 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
3045 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
3046 act |= ICE_SINGLE_ACT_PRUNE;
3047 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
3050 case ICE_SW_LKUP_ETHERTYPE_MAC:
3051 daddr = f_info->l_data.ethertype_mac.mac_addr;
3053 case ICE_SW_LKUP_ETHERTYPE:
3054 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
3055 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
3057 case ICE_SW_LKUP_MAC_VLAN:
3058 daddr = f_info->l_data.mac_vlan.mac_addr;
3059 vlan_id = f_info->l_data.mac_vlan.vlan_id;
3061 case ICE_SW_LKUP_PROMISC_VLAN:
3062 vlan_id = f_info->l_data.mac_vlan.vlan_id;
3064 case ICE_SW_LKUP_PROMISC:
3065 daddr = f_info->l_data.mac_vlan.mac_addr;
3071 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
3072 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
3073 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
3075 /* Recipe set depending on lookup type */
3076 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
3077 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
3078 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
3081 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
3082 ICE_NONDMA_TO_NONDMA);
3084 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
3085 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
3086 *off = CPU_TO_BE16(vlan_id);
3089 /* Create the switch rule with the final dummy Ethernet header */
3090 if (opc != ice_aqc_opc_update_sw_rules)
3091 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
3095 * ice_add_marker_act
3096 * @hw: pointer to the hardware structure
3097 * @m_ent: the management entry for which sw marker needs to be added
3098 * @sw_marker: sw marker to tag the Rx descriptor with
3099 * @l_id: large action resource ID
3101 * Create a large action to hold software marker and update the switch rule
3102 * entry pointed by m_ent with newly created large action
3104 static enum ice_status
3105 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3106 u16 sw_marker, u16 l_id)
3108 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
3109 /* For software marker we need 3 large actions
3110 * 1. FWD action: FWD TO VSI or VSI LIST
3111 * 2. GENERIC VALUE action to hold the profile ID
3112 * 3. GENERIC VALUE action to hold the software marker ID
3114 const u16 num_lg_acts = 3;
3115 enum ice_status status;
3121 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3122 return ICE_ERR_PARAM;
3124 /* Create two back-to-back switch rules and submit them to the HW using
3125 * one memory buffer:
3129 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
3130 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3131 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3133 return ICE_ERR_NO_MEMORY;
3135 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3137 /* Fill in the first switch rule i.e. large action */
3138 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3139 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3140 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
3142 /* First action VSI forwarding or VSI list forwarding depending on how
3145 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
3146 m_ent->fltr_info.fwd_id.hw_vsi_id;
3148 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3149 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
3150 if (m_ent->vsi_count > 1)
3151 act |= ICE_LG_ACT_VSI_LIST;
3152 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3154 /* Second action descriptor type */
3155 act = ICE_LG_ACT_GENERIC;
3157 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
3158 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3160 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
3161 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
3163 /* Third action Marker value */
3164 act |= ICE_LG_ACT_GENERIC;
3165 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
3166 ICE_LG_ACT_GENERIC_VALUE_M;
3168 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
3170 /* call the fill switch rule to fill the lookup Tx Rx structure */
3171 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3172 ice_aqc_opc_update_sw_rules);
3174 /* Update the action to point to the large action ID */
3175 rx_tx->pdata.lkup_tx_rx.act =
3176 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
3177 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
3178 ICE_SINGLE_ACT_PTR_VAL_M));
3180 /* Use the filter rule ID of the previously created rule with single
3181 * act. Once the update happens, hardware will treat this as large
3184 rx_tx->pdata.lkup_tx_rx.index =
3185 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
3187 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3188 ice_aqc_opc_update_sw_rules, NULL);
3190 m_ent->lg_act_idx = l_id;
3191 m_ent->sw_marker_id = sw_marker;
3194 ice_free(hw, lg_act);
3199 * ice_add_counter_act - add/update filter rule with counter action
3200 * @hw: pointer to the hardware structure
3201 * @m_ent: the management entry for which counter needs to be added
3202 * @counter_id: VLAN counter ID returned as part of allocate resource
3203 * @l_id: large action resource ID
3205 static enum ice_status
3206 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3207 u16 counter_id, u16 l_id)
3209 struct ice_aqc_sw_rules_elem *lg_act;
3210 struct ice_aqc_sw_rules_elem *rx_tx;
3211 enum ice_status status;
3212 /* 2 actions will be added while adding a large action counter */
3213 const int num_acts = 2;
3220 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3221 return ICE_ERR_PARAM;
3223 /* Create two back-to-back switch rules and submit them to the HW using
3224 * one memory buffer:
3228 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
3229 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3230 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3232 return ICE_ERR_NO_MEMORY;
3234 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3236 /* Fill in the first switch rule i.e. large action */
3237 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3238 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3239 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
3241 /* First action VSI forwarding or VSI list forwarding depending on how
3244 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
3245 m_ent->fltr_info.fwd_id.hw_vsi_id;
3247 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3248 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
3249 ICE_LG_ACT_VSI_LIST_ID_M;
3250 if (m_ent->vsi_count > 1)
3251 act |= ICE_LG_ACT_VSI_LIST;
3252 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3254 /* Second action counter ID */
3255 act = ICE_LG_ACT_STAT_COUNT;
3256 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
3257 ICE_LG_ACT_STAT_COUNT_M;
3258 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3260 /* call the fill switch rule to fill the lookup Tx Rx structure */
3261 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3262 ice_aqc_opc_update_sw_rules);
3264 act = ICE_SINGLE_ACT_PTR;
3265 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
3266 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
3268 /* Use the filter rule ID of the previously created rule with single
3269 * act. Once the update happens, hardware will treat this as large
3272 f_rule_id = m_ent->fltr_info.fltr_rule_id;
3273 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
3275 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3276 ice_aqc_opc_update_sw_rules, NULL);
3278 m_ent->lg_act_idx = l_id;
3279 m_ent->counter_index = counter_id;
3282 ice_free(hw, lg_act);
3287 * ice_create_vsi_list_map
3288 * @hw: pointer to the hardware structure
3289 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
3290 * @num_vsi: number of VSI handles in the array
3291 * @vsi_list_id: VSI list ID generated as part of allocate resource
3293 * Helper function to create a new entry of VSI list ID to VSI mapping
3294 * using the given VSI list ID
3296 static struct ice_vsi_list_map_info *
3297 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3300 struct ice_switch_info *sw = hw->switch_info;
3301 struct ice_vsi_list_map_info *v_map;
3304 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
3309 v_map->vsi_list_id = vsi_list_id;
3311 for (i = 0; i < num_vsi; i++)
3312 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
3314 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
3319 * ice_update_vsi_list_rule
3320 * @hw: pointer to the hardware structure
3321 * @vsi_handle_arr: array of VSI handles to form a VSI list
3322 * @num_vsi: number of VSI handles in the array
3323 * @vsi_list_id: VSI list ID generated as part of allocate resource
3324 * @remove: Boolean value to indicate if this is a remove action
3325 * @opc: switch rules population command type - pass in the command opcode
3326 * @lkup_type: lookup type of the filter
3328 * Call AQ command to add a new switch rule or update existing switch rule
3329 * using the given VSI list ID
3331 static enum ice_status
3332 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3333 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
3334 enum ice_sw_lkup_type lkup_type)
3336 struct ice_aqc_sw_rules_elem *s_rule;
3337 enum ice_status status;
3343 return ICE_ERR_PARAM;
3345 if (lkup_type == ICE_SW_LKUP_MAC ||
3346 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
3347 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3348 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3349 lkup_type == ICE_SW_LKUP_PROMISC ||
3350 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3351 lkup_type == ICE_SW_LKUP_LAST)
3352 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
3353 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
3354 else if (lkup_type == ICE_SW_LKUP_VLAN)
3355 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
3356 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
3358 return ICE_ERR_PARAM;
3360 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
3361 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3363 return ICE_ERR_NO_MEMORY;
3364 for (i = 0; i < num_vsi; i++) {
3365 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
3366 status = ICE_ERR_PARAM;
3369 /* AQ call requires hw_vsi_id(s) */
3370 s_rule->pdata.vsi_list.vsi[i] =
3371 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
3374 s_rule->type = CPU_TO_LE16(rule_type);
3375 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
3376 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
3378 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
3381 ice_free(hw, s_rule);
3386 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
3387 * @hw: pointer to the HW struct
3388 * @vsi_handle_arr: array of VSI handles to form a VSI list
3389 * @num_vsi: number of VSI handles in the array
3390 * @vsi_list_id: stores the ID of the VSI list to be created
3391 * @lkup_type: switch rule filter's lookup type
3393 static enum ice_status
3394 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3395 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
3397 enum ice_status status;
3399 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
3400 ice_aqc_opc_alloc_res);
3404 /* Update the newly created VSI list to include the specified VSIs */
3405 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
3406 *vsi_list_id, false,
3407 ice_aqc_opc_add_sw_rules, lkup_type);
3411 * ice_create_pkt_fwd_rule
3412 * @hw: pointer to the hardware structure
3413 * @recp_list: corresponding filter management list
3414 * @f_entry: entry containing packet forwarding information
3416 * Create switch rule with given filter information and add an entry
3417 * to the corresponding filter management list to track this switch rule
3420 static enum ice_status
3421 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3422 struct ice_fltr_list_entry *f_entry)
3424 struct ice_fltr_mgmt_list_entry *fm_entry;
3425 struct ice_aqc_sw_rules_elem *s_rule;
3426 enum ice_status status;
3428 s_rule = (struct ice_aqc_sw_rules_elem *)
3429 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
3431 return ICE_ERR_NO_MEMORY;
3432 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3433 ice_malloc(hw, sizeof(*fm_entry));
3435 status = ICE_ERR_NO_MEMORY;
3436 goto ice_create_pkt_fwd_rule_exit;
3439 fm_entry->fltr_info = f_entry->fltr_info;
3441 /* Initialize all the fields for the management entry */
3442 fm_entry->vsi_count = 1;
3443 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
3444 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
3445 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
3447 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
3448 ice_aqc_opc_add_sw_rules);
3450 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3451 ice_aqc_opc_add_sw_rules, NULL);
3453 ice_free(hw, fm_entry);
3454 goto ice_create_pkt_fwd_rule_exit;
3457 f_entry->fltr_info.fltr_rule_id =
3458 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3459 fm_entry->fltr_info.fltr_rule_id =
3460 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3462 /* The book keeping entries will get removed when base driver
3463 * calls remove filter AQ command
3465 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
3467 ice_create_pkt_fwd_rule_exit:
3468 ice_free(hw, s_rule);
3473 * ice_update_pkt_fwd_rule
3474 * @hw: pointer to the hardware structure
3475 * @f_info: filter information for switch rule
3477 * Call AQ command to update a previously created switch rule with a
3480 static enum ice_status
3481 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
3483 struct ice_aqc_sw_rules_elem *s_rule;
3484 enum ice_status status;
3486 s_rule = (struct ice_aqc_sw_rules_elem *)
3487 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
3489 return ICE_ERR_NO_MEMORY;
3491 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
3493 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
3495 /* Update switch rule with new rule set to forward VSI list */
3496 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3497 ice_aqc_opc_update_sw_rules, NULL);
3499 ice_free(hw, s_rule);
3504 * ice_update_sw_rule_bridge_mode
3505 * @hw: pointer to the HW struct
3507 * Updates unicast switch filter rules based on VEB/VEPA mode
3509 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
3511 struct ice_switch_info *sw = hw->switch_info;
3512 struct ice_fltr_mgmt_list_entry *fm_entry;
3513 enum ice_status status = ICE_SUCCESS;
3514 struct LIST_HEAD_TYPE *rule_head;
3515 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
/* Walk only the MAC-lookup recipe's rule list under its lock */
3517 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3518 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3520 ice_acquire_lock(rule_lock);
3521 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
3523 struct ice_fltr_info *fi = &fm_entry->fltr_info;
3524 u8 *addr = fi->l_data.mac.mac_addr;
3526 /* Update unicast Tx rules to reflect the selected
3529 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
3530 (fi->fltr_act == ICE_FWD_TO_VSI ||
3531 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
3532 fi->fltr_act == ICE_FWD_TO_Q ||
3533 fi->fltr_act == ICE_FWD_TO_QGRP)) {
3534 status = ice_update_pkt_fwd_rule(hw, fi);
/* NOTE(review): embedded numbers skip 3535-3539; the error-break on a failed
 * update and the loop's closing braces appear elided from this listing.
 */
3540 ice_release_lock(rule_lock);
3546 * ice_add_update_vsi_list
3547 * @hw: pointer to the hardware structure
3548 * @m_entry: pointer to current filter management list entry
3549 * @cur_fltr: filter information from the book keeping entry
3550 * @new_fltr: filter information with the new VSI to be added
3552 * Call AQ command to add or update previously created VSI list with new VSI.
3554 * Helper function to do book keeping associated with adding filter information
3555 * The algorithm to do the book keeping is described below :
3556 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
3557 * if only one VSI has been added till now
3558 * Allocate a new VSI list and add two VSIs
3559 * to this list using switch rule command
3560 * Update the previously created switch rule with the
3561 * newly created VSI list ID
3562 * if a VSI list was previously created
3563 * Add the new VSI to the previously created VSI list set
3564 * using the update switch rule command
3566 static enum ice_status
3567 ice_add_update_vsi_list(struct ice_hw *hw,
3568 struct ice_fltr_mgmt_list_entry *m_entry,
3569 struct ice_fltr_info *cur_fltr,
3570 struct ice_fltr_info *new_fltr)
3572 enum ice_status status = ICE_SUCCESS;
3573 u16 vsi_list_id = 0;
/* Queue/queue-group forwarding cannot be combined into a VSI list */
3575 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
3576 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
3577 return ICE_ERR_NOT_IMPL;
3579 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
3580 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
3581 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
3582 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
3583 return ICE_ERR_NOT_IMPL;
3585 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
3586 /* Only one entry existed in the mapping and it was not already
3587 * a part of a VSI list. So, create a VSI list with the old and
3590 struct ice_fltr_info tmp_fltr;
3591 u16 vsi_handle_arr[2];
3593 /* A rule already exists with the new VSI being added */
3594 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
3595 return ICE_ERR_ALREADY_EXISTS;
3597 vsi_handle_arr[0] = cur_fltr->vsi_handle;
3598 vsi_handle_arr[1] = new_fltr->vsi_handle;
3599 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3601 new_fltr->lkup_type);
/* NOTE(review): numbers skip 3600/3602-3604; the &vsi_list_id output arg and
 * the status check after list creation appear elided from this listing.
 */
3605 tmp_fltr = *new_fltr;
3606 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
3607 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3608 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3609 /* Update the previous switch rule of "MAC forward to VSI" to
3610 * "MAC fwd to VSI list"
3612 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Book keeping: the managed entry now forwards to the new VSI list */
3616 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
3617 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3618 m_entry->vsi_list_info =
3619 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3622 /* If this entry was large action then the large action needs
3623 * to be updated to point to FWD to VSI list
3625 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
3627 ice_add_marker_act(hw, m_entry,
3628 m_entry->sw_marker_id,
3629 m_entry->lg_act_idx);
3631 u16 vsi_handle = new_fltr->vsi_handle;
3632 enum ice_adminq_opc opcode;
3634 if (!m_entry->vsi_list_info)
/* NOTE(review): the error return bodies for the two guards above (3635-3636,
 * 3639-3640) are elided in this listing.
 */
3637 /* A rule already exists with the new VSI being added */
3638 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
3641 /* Update the previously created VSI list set with
3642 * the new VSI ID passed in
3644 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
3645 opcode = ice_aqc_opc_update_sw_rules;
3647 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
3648 vsi_list_id, false, opcode,
3649 new_fltr->lkup_type);
3650 /* update VSI list mapping info with new VSI ID */
3652 ice_set_bit(vsi_handle,
3653 m_entry->vsi_list_info->vsi_map);
/* NOTE(review): success guard around ice_set_bit and around the vsi_count
 * increment below, plus the final return, are elided in this listing.
 */
3656 m_entry->vsi_count++;
3661 * ice_find_rule_entry - Search a rule entry
3662 * @list_head: head of rule list
3663 * @f_info: rule information
3665 * Helper function to search for a given rule entry
3666 * Returns pointer to entry storing the rule if found
3668 static struct ice_fltr_mgmt_list_entry *
3669 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
3670 struct ice_fltr_info *f_info)
3672 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
/* Match on lookup data (l_data) AND direction flag; first match wins */
3674 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3676 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3677 sizeof(f_info->l_data)) &&
3678 f_info->flag == list_itr->fltr_info.flag) {
/* NOTE(review): the "ret = list_itr; break;" body and the final
 * "return ret;" (lines 3679-3684) are elided from this listing.
 */
3687 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
3688 * @recp_list: VSI lists needs to be searched
3689 * @vsi_handle: VSI handle to be found in VSI list
3690 * @vsi_list_id: VSI list ID found containing vsi_handle
3692 * Helper function to search a VSI list with single entry containing given VSI
3693 * handle element. This can be extended further to search VSI list with more
3694 * than 1 vsi_count. Returns pointer to VSI list entry if found.
3696 static struct ice_vsi_list_map_info *
3697 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
3700 struct ice_vsi_list_map_info *map_info = NULL;
3701 struct LIST_HEAD_TYPE *list_head;
3703 list_head = &recp_list->filt_rules;
/* Advanced-rule recipes store a different entry type; branch on adv_rule */
3704 if (recp_list->adv_rule) {
3705 struct ice_adv_fltr_mgmt_list_entry *list_itr;
3707 LIST_FOR_EACH_ENTRY(list_itr, list_head,
3708 ice_adv_fltr_mgmt_list_entry,
3710 if (list_itr->vsi_list_info) {
3711 map_info = list_itr->vsi_list_info;
3712 if (ice_is_bit_set(map_info->vsi_map,
3714 *vsi_list_id = map_info->vsi_list_id;
/* NOTE(review): the "return map_info;" on match and closing braces
 * (3713, 3715-3719) are elided from this listing.
 */
3720 struct ice_fltr_mgmt_list_entry *list_itr;
3722 LIST_FOR_EACH_ENTRY(list_itr, list_head,
3723 ice_fltr_mgmt_list_entry,
/* Non-advanced path: only consider entries with exactly one VSI */
3725 if (list_itr->vsi_count == 1 &&
3726 list_itr->vsi_list_info) {
3727 map_info = list_itr->vsi_list_info;
3728 if (ice_is_bit_set(map_info->vsi_map,
3730 *vsi_list_id = map_info->vsi_list_id;
/* NOTE(review): matching return, closing braces, and the trailing
 * "return NULL;" (3731-3737) are elided from this listing.
 */
3740 * ice_add_rule_internal - add rule for a given lookup type
3741 * @hw: pointer to the hardware structure
3742 * @recp_list: recipe list for which rule has to be added
3743 * @lport: logic port number on which function add rule
3744 * @f_entry: structure containing MAC forwarding information
3746 * Adds or updates the rule lists for a given recipe
3748 static enum ice_status
3749 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3750 u8 lport, struct ice_fltr_list_entry *f_entry)
3752 struct ice_fltr_info *new_fltr, *cur_fltr;
3753 struct ice_fltr_mgmt_list_entry *m_entry;
3754 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3755 enum ice_status status = ICE_SUCCESS;
3757 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3758 return ICE_ERR_PARAM;
3760 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
3761 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
3762 f_entry->fltr_info.fwd_id.hw_vsi_id =
3763 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3765 rule_lock = &recp_list->filt_rule_lock;
3767 ice_acquire_lock(rule_lock);
3768 new_fltr = &f_entry->fltr_info;
/* Rule source: the logical port for Rx rules, the HW VSI num for Tx */
3769 if (new_fltr->flag & ICE_FLTR_RX)
3770 new_fltr->src = lport;
3771 else if (new_fltr->flag & ICE_FLTR_TX)
3773 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
/* NOTE(review): line 3772 ("new_fltr->src =") is elided in this listing. */
3775 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
/* No existing entry: create a fresh fwd rule; otherwise fold the new
 * VSI into the existing entry's VSI list below.
 * NOTE(review): the "if (!m_entry) {" guard (3776) is elided here.
 */
3777 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3778 goto exit_add_rule_internal;
3781 cur_fltr = &m_entry->fltr_info;
3782 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
3784 exit_add_rule_internal:
3785 ice_release_lock(rule_lock);
3790 * ice_remove_vsi_list_rule
3791 * @hw: pointer to the hardware structure
3792 * @vsi_list_id: VSI list ID generated as part of allocate resource
3793 * @lkup_type: switch rule filter lookup type
3795 * The VSI list should be emptied before this function is called to remove the
3798 static enum ice_status
3799 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
3800 enum ice_sw_lkup_type lkup_type)
3802 /* Free the vsi_list resource that we allocated. It is assumed that the
3803 * list is empty at this point.
3805 return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
3806 ice_aqc_opc_free_res);
3810 * ice_rem_update_vsi_list
3811 * @hw: pointer to the hardware structure
3812 * @vsi_handle: VSI handle of the VSI to remove
3813 * @fm_list: filter management entry for which the VSI list management needs to
3816 static enum ice_status
3817 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
3818 struct ice_fltr_mgmt_list_entry *fm_list)
3820 enum ice_sw_lkup_type lkup_type;
3821 enum ice_status status = ICE_SUCCESS;
/* NOTE(review): local "u16 vsi_list_id;" declaration (3822) is elided. */
3824 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
3825 fm_list->vsi_count == 0)
3826 return ICE_ERR_PARAM;
3828 /* A rule with the VSI being removed does not exist */
3829 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
3830 return ICE_ERR_DOES_NOT_EXIST;
3832 lkup_type = fm_list->fltr_info.lkup_type;
3833 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
/* Ask FW to drop this VSI from the list (remove flag = true) */
3834 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
3835 ice_aqc_opc_update_sw_rules,
/* NOTE(review): trailing lkup_type arg and status check (3836-3839)
 * are elided from this listing.
 */
3840 fm_list->vsi_count--;
3841 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
/* If one VSI remains (non-VLAN), collapse the rule back to a direct
 * FWD_TO_VSI on the surviving VSI so the list can be freed.
 */
3843 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
3844 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
3845 struct ice_vsi_list_map_info *vsi_list_info =
3846 fm_list->vsi_list_info;
3849 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
3851 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
3852 return ICE_ERR_OUT_OF_RANGE;
3854 /* Make sure VSI list is empty before removing it below */
3855 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
3857 ice_aqc_opc_update_sw_rules,
3862 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
3863 tmp_fltr_info.fwd_id.hw_vsi_id =
3864 ice_get_hw_vsi_num(hw, rem_vsi_handle);
3865 tmp_fltr_info.vsi_handle = rem_vsi_handle;
3866 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
3868 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
3869 tmp_fltr_info.fwd_id.hw_vsi_id, status);
/* NOTE(review): the "if (status)" guard before this ice_debug and its
 * return (3867, 3870-3872) are elided from this listing.
 */
3873 fm_list->fltr_info = tmp_fltr_info;
3876 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
3877 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
3878 struct ice_vsi_list_map_info *vsi_list_info =
3879 fm_list->vsi_list_info;
3881 /* Remove the VSI list since it is no longer used */
3882 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
3884 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
3885 vsi_list_id, status);
3889 LIST_DEL(&vsi_list_info->list_entry);
3890 ice_free(hw, vsi_list_info);
3891 fm_list->vsi_list_info = NULL;
3898 * ice_remove_rule_internal - Remove a filter rule of a given type
3900 * @hw: pointer to the hardware structure
3901 * @recp_list: recipe list for which the rule needs to removed
3902 * @f_entry: rule entry containing filter information
3904 static enum ice_status
3905 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3906 struct ice_fltr_list_entry *f_entry)
3908 struct ice_fltr_mgmt_list_entry *list_elem;
3909 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3910 enum ice_status status = ICE_SUCCESS;
3911 bool remove_rule = false;
3914 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3915 return ICE_ERR_PARAM;
3916 f_entry->fltr_info.fwd_id.hw_vsi_id =
3917 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3919 rule_lock = &recp_list->filt_rule_lock;
3920 ice_acquire_lock(rule_lock);
3921 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
3922 &f_entry->fltr_info);
/* NOTE(review): the "if (!list_elem) {" guard (3923) is elided here. */
3924 status = ICE_ERR_DOES_NOT_EXIST;
/* Decide whether the HW rule itself must be removed, or only the VSI
 * list bookkeeping adjusted.
 */
3928 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
3930 } else if (!list_elem->vsi_list_info) {
3931 status = ICE_ERR_DOES_NOT_EXIST;
3933 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
3934 /* a ref_cnt > 1 indicates that the vsi_list is being
3935 * shared by multiple rules. Decrement the ref_cnt and
3936 * remove this rule, but do not modify the list, as it
3937 * is in-use by other rules.
3939 list_elem->vsi_list_info->ref_cnt--;
3942 /* a ref_cnt of 1 indicates the vsi_list is only used
3943 * by one rule. However, the original removal request is only
3944 * for a single VSI. Update the vsi_list first, and only
3945 * remove the rule if there are no further VSIs in this list.
3947 vsi_handle = f_entry->fltr_info.vsi_handle;
3948 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
3951 /* if VSI count goes to zero after updating the VSI list */
3952 if (list_elem->vsi_count == 0)
/* NOTE(review): "remove_rule = true;" assignments in the branches above
 * (3929, 3940, 3953) are elided from this listing.
 */
3957 /* Remove the lookup rule */
3958 struct ice_aqc_sw_rules_elem *s_rule;
3960 s_rule = (struct ice_aqc_sw_rules_elem *)
3961 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
3963 status = ICE_ERR_NO_MEMORY;
3967 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3968 ice_aqc_opc_remove_sw_rules);
3970 status = ice_aq_sw_rules(hw, s_rule,
3971 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
3972 ice_aqc_opc_remove_sw_rules, NULL);
3974 /* Remove a book keeping from the list */
3975 ice_free(hw, s_rule);
/* NOTE(review): the status check before unlinking (3976-3979) is elided. */
3980 LIST_DEL(&list_elem->list_entry);
3981 ice_free(hw, list_elem);
3984 ice_release_lock(rule_lock);
3989 * ice_aq_get_res_alloc - get allocated resources
3990 * @hw: pointer to the HW struct
3991 * @num_entries: pointer to u16 to store the number of resource entries returned
3992 * @buf: pointer to buffer
3993 * @buf_size: size of buf
3994 * @cd: pointer to command details structure or NULL
3996 * The caller-supplied buffer must be large enough to store the resource
3997 * information for all resource types. Each resource type is an
3998 * ice_aqc_get_res_resp_elem structure.
4001 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
4002 struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
4003 struct ice_sq_cd *cd)
4005 struct ice_aqc_get_res_alloc *resp;
4006 enum ice_status status;
4007 struct ice_aq_desc desc;
/* NOTE(review): the "if (!buf)" guard (4009) is elided in this listing. */
4010 return ICE_ERR_BAD_PTR;
4012 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
4013 return ICE_ERR_INVAL_SIZE;
4015 resp = &desc.params.get_res;
4017 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
4018 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* num_entries is optional; only filled on AQ success */
4020 if (!status && num_entries)
4021 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
4027 * ice_aq_get_res_descs - get allocated resource descriptors
4028 * @hw: pointer to the hardware structure
4029 * @num_entries: number of resource entries in buffer
4030 * @buf: structure to hold response data buffer
4031 * @buf_size: size of buffer
4032 * @res_type: resource type
4033 * @res_shared: is resource shared
4034 * @desc_id: input - first desc ID to start; output - next desc ID
4035 * @cd: pointer to command details structure or NULL
4038 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
4039 struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
4040 bool res_shared, u16 *desc_id, struct ice_sq_cd *cd)
4042 struct ice_aqc_get_allocd_res_desc *cmd;
4043 struct ice_aq_desc desc;
4044 enum ice_status status;
4046 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4048 cmd = &desc.params.get_res_desc;
/* NOTE(review): the "if (!buf)" guard (4050) is elided in this listing. */
4051 return ICE_ERR_PARAM;
/* Buffer must be exactly num_entries response elements */
4053 if (buf_size != (num_entries * sizeof(*buf)))
4054 return ICE_ERR_PARAM;
4056 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
4058 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
4059 ICE_AQC_RES_TYPE_M) | (res_shared ?
4060 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
4061 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
4063 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* NOTE(review): the "if (!status)" guard before this write-back (4064)
 * appears elided from this listing.
 */
4065 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
4071 * ice_add_mac_rule - Add a MAC address based filter rule
4072 * @hw: pointer to the hardware structure
4073 * @m_list: list of MAC addresses and forwarding information
4074 * @sw: pointer to switch info struct for which function add rule
4075 * @lport: logic port number on which function add rule
4077 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
4078 * multiple unicast addresses, the function assumes that all the
4079 * addresses are unique in a given add_mac call. It doesn't
4080 * check for duplicates in this case, removing duplicates from a given
4081 * list should be taken care of in the caller of this function.
4083 static enum ice_status
4084 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4085 struct ice_switch_info *sw, u8 lport)
4087 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
4088 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
4089 struct ice_fltr_list_entry *m_list_itr;
4090 struct LIST_HEAD_TYPE *rule_head;
4091 u16 total_elem_left, s_rule_size;
4092 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4093 enum ice_status status = ICE_SUCCESS;
4094 u16 num_unicast = 0;
/* NOTE(review): locals such as elem_sent/vsi_handle/hw_vsi_id (4095-4096)
 * appear elided from this listing; embedded line numbers skip throughout.
 */
4098 rule_lock = &recp_list->filt_rule_lock;
4099 rule_head = &recp_list->filt_rules;
/* Pass 1: validate each entry; count unicast MACs for a bulk add,
 * multicast (or shared-unicast) entries are added one-by-one here.
 */
4101 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4103 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
4107 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
4108 vsi_handle = m_list_itr->fltr_info.vsi_handle;
4109 if (!ice_is_vsi_valid(hw, vsi_handle))
4110 return ICE_ERR_PARAM;
4111 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4112 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
4113 /* update the src in case it is VSI num */
4114 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
4115 return ICE_ERR_PARAM;
4116 m_list_itr->fltr_info.src = hw_vsi_id;
4117 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
4118 IS_ZERO_ETHER_ADDR(add))
4119 return ICE_ERR_PARAM;
4120 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4121 /* Don't overwrite the unicast address */
4122 ice_acquire_lock(rule_lock);
4123 if (ice_find_rule_entry(rule_head,
4124 &m_list_itr->fltr_info)) {
4125 ice_release_lock(rule_lock);
4126 return ICE_ERR_ALREADY_EXISTS;
4128 ice_release_lock(rule_lock);
/* NOTE(review): "num_unicast++;" (4129) appears elided here. */
4130 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
4131 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
4132 m_list_itr->status =
4133 ice_add_rule_internal(hw, recp_list, lport,
4135 if (m_list_itr->status)
4136 return m_list_itr->status;
4140 ice_acquire_lock(rule_lock);
4141 /* Exit if no suitable entries were found for adding bulk switch rule */
4143 status = ICE_SUCCESS;
4144 goto ice_add_mac_exit;
4147 /* Allocate switch rule buffer for the bulk update for unicast */
4148 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
4149 s_rule = (struct ice_aqc_sw_rules_elem *)
4150 ice_calloc(hw, num_unicast, s_rule_size);
/* NOTE(review): the "if (!s_rule)" guard (4151) is elided here. */
4152 status = ICE_ERR_NO_MEMORY;
4153 goto ice_add_mac_exit;
/* Pass 2: serialize each unicast entry into the bulk rule buffer */
4157 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4159 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4160 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4162 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4163 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
4164 ice_aqc_opc_add_sw_rules);
4165 r_iter = (struct ice_aqc_sw_rules_elem *)
4166 ((u8 *)r_iter + s_rule_size);
4170 /* Call AQ bulk switch rule update for all unicast addresses */
4172 /* Call AQ switch rule in AQ_MAX chunk */
4173 for (total_elem_left = num_unicast; total_elem_left > 0;
4174 total_elem_left -= elem_sent) {
4175 struct ice_aqc_sw_rules_elem *entry = r_iter;
/* Each AQ send is capped by the max AQ buffer length */
4177 elem_sent = MIN_T(u8, total_elem_left,
4178 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
4179 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
4180 elem_sent, ice_aqc_opc_add_sw_rules,
4183 goto ice_add_mac_exit;
4184 r_iter = (struct ice_aqc_sw_rules_elem *)
4185 ((u8 *)r_iter + (elem_sent * s_rule_size));
4188 /* Fill up rule ID based on the value returned from FW */
4190 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4192 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4193 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4194 struct ice_fltr_mgmt_list_entry *fm_entry;
4196 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4197 f_info->fltr_rule_id =
4198 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
4199 f_info->fltr_act = ICE_FWD_TO_VSI;
4200 /* Create an entry to track this MAC address */
4201 fm_entry = (struct ice_fltr_mgmt_list_entry *)
4202 ice_malloc(hw, sizeof(*fm_entry));
4204 status = ICE_ERR_NO_MEMORY;
4205 goto ice_add_mac_exit;
4207 fm_entry->fltr_info = *f_info;
4208 fm_entry->vsi_count = 1;
4209 /* The book keeping entries will get removed when
4210 * base driver calls remove filter AQ command
4213 LIST_ADD(&fm_entry->list_entry, rule_head);
4214 r_iter = (struct ice_aqc_sw_rules_elem *)
4215 ((u8 *)r_iter + s_rule_size);
4220 ice_release_lock(rule_lock);
/* s_rule may be NULL if we jumped here before allocation succeeded;
 * NOTE(review): the "if (s_rule)" guard (4221) is elided in this listing.
 */
4222 ice_free(hw, s_rule);
4227 * ice_add_mac - Add a MAC address based filter rule
4228 * @hw: pointer to the hardware structure
4229 * @m_list: list of MAC addresses and forwarding information
4231 * Function add MAC rule for logical port from HW struct
4233 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
/* NOTE(review): the "if (!m_list || !hw)" guard (4235) is elided here,
 * matching the pattern used by the sibling ice_add_* wrappers below.
 */
4236 return ICE_ERR_PARAM;
4238 return ice_add_mac_rule(hw, m_list, hw->switch_info,
4239 hw->port_info->lport);
4243 * ice_add_vlan_internal - Add one VLAN based filter rule
4244 * @hw: pointer to the hardware structure
4245 * @recp_list: recipe list for which rule has to be added
4246 * @f_entry: filter entry containing one VLAN information
4248 static enum ice_status
4249 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4250 struct ice_fltr_list_entry *f_entry)
4252 struct ice_fltr_mgmt_list_entry *v_list_itr;
4253 struct ice_fltr_info *new_fltr, *cur_fltr;
4254 enum ice_sw_lkup_type lkup_type;
4255 u16 vsi_list_id = 0, vsi_handle;
4256 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4257 enum ice_status status = ICE_SUCCESS;
4259 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4260 return ICE_ERR_PARAM;
4262 f_entry->fltr_info.fwd_id.hw_vsi_id =
4263 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4264 new_fltr = &f_entry->fltr_info;
4266 /* VLAN ID should only be 12 bits */
4267 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
4268 return ICE_ERR_PARAM;
4270 if (new_fltr->src_id != ICE_SRC_ID_VSI)
4271 return ICE_ERR_PARAM;
4273 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
4274 lkup_type = new_fltr->lkup_type;
4275 vsi_handle = new_fltr->vsi_handle;
4276 rule_lock = &recp_list->filt_rule_lock;
4277 ice_acquire_lock(rule_lock);
4278 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
/* Case 1: no rule for this VLAN yet.
 * NOTE(review): the "if (!v_list_itr) {" guard (4279) is elided here.
 */
4280 struct ice_vsi_list_map_info *map_info = NULL;
4282 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
4283 /* All VLAN pruning rules use a VSI list. Check if
4284 * there is already a VSI list containing VSI that we
4285 * want to add. If found, use the same vsi_list_id for
4286 * this new VLAN rule or else create a new list.
4288 map_info = ice_find_vsi_list_entry(recp_list,
4292 status = ice_create_vsi_list_rule(hw,
/* NOTE(review): argument lines 4289-4291, 4293-4299 (vsi_handle,
 * &vsi_list_id, the "if (!map_info)" branch and status check) are
 * elided from this listing.
 */
4300 /* Convert the action to forwarding to a VSI list. */
4301 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
4302 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
4305 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
4307 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
/* NOTE(review): second argument and "if (!v_list_itr) {" (4308-4309)
 * are elided here.
 */
4310 status = ICE_ERR_DOES_NOT_EXIST;
4313 /* reuse VSI list for new rule and increment ref_cnt */
/* NOTE(review): the "if (map_info) {" guard (4314) is elided here. */
4315 v_list_itr->vsi_list_info = map_info;
4316 map_info->ref_cnt++;
4318 v_list_itr->vsi_list_info =
4319 ice_create_vsi_list_map(hw, &vsi_handle,
/* Case 2: rule exists and its VSI list is exclusively ours — just add
 * the new VSI to it.
 */
4323 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
4324 /* Update existing VSI list to add new VSI ID only if it used
4327 cur_fltr = &v_list_itr->fltr_info;
4328 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
4331 /* If VLAN rule exists and VSI list being used by this rule is
4332 * referenced by more than 1 VLAN rule. Then create a new VSI
4333 * list appending previous VSI with new VSI and update existing
4334 * VLAN rule to point to new VSI list ID
4336 struct ice_fltr_info tmp_fltr;
4337 u16 vsi_handle_arr[2];
/* NOTE(review): local "u16 cur_handle;" (4338) is elided here. */
4340 /* Current implementation only supports reusing VSI list with
4341 * one VSI count. We should never hit below condition
4343 if (v_list_itr->vsi_count > 1 &&
4344 v_list_itr->vsi_list_info->ref_cnt > 1) {
4345 ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
4346 status = ICE_ERR_CFG;
4351 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
4354 /* A rule already exists with the new VSI being added */
4355 if (cur_handle == vsi_handle) {
4356 status = ICE_ERR_ALREADY_EXISTS;
4360 vsi_handle_arr[0] = cur_handle;
4361 vsi_handle_arr[1] = vsi_handle;
4362 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
4363 &vsi_list_id, lkup_type);
4367 tmp_fltr = v_list_itr->fltr_info;
4368 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
4369 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
4370 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4371 /* Update the previous switch rule to a new VSI list which
4372 * includes current VSI that is requested
4374 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
4378 /* before overriding VSI list map info. decrement ref_cnt of
4381 v_list_itr->vsi_list_info->ref_cnt--;
4383 /* now update to newly created list */
4384 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
4385 v_list_itr->vsi_list_info =
4386 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4388 v_list_itr->vsi_count++;
4392 ice_release_lock(rule_lock);
4397 * ice_add_vlan_rule - Add VLAN based filter rule
4398 * @hw: pointer to the hardware structure
4399 * @v_list: list of VLAN entries and forwarding information
4400 * @sw: pointer to switch info struct for which function add rule
4402 static enum ice_status
4403 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4404 struct ice_switch_info *sw)
4406 struct ice_fltr_list_entry *v_list_itr;
4407 struct ice_sw_recipe *recp_list;
4409 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
/* Add each entry; per-entry status is recorded, first failure aborts */
4410 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
4412 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
4413 return ICE_ERR_PARAM;
4414 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
4415 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
4417 if (v_list_itr->status)
4418 return v_list_itr->status;
4424 * ice_add_vlan - Add a VLAN based filter rule
4425 * @hw: pointer to the hardware structure
4426 * @v_list: list of VLAN and forwarding information
4428 * Function add VLAN rule for logical port from HW struct
4430 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
/* NOTE(review): the "if (!v_list || !hw)" guard (4432) is elided here,
 * matching the pattern of the sibling wrappers.
 */
4433 return ICE_ERR_PARAM;
4435 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
4439 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
4440 * @hw: pointer to the hardware structure
4441 * @mv_list: list of MAC and VLAN filters
4442 * @sw: pointer to switch info struct for which function add rule
4443 * @lport: logic port number on which function add rule
4445 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
4446 * pruning bits enabled, then it is the responsibility of the caller to make
4447 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
4448 * VLAN won't be received on that VSI otherwise.
4450 static enum ice_status
4451 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
4452 struct ice_switch_info *sw, u8 lport)
4454 struct ice_fltr_list_entry *mv_list_itr;
4455 struct ice_sw_recipe *recp_list;
4457 if (!mv_list || !hw)
4458 return ICE_ERR_PARAM;
4460 recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
/* Add each MAC+VLAN entry as a Tx rule; abort on first failure */
4461 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
4463 enum ice_sw_lkup_type l_type =
4464 mv_list_itr->fltr_info.lkup_type;
4466 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4467 return ICE_ERR_PARAM;
4468 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
4469 mv_list_itr->status =
4470 ice_add_rule_internal(hw, recp_list, lport,
4472 if (mv_list_itr->status)
4473 return mv_list_itr->status;
4479 * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
4480 * @hw: pointer to the hardware structure
4481 * @mv_list: list of MAC VLAN addresses and forwarding information
4483 * Function add MAC VLAN rule for logical port from HW struct
4486 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4488 if (!mv_list || !hw)
4489 return ICE_ERR_PARAM;
4491 return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
4492 hw->port_info->lport);
4496 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
4497 * @hw: pointer to the hardware structure
4498 * @em_list: list of ether type MAC filter, MAC is optional
4499 * @sw: pointer to switch info struct for which function add rule
4500 * @lport: logic port number on which function add rule
4502 * This function requires the caller to populate the entries in
4503 * the filter list with the necessary fields (including flags to
4504 * indicate Tx or Rx rules).
4506 static enum ice_status
4507 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
4508 struct ice_switch_info *sw, u8 lport)
4510 struct ice_fltr_list_entry *em_list_itr;
4512 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
4514 struct ice_sw_recipe *recp_list;
4515 enum ice_sw_lkup_type l_type;
/* The entry's own lkup_type selects the recipe (ETHERTYPE or
 * ETHERTYPE_MAC); anything else is rejected.
 */
4517 l_type = em_list_itr->fltr_info.lkup_type;
4518 recp_list = &sw->recp_list[l_type];
4520 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
4521 l_type != ICE_SW_LKUP_ETHERTYPE)
4522 return ICE_ERR_PARAM;
4524 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
4527 if (em_list_itr->status)
4528 return em_list_itr->status;
4534 * ice_add_eth_mac - Add a ethertype based filter rule
4535 * @hw: pointer to the hardware structure
4536 * @em_list: list of ethertype and forwarding information
4538 * Function add ethertype rule for logical port from HW struct
4541 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
4543 if (!em_list || !hw)
4544 return ICE_ERR_PARAM;
4546 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
4547 hw->port_info->lport);
4551 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
4552 * @hw: pointer to the hardware structure
4553 * @em_list: list of ethertype or ethertype MAC entries
4554 * @sw: pointer to switch info struct for which function add rule
4556 static enum ice_status
4557 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
4558 struct ice_switch_info *sw)
4560 struct ice_fltr_list_entry *em_list_itr, *tmp;
/* SAFE iteration: ice_remove_rule_internal may unlink entries */
4562 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
4564 struct ice_sw_recipe *recp_list;
4565 enum ice_sw_lkup_type l_type;
4567 l_type = em_list_itr->fltr_info.lkup_type;
4569 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
4570 l_type != ICE_SW_LKUP_ETHERTYPE)
4571 return ICE_ERR_PARAM;
4573 recp_list = &sw->recp_list[l_type];
4574 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4576 if (em_list_itr->status)
4577 return em_list_itr->status;
4583 * ice_remove_eth_mac - remove a ethertype based filter rule
4584 * @hw: pointer to the hardware structure
4585 * @em_list: list of ethertype and forwarding information
4589 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
4591 if (!em_list || !hw)
4592 return ICE_ERR_PARAM;
4594 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
4598 * ice_rem_sw_rule_info
4599 * @hw: pointer to the hardware structure
4600 * @rule_head: pointer to the switch list structure that we want to delete
4603 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
4605 if (!LIST_EMPTY(rule_head)) {
4606 struct ice_fltr_mgmt_list_entry *entry;
4607 struct ice_fltr_mgmt_list_entry *tmp;
/* SAFE iteration because each entry is unlinked and freed in-loop */
4609 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
4610 ice_fltr_mgmt_list_entry, list_entry) {
4611 LIST_DEL(&entry->list_entry);
4612 ice_free(hw, entry);
4618 * ice_rem_adv_rule_info
4619 * @hw: pointer to the hardware structure
4620 * @rule_head: pointer to the switch list structure that we want to delete
4623 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
4625 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
4626 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
4628 if (LIST_EMPTY(rule_head))
4631 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
4632 ice_adv_fltr_mgmt_list_entry, list_entry) {
4633 LIST_DEL(&lst_itr->list_entry);
4634 ice_free(hw, lst_itr->lkups);
4635 ice_free(hw, lst_itr);
4640 * ice_rem_all_sw_rules_info
4641 * @hw: pointer to the hardware structure
4643 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
4645 struct ice_switch_info *sw = hw->switch_info;
4648 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4649 struct LIST_HEAD_TYPE *rule_head;
4651 rule_head = &sw->recp_list[i].filt_rules;
4652 if (!sw->recp_list[i].adv_rule)
4653 ice_rem_sw_rule_info(hw, rule_head);
4655 ice_rem_adv_rule_info(hw, rule_head);
4656 if (sw->recp_list[i].adv_rule &&
4657 LIST_EMPTY(&sw->recp_list[i].filt_rules))
4658 sw->recp_list[i].adv_rule = false;
4663 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
4664 * @pi: pointer to the port_info structure
4665 * @vsi_handle: VSI handle to set as default
4666 * @set: true to add the above mentioned switch rule, false to remove it
4667 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
4669 * add filter rule to set/unset given VSI as default VSI for the switch
4670 * (represented by swid)
4673 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
4676 struct ice_aqc_sw_rules_elem *s_rule;
4677 struct ice_fltr_info f_info;
4678 struct ice_hw *hw = pi->hw;
4679 enum ice_adminq_opc opcode;
4680 enum ice_status status;
4684 if (!ice_is_vsi_valid(hw, vsi_handle))
4685 return ICE_ERR_PARAM;
4686 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4688 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
4689 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
4691 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
4693 return ICE_ERR_NO_MEMORY;
4695 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
4697 f_info.lkup_type = ICE_SW_LKUP_DFLT;
4698 f_info.flag = direction;
4699 f_info.fltr_act = ICE_FWD_TO_VSI;
4700 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
4702 if (f_info.flag & ICE_FLTR_RX) {
4703 f_info.src = pi->lport;
4704 f_info.src_id = ICE_SRC_ID_LPORT;
4706 f_info.fltr_rule_id =
4707 pi->dflt_rx_vsi_rule_id;
4708 } else if (f_info.flag & ICE_FLTR_TX) {
4709 f_info.src_id = ICE_SRC_ID_VSI;
4710 f_info.src = hw_vsi_id;
4712 f_info.fltr_rule_id =
4713 pi->dflt_tx_vsi_rule_id;
4717 opcode = ice_aqc_opc_add_sw_rules;
4719 opcode = ice_aqc_opc_remove_sw_rules;
4721 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
4723 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
4724 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
4727 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4729 if (f_info.flag & ICE_FLTR_TX) {
4730 pi->dflt_tx_vsi_num = hw_vsi_id;
4731 pi->dflt_tx_vsi_rule_id = index;
4732 } else if (f_info.flag & ICE_FLTR_RX) {
4733 pi->dflt_rx_vsi_num = hw_vsi_id;
4734 pi->dflt_rx_vsi_rule_id = index;
4737 if (f_info.flag & ICE_FLTR_TX) {
4738 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
4739 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
4740 } else if (f_info.flag & ICE_FLTR_RX) {
4741 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
4742 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
4747 ice_free(hw, s_rule);
4752 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
4753 * @list_head: head of rule list
4754 * @f_info: rule information
4756 * Helper function to search for a unicast rule entry - this is to be used
4757 * to remove unicast MAC filter that is not shared with other VSIs on the
4760 * Returns pointer to entry storing the rule if found
4762 static struct ice_fltr_mgmt_list_entry *
4763 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
4764 struct ice_fltr_info *f_info)
4766 struct ice_fltr_mgmt_list_entry *list_itr;
4768 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4770 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4771 sizeof(f_info->l_data)) &&
4772 f_info->fwd_id.hw_vsi_id ==
4773 list_itr->fltr_info.fwd_id.hw_vsi_id &&
4774 f_info->flag == list_itr->fltr_info.flag)
4781 * ice_remove_mac_rule - remove a MAC based filter rule
4782 * @hw: pointer to the hardware structure
4783 * @m_list: list of MAC addresses and forwarding information
4784 * @recp_list: list from which function remove MAC address
4786 * This function removes either a MAC filter rule or a specific VSI from a
4787 * VSI list for a multicast MAC address.
4789 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
4790 * ice_add_mac. Caller should be aware that this call will only work if all
4791 * the entries passed into m_list were added previously. It will not attempt to
4792 * do a partial remove of entries that were found.
4794 static enum ice_status
4795 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4796 struct ice_sw_recipe *recp_list)
4798 struct ice_fltr_list_entry *list_itr, *tmp;
4799 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4802 return ICE_ERR_PARAM;
4804 rule_lock = &recp_list->filt_rule_lock;
4805 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
4807 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
4808 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
4811 if (l_type != ICE_SW_LKUP_MAC)
4812 return ICE_ERR_PARAM;
4814 vsi_handle = list_itr->fltr_info.vsi_handle;
4815 if (!ice_is_vsi_valid(hw, vsi_handle))
4816 return ICE_ERR_PARAM;
4818 list_itr->fltr_info.fwd_id.hw_vsi_id =
4819 ice_get_hw_vsi_num(hw, vsi_handle);
4820 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4821 /* Don't remove the unicast address that belongs to
4822 * another VSI on the switch, since it is not being
4825 ice_acquire_lock(rule_lock);
4826 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
4827 &list_itr->fltr_info)) {
4828 ice_release_lock(rule_lock);
4829 return ICE_ERR_DOES_NOT_EXIST;
4831 ice_release_lock(rule_lock);
4833 list_itr->status = ice_remove_rule_internal(hw, recp_list,
4835 if (list_itr->status)
4836 return list_itr->status;
4842 * ice_remove_mac - remove a MAC address based filter rule
4843 * @hw: pointer to the hardware structure
4844 * @m_list: list of MAC addresses and forwarding information
4847 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4849 struct ice_sw_recipe *recp_list;
4851 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4852 return ice_remove_mac_rule(hw, m_list, recp_list);
4856 * ice_remove_vlan_rule - Remove VLAN based filter rule
4857 * @hw: pointer to the hardware structure
4858 * @v_list: list of VLAN entries and forwarding information
4859 * @recp_list: list from which function remove VLAN
4861 static enum ice_status
4862 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4863 struct ice_sw_recipe *recp_list)
4865 struct ice_fltr_list_entry *v_list_itr, *tmp;
4867 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4869 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4871 if (l_type != ICE_SW_LKUP_VLAN)
4872 return ICE_ERR_PARAM;
4873 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4875 if (v_list_itr->status)
4876 return v_list_itr->status;
4882 * ice_remove_vlan - remove a VLAN address based filter rule
4883 * @hw: pointer to the hardware structure
4884 * @v_list: list of VLAN and forwarding information
4888 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4890 struct ice_sw_recipe *recp_list;
4893 return ICE_ERR_PARAM;
4895 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
4896 return ice_remove_vlan_rule(hw, v_list, recp_list);
4900 * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
4901 * @hw: pointer to the hardware structure
4902 * @v_list: list of MAC VLAN entries and forwarding information
4903 * @recp_list: list from which function remove MAC VLAN
4905 static enum ice_status
4906 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4907 struct ice_sw_recipe *recp_list)
4909 struct ice_fltr_list_entry *v_list_itr, *tmp;
4911 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4912 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4914 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4916 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4917 return ICE_ERR_PARAM;
4918 v_list_itr->status =
4919 ice_remove_rule_internal(hw, recp_list,
4921 if (v_list_itr->status)
4922 return v_list_itr->status;
4928 * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
4929 * @hw: pointer to the hardware structure
4930 * @mv_list: list of MAC VLAN and forwarding information
4933 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4935 struct ice_sw_recipe *recp_list;
4937 if (!mv_list || !hw)
4938 return ICE_ERR_PARAM;
4940 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4941 return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
4945 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
4946 * @fm_entry: filter entry to inspect
4947 * @vsi_handle: VSI handle to compare with filter info
4950 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
4952 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
4953 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
4954 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
4955 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
4960 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
4961 * @hw: pointer to the hardware structure
4962 * @vsi_handle: VSI handle to remove filters from
4963 * @vsi_list_head: pointer to the list to add entry to
4964 * @fi: pointer to fltr_info of filter entry to copy & add
4966 * Helper function, used when creating a list of filters to remove from
4967 * a specific VSI. The entry added to vsi_list_head is a COPY of the
4968 * original filter entry, with the exception of fltr_info.fltr_act and
4969 * fltr_info.fwd_id fields. These are set such that later logic can
4970 * extract which VSI to remove the fltr from, and pass on that information.
4972 static enum ice_status
4973 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4974 struct LIST_HEAD_TYPE *vsi_list_head,
4975 struct ice_fltr_info *fi)
4977 struct ice_fltr_list_entry *tmp;
4979 /* this memory is freed up in the caller function
4980 * once filters for this VSI are removed
4982 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
4984 return ICE_ERR_NO_MEMORY;
4986 tmp->fltr_info = *fi;
4988 /* Overwrite these fields to indicate which VSI to remove filter from,
4989 * so find and remove logic can extract the information from the
4990 * list entries. Note that original entries will still have proper
4993 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
4994 tmp->fltr_info.vsi_handle = vsi_handle;
4995 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4997 LIST_ADD(&tmp->list_entry, vsi_list_head);
5003 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
5004 * @hw: pointer to the hardware structure
5005 * @vsi_handle: VSI handle to remove filters from
5006 * @lkup_list_head: pointer to the list that has certain lookup type filters
5007 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
5009 * Locates all filters in lkup_list_head that are used by the given VSI,
5010 * and adds COPIES of those entries to vsi_list_head (intended to be used
5011 * to remove the listed filters).
5012 * Note that this means all entries in vsi_list_head must be explicitly
5013 * deallocated by the caller when done with list.
5015 static enum ice_status
5016 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5017 struct LIST_HEAD_TYPE *lkup_list_head,
5018 struct LIST_HEAD_TYPE *vsi_list_head)
5020 struct ice_fltr_mgmt_list_entry *fm_entry;
5021 enum ice_status status = ICE_SUCCESS;
5023 /* check to make sure VSI ID is valid and within boundary */
5024 if (!ice_is_vsi_valid(hw, vsi_handle))
5025 return ICE_ERR_PARAM;
5027 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
5028 ice_fltr_mgmt_list_entry, list_entry) {
5029 struct ice_fltr_info *fi;
5031 fi = &fm_entry->fltr_info;
5032 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
5035 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5044 * ice_determine_promisc_mask
5045 * @fi: filter info to parse
5047 * Helper function to determine which ICE_PROMISC_ mask corresponds
5048 * to given filter into.
5050 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
5052 u16 vid = fi->l_data.mac_vlan.vlan_id;
5053 u8 *macaddr = fi->l_data.mac.mac_addr;
5054 bool is_tx_fltr = false;
5055 u8 promisc_mask = 0;
5057 if (fi->flag == ICE_FLTR_TX)
5060 if (IS_BROADCAST_ETHER_ADDR(macaddr))
5061 promisc_mask |= is_tx_fltr ?
5062 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
5063 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
5064 promisc_mask |= is_tx_fltr ?
5065 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
5066 else if (IS_UNICAST_ETHER_ADDR(macaddr))
5067 promisc_mask |= is_tx_fltr ?
5068 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
5070 promisc_mask |= is_tx_fltr ?
5071 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
5073 return promisc_mask;
5077 * _ice_get_vsi_promisc - get promiscuous mode of given VSI
5078 * @hw: pointer to the hardware structure
5079 * @vsi_handle: VSI handle to retrieve info from
5080 * @promisc_mask: pointer to mask to be filled in
5081 * @vid: VLAN ID of promisc VLAN VSI
5082 * @sw: pointer to switch info struct for which function add rule
5084 static enum ice_status
5085 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5086 u16 *vid, struct ice_switch_info *sw)
5088 struct ice_fltr_mgmt_list_entry *itr;
5089 struct LIST_HEAD_TYPE *rule_head;
5090 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5092 if (!ice_is_vsi_valid(hw, vsi_handle))
5093 return ICE_ERR_PARAM;
5097 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
5098 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
5100 ice_acquire_lock(rule_lock);
5101 LIST_FOR_EACH_ENTRY(itr, rule_head,
5102 ice_fltr_mgmt_list_entry, list_entry) {
5103 /* Continue if this filter doesn't apply to this VSI or the
5104 * VSI ID is not in the VSI map for this filter
5106 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5109 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5111 ice_release_lock(rule_lock);
5117 * ice_get_vsi_promisc - get promiscuous mode of given VSI
5118 * @hw: pointer to the hardware structure
5119 * @vsi_handle: VSI handle to retrieve info from
5120 * @promisc_mask: pointer to mask to be filled in
5121 * @vid: VLAN ID of promisc VLAN VSI
5124 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5127 return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
5128 vid, hw->switch_info);
5132 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5133 * @hw: pointer to the hardware structure
5134 * @vsi_handle: VSI handle to retrieve info from
5135 * @promisc_mask: pointer to mask to be filled in
5136 * @vid: VLAN ID of promisc VLAN VSI
5137 * @sw: pointer to switch info struct for which function add rule
5139 static enum ice_status
5140 _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5141 u16 *vid, struct ice_switch_info *sw)
5143 struct ice_fltr_mgmt_list_entry *itr;
5144 struct LIST_HEAD_TYPE *rule_head;
5145 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5147 if (!ice_is_vsi_valid(hw, vsi_handle))
5148 return ICE_ERR_PARAM;
5152 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
5153 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
5155 ice_acquire_lock(rule_lock);
5156 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
5158 /* Continue if this filter doesn't apply to this VSI or the
5159 * VSI ID is not in the VSI map for this filter
5161 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5164 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5166 ice_release_lock(rule_lock);
5172 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5173 * @hw: pointer to the hardware structure
5174 * @vsi_handle: VSI handle to retrieve info from
5175 * @promisc_mask: pointer to mask to be filled in
5176 * @vid: VLAN ID of promisc VLAN VSI
5179 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5182 return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
5183 vid, hw->switch_info);
5187 * ice_remove_promisc - Remove promisc based filter rules
5188 * @hw: pointer to the hardware structure
5189 * @recp_id: recipe ID for which the rule needs to removed
5190 * @v_list: list of promisc entries
5192 static enum ice_status
5193 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
5194 struct LIST_HEAD_TYPE *v_list)
5196 struct ice_fltr_list_entry *v_list_itr, *tmp;
5197 struct ice_sw_recipe *recp_list;
5199 recp_list = &hw->switch_info->recp_list[recp_id];
5200 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5202 v_list_itr->status =
5203 ice_remove_rule_internal(hw, recp_list, v_list_itr);
5204 if (v_list_itr->status)
5205 return v_list_itr->status;
5211 * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
5212 * @hw: pointer to the hardware structure
5213 * @vsi_handle: VSI handle to clear mode
5214 * @promisc_mask: mask of promiscuous config bits to clear
5215 * @vid: VLAN ID to clear VLAN promiscuous
5216 * @sw: pointer to switch info struct for which function add rule
5218 static enum ice_status
5219 _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5220 u16 vid, struct ice_switch_info *sw)
5222 struct ice_fltr_list_entry *fm_entry, *tmp;
5223 struct LIST_HEAD_TYPE remove_list_head;
5224 struct ice_fltr_mgmt_list_entry *itr;
5225 struct LIST_HEAD_TYPE *rule_head;
5226 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5227 enum ice_status status = ICE_SUCCESS;
5230 if (!ice_is_vsi_valid(hw, vsi_handle))
5231 return ICE_ERR_PARAM;
5233 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
5234 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
5236 recipe_id = ICE_SW_LKUP_PROMISC;
5238 rule_head = &sw->recp_list[recipe_id].filt_rules;
5239 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
5241 INIT_LIST_HEAD(&remove_list_head);
5243 ice_acquire_lock(rule_lock);
5244 LIST_FOR_EACH_ENTRY(itr, rule_head,
5245 ice_fltr_mgmt_list_entry, list_entry) {
5246 struct ice_fltr_info *fltr_info;
5247 u8 fltr_promisc_mask = 0;
5249 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5251 fltr_info = &itr->fltr_info;
5253 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
5254 vid != fltr_info->l_data.mac_vlan.vlan_id)
5257 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
5259 /* Skip if filter is not completely specified by given mask */
5260 if (fltr_promisc_mask & ~promisc_mask)
5263 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5267 ice_release_lock(rule_lock);
5268 goto free_fltr_list;
5271 ice_release_lock(rule_lock);
5273 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
5276 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
5277 ice_fltr_list_entry, list_entry) {
5278 LIST_DEL(&fm_entry->list_entry);
5279 ice_free(hw, fm_entry);
5286 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
5287 * @hw: pointer to the hardware structure
5288 * @vsi_handle: VSI handle to clear mode
5289 * @promisc_mask: mask of promiscuous config bits to clear
5290 * @vid: VLAN ID to clear VLAN promiscuous
5293 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
5294 u8 promisc_mask, u16 vid)
5296 return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
5297 vid, hw->switch_info);
5301 * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5302 * @hw: pointer to the hardware structure
5303 * @vsi_handle: VSI handle to configure
5304 * @promisc_mask: mask of promiscuous config bits
5305 * @vid: VLAN ID to set VLAN promiscuous
5306 * @lport: logical port number to configure promisc mode
5307 * @sw: pointer to switch info struct for which function add rule
5309 static enum ice_status
5310 _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5311 u16 vid, u8 lport, struct ice_switch_info *sw)
5313 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
5314 struct ice_fltr_list_entry f_list_entry;
5315 struct ice_fltr_info new_fltr;
5316 enum ice_status status = ICE_SUCCESS;
5322 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
5324 if (!ice_is_vsi_valid(hw, vsi_handle))
5325 return ICE_ERR_PARAM;
5326 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5328 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
5330 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
5331 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
5332 new_fltr.l_data.mac_vlan.vlan_id = vid;
5333 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
5335 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
5336 recipe_id = ICE_SW_LKUP_PROMISC;
5339 /* Separate filters must be set for each direction/packet type
5340 * combination, so we will loop over the mask value, store the
5341 * individual type, and clear it out in the input mask as it
5344 while (promisc_mask) {
5345 struct ice_sw_recipe *recp_list;
5351 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
5352 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
5353 pkt_type = UCAST_FLTR;
5354 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
5355 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
5356 pkt_type = UCAST_FLTR;
5358 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
5359 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
5360 pkt_type = MCAST_FLTR;
5361 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
5362 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
5363 pkt_type = MCAST_FLTR;
5365 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
5366 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
5367 pkt_type = BCAST_FLTR;
5368 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
5369 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
5370 pkt_type = BCAST_FLTR;
5374 /* Check for VLAN promiscuous flag */
5375 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
5376 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
5377 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
5378 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
5382 /* Set filter DA based on packet type */
5383 mac_addr = new_fltr.l_data.mac.mac_addr;
5384 if (pkt_type == BCAST_FLTR) {
5385 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
5386 } else if (pkt_type == MCAST_FLTR ||
5387 pkt_type == UCAST_FLTR) {
5388 /* Use the dummy ether header DA */
5389 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
5390 ICE_NONDMA_TO_NONDMA);
5391 if (pkt_type == MCAST_FLTR)
5392 mac_addr[0] |= 0x1; /* Set multicast bit */
5395 /* Need to reset this to zero for all iterations */
5398 new_fltr.flag |= ICE_FLTR_TX;
5399 new_fltr.src = hw_vsi_id;
5401 new_fltr.flag |= ICE_FLTR_RX;
5402 new_fltr.src = lport;
5405 new_fltr.fltr_act = ICE_FWD_TO_VSI;
5406 new_fltr.vsi_handle = vsi_handle;
5407 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
5408 f_list_entry.fltr_info = new_fltr;
5409 recp_list = &sw->recp_list[recipe_id];
5411 status = ice_add_rule_internal(hw, recp_list, lport,
5413 if (status != ICE_SUCCESS)
5414 goto set_promisc_exit;
5422 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5423 * @hw: pointer to the hardware structure
5424 * @vsi_handle: VSI handle to configure
5425 * @promisc_mask: mask of promiscuous config bits
5426 * @vid: VLAN ID to set VLAN promiscuous
5429 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5432 return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
5433 hw->port_info->lport,
5438 * _ice_set_vlan_vsi_promisc
5439 * @hw: pointer to the hardware structure
5440 * @vsi_handle: VSI handle to configure
5441 * @promisc_mask: mask of promiscuous config bits
5442 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5443 * @lport: logical port number to configure promisc mode
5444 * @sw: pointer to switch info struct for which function add rule
5446 * Configure VSI with all associated VLANs to given promiscuous mode(s)
5448 static enum ice_status
5449 _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5450 bool rm_vlan_promisc, u8 lport,
5451 struct ice_switch_info *sw)
5453 struct ice_fltr_list_entry *list_itr, *tmp;
5454 struct LIST_HEAD_TYPE vsi_list_head;
5455 struct LIST_HEAD_TYPE *vlan_head;
5456 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
5457 enum ice_status status;
5460 INIT_LIST_HEAD(&vsi_list_head);
5461 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
5462 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
5463 ice_acquire_lock(vlan_lock);
5464 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
5466 ice_release_lock(vlan_lock);
5468 goto free_fltr_list;
5470 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
5472 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
5473 if (rm_vlan_promisc)
5474 status = _ice_clear_vsi_promisc(hw, vsi_handle,
5478 status = _ice_set_vsi_promisc(hw, vsi_handle,
5479 promisc_mask, vlan_id,
5486 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
5487 ice_fltr_list_entry, list_entry) {
5488 LIST_DEL(&list_itr->list_entry);
5489 ice_free(hw, list_itr);
5495 * ice_set_vlan_vsi_promisc
5496 * @hw: pointer to the hardware structure
5497 * @vsi_handle: VSI handle to configure
5498 * @promisc_mask: mask of promiscuous config bits
5499 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5501 * Configure VSI with all associated VLANs to given promiscuous mode(s)
5504 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5505 bool rm_vlan_promisc)
5507 return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
5508 rm_vlan_promisc, hw->port_info->lport,
5513 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
5514 * @hw: pointer to the hardware structure
5515 * @vsi_handle: VSI handle to remove filters from
5516 * @recp_list: recipe list from which function remove fltr
5517 * @lkup: switch rule filter lookup type
5520 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
5521 struct ice_sw_recipe *recp_list,
5522 enum ice_sw_lkup_type lkup)
5524 struct ice_fltr_list_entry *fm_entry;
5525 struct LIST_HEAD_TYPE remove_list_head;
5526 struct LIST_HEAD_TYPE *rule_head;
5527 struct ice_fltr_list_entry *tmp;
5528 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5529 enum ice_status status;
5531 INIT_LIST_HEAD(&remove_list_head);
5532 rule_lock = &recp_list[lkup].filt_rule_lock;
5533 rule_head = &recp_list[lkup].filt_rules;
5534 ice_acquire_lock(rule_lock);
5535 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
5537 ice_release_lock(rule_lock);
5542 case ICE_SW_LKUP_MAC:
5543 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
5545 case ICE_SW_LKUP_VLAN:
5546 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
5548 case ICE_SW_LKUP_PROMISC:
5549 case ICE_SW_LKUP_PROMISC_VLAN:
5550 ice_remove_promisc(hw, lkup, &remove_list_head);
5552 case ICE_SW_LKUP_MAC_VLAN:
5553 ice_remove_mac_vlan(hw, &remove_list_head);
5555 case ICE_SW_LKUP_ETHERTYPE:
5556 case ICE_SW_LKUP_ETHERTYPE_MAC:
5557 ice_remove_eth_mac(hw, &remove_list_head);
5559 case ICE_SW_LKUP_DFLT:
5560 ice_debug(hw, ICE_DBG_SW, "Remove filters for this lookup type hasn't been implemented yet\n");
5562 case ICE_SW_LKUP_LAST:
5563 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
5567 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
5568 ice_fltr_list_entry, list_entry) {
5569 LIST_DEL(&fm_entry->list_entry);
5570 ice_free(hw, fm_entry);
5575 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
5576 * @hw: pointer to the hardware structure
5577 * @vsi_handle: VSI handle to remove filters from
5578 * @sw: pointer to switch info struct
5581 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
5582 struct ice_switch_info *sw)
5584 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
5586 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5587 sw->recp_list, ICE_SW_LKUP_MAC);
5588 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5589 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
5590 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5591 sw->recp_list, ICE_SW_LKUP_PROMISC);
5592 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5593 sw->recp_list, ICE_SW_LKUP_VLAN);
5594 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5595 sw->recp_list, ICE_SW_LKUP_DFLT);
5596 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5597 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
5598 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5599 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
5600 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5601 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
5605 * ice_remove_vsi_fltr - Remove all filters for a VSI
5606 * @hw: pointer to the hardware structure
5607 * @vsi_handle: VSI handle to remove filters from
5609 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
5611 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
5615 * ice_alloc_res_cntr - allocating resource counter
5616 * @hw: pointer to the hardware structure
5617 * @type: type of resource
5618 * @alloc_shared: if set it is shared else dedicated
5619 * @num_items: number of entries requested for FD resource type
5620 * @counter_id: counter index returned by AQ call
5623 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
5626 struct ice_aqc_alloc_free_res_elem *buf;
5627 enum ice_status status;
5630 /* Allocate resource */
5631 buf_len = ice_struct_size(buf, elem, 1);
5632 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
5634 return ICE_ERR_NO_MEMORY;
5636 buf->num_elems = CPU_TO_LE16(num_items);
5637 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
5638 ICE_AQC_RES_TYPE_M) | alloc_shared);
5640 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
5641 ice_aqc_opc_alloc_res, NULL);
5645 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
5653 * ice_free_res_cntr - free resource counter
5654 * @hw: pointer to the hardware structure
5655 * @type: type of resource
5656 * @alloc_shared: if set it is shared else dedicated
5657 * @num_items: number of entries to be freed for FD resource type
5658 * @counter_id: counter ID resource which needs to be freed
5661 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
5664 struct ice_aqc_alloc_free_res_elem *buf;
5665 enum ice_status status;
5669 buf_len = ice_struct_size(buf, elem, 1);
5670 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
5672 return ICE_ERR_NO_MEMORY;
5674 buf->num_elems = CPU_TO_LE16(num_items);
5675 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
5676 ICE_AQC_RES_TYPE_M) | alloc_shared);
5677 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
5679 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
5680 ice_aqc_opc_free_res, NULL);
5682 ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
5689 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
5690 * @hw: pointer to the hardware structure
5691 * @counter_id: returns counter index
/* Thin wrapper: allocates exactly one dedicated VLAN counter through
 * ice_alloc_res_cntr(). NOTE(review): trailing argument/brace lines are
 * elided in this listing.
 */
5693 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
5695 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
5696 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
5701 * ice_free_vlan_res_counter - Free counter resource for VLAN type
5702 * @hw: pointer to the hardware structure
5703 * @counter_id: counter index to be freed
/* Thin wrapper: releases one dedicated VLAN counter through
 * ice_free_res_cntr(). NOTE(review): trailing argument/brace lines are
 * elided in this listing.
 */
5705 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
5707 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
5708 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
5713 * ice_alloc_res_lg_act - add large action resource
5714 * @hw: pointer to the hardware structure
5715 * @l_id: large action ID to fill it in
5716 * @num_acts: number of actions to hold with a large action entry
/* Allocates one wide-table (large action) entry sized for @num_acts
 * actions and returns its index in *l_id.
 */
5718 static enum ice_status
5719 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
5721 struct ice_aqc_alloc_free_res_elem *sw_buf;
5722 enum ice_status status;
/* reject zero or more than ICE_MAX_LG_ACT actions up front */
5725 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
5726 return ICE_ERR_PARAM;
5728 /* Allocate resource for large action */
5729 buf_len = ice_struct_size(sw_buf, elem, 1);
5730 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
5732 return ICE_ERR_NO_MEMORY;
5734 sw_buf->num_elems = CPU_TO_LE16(1);
5736 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
5737 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
5738 * If num_acts is greater than 2, then use
5739 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
5740 * The num_acts cannot exceed 4. This was ensured at the
5741 * beginning of the function.
/* NOTE(review): the narrative above says WIDE_TABLE_3 for num_acts == 2
 * while the code below selects WIDE_TABLE_2 - confirm which is intended
 * against the AQ resource-type definitions.
 */
5744 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
5745 else if (num_acts == 2)
5746 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
5748 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
5750 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
5751 ice_aqc_opc_alloc_res, NULL);
/* FW returns the allocated wide-table index in sw_resp */
5753 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
5755 ice_free(hw, sw_buf);
5760 * ice_add_mac_with_sw_marker - add filter with sw marker
5761 * @hw: pointer to the hardware structure
5762 * @f_info: filter info structure containing the MAC filter information
5763 * @sw_marker: sw marker to tag the Rx descriptor with
/* Adds (or reuses) a MAC forwarding filter and attaches a software-marker
 * large action to it. Marker and counter actions are mutually exclusive
 * on a rule. NOTE(review): this listing is elided - several status checks,
 * braces and gotos between the visible lines are missing.
 */
5766 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
5769 struct ice_fltr_mgmt_list_entry *m_entry;
5770 struct ice_fltr_list_entry fl_info;
5771 struct ice_sw_recipe *recp_list;
5772 struct LIST_HEAD_TYPE l_head;
5773 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5774 enum ice_status ret;
/* only forward-to-VSI MAC filters with a valid marker/VSI are accepted */
5778 if (f_info->fltr_act != ICE_FWD_TO_VSI)
5779 return ICE_ERR_PARAM;
5781 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5782 return ICE_ERR_PARAM;
5784 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
5785 return ICE_ERR_PARAM;
5787 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5788 return ICE_ERR_PARAM;
5789 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5791 /* Add filter if it doesn't exist so then the adding of large
5792 * action always results in update
5795 INIT_LIST_HEAD(&l_head);
5796 fl_info.fltr_info = *f_info;
5797 LIST_ADD(&fl_info.list_entry, &l_head);
5799 entry_exists = false;
5800 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5801 hw->port_info->lport);
/* an already-existing rule is fine; remember it so we do not remove it
 * on the error path below
 */
5802 if (ret == ICE_ERR_ALREADY_EXISTS)
5803 entry_exists = true;
5807 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5808 rule_lock = &recp_list->filt_rule_lock;
5809 ice_acquire_lock(rule_lock);
5810 /* Get the book keeping entry for the filter */
5811 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5815 /* If counter action was enabled for this rule then don't enable
5816 * sw marker large action
5818 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5819 ret = ICE_ERR_PARAM;
5823 /* if same marker was added before */
5824 if (m_entry->sw_marker_id == sw_marker) {
5825 ret = ICE_ERR_ALREADY_EXISTS;
5829 /* Allocate a hardware table entry to hold large act. Three actions
5830 * for marker based large action
5832 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
5836 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5839 /* Update the switch rule to add the marker action */
5840 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
5842 ice_release_lock(rule_lock);
5847 ice_release_lock(rule_lock);
5848 /* only remove entry if it did not exist previously */
5850 ret = ice_remove_mac(hw, &l_head);
5856 * ice_add_mac_with_counter - add filter with counter enabled
5857 * @hw: pointer to the hardware structure
5858 * @f_info: pointer to filter info structure containing the MAC filter
/* Counterpart of ice_add_mac_with_sw_marker(): adds (or reuses) a MAC
 * forwarding filter and attaches a VLAN-counter large action (two
 * actions) to it. Counter and sw-marker actions are mutually exclusive.
 * NOTE(review): elided listing - status checks, braces and gotos between
 * the visible lines are missing.
 */
5862 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
5864 struct ice_fltr_mgmt_list_entry *m_entry;
5865 struct ice_fltr_list_entry fl_info;
5866 struct ice_sw_recipe *recp_list;
5867 struct LIST_HEAD_TYPE l_head;
5868 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5869 enum ice_status ret;
/* same pre-conditions as the sw-marker variant, minus the marker check */
5874 if (f_info->fltr_act != ICE_FWD_TO_VSI)
5875 return ICE_ERR_PARAM;
5877 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5878 return ICE_ERR_PARAM;
5880 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5881 return ICE_ERR_PARAM;
5882 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5883 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5885 entry_exist = false;
5887 rule_lock = &recp_list->filt_rule_lock;
5889 /* Add filter if it doesn't exist so then the adding of large
5890 * action always results in update
5892 INIT_LIST_HEAD(&l_head);
5894 fl_info.fltr_info = *f_info;
5895 LIST_ADD(&fl_info.list_entry, &l_head);
5897 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5898 hw->port_info->lport);
5899 if (ret == ICE_ERR_ALREADY_EXISTS)
5904 ice_acquire_lock(rule_lock);
5905 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5907 ret = ICE_ERR_BAD_PTR;
5911 /* Don't enable counter for a filter for which sw marker was enabled */
5912 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
5913 ret = ICE_ERR_PARAM;
5917 /* If a counter was already enabled then don't need to add again */
5918 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5919 ret = ICE_ERR_ALREADY_EXISTS;
5923 /* Allocate a hardware table entry to VLAN counter */
5924 ret = ice_alloc_vlan_res_counter(hw, &counter_id)
5928 /* Allocate a hardware table entry to hold large act. Two actions for
5929 * counter based large action
5931 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
5935 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5938 /* Update the switch rule to add the counter action */
5939 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
5941 ice_release_lock(rule_lock);
5946 ice_release_lock(rule_lock);
5947 /* only remove entry if it did not exist previously */
5949 ret = ice_remove_mac(hw, &l_head);
5954 /* This is mapping table entry that maps every word within a given protocol
5955 * structure to the real byte offset as per the specification of that
5957 * for example dst address is 3 words in ethertype header and corresponding
/* (fixed typo: byte offsets are 0, 2, 4 - see the ICE_MAC_OFOS row) */
5958 * bytes are 0, 2, 4 in the actual packet header and src address is at 4, 6, 8
5959 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
5960 * matching entry describing its field. This needs to be updated if new
5961 * structure is added to that union.
/* Each row: protocol type -> byte offsets of that header's 16-bit match
 * words. Tunnel headers (VXLAN/GENEVE/GTP/...) start at offset 8, i.e.
 * past their UDP-specific leading bytes.
 */
5963 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
5964 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
5965 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
5966 { ICE_ETYPE_OL, { 0 } },
5967 { ICE_VLAN_OFOS, { 0, 2 } },
5968 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
5969 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
5970 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
5971 26, 28, 30, 32, 34, 36, 38 } },
5972 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
5973 26, 28, 30, 32, 34, 36, 38 } },
5974 { ICE_TCP_IL, { 0, 2 } },
5975 { ICE_UDP_OF, { 0, 2 } },
5976 { ICE_UDP_ILOS, { 0, 2 } },
5977 { ICE_SCTP_IL, { 0, 2 } },
5978 { ICE_VXLAN, { 8, 10, 12, 14 } },
5979 { ICE_GENEVE, { 8, 10, 12, 14 } },
5980 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
5981 { ICE_NVGRE, { 0, 2, 4, 6 } },
5982 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
5983 { ICE_PPPOE, { 0, 2, 4, 6 } },
5984 { ICE_PFCP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
5985 { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
5986 { ICE_ESP, { 0, 2, 4, 6 } },
5987 { ICE_AH, { 0, 2, 4, 6, 8, 10 } },
5988 { ICE_NAT_T, { 8, 10, 12, 14 } },
5989 { ICE_GTP_NO_PAY, { 8, 10, 12, 14 } },
5990 { ICE_VLAN_EX, { 0, 2 } },
5993 /* The following table describes preferred grouping of recipes.
5994 * If a recipe that needs to be programmed is a superset or matches one of the
5995 * following combinations, then the recipe needs to be chained as per the
/* Maps software protocol types to the hardware protocol IDs used in the
 * field vectors. Note several tunnel types share a HW ID (e.g. VXLAN /
 * GENEVE / GTP all map to ICE_UDP_OF_HW).
 */
5999 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
6000 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
6001 { ICE_MAC_IL, ICE_MAC_IL_HW },
6002 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
6003 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
6004 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
6005 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
6006 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
6007 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
6008 { ICE_TCP_IL, ICE_TCP_IL_HW },
6009 { ICE_UDP_OF, ICE_UDP_OF_HW },
6010 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
6011 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
6012 { ICE_VXLAN, ICE_UDP_OF_HW },
6013 { ICE_GENEVE, ICE_UDP_OF_HW },
6014 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
6015 { ICE_NVGRE, ICE_GRE_OF_HW },
6016 { ICE_GTP, ICE_UDP_OF_HW },
6017 { ICE_PPPOE, ICE_PPPOE_HW },
6018 { ICE_PFCP, ICE_UDP_ILOS_HW },
6019 { ICE_L2TPV3, ICE_L2TPV3_HW },
6020 { ICE_ESP, ICE_ESP_HW },
6021 { ICE_AH, ICE_AH_HW },
6022 { ICE_NAT_T, ICE_UDP_ILOS_HW },
6023 { ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
6024 { ICE_VLAN_EX, ICE_VLAN_OF_HW },
6028 * ice_find_recp - find a recipe
6029 * @hw: pointer to the hardware structure
6030 * @lkup_exts: extension sequence to match
/* @tun_type: tunnel type the recipe must also match (see final compare) */
6032 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
/* A recipe matches when it has the same number of valid words, every
 * lookup word (prot_id + offset, and presumably the field mask via
 * cr/de - the comparison line is elided here) is found among the
 * recipe's words, and the tunnel types agree.
 */
6034 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
6035 enum ice_sw_tunnel_type tun_type)
6037 bool refresh_required = true;
6038 struct ice_sw_recipe *recp;
6041 /* Walk through existing recipes to find a match */
6042 recp = hw->switch_info->recp_list;
6043 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6044 /* If recipe was not created for this ID, in SW bookkeeping,
6045 * check if FW has an entry for this recipe. If the FW has an
6046 * entry update it in our SW bookkeeping and continue with the
6049 if (!recp[i].recp_created)
6050 if (ice_get_recp_frm_fw(hw,
6051 hw->switch_info->recp_list, i,
6055 /* Skip inverse action recipes */
6056 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
6057 ICE_AQ_RECIPE_ACT_INV_ACT)
6060 /* if number of words we are looking for match */
6061 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
6062 struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
6063 struct ice_fv_word *be = lkup_exts->fv_words;
6064 u16 *cr = recp[i].lkup_exts.field_mask;
6065 u16 *de = lkup_exts->field_mask;
6069 /* ar, cr, and qr are related to the recipe words, while
6070 * be, de, and pe are related to the lookup words
6072 for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
6073 for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
6075 if (ar[qr].off == be[pe].off &&
6076 ar[qr].prot_id == be[pe].prot_id &&
6078 /* Found the "pe"th word in the
6083 /* After walking through all the words in the
6084 * "i"th recipe if "p"th word was not found then
6085 * this recipe is not what we are looking for.
6086 * So break out from this loop and try the next
6089 if (qr >= recp[i].lkup_exts.n_val_words) {
6094 /* If for "i"th recipe the found was never set to false
6095 * then it means we found our match
6097 if (tun_type == recp[i].tun_type && found)
6098 return i; /* Return the recipe ID */
6101 return ICE_MAX_NUM_RECIPES;
6105 * ice_prot_type_to_id - get protocol ID from protocol type
6106 * @type: protocol type
6107 * @id: pointer to variable that will receive the ID
6109 * Returns true if found, false otherwise
/* Linear lookup of @type in ice_prot_id_tbl; writes the matching HW
 * protocol ID through *id. NOTE(review): the "return true" / "return
 * false" lines are elided in this listing.
 */
6111 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
6115 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
6116 if (ice_prot_id_tbl[i].type == type) {
6117 *id = ice_prot_id_tbl[i].protocol_id;
6124 * ice_fill_valid_words - count valid words
6125 * @rule: advanced rule with lookup information
6126 * @lkup_exts: byte offset extractions of the words that are valid
6128 * calculate valid words in a lookup rule using mask value
/* (kernel-doc name above corrected: the function is ice_fill_valid_words,
 * not ice_find_valid_words.)
 * Appends one entry to lkup_exts for every non-zero 16-bit word of the
 * rule's match mask, recording the protocol ID, byte offset and mask.
 * ret_val is the number of words added; NOTE(review): its return is
 * elided in this listing.
 */
6131 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
6132 struct ice_prot_lkup_ext *lkup_exts)
6134 u8 j, word, prot_id, ret_val;
6136 if (!ice_prot_type_to_id(rule->type, &prot_id))
6139 word = lkup_exts->n_val_words;
/* scan the mask union one 16-bit word at a time */
6141 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
6142 if (((u16 *)&rule->m_u)[j] &&
6143 rule->type < ARRAY_SIZE(ice_prot_ext)) {
6144 /* No more space to accommodate */
6145 if (word >= ICE_MAX_CHAIN_WORDS)
6147 lkup_exts->fv_words[word].off =
6148 ice_prot_ext[rule->type].offs[j];
6149 lkup_exts->fv_words[word].prot_id =
6150 ice_prot_id_tbl[rule->type].protocol_id;
6151 lkup_exts->field_mask[word] =
6152 BE16_TO_CPU(((_FORCE_ __be16 *)&rule->m_u)[j]);
6156 ret_val = word - lkup_exts->n_val_words;
6157 lkup_exts->n_val_words = word;
6163 * ice_create_first_fit_recp_def - Create a recipe grouping
6164 * @hw: pointer to the hardware structure
6165 * @lkup_exts: an array of protocol header extractions
6166 * @rg_list: pointer to a list that stores new recipe groups
6167 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
6169 * Using first fit algorithm, take all the words that are still not done
6170 * and start grouping them in 4-word groups. Each group makes up one
/* NOTE(review): elided listing - the !entry checks after ice_malloc,
 * increments of n_val_pairs / recp_cnt, bit-marking of "done" words and
 * the final return are not visible here.
 */
6173 static enum ice_status
6174 ice_create_first_fit_recp_def(struct ice_hw *hw,
6175 struct ice_prot_lkup_ext *lkup_exts,
6176 struct LIST_HEAD_TYPE *rg_list,
6179 struct ice_pref_recipe_group *grp = NULL;
/* a rule with no valid match words still needs one (empty) group */
6184 if (!lkup_exts->n_val_words) {
6185 struct ice_recp_grp_entry *entry;
6187 entry = (struct ice_recp_grp_entry *)
6188 ice_malloc(hw, sizeof(*entry));
6190 return ICE_ERR_NO_MEMORY;
6191 LIST_ADD(&entry->l_entry, rg_list);
6192 grp = &entry->r_group;
6194 grp->n_val_pairs = 0;
6197 /* Walk through every word in the rule to check if it is not done. If so
6198 * then this word needs to be part of a new recipe.
6200 for (j = 0; j < lkup_exts->n_val_words; j++)
6201 if (!ice_is_bit_set(lkup_exts->done, j)) {
/* start a fresh group when none exists or the current one is full */
6203 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
6204 struct ice_recp_grp_entry *entry;
6206 entry = (struct ice_recp_grp_entry *)
6207 ice_malloc(hw, sizeof(*entry));
6209 return ICE_ERR_NO_MEMORY;
6210 LIST_ADD(&entry->l_entry, rg_list);
6211 grp = &entry->r_group;
/* copy this word's protocol/offset/mask into the group */
6215 grp->pairs[grp->n_val_pairs].prot_id =
6216 lkup_exts->fv_words[j].prot_id;
6217 grp->pairs[grp->n_val_pairs].off =
6218 lkup_exts->fv_words[j].off;
6219 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
6227 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
6228 * @hw: pointer to the hardware structure
6229 * @fv_list: field vector with the extraction sequence information
6230 * @rg_list: recipe groupings with protocol-offset pairs
6232 * Helper function to fill in the field vector indices for protocol-offset
6233 * pairs. These indexes are then ultimately programmed into a recipe.
/* Returns ICE_ERR_PARAM if any protocol/offset pair has no slot in the
 * field vector. NOTE(review): elided listing - the fv_idx[i] assignment,
 * the "found" bookkeeping and the success return are not visible.
 */
6235 static enum ice_status
6236 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
6237 struct LIST_HEAD_TYPE *rg_list)
6239 struct ice_sw_fv_list_entry *fv;
6240 struct ice_recp_grp_entry *rg;
6241 struct ice_fv_word *fv_ext;
6243 if (LIST_EMPTY(fv_list))
/* only the first field vector in the list is consulted */
6246 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
6247 fv_ext = fv->fv_ptr->ew;
6249 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
6252 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
6253 struct ice_fv_word *pr;
6258 pr = &rg->r_group.pairs[i];
6259 mask = rg->r_group.mask[i];
/* linear scan of the extraction sequence for this prot/off pair */
6261 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
6262 if (fv_ext[j].prot_id == pr->prot_id &&
6263 fv_ext[j].off == pr->off) {
6266 /* Store index of field vector */
6268 rg->fv_mask[i] = mask;
6272 /* Protocol/offset could not be found, caller gave an
6276 return ICE_ERR_PARAM;
6284 * ice_find_free_recp_res_idx - find free result indexes for recipe
6285 * @hw: pointer to hardware structure
6286 * @profiles: bitmap of profiles that will be associated with the new recipe
6287 * @free_idx: pointer to variable to receive the free index bitmap
6289 * The algorithm used here is:
6290 * 1. When creating a new recipe, create a set P which contains all
6291 * Profiles that will be associated with our new recipe
6293 * 2. For each Profile p in set P:
6294 * a. Add all recipes associated with Profile p into set R
6295 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
6296 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
6297 * i. Or just assume they all have the same possible indexes:
6299 * i.e., PossibleIndexes = 0x0000F00000000000
6301 * 3. For each Recipe r in set R:
6302 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
6303 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
6305 * FreeIndexes will contain the bits indicating the indexes free for use,
6306 * then the code needs to update the recipe[r].used_result_idx_bits to
6307 * indicate which indexes were selected for use by this recipe.
/* Returns the number of free result indexes (popcount of *free_idx). */
6310 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
6311 ice_bitmap_t *free_idx)
6313 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6314 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6315 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
6318 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6319 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6320 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
6321 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
/* start from "all indexes possible" and narrow down per profile */
6323 ice_bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
6325 /* For each profile we are going to associate the recipe with, add the
6326 * recipes that are associated with that profile. This will give us
6327 * the set of recipes that our recipe may collide with. Also, determine
6328 * what possible result indexes are usable given this set of profiles.
6330 ice_for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
6331 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
6332 ICE_MAX_NUM_RECIPES);
6333 ice_and_bitmap(possible_idx, possible_idx,
6334 hw->switch_info->prof_res_bm[bit],
6338 /* For each recipe that our new recipe may collide with, determine
6339 * which indexes have been used.
6341 ice_for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
6342 ice_or_bitmap(used_idx, used_idx,
6343 hw->switch_info->recp_list[bit].res_idxs,
/* free = possible XOR used (used is a subset of possible) */
6346 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
6348 /* return number of free indexes */
6349 return (u16)ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS);
6353 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
6354 * @hw: pointer to hardware structure
6355 * @rm: recipe management list entry
6356 * @profiles: bitmap of profiles that will be associated.
/* Programs the recipe group(s) in @rm into hardware:
 *  1. verify enough free result indexes exist for chaining,
 *  2. allocate a recipe ID per group and fill its lookup indexes/masks,
 *  3. for multi-group recipes, allocate one extra "root" recipe that
 *     chains the per-group result indexes together,
 *  4. submit everything with ice_aq_add_recipe() and mirror the result
 *     into the SW bookkeeping (hw->switch_info->recp_list).
 * NOTE(review): elided listing - several local declarations, !ptr and
 * status checks, goto labels and cleanup/free paths are not visible.
 */
6358 static enum ice_status
6359 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
6360 ice_bitmap_t *profiles)
6362 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6363 struct ice_aqc_recipe_data_elem *tmp;
6364 struct ice_aqc_recipe_data_elem *buf;
6365 struct ice_recp_grp_entry *entry;
6366 enum ice_status status;
6372 /* When more than one recipe are required, another recipe is needed to
6373 * chain them together. Matching a tunnel metadata ID takes up one of
6374 * the match fields in the chaining recipe reducing the number of
6375 * chained recipes by one.
6377 /* check number of free result indices */
6378 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6379 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
6381 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
6382 free_res_idx, rm->n_grp_count);
6384 if (rm->n_grp_count > 1) {
6385 if (rm->n_grp_count > free_res_idx)
6386 return ICE_ERR_MAX_LIMIT;
6391 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
6392 return ICE_ERR_MAX_LIMIT;
/* tmp holds the current FW recipe table; buf is what we program */
6394 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
6395 ICE_MAX_NUM_RECIPES,
6398 return ICE_ERR_NO_MEMORY;
6400 buf = (struct ice_aqc_recipe_data_elem *)
6401 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
6403 status = ICE_ERR_NO_MEMORY;
6407 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
6408 recipe_count = ICE_MAX_NUM_RECIPES;
6409 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
6411 if (status || recipe_count == 0)
6414 /* Allocate the recipe resources, and configure them according to the
6415 * match fields from protocol headers and extracted field vectors.
6417 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
6418 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
6421 status = ice_alloc_recipe(hw, &entry->rid);
6425 /* Clear the result index of the located recipe, as this will be
6426 * updated, if needed, later in the recipe creation process.
6428 tmp[0].content.result_indx = 0;
6430 buf[recps] = tmp[0];
6431 buf[recps].recipe_indx = (u8)entry->rid;
6432 /* if the recipe is a non-root recipe RID should be programmed
6433 * as 0 for the rules to be applied correctly.
6435 buf[recps].content.rid = 0;
6436 ice_memset(&buf[recps].content.lkup_indx, 0,
6437 sizeof(buf[recps].content.lkup_indx),
6440 /* All recipes use look-up index 0 to match switch ID. */
6441 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
6442 buf[recps].content.mask[0] =
6443 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
6444 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
6447 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
6448 buf[recps].content.lkup_indx[i] = 0x80;
6449 buf[recps].content.mask[i] = 0;
/* now overwrite the ignore slots with this group's real FV indexes */
6452 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
6453 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
6454 buf[recps].content.mask[i + 1] =
6455 CPU_TO_LE16(entry->fv_mask[i]);
6458 if (rm->n_grp_count > 1) {
6459 /* Checks to see if there really is a valid result index
6462 if (chain_idx >= ICE_MAX_FV_WORDS) {
6463 ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
6464 status = ICE_ERR_MAX_LIMIT;
/* publish this group's match result at chain_idx so the root
 * (chaining) recipe can key on it
 */
6468 entry->chain_idx = chain_idx;
6469 buf[recps].content.result_indx =
6470 ICE_AQ_RECIPE_RESULT_EN |
6471 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
6472 ICE_AQ_RECIPE_RESULT_DATA_M);
6473 ice_clear_bit(chain_idx, result_idx_bm);
6474 chain_idx = ice_find_first_bit(result_idx_bm,
6478 /* fill recipe dependencies */
6479 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
6480 ICE_MAX_NUM_RECIPES);
6481 ice_set_bit(buf[recps].recipe_indx,
6482 (ice_bitmap_t *)buf[recps].recipe_bitmap);
6483 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
/* single group: the one recipe is also the root recipe */
6487 if (rm->n_grp_count == 1) {
6488 rm->root_rid = buf[0].recipe_indx;
6489 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
6490 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
6491 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
6492 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
6493 sizeof(buf[0].recipe_bitmap),
6494 ICE_NONDMA_TO_NONDMA);
6496 status = ICE_ERR_BAD_PTR;
6499 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
6500 * the recipe which is getting created if specified
6501 * by user. Usually any advanced switch filter, which results
6502 * into new extraction sequence, ended up creating a new recipe
6503 * of type ROOT and usually recipes are associated with profiles
6504 * Switch rule referreing newly created recipe, needs to have
6505 * either/or 'fwd' or 'join' priority, otherwise switch rule
6506 * evaluation will not happen correctly. In other words, if
6507 * switch rule to be evaluated on priority basis, then recipe
6508 * needs to have priority, otherwise it will be evaluated last.
6510 buf[0].content.act_ctrl_fwd_priority = rm->priority;
6512 struct ice_recp_grp_entry *last_chain_entry;
6515 /* Allocate the last recipe that will chain the outcomes of the
6516 * other recipes together
6518 status = ice_alloc_recipe(hw, &rid);
6522 buf[recps].recipe_indx = (u8)rid;
6523 buf[recps].content.rid = (u8)rid;
6524 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
6525 /* the new entry created should also be part of rg_list to
6526 * make sure we have complete recipe
6528 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
6529 sizeof(*last_chain_entry));
6530 if (!last_chain_entry) {
6531 status = ICE_ERR_NO_MEMORY;
6534 last_chain_entry->rid = rid;
6535 ice_memset(&buf[recps].content.lkup_indx, 0,
6536 sizeof(buf[recps].content.lkup_indx),
6538 /* All recipes use look-up index 0 to match switch ID. */
6539 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
6540 buf[recps].content.mask[0] =
6541 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
6542 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
6543 buf[recps].content.lkup_indx[i] =
6544 ICE_AQ_RECIPE_LKUP_IGNORE;
6545 buf[recps].content.mask[i] = 0;
6549 /* update r_bitmap with the recp that is used for chaining */
6550 ice_set_bit(rid, rm->r_bitmap);
6551 /* this is the recipe that chains all the other recipes so it
6552 * should not have a chaining ID to indicate the same
6554 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
/* the root recipe keys on each chained recipe's result index */
6555 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
6557 last_chain_entry->fv_idx[i] = entry->chain_idx;
6558 buf[recps].content.lkup_indx[i] = entry->chain_idx;
6559 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
6560 ice_set_bit(entry->rid, rm->r_bitmap);
6562 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
6563 if (sizeof(buf[recps].recipe_bitmap) >=
6564 sizeof(rm->r_bitmap)) {
6565 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
6566 sizeof(buf[recps].recipe_bitmap),
6567 ICE_NONDMA_TO_NONDMA);
6569 status = ICE_ERR_BAD_PTR;
6572 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
6575 rm->root_rid = (u8)rid;
/* change lock serializes recipe programming against other writers */
6577 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
6581 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
6582 ice_release_change_lock(hw);
6586 /* Every recipe that just got created add it to the recipe
6589 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
6590 struct ice_switch_info *sw = hw->switch_info;
6591 bool is_root, idx_found = false;
6592 struct ice_sw_recipe *recp;
6593 u16 idx, buf_idx = 0;
6595 /* find buffer index for copying some data */
6596 for (idx = 0; idx < rm->n_grp_count; idx++)
6597 if (buf[idx].recipe_indx == entry->rid) {
6603 status = ICE_ERR_OUT_OF_RANGE;
6607 recp = &sw->recp_list[entry->rid];
6608 is_root = (rm->root_rid == entry->rid);
6609 recp->is_root = is_root;
6611 recp->root_rid = entry->rid;
6612 recp->big_recp = (is_root && rm->n_grp_count > 1);
6614 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
6615 entry->r_group.n_val_pairs *
6616 sizeof(struct ice_fv_word),
6617 ICE_NONDMA_TO_NONDMA);
6619 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
6620 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
6622 /* Copy non-result fv index values and masks to recipe. This
6623 * call will also update the result recipe bitmask.
6625 ice_collect_result_idx(&buf[buf_idx], recp);
6627 /* for non-root recipes, also copy to the root, this allows
6628 * easier matching of a complete chained recipe
6631 ice_collect_result_idx(&buf[buf_idx],
6632 &sw->recp_list[rm->root_rid]);
6634 recp->n_ext_words = entry->r_group.n_val_pairs;
6635 recp->chain_idx = entry->chain_idx;
6636 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
6637 recp->n_grp_count = rm->n_grp_count;
6638 recp->tun_type = rm->tun_type;
6639 recp->recp_created = true;
6653 * ice_create_recipe_group - creates recipe group
6654 * @hw: pointer to hardware structure
6655 * @rm: recipe management list entry
6656 * @lkup_exts: lookup elements
/* Groups the not-yet-done lookup words into recipes (first-fit) and
 * copies the extraction words/masks into @rm. NOTE(review): the status
 * check after ice_create_first_fit_recp_def and the final return are
 * elided in this listing.
 */
6658 static enum ice_status
6659 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
6660 struct ice_prot_lkup_ext *lkup_exts)
6662 enum ice_status status;
6665 rm->n_grp_count = 0;
6667 /* Create recipes for words that are marked not done by packing them
6670 status = ice_create_first_fit_recp_def(hw, lkup_exts,
6671 &rm->rg_list, &recp_count);
6673 rm->n_grp_count += recp_count;
6674 rm->n_ext_words = lkup_exts->n_val_words;
6675 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
6676 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
6677 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
6678 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
6685 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
6686 * @hw: pointer to hardware structure
6687 * @lkups: lookup elements or match criteria for the advanced recipe, one
6688 * structure per protocol header
6689 * @lkups_cnt: number of protocols
6690 * @bm: bitmap of field vectors to consider
6691 * @fv_list: pointer to a list that holds the returned field vectors
/* Translates each lookup's protocol type to a HW protocol ID, then asks
 * ice_get_sw_fv_list() for field vectors covering all of them.
 * NOTE(review): elided listing - the lkups_cnt==0 shortcut (if any), the
 * !prot_ids check, the goto label and the return are not visible.
 */
6693 static enum ice_status
6694 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6695 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
6697 enum ice_status status;
6704 prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
6706 return ICE_ERR_NO_MEMORY;
/* an unmapped protocol type is a configuration error */
6708 for (i = 0; i < lkups_cnt; i++)
6709 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
6710 status = ICE_ERR_CFG;
6714 /* Find field vectors that include all specified protocol types */
6715 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
6718 ice_free(hw, prot_ids);
6723 * ice_tun_type_match_word - determine if tun type needs a match mask
6724 * @tun_type: tunnel type
6725 * @mask: mask to be used for the tunnel
/* (kernel-doc name above corrected to match the function.)
 * For tunnel/QinQ types that require matching the tunnel metadata flag,
 * writes the appropriate mask and returns true; GENEVE_VLAN/VXLAN_VLAN
 * additionally clear the VLAN bit from the mask. NOTE(review): the
 * "return true"/default "return false" lines are elided in this listing.
 */
6727 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
6730 case ICE_SW_TUN_VXLAN_GPE:
6731 case ICE_SW_TUN_GENEVE:
6732 case ICE_SW_TUN_VXLAN:
6733 case ICE_SW_TUN_NVGRE:
6734 case ICE_SW_TUN_UDP:
6735 case ICE_ALL_TUNNELS:
6736 case ICE_SW_TUN_AND_NON_TUN_QINQ:
6737 case ICE_NON_TUN_QINQ:
6738 case ICE_SW_TUN_PPPOE_QINQ:
6739 case ICE_SW_TUN_PPPOE_PAY_QINQ:
6740 case ICE_SW_TUN_PPPOE_IPV4_QINQ:
6741 case ICE_SW_TUN_PPPOE_IPV6_QINQ:
6742 *mask = ICE_TUN_FLAG_MASK;
6745 case ICE_SW_TUN_GENEVE_VLAN:
6746 case ICE_SW_TUN_VXLAN_VLAN:
6747 *mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
6757 * ice_add_special_words - Add words that are not protocols, such as metadata
6758 * @rinfo: other information regarding the rule e.g. priority and action info
6759 * @lkup_exts: lookup word structure
/* Appends a metadata lookup word (tunnel-flag MDID) when the rule's
 * tunnel type requires matching the tunnel bit; fails with
 * ICE_ERR_MAX_LIMIT when no word slot is left. NOTE(review): the success
 * return is elided in this listing.
 */
6761 static enum ice_status
6762 ice_add_special_words(struct ice_adv_rule_info *rinfo,
6763 struct ice_prot_lkup_ext *lkup_exts)
6767 /* If this is a tunneled packet, then add recipe index to match the
6768 * tunnel bit in the packet metadata flags.
6770 if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
6771 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
6772 u8 word = lkup_exts->n_val_words++;
6774 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
6775 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
6776 lkup_exts->field_mask[word] = mask;
6778 return ICE_ERR_MAX_LIMIT;
6785 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
6786 * @hw: pointer to hardware structure
6787 * @rinfo: other information regarding the rule e.g. priority and action info
6788 * @bm: pointer to memory for returning the bitmap of field vectors
 * Two strategies are visible below: broad tunnel classes set a prof_type
 * that is expanded by ice_get_sw_fv_bitmap() at the end, while specific
 * tunnel types set explicit ICE_PROFID_* bits directly in @bm.
6791 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
6794 enum ice_prof_type prof_type;
/* Start from an empty bitmap; bits are OR-ed in per tunnel type */
6796 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
6798 switch (rinfo->tun_type) {
6800 case ICE_NON_TUN_QINQ:
6801 prof_type = ICE_PROF_NON_TUN;
6803 case ICE_ALL_TUNNELS:
6804 prof_type = ICE_PROF_TUN_ALL;
6806 case ICE_SW_TUN_VXLAN_GPE:
6807 case ICE_SW_TUN_GENEVE:
6808 case ICE_SW_TUN_GENEVE_VLAN:
6809 case ICE_SW_TUN_VXLAN:
6810 case ICE_SW_TUN_VXLAN_VLAN:
6811 case ICE_SW_TUN_UDP:
6812 case ICE_SW_TUN_GTP:
6813 prof_type = ICE_PROF_TUN_UDP;
6815 case ICE_SW_TUN_NVGRE:
6816 prof_type = ICE_PROF_TUN_GRE;
6818 case ICE_SW_TUN_PPPOE:
6819 case ICE_SW_TUN_PPPOE_QINQ:
6820 prof_type = ICE_PROF_TUN_PPPOE;
6822 case ICE_SW_TUN_PPPOE_PAY:
6823 case ICE_SW_TUN_PPPOE_PAY_QINQ:
6824 ice_set_bit(ICE_PROFID_PPPOE_PAY, bm);
/* PPPoE over IPv4/IPv6: cover OTHER/UDP/TCP variants of the profile */
6826 case ICE_SW_TUN_PPPOE_IPV4:
6827 case ICE_SW_TUN_PPPOE_IPV4_QINQ:
6828 ice_set_bit(ICE_PROFID_PPPOE_IPV4_OTHER, bm);
6829 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
6830 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
6832 case ICE_SW_TUN_PPPOE_IPV4_TCP:
6833 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
6835 case ICE_SW_TUN_PPPOE_IPV4_UDP:
6836 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
6838 case ICE_SW_TUN_PPPOE_IPV6:
6839 case ICE_SW_TUN_PPPOE_IPV6_QINQ:
6840 ice_set_bit(ICE_PROFID_PPPOE_IPV6_OTHER, bm);
6841 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
6842 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
6844 case ICE_SW_TUN_PPPOE_IPV6_TCP:
6845 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
6847 case ICE_SW_TUN_PPPOE_IPV6_UDP:
6848 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
6850 case ICE_SW_TUN_PROFID_IPV6_ESP:
6851 case ICE_SW_TUN_IPV6_ESP:
6852 ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
6854 case ICE_SW_TUN_PROFID_IPV6_AH:
6855 case ICE_SW_TUN_IPV6_AH:
6856 ice_set_bit(ICE_PROFID_IPV6_AH, bm);
6858 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6859 case ICE_SW_TUN_IPV6_L2TPV3:
6860 ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
6862 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6863 case ICE_SW_TUN_IPV6_NAT_T:
6864 ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
/* PFCP node vs. session messages are distinct hardware profiles */
6866 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6867 ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
6869 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6870 ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
6872 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6873 ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
6875 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6876 ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
6878 case ICE_SW_TUN_IPV4_NAT_T:
6879 ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
6881 case ICE_SW_TUN_IPV4_L2TPV3:
6882 ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
6884 case ICE_SW_TUN_IPV4_ESP:
6885 ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
6887 case ICE_SW_TUN_IPV4_AH:
6888 ice_set_bit(ICE_PROFID_IPV4_AH, bm);
6890 case ICE_SW_IPV4_TCP:
6891 ice_set_bit(ICE_PROFID_IPV4_TCP, bm);
6893 case ICE_SW_IPV4_UDP:
6894 ice_set_bit(ICE_PROFID_IPV4_UDP, bm);
6896 case ICE_SW_IPV6_TCP:
6897 ice_set_bit(ICE_PROFID_IPV6_TCP, bm);
6899 case ICE_SW_IPV6_UDP:
6900 ice_set_bit(ICE_PROFID_IPV6_UDP, bm);
/* GTP-U inner IPv4/IPv6: include both EH (extension header) and plain
 * GTP-U profiles for OTHER/UDP/TCP inner payloads
 */
6902 case ICE_SW_TUN_IPV4_GTPU_IPV4:
6903 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, bm);
6904 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_OTHER, bm);
6905 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
6906 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
6907 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
6908 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
6910 case ICE_SW_TUN_IPV6_GTPU_IPV4:
6911 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, bm);
6912 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_OTHER, bm);
6913 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
6914 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
6915 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
6916 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
6918 case ICE_SW_TUN_IPV4_GTPU_IPV6:
6919 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, bm);
6920 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_OTHER, bm);
6921 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
6922 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
6923 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
6924 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
6926 case ICE_SW_TUN_IPV6_GTPU_IPV6:
6927 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, bm);
6928 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_OTHER, bm);
6929 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
6930 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
6931 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
6932 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
6934 case ICE_SW_TUN_AND_NON_TUN:
6935 case ICE_SW_TUN_AND_NON_TUN_QINQ:
6937 prof_type = ICE_PROF_ALL;
/* Expand the chosen prof_type class into concrete profile bits */
6941 ice_get_sw_fv_bitmap(hw, prof_type, bm);
6945 * ice_is_prof_rule - determine if rule type is a profile rule
6946 * @type: the rule type
6948 * if the rule type is a profile rule, that means that there no field value
6949 * match required, in this case just a profile hit is required.
 *
 * All ICE_SW_TUN_PROFID_* tunnel types listed below are profile rules;
 * everything else is not.
6951 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
6954 case ICE_SW_TUN_PROFID_IPV6_ESP:
6955 case ICE_SW_TUN_PROFID_IPV6_AH:
6956 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6957 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6958 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6959 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6960 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6961 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6971 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
6972 * @hw: pointer to hardware structure
6973 * @lkups: lookup elements or match criteria for the advanced recipe, one
6974 * structure per protocol header
6975 * @lkups_cnt: number of protocols
6976 * @rinfo: other information regarding the rule e.g. priority and action info
6977 * @rid: return the recipe ID of the recipe created
 *
 * On success *rid is either an existing matching recipe or a newly created
 * one; the recipe is associated with every compatible profile.
6979 static enum ice_status
6980 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6981 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
6983 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
6984 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
6985 struct ice_prot_lkup_ext *lkup_exts;
6986 struct ice_recp_grp_entry *r_entry;
6987 struct ice_sw_fv_list_entry *fvit;
6988 struct ice_recp_grp_entry *r_tmp;
6989 struct ice_sw_fv_list_entry *tmp;
6990 enum ice_status status = ICE_SUCCESS;
6991 struct ice_sw_recipe *rm;
/* Profile rules need no lookup words; all other rules must have some */
6994 if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
6995 return ICE_ERR_PARAM;
6997 lkup_exts = (struct ice_prot_lkup_ext *)
6998 ice_malloc(hw, sizeof(*lkup_exts))
7000 return ICE_ERR_NO_MEMORY;
7002 /* Determine the number of words to be matched and if it exceeds a
7003 * recipe's restrictions
7005 for (i = 0; i < lkups_cnt; i++) {
7008 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
7009 status = ICE_ERR_CFG;
7010 goto err_free_lkup_exts;
7013 count = ice_fill_valid_words(&lkups[i], lkup_exts);
7015 status = ICE_ERR_CFG;
7016 goto err_free_lkup_exts;
7020 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
7022 status = ICE_ERR_NO_MEMORY;
7023 goto err_free_lkup_exts;
7026 /* Get field vectors that contain fields extracted from all the protocol
7027 * headers being programmed.
7029 INIT_LIST_HEAD(&rm->fv_list);
7030 INIT_LIST_HEAD(&rm->rg_list);
7032 /* Get bitmap of field vectors (profiles) that are compatible with the
7033 * rule request; only these will be searched in the subsequent call to
7036 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
7038 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
7042 /* Create any special protocol/offset pairs, such as looking at tunnel
7043 * bits by extracting metadata
7045 status = ice_add_special_words(rinfo, lkup_exts);
7047 goto err_free_lkup_exts;
7049 /* Group match words into recipes using preferred recipe grouping
7052 status = ice_create_recipe_group(hw, rm, lkup_exts);
7056 /* set the recipe priority if specified */
7057 rm->priority = (u8)rinfo->priority;
7059 /* Find offsets from the field vector. Pick the first one for all the
7062 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
7066 /* An empty FV list means to use all the profiles returned in the
7069 if (LIST_EMPTY(&rm->fv_list)) {
7072 ice_for_each_set_bit(j, fv_bitmap, ICE_MAX_NUM_PROFILES) {
7073 struct ice_sw_fv_list_entry *fvl;
7075 fvl = (struct ice_sw_fv_list_entry *)
7076 ice_malloc(hw, sizeof(*fvl));
7080 fvl->profile_id = j;
7081 LIST_ADD(&fvl->list_entry, &rm->fv_list);
7085 /* get bitmap of all profiles the recipe will be associated with */
7086 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7087 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7089 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
7090 ice_set_bit((u16)fvit->profile_id, profiles);
7093 /* Look for a recipe which matches our requested fv / mask list */
7094 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
7095 if (*rid < ICE_MAX_NUM_RECIPES)
7096 /* Success if found a recipe that match the existing criteria */
7099 rm->tun_type = rinfo->tun_type;
7100 /* Recipe we need does not exist, add a recipe */
7101 status = ice_add_sw_recipe(hw, rm, profiles);
7105 /* Associate all the recipes created with all the profiles in the
7106 * common field vector.
7108 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7110 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Merge the existing profile->recipe mapping with the new recipes */
7113 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
7114 (u8 *)r_bitmap, NULL);
7118 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
7119 ICE_MAX_NUM_RECIPES);
/* The change lock guards the recipe-to-profile AQ update below */
7120 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7124 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
7127 ice_release_change_lock(hw);
7132 /* Update profile to recipe bitmap array */
7133 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
7134 ICE_MAX_NUM_RECIPES);
7136 /* Update recipe to profile bitmap array */
7137 ice_for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
7138 ice_set_bit((u16)fvit->profile_id,
7139 recipe_to_profile[j]);
7142 *rid = rm->root_rid;
/* Cache the lookup extraction info with the recipe for later matching */
7143 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
7144 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
/* Cleanup: free the temporary recipe-group and FV list entries */
7146 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
7147 ice_recp_grp_entry, l_entry) {
7148 LIST_DEL(&r_entry->l_entry);
7149 ice_free(hw, r_entry);
7152 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
7154 LIST_DEL(&fvit->list_entry);
7159 ice_free(hw, rm->root_buf);
7164 ice_free(hw, lkup_exts);
7170 * ice_find_dummy_packet - find dummy packet by tunnel type
7172 * @lkups: lookup elements or match criteria for the advanced recipe, one
7173 * structure per protocol header
7174 * @lkups_cnt: number of protocols
7175 * @tun_type: tunnel type from the match criteria
7176 * @pkt: dummy packet to fill according to filter match criteria
7177 * @pkt_len: packet length of dummy packet
7178 * @offsets: pointer to receive the pointer to the offsets for the packet
 *
 * Scans @lkups to classify the requested match (tcp/udp/ipv6/vlan/gre/pppoe),
 * then selects the matching pre-built dummy packet template, its length and
 * its protocol offset table. Most specific tunnel types take priority;
 * generic TCP/UDP over IPv4/IPv6 (with optional VLAN) is the fallback.
7181 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7182 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
7184 const struct ice_dummy_pkt_offsets **offsets)
7186 bool tcp = false, udp = false, ipv6 = false, vlan = false;
/* Classify the lookups: each branch sets one of the flags above
 * (assignments elided in this excerpt)
 */
7190 for (i = 0; i < lkups_cnt; i++) {
7191 if (lkups[i].type == ICE_UDP_ILOS)
7193 else if (lkups[i].type == ICE_TCP_IL)
7195 else if (lkups[i].type == ICE_IPV6_OFOS)
7197 else if (lkups[i].type == ICE_VLAN_OFOS)
7199 else if (lkups[i].type == ICE_IPV4_OFOS &&
7200 lkups[i].h_u.ipv4_hdr.protocol ==
7201 ICE_IPV4_NVGRE_PROTO_ID &&
7202 lkups[i].m_u.ipv4_hdr.protocol ==
7205 else if (lkups[i].type == ICE_PPPOE &&
7206 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
7207 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
7208 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
7211 else if (lkups[i].type == ICE_ETYPE_OL &&
7212 lkups[i].h_u.ethertype.ethtype_id ==
7213 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
7214 lkups[i].m_u.ethertype.ethtype_id ==
7217 else if (lkups[i].type == ICE_IPV4_IL &&
7218 lkups[i].h_u.ipv4_hdr.protocol ==
7220 lkups[i].m_u.ipv4_hdr.protocol ==
/* QinQ (double VLAN) non-tunneled packets */
7225 if ((tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7226 tun_type == ICE_NON_TUN_QINQ) && ipv6) {
7227 *pkt = dummy_qinq_ipv6_pkt;
7228 *pkt_len = sizeof(dummy_qinq_ipv6_pkt);
7229 *offsets = dummy_qinq_ipv6_packet_offsets;
7231 } else if (tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7232 tun_type == ICE_NON_TUN_QINQ) {
7233 *pkt = dummy_qinq_ipv4_pkt;
7234 *pkt_len = sizeof(dummy_qinq_ipv4_pkt);
7235 *offsets = dummy_qinq_ipv4_packet_offsets;
/* QinQ + PPPoE combinations */
7239 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_QINQ) {
7240 *pkt = dummy_qinq_pppoe_ipv6_packet;
7241 *pkt_len = sizeof(dummy_qinq_pppoe_ipv6_packet);
7242 *offsets = dummy_qinq_pppoe_packet_ipv6_offsets;
7244 } else if (tun_type == ICE_SW_TUN_PPPOE_IPV4_QINQ) {
7245 *pkt = dummy_qinq_pppoe_ipv4_pkt;
7246 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7247 *offsets = dummy_qinq_pppoe_ipv4_packet_offsets;
7249 } else if (tun_type == ICE_SW_TUN_PPPOE_QINQ ||
7250 tun_type == ICE_SW_TUN_PPPOE_PAY_QINQ) {
7251 *pkt = dummy_qinq_pppoe_ipv4_pkt;
7252 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7253 *offsets = dummy_qinq_pppoe_packet_offsets;
/* GTP-U tunnels: NO_PAY variants reuse the payload template but pair
 * it with the no-payload offset table
 */
7257 if (tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY) {
7258 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7259 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7260 *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
7262 } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
7263 *pkt = dummy_ipv6_gtpu_ipv6_packet;
7264 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
7265 *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
7267 } else if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4) {
7268 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7269 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7270 *offsets = dummy_ipv4_gtpu_ipv4_packet_offsets;
7272 } else if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6) {
7273 *pkt = dummy_ipv4_gtpu_ipv6_packet;
7274 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_packet);
7275 *offsets = dummy_ipv4_gtpu_ipv6_packet_offsets;
7277 } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4) {
7278 *pkt = dummy_ipv6_gtpu_ipv4_packet;
7279 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_packet);
7280 *offsets = dummy_ipv6_gtpu_ipv4_packet_offsets;
7282 } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6) {
7283 *pkt = dummy_ipv6_gtpu_ipv6_packet;
7284 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
7285 *offsets = dummy_ipv6_gtpu_ipv6_packet_offsets;
/* IPsec / NAT-T / L2TPv3 security-protocol templates */
7289 if (tun_type == ICE_SW_TUN_IPV4_ESP) {
7290 *pkt = dummy_ipv4_esp_pkt;
7291 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
7292 *offsets = dummy_ipv4_esp_packet_offsets;
7296 if (tun_type == ICE_SW_TUN_IPV6_ESP) {
7297 *pkt = dummy_ipv6_esp_pkt;
7298 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
7299 *offsets = dummy_ipv6_esp_packet_offsets;
7303 if (tun_type == ICE_SW_TUN_IPV4_AH) {
7304 *pkt = dummy_ipv4_ah_pkt;
7305 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
7306 *offsets = dummy_ipv4_ah_packet_offsets;
7310 if (tun_type == ICE_SW_TUN_IPV6_AH) {
7311 *pkt = dummy_ipv6_ah_pkt;
7312 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
7313 *offsets = dummy_ipv6_ah_packet_offsets;
7317 if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
7318 *pkt = dummy_ipv4_nat_pkt;
7319 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
7320 *offsets = dummy_ipv4_nat_packet_offsets;
7324 if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
7325 *pkt = dummy_ipv6_nat_pkt;
7326 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
7327 *offsets = dummy_ipv6_nat_packet_offsets;
7331 if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
7332 *pkt = dummy_ipv4_l2tpv3_pkt;
7333 *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
7334 *offsets = dummy_ipv4_l2tpv3_packet_offsets;
7338 if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
7339 *pkt = dummy_ipv6_l2tpv3_pkt;
7340 *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
7341 *offsets = dummy_ipv6_l2tpv3_packet_offsets;
7345 if (tun_type == ICE_SW_TUN_GTP) {
7346 *pkt = dummy_udp_gtp_packet;
7347 *pkt_len = sizeof(dummy_udp_gtp_packet);
7348 *offsets = dummy_udp_gtp_packet_offsets;
/* PPPoE: ipv6 flag (set above from PPP proto/ethertype) picks v6 body */
7352 if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
7353 *pkt = dummy_pppoe_ipv6_packet;
7354 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
7355 *offsets = dummy_pppoe_packet_offsets;
7357 } else if (tun_type == ICE_SW_TUN_PPPOE ||
7358 tun_type == ICE_SW_TUN_PPPOE_PAY) {
7359 *pkt = dummy_pppoe_ipv4_packet;
7360 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
7361 *offsets = dummy_pppoe_packet_offsets;
7365 if (tun_type == ICE_SW_TUN_PPPOE_IPV4) {
7366 *pkt = dummy_pppoe_ipv4_packet;
7367 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
7368 *offsets = dummy_pppoe_packet_ipv4_offsets;
7372 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_TCP) {
7373 *pkt = dummy_pppoe_ipv4_tcp_packet;
7374 *pkt_len = sizeof(dummy_pppoe_ipv4_tcp_packet);
7375 *offsets = dummy_pppoe_ipv4_tcp_packet_offsets;
7379 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_UDP) {
7380 *pkt = dummy_pppoe_ipv4_udp_packet;
7381 *pkt_len = sizeof(dummy_pppoe_ipv4_udp_packet);
7382 *offsets = dummy_pppoe_ipv4_udp_packet_offsets;
7386 if (tun_type == ICE_SW_TUN_PPPOE_IPV6) {
7387 *pkt = dummy_pppoe_ipv6_packet;
7388 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
7389 *offsets = dummy_pppoe_packet_ipv6_offsets;
7393 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_TCP) {
7394 *pkt = dummy_pppoe_ipv6_tcp_packet;
7395 *pkt_len = sizeof(dummy_pppoe_ipv6_tcp_packet);
7396 *offsets = dummy_pppoe_packet_ipv6_tcp_offsets;
7400 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_UDP) {
7401 *pkt = dummy_pppoe_ipv6_udp_packet;
7402 *pkt_len = sizeof(dummy_pppoe_ipv6_udp_packet);
7403 *offsets = dummy_pppoe_packet_ipv6_udp_offsets;
/* Plain (non-tunneled) L4-over-IP templates */
7407 if (tun_type == ICE_SW_IPV4_TCP) {
7408 *pkt = dummy_tcp_packet;
7409 *pkt_len = sizeof(dummy_tcp_packet);
7410 *offsets = dummy_tcp_packet_offsets;
7414 if (tun_type == ICE_SW_IPV4_UDP) {
7415 *pkt = dummy_udp_packet;
7416 *pkt_len = sizeof(dummy_udp_packet);
7417 *offsets = dummy_udp_packet_offsets;
7421 if (tun_type == ICE_SW_IPV6_TCP) {
7422 *pkt = dummy_tcp_ipv6_packet;
7423 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
7424 *offsets = dummy_tcp_ipv6_packet_offsets;
7428 if (tun_type == ICE_SW_IPV6_UDP) {
7429 *pkt = dummy_udp_ipv6_packet;
7430 *pkt_len = sizeof(dummy_udp_ipv6_packet);
7431 *offsets = dummy_udp_ipv6_packet_offsets;
7435 if (tun_type == ICE_ALL_TUNNELS) {
7436 *pkt = dummy_gre_udp_packet;
7437 *pkt_len = sizeof(dummy_gre_udp_packet);
7438 *offsets = dummy_gre_udp_packet_offsets;
/* NVGRE (or GRE detected from the IPv4 protocol field above):
 * inner TCP vs. inner UDP template
 */
7442 if (tun_type == ICE_SW_TUN_NVGRE || gre) {
7444 *pkt = dummy_gre_tcp_packet;
7445 *pkt_len = sizeof(dummy_gre_tcp_packet);
7446 *offsets = dummy_gre_tcp_packet_offsets;
7450 *pkt = dummy_gre_udp_packet;
7451 *pkt_len = sizeof(dummy_gre_udp_packet);
7452 *offsets = dummy_gre_udp_packet_offsets;
/* UDP-based tunnels (VXLAN/GENEVE/GPE): inner TCP vs. inner UDP */
7456 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
7457 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
7458 tun_type == ICE_SW_TUN_GENEVE_VLAN ||
7459 tun_type == ICE_SW_TUN_VXLAN_VLAN) {
7461 *pkt = dummy_udp_tun_tcp_packet;
7462 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
7463 *offsets = dummy_udp_tun_tcp_packet_offsets;
7467 *pkt = dummy_udp_tun_udp_packet;
7468 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
7469 *offsets = dummy_udp_tun_udp_packet_offsets;
/* Fallback: choose by udp/tcp, ipv4/ipv6 and vlan flags */
7475 *pkt = dummy_vlan_udp_packet;
7476 *pkt_len = sizeof(dummy_vlan_udp_packet);
7477 *offsets = dummy_vlan_udp_packet_offsets;
7480 *pkt = dummy_udp_packet;
7481 *pkt_len = sizeof(dummy_udp_packet);
7482 *offsets = dummy_udp_packet_offsets;
7484 } else if (udp && ipv6) {
7486 *pkt = dummy_vlan_udp_ipv6_packet;
7487 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
7488 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
7491 *pkt = dummy_udp_ipv6_packet;
7492 *pkt_len = sizeof(dummy_udp_ipv6_packet);
7493 *offsets = dummy_udp_ipv6_packet_offsets;
/* NOTE(review): "(tcp && ipv6) || ipv6" is logically just "ipv6" —
 * candidate simplification, behavior unchanged
 */
7495 } else if ((tcp && ipv6) || ipv6) {
7497 *pkt = dummy_vlan_tcp_ipv6_packet;
7498 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
7499 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
7502 *pkt = dummy_tcp_ipv6_packet;
7503 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
7504 *offsets = dummy_tcp_ipv6_packet_offsets;
7509 *pkt = dummy_vlan_tcp_packet;
7510 *pkt_len = sizeof(dummy_vlan_tcp_packet);
7511 *offsets = dummy_vlan_tcp_packet_offsets;
7513 *pkt = dummy_tcp_packet;
7514 *pkt_len = sizeof(dummy_tcp_packet);
7515 *offsets = dummy_tcp_packet_offsets;
7520 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
7522 * @lkups: lookup elements or match criteria for the advanced recipe, one
7523 * structure per protocol header
7524 * @lkups_cnt: number of protocols
7525 * @s_rule: stores rule information from the match criteria
7526 * @dummy_pkt: dummy packet to fill according to filter match criteria
7527 * @pkt_len: packet length of dummy packet
7528 * @offsets: offset info for the dummy packet
 *
 * Copies the template into the rule buffer, then overlays each lookup's
 * header values (masked) at the offset of its protocol layer.
 * Returns ICE_ERR_PARAM if a lookup's layer is not in @offsets or its
 * header length is not word-aligned.
7530 static enum ice_status
7531 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7532 struct ice_aqc_sw_rules_elem *s_rule,
7533 const u8 *dummy_pkt, u16 pkt_len,
7534 const struct ice_dummy_pkt_offsets *offsets)
7539 /* Start with a packet with a pre-defined/dummy content. Then, fill
7540 * in the header values to be looked up or matched.
7542 pkt = s_rule->pdata.lkup_tx_rx.hdr;
7544 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
7546 for (i = 0; i < lkups_cnt; i++) {
7547 enum ice_protocol_type type;
7548 u16 offset = 0, len = 0, j;
7551 /* find the start of this layer; it should be found since this
7552 * was already checked when search for the dummy packet
7554 type = lkups[i].type;
7555 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
7556 if (type == offsets[j].type) {
7557 offset = offsets[j].offset;
7562 /* this should never happen in a correct calling sequence */
7564 return ICE_ERR_PARAM;
/* Map protocol type to the number of header bytes to overlay */
7566 switch (lkups[i].type) {
7569 len = sizeof(struct ice_ether_hdr);
7572 len = sizeof(struct ice_ethtype_hdr);
7576 len = sizeof(struct ice_vlan_hdr);
7580 len = sizeof(struct ice_ipv4_hdr);
7584 len = sizeof(struct ice_ipv6_hdr);
7589 len = sizeof(struct ice_l4_hdr);
7592 len = sizeof(struct ice_sctp_hdr);
7595 len = sizeof(struct ice_nvgre);
7600 len = sizeof(struct ice_udp_tnl_hdr);
7604 case ICE_GTP_NO_PAY:
7605 len = sizeof(struct ice_udp_gtp_hdr);
7608 len = sizeof(struct ice_pppoe_hdr);
7611 len = sizeof(struct ice_esp_hdr);
7614 len = sizeof(struct ice_nat_t_hdr);
7617 len = sizeof(struct ice_ah_hdr);
7620 len = sizeof(struct ice_l2tpv3_sess_hdr);
7623 return ICE_ERR_PARAM;
7626 /* the length should be a word multiple */
7627 if (len % ICE_BYTES_PER_WORD)
7630 /* We have the offset to the header start, the length, the
7631 * caller's header values and mask. Use this information to
7632 * copy the data into the dummy packet appropriately based on
7633 * the mask. Note that we need to only write the bits as
7634 * indicated by the mask to make sure we don't improperly write
7635 * over any significant packet data.
7637 for (j = 0; j < len / sizeof(u16); j++)
7638 if (((u16 *)&lkups[i].m_u)[j])
7639 ((u16 *)(pkt + offset))[j] =
7640 (((u16 *)(pkt + offset))[j] &
7641 ~((u16 *)&lkups[i].m_u)[j]) |
7642 (((u16 *)&lkups[i].h_u)[j] &
7643 ((u16 *)&lkups[i].m_u)[j]);
/* Record the final header length in the switch rule element */
7646 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
7652 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
7653 * @hw: pointer to the hardware structure
7654 * @tun_type: tunnel type
7655 * @pkt: dummy packet to fill in
7656 * @offsets: offset info for the dummy packet
 *
 * For VXLAN/GENEVE-class tunnels, looks up the currently open tunnel UDP
 * port and writes it as the destination port of the outer UDP header.
7658 static enum ice_status
7659 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
7660 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
7665 case ICE_SW_TUN_AND_NON_TUN:
7666 case ICE_SW_TUN_VXLAN_GPE:
7667 case ICE_SW_TUN_VXLAN:
7668 case ICE_SW_TUN_VXLAN_VLAN:
7669 case ICE_SW_TUN_UDP:
/* open_port receives the VXLAN tunnel's configured UDP port */
7670 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
7674 case ICE_SW_TUN_GENEVE:
7675 case ICE_SW_TUN_GENEVE_VLAN:
7676 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
7681 /* Nothing needs to be done for this tunnel type */
7685 /* Find the outer UDP protocol header and insert the port number */
7686 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
7687 if (offsets[i].type == ICE_UDP_OF) {
7688 struct ice_l4_hdr *hdr;
7691 offset = offsets[i].offset;
/* Overlay the L4 header view at the UDP offset and patch dst port */
7692 hdr = (struct ice_l4_hdr *)&pkt[offset];
7693 hdr->dst_port = CPU_TO_BE16(open_port);
7703 * ice_find_adv_rule_entry - Search a rule entry
7704 * @hw: pointer to the hardware structure
7705 * @lkups: lookup elements or match criteria for the advanced recipe, one
7706 * structure per protocol header
7707 * @lkups_cnt: number of protocols
7708 * @recp_id: recipe ID for which we are finding the rule
7709 * @rinfo: other information regarding the rule e.g. priority and action info
7711 * Helper function to search for a given advance rule entry
7712 * Returns pointer to entry storing the rule if found
7714 static struct ice_adv_fltr_mgmt_list_entry *
7715 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7716 u16 lkups_cnt, u16 recp_id,
7717 struct ice_adv_rule_info *rinfo)
7719 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7720 struct ice_switch_info *sw = hw->switch_info;
/* Walk only the filter-rule list of the requested recipe */
7723 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
7724 ice_adv_fltr_mgmt_list_entry, list_entry) {
7725 bool lkups_matched = true;
/* Lookup counts must match before the per-element memcmp below */
7727 if (lkups_cnt != list_itr->lkups_cnt)
7729 for (i = 0; i < list_itr->lkups_cnt; i++)
7730 if (memcmp(&list_itr->lkups[i], &lkups[i],
7732 lkups_matched = false;
/* A hit additionally requires matching action flag and tunnel type */
7735 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
7736 rinfo->tun_type == list_itr->rule_info.tun_type &&
7744 * ice_adv_add_update_vsi_list
7745 * @hw: pointer to the hardware structure
7746 * @m_entry: pointer to current adv filter management list entry
7747 * @cur_fltr: filter information from the book keeping entry
7748 * @new_fltr: filter information with the new VSI to be added
7750 * Call AQ command to add or update previously created VSI list with new VSI.
7752 * Helper function to do book keeping associated with adding filter information
7753 * The algorithm to do the booking keeping is described below :
7754 * When a VSI needs to subscribe to a given advanced filter
7755 * if only one VSI has been added till now
7756 * Allocate a new VSI list and add two VSIs
7757 * to this list using switch rule command
7758 * Update the previously created switch rule with the
7759 * newly created VSI list ID
7760 * if a VSI list was previously created
7761 * Add the new VSI to the previously created VSI list set
7762 * using the update switch rule command
7764 static enum ice_status
7765 ice_adv_add_update_vsi_list(struct ice_hw *hw,
7766 struct ice_adv_fltr_mgmt_list_entry *m_entry,
7767 struct ice_adv_rule_info *cur_fltr,
7768 struct ice_adv_rule_info *new_fltr)
7770 enum ice_status status;
7771 u16 vsi_list_id = 0;
/* VSI lists only make sense for VSI-forwarding actions; queue/drop
 * actions cannot be merged this way
 */
7773 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
7774 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
7775 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
7776 return ICE_ERR_NOT_IMPL;
7778 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
7779 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
7780 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
7781 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
7782 return ICE_ERR_NOT_IMPL;
7784 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
7785 /* Only one entry existed in the mapping and it was not already
7786 * a part of a VSI list. So, create a VSI list with the old and
7789 struct ice_fltr_info tmp_fltr;
7790 u16 vsi_handle_arr[2];
7792 /* A rule already exists with the new VSI being added */
7793 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
7794 new_fltr->sw_act.fwd_id.hw_vsi_id)
7795 return ICE_ERR_ALREADY_EXISTS;
7797 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
7798 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
7799 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
7805 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
7806 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
7807 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
7808 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
7809 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
7810 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
7812 /* Update the previous switch rule of "forward to VSI" to
7815 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Switch the bookkeeping entry over to the new VSI-list action */
7819 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
7820 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
7821 m_entry->vsi_list_info =
7822 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
7825 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
7827 if (!m_entry->vsi_list_info)
7830 /* A rule already exists with the new VSI being added */
7831 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
7834 /* Update the previously created VSI list set with
7835 * the new VSI ID passed in
7837 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
7839 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
7841 ice_aqc_opc_update_sw_rules,
7843 /* update VSI list mapping info with new VSI ID */
7845 ice_set_bit(vsi_handle,
7846 m_entry->vsi_list_info->vsi_map);
/* Track how many VSIs now subscribe to this filter */
7849 m_entry->vsi_count++;
7854 * ice_add_adv_rule - helper function to create an advanced switch rule
7855 * @hw: pointer to the hardware structure
7856 * @lkups: information on the words that needs to be looked up. All words
7857 * together makes one recipe
7858 * @lkups_cnt: num of entries in the lkups array
7859 * @rinfo: other information related to the rule that needs to be programmed
7860 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
7861 * ignored is case of error.
7863 * This function can program only 1 rule at a time. The lkups is used to
7864 * describe the all the words that forms the "lookup" portion of the recipe.
7865 * These words can span multiple protocols. Callers to this function need to
7866 * pass in a list of protocol headers with lookup information along and mask
7867 * that determines which words are valid from the given protocol header.
7868 * rinfo describes other information related to this rule such as forwarding
7869 * IDs, priority of this rule, etc.
7872 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7873 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
7874 struct ice_rule_query_data *added_entry)
7876 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
7877 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
7878 const struct ice_dummy_pkt_offsets *pkt_offsets;
7879 struct ice_aqc_sw_rules_elem *s_rule = NULL;
7880 struct LIST_HEAD_TYPE *rule_head;
7881 struct ice_switch_info *sw;
7882 enum ice_status status;
7883 const u8 *pkt = NULL;
7889 /* Initialize profile to result index bitmap */
7890 if (!hw->switch_info->prof_res_bm_init) {
7891 hw->switch_info->prof_res_bm_init = 1;
7892 ice_init_prof_result_bm(hw);
7895 prof_rule = ice_is_prof_rule(rinfo->tun_type);
7896 if (!prof_rule && !lkups_cnt)
7897 return ICE_ERR_PARAM;
7899 /* get # of words we need to match */
7901 for (i = 0; i < lkups_cnt; i++) {
7904 ptr = (u16 *)&lkups[i].m_u;
7905 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
7911 if (word_cnt > ICE_MAX_CHAIN_WORDS)
7912 return ICE_ERR_PARAM;
7914 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
7915 return ICE_ERR_PARAM;
7918 /* make sure that we can locate a dummy packet */
7919 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
7922 status = ICE_ERR_PARAM;
7923 goto err_ice_add_adv_rule;
7926 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
7927 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
7928 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
7929 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
7932 vsi_handle = rinfo->sw_act.vsi_handle;
7933 if (!ice_is_vsi_valid(hw, vsi_handle))
7934 return ICE_ERR_PARAM;
7936 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
7937 rinfo->sw_act.fwd_id.hw_vsi_id =
7938 ice_get_hw_vsi_num(hw, vsi_handle);
7939 if (rinfo->sw_act.flag & ICE_FLTR_TX)
7940 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
7942 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
7945 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
7947 /* we have to add VSI to VSI_LIST and increment vsi_count.
7948 * Also Update VSI list so that we can change forwarding rule
7949 * if the rule already exists, we will check if it exists with
7950 * same vsi_id, if not then add it to the VSI list if it already
7951 * exists if not then create a VSI list and add the existing VSI
7952 * ID and the new VSI ID to the list
7953 * We will add that VSI to the list
7955 status = ice_adv_add_update_vsi_list(hw, m_entry,
7956 &m_entry->rule_info,
7959 added_entry->rid = rid;
7960 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
7961 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
7965 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
7966 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
7968 return ICE_ERR_NO_MEMORY;
7969 act |= ICE_SINGLE_ACT_LAN_ENABLE;
7970 switch (rinfo->sw_act.fltr_act) {
7971 case ICE_FWD_TO_VSI:
7972 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
7973 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
7974 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
7977 act |= ICE_SINGLE_ACT_TO_Q;
7978 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
7979 ICE_SINGLE_ACT_Q_INDEX_M;
7981 case ICE_FWD_TO_QGRP:
7982 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
7983 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
7984 act |= ICE_SINGLE_ACT_TO_Q;
7985 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
7986 ICE_SINGLE_ACT_Q_INDEX_M;
7987 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
7988 ICE_SINGLE_ACT_Q_REGION_M;
7990 case ICE_DROP_PACKET:
7991 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
7992 ICE_SINGLE_ACT_VALID_BIT;
7995 status = ICE_ERR_CFG;
7996 goto err_ice_add_adv_rule;
7999 /* set the rule LOOKUP type based on caller specified 'RX'
8000 * instead of hardcoding it to be either LOOKUP_TX/RX
8002 * for 'RX' set the source to be the port number
8003 * for 'TX' set the source to be the source HW VSI number (determined
8007 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
8008 s_rule->pdata.lkup_tx_rx.src =
8009 CPU_TO_LE16(hw->port_info->lport);
8011 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
8012 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
8015 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
8016 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
8018 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
8019 pkt_len, pkt_offsets);
8021 goto err_ice_add_adv_rule;
8023 if (rinfo->tun_type != ICE_NON_TUN &&
8024 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
8025 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
8026 s_rule->pdata.lkup_tx_rx.hdr,
8029 goto err_ice_add_adv_rule;
8032 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
8033 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
8036 goto err_ice_add_adv_rule;
8037 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
8038 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
8040 status = ICE_ERR_NO_MEMORY;
8041 goto err_ice_add_adv_rule;
8044 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
8045 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
8046 ICE_NONDMA_TO_NONDMA);
8047 if (!adv_fltr->lkups && !prof_rule) {
8048 status = ICE_ERR_NO_MEMORY;
8049 goto err_ice_add_adv_rule;
8052 adv_fltr->lkups_cnt = lkups_cnt;
8053 adv_fltr->rule_info = *rinfo;
8054 adv_fltr->rule_info.fltr_rule_id =
8055 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
8056 sw = hw->switch_info;
8057 sw->recp_list[rid].adv_rule = true;
8058 rule_head = &sw->recp_list[rid].filt_rules;
8060 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
8061 adv_fltr->vsi_count = 1;
8063 /* Add rule entry to book keeping list */
8064 LIST_ADD(&adv_fltr->list_entry, rule_head);
8066 added_entry->rid = rid;
8067 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
8068 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
8070 err_ice_add_adv_rule:
8071 if (status && adv_fltr) {
8072 ice_free(hw, adv_fltr->lkups);
8073 ice_free(hw, adv_fltr);
8076 ice_free(hw, s_rule);
8082 * ice_adv_rem_update_vsi_list
8083 * @hw: pointer to the hardware structure
8084 * @vsi_handle: VSI handle of the VSI to remove
8085 * @fm_list: filter management entry for which the VSI list management needs to
/* Remove @vsi_handle from the VSI list referenced by an advanced rule whose
 * action is ICE_FWD_TO_VSI_LIST (@fm_list).  When only one member VSI is
 * left after the removal, the rule is rewritten as a direct ICE_FWD_TO_VSI
 * rule targeting the remaining VSI and the now-unused VSI list is torn down.
 *
 * NOTE(review): this extract is missing physical lines (e.g. the
 * declarations of vsi_list_id/rem_vsi_handle and several braces/status
 * checks) — verify against the complete source before acting on it.
 */
8088 static enum ice_status
8089 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
8090 struct ice_adv_fltr_mgmt_list_entry *fm_list)
8092 struct ice_vsi_list_map_info *vsi_list_info;
8093 enum ice_sw_lkup_type lkup_type;
8094 enum ice_status status;
/* Only rules that forward to a non-empty VSI list can have a VSI removed */
8097 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
8098 fm_list->vsi_count == 0)
8099 return ICE_ERR_PARAM;
8101 /* A rule with the VSI being removed does not exist */
8102 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
8103 return ICE_ERR_DOES_NOT_EXIST;
8105 lkup_type = ICE_SW_LKUP_LAST;
8106 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
/* Ask FW to drop this VSI from the list rule, then mirror the change in the
 * driver's bookkeeping (vsi_count and vsi_map below).
 */
8107 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
8108 ice_aqc_opc_update_sw_rules,
8113 fm_list->vsi_count--;
8114 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
8115 vsi_list_info = fm_list->vsi_list_info;
/* Exactly one member left: collapse the VSI-list rule into a plain
 * FWD_TO_VSI rule for the remaining VSI.
 */
8116 if (fm_list->vsi_count == 1) {
8117 struct ice_fltr_info tmp_fltr;
8120 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
8122 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
8123 return ICE_ERR_OUT_OF_RANGE;
8125 /* Make sure VSI list is empty before removing it below */
8126 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
8128 ice_aqc_opc_update_sw_rules,
8133 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
8134 tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
8135 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
8136 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
8137 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
8138 tmp_fltr.fwd_id.hw_vsi_id =
8139 ice_get_hw_vsi_num(hw, rem_vsi_handle);
8140 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
8141 ice_get_hw_vsi_num(hw, rem_vsi_handle);
8142 fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
8144 /* Rewrite the existing "fwd to VSI list" switch rule in HW as a
8145 * direct "fwd to VSI" rule for the remaining VSI
8147 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
8149 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
8150 tmp_fltr.fwd_id.hw_vsi_id, status);
8153 fm_list->vsi_list_info->ref_cnt--;
8155 /* Remove the VSI list since it is no longer used */
8156 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
8158 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
8159 vsi_list_id, status);
/* Drop the list's bookkeeping entry; fm_list no longer references a list */
8163 LIST_DEL(&vsi_list_info->list_entry);
8164 ice_free(hw, vsi_list_info);
8165 fm_list->vsi_list_info = NULL;
8172 * ice_rem_adv_rule - removes existing advanced switch rule
8173 * @hw: pointer to the hardware structure
8174 * @lkups: information on the words that needs to be looked up. All words
8175 * together makes one recipe
8176 * @lkups_cnt: num of entries in the lkups array
8177 * @rinfo: Pointer to the rule information for the rule
8179 * This function can be used to remove 1 rule at a time. The lkups is
8180 * used to describe all the words that forms the "lookup" portion of the
8181 * rule. These words can span multiple protocols. Callers to this function
8182 * need to pass in a list of protocol headers with lookup information along
8183 * with a mask that determines which words are valid from the given protocol
8184 * header. rinfo describes other information related to this rule such as
8185 * forwarding IDs, priority of this rule, etc.
/* NOTE(review): extract is missing physical lines (return-type line of the
 * signature, several braces, 'continue'/'goto' statements, and the
 * declarations of count/rule_buf_sz) — verify against the complete source.
 */
8188 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8189 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
8191 struct ice_adv_fltr_mgmt_list_entry *list_elem;
8192 struct ice_prot_lkup_ext lkup_exts;
8193 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
8194 enum ice_status status = ICE_SUCCESS;
8195 bool remove_rule = false;
8196 u16 i, rid, vsi_handle;
/* Rebuild the protocol/offset word list from the caller's lookups so the
 * recipe the rule was installed under can be located again.
 */
8198 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
8199 for (i = 0; i < lkups_cnt; i++) {
8202 if (lkups[i].type >= ICE_PROTOCOL_LAST)
8205 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
8210 /* Create any special protocol/offset pairs, such as looking at tunnel
8211 * bits by extracting metadata
8213 status = ice_add_special_words(rinfo, &lkup_exts);
8217 rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
8218 /* If did not find a recipe that match the existing criteria */
8219 if (rid == ICE_MAX_NUM_RECIPES)
8220 return ICE_ERR_PARAM;
8222 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
8223 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
8224 /* the rule is already removed */
/* Decide under the rule lock whether the whole rule goes away or only a
 * VSI is removed from its VSI list (FWD_TO_VSI_LIST with >1 member).
 */
8227 ice_acquire_lock(rule_lock);
8228 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
8230 } else if (list_elem->vsi_count > 1) {
8231 remove_rule = false;
8232 vsi_handle = rinfo->sw_act.vsi_handle;
8233 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
8235 vsi_handle = rinfo->sw_act.vsi_handle;
8236 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
8238 ice_release_lock(rule_lock);
8241 if (list_elem->vsi_count == 0)
8244 ice_release_lock(rule_lock);
8246 struct ice_aqc_sw_rules_elem *s_rule;
/* Build a minimal (header-less) rule element that identifies the rule by
 * index and send the remove opcode to FW.
 */
8249 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
8250 s_rule = (struct ice_aqc_sw_rules_elem *)
8251 ice_malloc(hw, rule_buf_sz);
8253 return ICE_ERR_NO_MEMORY;
8254 s_rule->pdata.lkup_tx_rx.act = 0;
8255 s_rule->pdata.lkup_tx_rx.index =
8256 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
8257 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
8258 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
8260 ice_aqc_opc_remove_sw_rules, NULL);
/* DOES_NOT_EXIST is treated as success: the rule is gone either way, so
 * the bookkeeping entry must still be freed.
 */
8261 if (status == ICE_SUCCESS || status == ICE_ERR_DOES_NOT_EXIST) {
8262 struct ice_switch_info *sw = hw->switch_info;
8264 ice_acquire_lock(rule_lock);
8265 LIST_DEL(&list_elem->list_entry);
8266 ice_free(hw, list_elem->lkups);
8267 ice_free(hw, list_elem);
8268 ice_release_lock(rule_lock);
8269 if (LIST_EMPTY(&sw->recp_list[rid].filt_rules))
8270 sw->recp_list[rid].adv_rule = false;
8272 ice_free(hw, s_rule);
8278 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
8279 * @hw: pointer to the hardware structure
8280 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
8282 * This function is used to remove 1 rule at a time. The removal is based on
8283 * the remove_entry parameter. This function will remove rule for a given
8284 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
/* Look up the advanced rule with remove_entry->rule_id on the filter list of
 * recipe remove_entry->rid and remove it via ice_rem_adv_rule(), using the
 * caller-supplied VSI handle.  Returns ICE_ERR_DOES_NOT_EXIST if no rule on
 * the list carries that ID.
 * NOTE(review): extract is missing lines (return-type line, braces) —
 * verify against the complete source.
 */
8287 ice_rem_adv_rule_by_id(struct ice_hw *hw,
8288 struct ice_rule_query_data *remove_entry)
8290 struct ice_adv_fltr_mgmt_list_entry *list_itr;
8291 struct LIST_HEAD_TYPE *list_head;
8292 struct ice_adv_rule_info rinfo;
8293 struct ice_switch_info *sw;
8295 sw = hw->switch_info;
/* The recipe must have been created before any of its rules can exist */
8296 if (!sw->recp_list[remove_entry->rid].recp_created)
8297 return ICE_ERR_PARAM;
8298 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
8299 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
8301 if (list_itr->rule_info.fltr_rule_id ==
8302 remove_entry->rule_id) {
/* Copy the stored rule info but remove on behalf of the caller's VSI */
8303 rinfo = list_itr->rule_info;
8304 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
8305 return ice_rem_adv_rule(hw, list_itr->lkups,
8306 list_itr->lkups_cnt, &rinfo);
8309 /* either list is empty or unable to find rule */
8310 return ICE_ERR_DOES_NOT_EXIST;
8314 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
8316 * @hw: pointer to the hardware structure
8317 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
8319 * This function is used to remove all the rules for a given VSI and as soon
8320 * as removing a rule fails, it will return immediately with the error code,
8321 * else it will return ICE_SUCCESS
/* Walk every created recipe that holds advanced rules and remove each rule
 * that applies to @vsi_handle (directly, or via membership in the rule's VSI
 * list).  Per the kernel-doc above, returns on the first removal failure.
 * NOTE(review): extract is missing lines ('continue' statements, the rid/
 * status declarations' surroundings, braces) — verify against full source.
 */
8323 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
8325 struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
8326 struct ice_vsi_list_map_info *map_info;
8327 struct LIST_HEAD_TYPE *list_head;
8328 struct ice_adv_rule_info rinfo;
8329 struct ice_switch_info *sw;
8330 enum ice_status status;
8333 sw = hw->switch_info;
8334 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
/* Skip recipes never created and recipes with no advanced rules */
8335 if (!sw->recp_list[rid].recp_created)
8337 if (!sw->recp_list[rid].adv_rule)
8340 list_head = &sw->recp_list[rid].filt_rules;
/* _SAFE iteration: ice_rem_adv_rule() deletes entries from this list */
8341 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp_entry, list_head,
8342 ice_adv_fltr_mgmt_list_entry,
8344 rinfo = list_itr->rule_info;
/* For VSI-list rules, match via the list's vsi_map bitmap; for direct
 * rules, match the stored VSI handle.
 */
8346 if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
8347 map_info = list_itr->vsi_list_info;
8351 if (!ice_is_bit_set(map_info->vsi_map,
8354 } else if (rinfo.sw_act.vsi_handle != vsi_handle) {
8358 rinfo.sw_act.vsi_handle = vsi_handle;
8359 status = ice_rem_adv_rule(hw, list_itr->lkups,
8360 list_itr->lkups_cnt, &rinfo);
8370 * ice_replay_fltr - Replay all the filters stored by a specific list head
8371 * @hw: pointer to the hardware structure
8372 * @list_head: list for which filters needs to be replayed
8373 * @recp_id: Recipe ID for which rules need to be replayed
/* Replay (re-add to HW) all filters of recipe @recp_id stored on @list_head.
 * The entries are moved onto a local list first so re-adding them does not
 * collide with the bookkeeping entries being replayed; the temporary list is
 * freed at the end via ice_rem_sw_rule_info().
 * NOTE(review): extract is missing lines (the vsi_handle declaration, early
 * returns, 'goto end' paths, braces) — verify against the complete source.
 */
8375 static enum ice_status
8376 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
8378 struct ice_fltr_mgmt_list_entry *itr;
8379 enum ice_status status = ICE_SUCCESS;
8380 struct ice_sw_recipe *recp_list;
8381 u8 lport = hw->port_info->lport;
8382 struct LIST_HEAD_TYPE l_head;
8384 if (LIST_EMPTY(list_head))
8387 recp_list = &hw->switch_info->recp_list[recp_id];
8388 /* Move entries from the given list_head to a temporary l_head so that
8389 * they can be replayed. Otherwise when trying to re-add the same
8390 * filter, the function will return already exists
8392 LIST_REPLACE_INIT(list_head, &l_head);
8394 /* Mark the given list_head empty by reinitializing it so filters
8395 * could be added again by *handler
8397 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
8399 struct ice_fltr_list_entry f_entry;
8402 f_entry.fltr_info = itr->fltr_info;
/* Single-VSI, non-VLAN filters are re-added directly as stored */
8403 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
8404 status = ice_add_rule_internal(hw, recp_list, lport,
8406 if (status != ICE_SUCCESS)
8411 /* Add a filter per VSI separately */
8412 ice_for_each_set_bit(vsi_handle, itr->vsi_list_info->vsi_map,
8414 if (!ice_is_vsi_valid(hw, vsi_handle))
/* Clear the bit so the re-add path rebuilds the VSI-list membership */
8417 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
8418 f_entry.fltr_info.vsi_handle = vsi_handle;
8419 f_entry.fltr_info.fwd_id.hw_vsi_id =
8420 ice_get_hw_vsi_num(hw, vsi_handle);
8421 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
8422 if (recp_id == ICE_SW_LKUP_VLAN)
8423 status = ice_add_vlan_internal(hw, recp_list,
8426 status = ice_add_rule_internal(hw, recp_list,
8429 if (status != ICE_SUCCESS)
8434 /* Clear the filter management list */
8435 ice_rem_sw_rule_info(hw, &l_head);
8440 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
8441 * @hw: pointer to the hardware structure
8443 * NOTE: This function does not clean up partially added filters on error.
8444 * It is up to caller of the function to issue a reset or fail early.
/* Replay the filter list of every recipe; stops at the first failure (see
 * the kernel-doc NOTE above about partially added filters).
 * NOTE(review): extract is missing lines (the 'i' declaration, loop brace,
 * final return) — verify against the complete source.
 */
8446 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
8448 struct ice_switch_info *sw = hw->switch_info;
8449 enum ice_status status = ICE_SUCCESS;
8452 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8453 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
8455 status = ice_replay_fltr(hw, i, head);
8456 if (status != ICE_SUCCESS)
8463 * ice_replay_vsi_fltr - Replay filters for requested VSI
8464 * @hw: pointer to the hardware structure
8465 * @pi: pointer to port information structure
8466 * @sw: pointer to switch info struct for which function replays filters
8467 * @vsi_handle: driver VSI handle
8468 * @recp_id: Recipe ID for which rules need to be replayed
8469 * @list_head: list for which filters need to be replayed
8471 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
8472 * It is required to pass valid VSI handle.
/* Replay the filters of recipe @recp_id from @list_head, but only those that
 * apply to @vsi_handle (directly or via the entry's VSI list).  Unlike
 * ice_replay_fltr(), entries stay on @list_head while being re-added.
 * NOTE(review): extract is missing lines (the hw_vsi_id declaration, early
 * returns, 'continue' statements, braces) — verify against full source.
 */
8474 static enum ice_status
8475 ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
8476 struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
8477 struct LIST_HEAD_TYPE *list_head)
8479 struct ice_fltr_mgmt_list_entry *itr;
8480 enum ice_status status = ICE_SUCCESS;
8481 struct ice_sw_recipe *recp_list;
8484 if (LIST_EMPTY(list_head))
8486 recp_list = &sw->recp_list[recp_id];
8487 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
8489 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
8491 struct ice_fltr_list_entry f_entry;
8493 f_entry.fltr_info = itr->fltr_info;
/* Direct (single-VSI, non-VLAN) filter for this VSI: re-add as stored */
8494 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
8495 itr->fltr_info.vsi_handle == vsi_handle) {
8496 /* update the src in case it is VSI num */
8497 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
8498 f_entry.fltr_info.src = hw_vsi_id;
8499 status = ice_add_rule_internal(hw, recp_list,
8502 if (status != ICE_SUCCESS)
/* Otherwise the VSI must appear in the entry's VSI-list bitmap */
8506 if (!itr->vsi_list_info ||
8507 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
8509 /* Clearing it so that the logic can add it back */
8510 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
8511 f_entry.fltr_info.vsi_handle = vsi_handle;
8512 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
8513 /* update the src in case it is VSI num */
8514 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
8515 f_entry.fltr_info.src = hw_vsi_id;
8516 if (recp_id == ICE_SW_LKUP_VLAN)
8517 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
8519 status = ice_add_rule_internal(hw, recp_list,
8522 if (status != ICE_SUCCESS)
8530 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
8531 * @hw: pointer to the hardware structure
8532 * @vsi_handle: driver VSI handle
8533 * @list_head: list for which filters need to be replayed
8535 * Replay the advanced rule for the given VSI.
/* Re-add every advanced rule on @list_head whose stored action targets
 * @vsi_handle, via ice_add_adv_rule().
 * NOTE(review): extract is missing lines ('continue', the status check,
 * final return, braces) — verify against the complete source.
 */
8537 static enum ice_status
8538 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
8539 struct LIST_HEAD_TYPE *list_head)
8541 struct ice_rule_query_data added_entry = { 0 };
8542 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
8543 enum ice_status status = ICE_SUCCESS;
8545 if (LIST_EMPTY(list_head))
8547 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
8549 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
8550 u16 lk_cnt = adv_fltr->lkups_cnt;
/* Only replay rules bound to the requested VSI */
8552 if (vsi_handle != rinfo->sw_act.vsi_handle)
8554 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
8563 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
8564 * @hw: pointer to the hardware structure
8565 * @pi: pointer to port information structure
8566 * @vsi_handle: driver VSI handle
8568 * Replays filters for requested VSI via vsi_handle.
/* Per recipe, replay the stored replay-rules list for @vsi_handle: regular
 * recipes go through ice_replay_vsi_fltr(), recipes flagged adv_rule go
 * through ice_replay_vsi_adv_rule().  Stops at the first failure.
 * NOTE(review): extract is missing lines (return-type line, the vsi_handle
 * parameter line, the 'i' declaration, final return) — verify against the
 * complete source.
 */
8571 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
8574 struct ice_switch_info *sw = hw->switch_info;
8575 enum ice_status status;
8578 /* Update the recipes that were created */
8579 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8580 struct LIST_HEAD_TYPE *head;
8582 head = &sw->recp_list[i].filt_replay_rules;
8583 if (!sw->recp_list[i].adv_rule)
8584 status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
8587 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
8588 if (status != ICE_SUCCESS)
8596 * ice_rm_sw_replay_rule_info - helper function to delete filter replay rules
8597 * @hw: pointer to the HW struct
8598 * @sw: pointer to switch info struct for which function removes filters
8600 * Deletes the filter replay rules for given switch
/* Free the bookkeeping replay-rule lists of every recipe in @sw, picking
 * the regular or advanced cleanup helper based on the recipe's adv_rule
 * flag.  No HW interaction — only driver-side lists are freed.
 * NOTE(review): extract is missing lines (the 'i' declaration and several
 * braces) — verify against the complete source.
 */
8602 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
8609 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8610 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
8611 struct LIST_HEAD_TYPE *l_head;
8613 l_head = &sw->recp_list[i].filt_replay_rules;
8614 if (!sw->recp_list[i].adv_rule)
8615 ice_rem_sw_rule_info(hw, l_head);
8617 ice_rem_adv_rule_info(hw, l_head);
8623 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
8624 * @hw: pointer to the HW struct
8626 * Deletes the filter replay rules.
8628 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
8630 ice_rm_sw_replay_rule_info(hw, hw->switch_info);