1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020 Intel Corporation
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
/* Byte offsets into the dummy Ethernet header below, plus protocol
 * identifiers used when building switch filter rules.
 */
9 #define ICE_ETH_DA_OFFSET 0 /* destination MAC starts at byte 0 */
10 #define ICE_ETH_ETHTYPE_OFFSET 12 /* EtherType field (no VLAN tag) */
11 #define ICE_ETH_VLAN_TCI_OFFSET 14 /* VLAN TCI when an 802.1Q tag is present */
12 #define ICE_MAX_VLAN_ID 0xFFF /* VLAN ID is a 12-bit field */
13 #define ICE_IPV4_NVGRE_PROTO_ID 0x002F /* IP protocol 47 = GRE */
14 #define ICE_PPP_IPV6_PROTO_ID 0x0057 /* PPP protocol number for IPv6 */
15 #define ICE_IPV6_ETHER_ID 0x86DD /* EtherType for IPv6 */
16 #define ICE_TCP_PROTO_ID 0x06 /* IP protocol 6 = TCP */
18 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
19 * struct to configure any switch filter rules.
20 * {DA (6 bytes), SA(6 bytes),
21 * Ether type (2 bytes for header without VLAN tag) OR
22 * VLAN tag (4 bytes for header with VLAN tag) }
24 * A word on the hardcoded values:
25 * byte 0 = 0x2: to identify it as locally administered DA MAC
26 * byte 6 = 0x2: to identify it as locally administered SA MAC
27 * byte 12 = 0x81 & byte 13 = 0x00:
28 * In case of VLAN filter first two bytes defines ether type (0x8100)
29 * and remaining two bytes are placeholder for programming a given VLAN ID
30 * In case of Ether type filter it is treated as header without VLAN tag
31 * and byte 12 and 13 is used to program a given Ether type instead
33 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* Describes one header within a dummy packet: which protocol it is and the
 * byte offset at which it starts.
 */
37 struct ice_dummy_pkt_offsets {
38 enum ice_protocol_type type;
39 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
/* offset info for MAC + IPv4 + NVGRE + MAC + IPv4 + TCP dummy packet */
42 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
45 { ICE_IPV4_OFOS, 14 },
50 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv4 + NVGRE + MAC + IPv4 + TCP */
53 static const u8 dummy_gre_tcp_packet[] = {
54 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
55 0x00, 0x00, 0x00, 0x00,
56 0x00, 0x00, 0x00, 0x00,
58 0x08, 0x00, /* ICE_ETYPE_OL 12 */
60 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
61 0x00, 0x00, 0x00, 0x00,
62 0x00, 0x2F, 0x00, 0x00,
63 0x00, 0x00, 0x00, 0x00,
64 0x00, 0x00, 0x00, 0x00,
66 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
67 0x00, 0x00, 0x00, 0x00,
69 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
70 0x00, 0x00, 0x00, 0x00,
71 0x00, 0x00, 0x00, 0x00,
74 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
75 0x00, 0x00, 0x00, 0x00,
76 0x00, 0x06, 0x00, 0x00,
77 0x00, 0x00, 0x00, 0x00,
78 0x00, 0x00, 0x00, 0x00,
80 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
81 0x00, 0x00, 0x00, 0x00,
82 0x00, 0x00, 0x00, 0x00,
83 0x50, 0x02, 0x20, 0x00,
84 0x00, 0x00, 0x00, 0x00
/* offset info for MAC + IPv4 + NVGRE + MAC + IPv4 + UDP dummy packet */
87 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
90 { ICE_IPV4_OFOS, 14 },
95 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv4 + NVGRE + MAC + IPv4 + UDP */
98 static const u8 dummy_gre_udp_packet[] = {
99 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
100 0x00, 0x00, 0x00, 0x00,
101 0x00, 0x00, 0x00, 0x00,
103 0x08, 0x00, /* ICE_ETYPE_OL 12 */
105 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
106 0x00, 0x00, 0x00, 0x00,
107 0x00, 0x2F, 0x00, 0x00,
108 0x00, 0x00, 0x00, 0x00,
109 0x00, 0x00, 0x00, 0x00,
111 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
112 0x00, 0x00, 0x00, 0x00,
114 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
115 0x00, 0x00, 0x00, 0x00,
116 0x00, 0x00, 0x00, 0x00,
119 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
120 0x00, 0x00, 0x00, 0x00,
121 0x00, 0x11, 0x00, 0x00,
122 0x00, 0x00, 0x00, 0x00,
123 0x00, 0x00, 0x00, 0x00,
125 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
126 0x00, 0x08, 0x00, 0x00,
/* offset info for UDP-tunneled (VXLAN/VXLAN-GPE/GENEVE) inner TCP packet */
129 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
131 { ICE_ETYPE_OL, 12 },
132 { ICE_IPV4_OFOS, 14 },
136 { ICE_VXLAN_GPE, 42 },
140 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for UDP tunnel (outer MAC+IPv4+UDP+VXLAN, inner MAC+IPv4+TCP) */
143 static const u8 dummy_udp_tun_tcp_packet[] = {
144 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
145 0x00, 0x00, 0x00, 0x00,
146 0x00, 0x00, 0x00, 0x00,
148 0x08, 0x00, /* ICE_ETYPE_OL 12 */
150 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
151 0x00, 0x01, 0x00, 0x00,
152 0x40, 0x11, 0x00, 0x00,
153 0x00, 0x00, 0x00, 0x00,
154 0x00, 0x00, 0x00, 0x00,
156 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
157 0x00, 0x46, 0x00, 0x00,
159 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
160 0x00, 0x00, 0x00, 0x00,
162 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
163 0x00, 0x00, 0x00, 0x00,
164 0x00, 0x00, 0x00, 0x00,
167 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
168 0x00, 0x01, 0x00, 0x00,
169 0x40, 0x06, 0x00, 0x00,
170 0x00, 0x00, 0x00, 0x00,
171 0x00, 0x00, 0x00, 0x00,
173 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
174 0x00, 0x00, 0x00, 0x00,
175 0x00, 0x00, 0x00, 0x00,
176 0x50, 0x02, 0x20, 0x00,
177 0x00, 0x00, 0x00, 0x00
/* offset info for UDP-tunneled (VXLAN/VXLAN-GPE/GENEVE) inner UDP packet */
180 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
182 { ICE_ETYPE_OL, 12 },
183 { ICE_IPV4_OFOS, 14 },
187 { ICE_VXLAN_GPE, 42 },
190 { ICE_UDP_ILOS, 84 },
191 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for UDP tunnel (outer MAC+IPv4+UDP+VXLAN, inner MAC+IPv4+UDP) */
194 static const u8 dummy_udp_tun_udp_packet[] = {
195 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
196 0x00, 0x00, 0x00, 0x00,
197 0x00, 0x00, 0x00, 0x00,
199 0x08, 0x00, /* ICE_ETYPE_OL 12 */
201 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
202 0x00, 0x01, 0x00, 0x00,
203 0x00, 0x11, 0x00, 0x00,
204 0x00, 0x00, 0x00, 0x00,
205 0x00, 0x00, 0x00, 0x00,
207 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
208 0x00, 0x3a, 0x00, 0x00,
210 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
211 0x00, 0x00, 0x00, 0x00,
213 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
214 0x00, 0x00, 0x00, 0x00,
215 0x00, 0x00, 0x00, 0x00,
218 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
219 0x00, 0x01, 0x00, 0x00,
220 0x00, 0x11, 0x00, 0x00,
221 0x00, 0x00, 0x00, 0x00,
222 0x00, 0x00, 0x00, 0x00,
224 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
225 0x00, 0x08, 0x00, 0x00,
228 /* offset info for MAC + IPv4 + UDP dummy packet */
229 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
231 { ICE_ETYPE_OL, 12 },
232 { ICE_IPV4_OFOS, 14 },
233 { ICE_UDP_ILOS, 34 },
234 { ICE_PROTOCOL_LAST, 0 },
237 /* Dummy packet for MAC + IPv4 + UDP */
238 static const u8 dummy_udp_packet[] = {
239 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
240 0x00, 0x00, 0x00, 0x00,
241 0x00, 0x00, 0x00, 0x00,
243 0x08, 0x00, /* ICE_ETYPE_OL 12 */
245 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
246 0x00, 0x01, 0x00, 0x00,
247 0x00, 0x11, 0x00, 0x00,
248 0x00, 0x00, 0x00, 0x00,
249 0x00, 0x00, 0x00, 0x00,
251 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
252 0x00, 0x08, 0x00, 0x00,
254 0x00, 0x00, /* 2 bytes for 4 byte alignment */
257 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
258 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
260 { ICE_ETYPE_OL, 12 },
261 { ICE_VLAN_OFOS, 14 },
262 { ICE_IPV4_OFOS, 18 },
263 { ICE_UDP_ILOS, 38 },
264 { ICE_PROTOCOL_LAST, 0 },
267 /* C-tag (802.1Q), IPv4:UDP dummy packet */
268 static const u8 dummy_vlan_udp_packet[] = {
269 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
270 0x00, 0x00, 0x00, 0x00,
271 0x00, 0x00, 0x00, 0x00,
273 0x81, 0x00, /* ICE_ETYPE_OL 12 */
275 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
277 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
278 0x00, 0x01, 0x00, 0x00,
279 0x00, 0x11, 0x00, 0x00,
280 0x00, 0x00, 0x00, 0x00,
281 0x00, 0x00, 0x00, 0x00,
283 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
284 0x00, 0x08, 0x00, 0x00,
286 0x00, 0x00, /* 2 bytes for 4 byte alignment */
289 /* offset info for MAC + IPv4 + TCP dummy packet */
290 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
292 { ICE_ETYPE_OL, 12 },
293 { ICE_IPV4_OFOS, 14 },
295 { ICE_PROTOCOL_LAST, 0 },
298 /* Dummy packet for MAC + IPv4 + TCP */
299 static const u8 dummy_tcp_packet[] = {
300 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
301 0x00, 0x00, 0x00, 0x00,
302 0x00, 0x00, 0x00, 0x00,
304 0x08, 0x00, /* ICE_ETYPE_OL 12 */
306 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
307 0x00, 0x01, 0x00, 0x00,
308 0x00, 0x06, 0x00, 0x00,
309 0x00, 0x00, 0x00, 0x00,
310 0x00, 0x00, 0x00, 0x00,
312 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
313 0x00, 0x00, 0x00, 0x00,
314 0x00, 0x00, 0x00, 0x00,
315 0x50, 0x00, 0x00, 0x00,
316 0x00, 0x00, 0x00, 0x00,
318 0x00, 0x00, /* 2 bytes for 4 byte alignment */
321 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
322 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
324 { ICE_ETYPE_OL, 12 },
325 { ICE_VLAN_OFOS, 14 },
326 { ICE_IPV4_OFOS, 18 },
328 { ICE_PROTOCOL_LAST, 0 },
331 /* C-tag (802.1Q), IPv4:TCP dummy packet */
332 static const u8 dummy_vlan_tcp_packet[] = {
333 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
334 0x00, 0x00, 0x00, 0x00,
335 0x00, 0x00, 0x00, 0x00,
337 0x81, 0x00, /* ICE_ETYPE_OL 12 */
339 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
341 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
342 0x00, 0x01, 0x00, 0x00,
343 0x00, 0x06, 0x00, 0x00,
344 0x00, 0x00, 0x00, 0x00,
345 0x00, 0x00, 0x00, 0x00,
347 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
348 0x00, 0x00, 0x00, 0x00,
349 0x00, 0x00, 0x00, 0x00,
350 0x50, 0x00, 0x00, 0x00,
351 0x00, 0x00, 0x00, 0x00,
353 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv6 + TCP dummy packet */
356 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
358 { ICE_ETYPE_OL, 12 },
359 { ICE_IPV6_OFOS, 14 },
361 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv6 + TCP */
364 static const u8 dummy_tcp_ipv6_packet[] = {
365 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
366 0x00, 0x00, 0x00, 0x00,
367 0x00, 0x00, 0x00, 0x00,
369 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
371 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
372 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
373 0x00, 0x00, 0x00, 0x00,
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00,
379 0x00, 0x00, 0x00, 0x00,
380 0x00, 0x00, 0x00, 0x00,
382 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
383 0x00, 0x00, 0x00, 0x00,
384 0x00, 0x00, 0x00, 0x00,
385 0x50, 0x00, 0x00, 0x00,
386 0x00, 0x00, 0x00, 0x00,
388 0x00, 0x00, /* 2 bytes for 4 byte alignment */
391 /* C-tag (802.1Q): IPv6 + TCP */
392 static const struct ice_dummy_pkt_offsets
393 dummy_vlan_tcp_ipv6_packet_offsets[] = {
395 { ICE_ETYPE_OL, 12 },
396 { ICE_VLAN_OFOS, 14 },
397 { ICE_IPV6_OFOS, 18 },
399 { ICE_PROTOCOL_LAST, 0 },
402 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
403 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
404 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
405 0x00, 0x00, 0x00, 0x00,
406 0x00, 0x00, 0x00, 0x00,
408 0x81, 0x00, /* ICE_ETYPE_OL 12 */
410 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
412 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
413 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
414 0x00, 0x00, 0x00, 0x00,
415 0x00, 0x00, 0x00, 0x00,
416 0x00, 0x00, 0x00, 0x00,
417 0x00, 0x00, 0x00, 0x00,
418 0x00, 0x00, 0x00, 0x00,
419 0x00, 0x00, 0x00, 0x00,
420 0x00, 0x00, 0x00, 0x00,
421 0x00, 0x00, 0x00, 0x00,
423 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
424 0x00, 0x00, 0x00, 0x00,
425 0x00, 0x00, 0x00, 0x00,
426 0x50, 0x00, 0x00, 0x00,
427 0x00, 0x00, 0x00, 0x00,
429 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv6 + UDP dummy packet */
433 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
435 { ICE_ETYPE_OL, 12 },
436 { ICE_IPV6_OFOS, 14 },
437 { ICE_UDP_ILOS, 54 },
438 { ICE_PROTOCOL_LAST, 0 },
441 /* IPv6 + UDP dummy packet */
442 static const u8 dummy_udp_ipv6_packet[] = {
443 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
444 0x00, 0x00, 0x00, 0x00,
445 0x00, 0x00, 0x00, 0x00,
447 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
449 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
450 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
451 0x00, 0x00, 0x00, 0x00,
452 0x00, 0x00, 0x00, 0x00,
453 0x00, 0x00, 0x00, 0x00,
454 0x00, 0x00, 0x00, 0x00,
455 0x00, 0x00, 0x00, 0x00,
456 0x00, 0x00, 0x00, 0x00,
457 0x00, 0x00, 0x00, 0x00,
458 0x00, 0x00, 0x00, 0x00,
460 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
461 0x00, 0x10, 0x00, 0x00,
463 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
464 0x00, 0x00, 0x00, 0x00,
466 0x00, 0x00, /* 2 bytes for 4 byte alignment */
469 /* C-tag (802.1Q): IPv6 + UDP */
470 static const struct ice_dummy_pkt_offsets
471 dummy_vlan_udp_ipv6_packet_offsets[] = {
473 { ICE_ETYPE_OL, 12 },
474 { ICE_VLAN_OFOS, 14 },
475 { ICE_IPV6_OFOS, 18 },
476 { ICE_UDP_ILOS, 58 },
477 { ICE_PROTOCOL_LAST, 0 },
480 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
481 static const u8 dummy_vlan_udp_ipv6_packet[] = {
482 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
483 0x00, 0x00, 0x00, 0x00,
484 0x00, 0x00, 0x00, 0x00,
486 0x81, 0x00, /* ICE_ETYPE_OL 12 */
488 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
490 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
491 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
492 0x00, 0x00, 0x00, 0x00,
493 0x00, 0x00, 0x00, 0x00,
494 0x00, 0x00, 0x00, 0x00,
495 0x00, 0x00, 0x00, 0x00,
496 0x00, 0x00, 0x00, 0x00,
497 0x00, 0x00, 0x00, 0x00,
498 0x00, 0x00, 0x00, 0x00,
499 0x00, 0x00, 0x00, 0x00,
501 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
502 0x00, 0x08, 0x00, 0x00,
504 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv4 + UDP + GTP dummy packet */
507 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
509 { ICE_IPV4_OFOS, 14 },
512 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv4 + UDP + GTP */
515 static const u8 dummy_udp_gtp_packet[] = {
516 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
517 0x00, 0x00, 0x00, 0x00,
518 0x00, 0x00, 0x00, 0x00,
521 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
522 0x00, 0x00, 0x00, 0x00,
523 0x00, 0x11, 0x00, 0x00,
524 0x00, 0x00, 0x00, 0x00,
525 0x00, 0x00, 0x00, 0x00,
527 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
528 0x00, 0x1c, 0x00, 0x00,
530 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
531 0x00, 0x00, 0x00, 0x00,
532 0x00, 0x00, 0x00, 0x85,
534 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
535 0x00, 0x00, 0x00, 0x00,
/* offset info for GTP-U over IPv4 carrying an inner IPv4 packet */
539 struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_packet_offsets[] = {
541 { ICE_IPV4_OFOS, 14 },
545 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for GTP-U over IPv4, inner IPv4 */
548 static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
549 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
550 0x00, 0x00, 0x00, 0x00,
551 0x00, 0x00, 0x00, 0x00,
554 0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
555 0x00, 0x00, 0x40, 0x00,
556 0x40, 0x11, 0x00, 0x00,
557 0x00, 0x00, 0x00, 0x00,
558 0x00, 0x00, 0x00, 0x00,
560 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
561 0x00, 0x00, 0x00, 0x00,
563 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
564 0x00, 0x00, 0x00, 0x00,
565 0x00, 0x00, 0x00, 0x85,
567 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
568 0x00, 0x00, 0x00, 0x00,
570 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
571 0x00, 0x00, 0x40, 0x00,
572 0x40, 0x00, 0x00, 0x00,
573 0x00, 0x00, 0x00, 0x00,
574 0x00, 0x00, 0x00, 0x00,
/* offset info for GTP-U over IPv4 carrying an inner IPv6 packet */
579 struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_packet_offsets[] = {
581 { ICE_IPV4_OFOS, 14 },
585 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for GTP-U over IPv4, inner IPv6 */
588 static const u8 dummy_ipv4_gtpu_ipv6_packet[] = {
589 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
590 0x00, 0x00, 0x00, 0x00,
591 0x00, 0x00, 0x00, 0x00,
594 0x45, 0x00, 0x00, 0x58, /* ICE_IPV4_OFOS 14 */
595 0x00, 0x00, 0x40, 0x00,
596 0x40, 0x11, 0x00, 0x00,
597 0x00, 0x00, 0x00, 0x00,
598 0x00, 0x00, 0x00, 0x00,
600 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
601 0x00, 0x00, 0x00, 0x00,
603 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
604 0x00, 0x00, 0x00, 0x00,
605 0x00, 0x00, 0x00, 0x85,
607 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
608 0x00, 0x00, 0x00, 0x00,
610 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 62 */
611 0x00, 0x00, 0x3b, 0x00,
612 0x00, 0x00, 0x00, 0x00,
613 0x00, 0x00, 0x00, 0x00,
614 0x00, 0x00, 0x00, 0x00,
615 0x00, 0x00, 0x00, 0x00,
616 0x00, 0x00, 0x00, 0x00,
617 0x00, 0x00, 0x00, 0x00,
618 0x00, 0x00, 0x00, 0x00,
619 0x00, 0x00, 0x00, 0x00,
/* offset info for GTP-U over IPv6 carrying an inner IPv4 packet */
625 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_packet_offsets[] = {
627 { ICE_IPV6_OFOS, 14 },
631 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for GTP-U over IPv6, inner IPv4 */
634 static const u8 dummy_ipv6_gtpu_ipv4_packet[] = {
635 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
636 0x00, 0x00, 0x00, 0x00,
637 0x00, 0x00, 0x00, 0x00,
640 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
641 0x00, 0x58, 0x11, 0x00, /* Next header UDP*/
642 0x00, 0x00, 0x00, 0x00,
643 0x00, 0x00, 0x00, 0x00,
644 0x00, 0x00, 0x00, 0x00,
645 0x00, 0x00, 0x00, 0x00,
646 0x00, 0x00, 0x00, 0x00,
647 0x00, 0x00, 0x00, 0x00,
648 0x00, 0x00, 0x00, 0x00,
649 0x00, 0x00, 0x00, 0x00,
651 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
652 0x00, 0x00, 0x00, 0x00,
654 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 62 */
655 0x00, 0x00, 0x00, 0x00,
656 0x00, 0x00, 0x00, 0x85,
658 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
659 0x00, 0x00, 0x00, 0x00,
661 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 82 */
662 0x00, 0x00, 0x40, 0x00,
663 0x40, 0x00, 0x00, 0x00,
664 0x00, 0x00, 0x00, 0x00,
665 0x00, 0x00, 0x00, 0x00,
/* offset info for GTP-U over IPv6 carrying an inner IPv6 packet */
671 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_packet_offsets[] = {
673 { ICE_IPV6_OFOS, 14 },
677 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for GTP-U over IPv6, inner IPv6 */
680 static const u8 dummy_ipv6_gtpu_ipv6_packet[] = {
681 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
682 0x00, 0x00, 0x00, 0x00,
683 0x00, 0x00, 0x00, 0x00,
686 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
687 0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
688 0x00, 0x00, 0x00, 0x00,
689 0x00, 0x00, 0x00, 0x00,
690 0x00, 0x00, 0x00, 0x00,
691 0x00, 0x00, 0x00, 0x00,
692 0x00, 0x00, 0x00, 0x00,
693 0x00, 0x00, 0x00, 0x00,
694 0x00, 0x00, 0x00, 0x00,
695 0x00, 0x00, 0x00, 0x00,
697 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
698 0x00, 0x00, 0x00, 0x00,
700 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 62 */
701 0x00, 0x00, 0x00, 0x00,
702 0x00, 0x00, 0x00, 0x85,
704 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
705 0x00, 0x00, 0x00, 0x00,
707 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 82 */
708 0x00, 0x00, 0x3b, 0x00,
709 0x00, 0x00, 0x00, 0x00,
710 0x00, 0x00, 0x00, 0x00,
711 0x00, 0x00, 0x00, 0x00,
712 0x00, 0x00, 0x00, 0x00,
713 0x00, 0x00, 0x00, 0x00,
714 0x00, 0x00, 0x00, 0x00,
715 0x00, 0x00, 0x00, 0x00,
716 0x00, 0x00, 0x00, 0x00,
/* offset info for GTP-U over IPv4 with no payload (header match only) */
722 struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
724 { ICE_IPV4_OFOS, 14 },
726 { ICE_GTP_NO_PAY, 42 },
727 { ICE_PROTOCOL_LAST, 0 },
/* offset info for GTP-U over IPv6 with no payload (header match only) */
731 struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
733 { ICE_IPV6_OFOS, 14 },
735 { ICE_GTP_NO_PAY, 62 },
736 { ICE_PROTOCOL_LAST, 0 },
/* offset info for a bare PPPoE dummy packet (no network-layer payload) */
739 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
741 { ICE_ETYPE_OL, 12 },
742 { ICE_VLAN_OFOS, 14},
744 { ICE_PROTOCOL_LAST, 0 },
/* offset info for PPPoE + IPv4 dummy packet */
747 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv4_offsets[] = {
749 { ICE_ETYPE_OL, 12 },
750 { ICE_VLAN_OFOS, 14},
752 { ICE_IPV4_OFOS, 26 },
753 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for PPPoE session carrying IPv4 */
756 static const u8 dummy_pppoe_ipv4_packet[] = {
757 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
758 0x00, 0x00, 0x00, 0x00,
759 0x00, 0x00, 0x00, 0x00,
761 0x81, 0x00, /* ICE_ETYPE_OL 12 */
763 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
765 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
768 0x00, 0x21, /* PPP Link Layer 24 */
770 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
771 0x00, 0x00, 0x00, 0x00,
772 0x00, 0x00, 0x00, 0x00,
773 0x00, 0x00, 0x00, 0x00,
774 0x00, 0x00, 0x00, 0x00,
776 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* offset info for PPPoE + IPv4 + TCP dummy packet */
780 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_tcp_packet_offsets[] = {
782 { ICE_ETYPE_OL, 12 },
783 { ICE_VLAN_OFOS, 14},
785 { ICE_IPV4_OFOS, 26 },
787 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for PPPoE session carrying IPv4 + TCP */
790 static const u8 dummy_pppoe_ipv4_tcp_packet[] = {
791 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
792 0x00, 0x00, 0x00, 0x00,
793 0x00, 0x00, 0x00, 0x00,
795 0x81, 0x00, /* ICE_ETYPE_OL 12 */
797 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
799 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
802 0x00, 0x21, /* PPP Link Layer 24 */
804 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 26 */
805 0x00, 0x01, 0x00, 0x00,
806 0x00, 0x06, 0x00, 0x00,
807 0x00, 0x00, 0x00, 0x00,
808 0x00, 0x00, 0x00, 0x00,
810 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 46 */
811 0x00, 0x00, 0x00, 0x00,
812 0x00, 0x00, 0x00, 0x00,
813 0x50, 0x00, 0x00, 0x00,
814 0x00, 0x00, 0x00, 0x00,
816 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* offset info for PPPoE + IPv4 + UDP dummy packet */
820 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_udp_packet_offsets[] = {
822 { ICE_ETYPE_OL, 12 },
823 { ICE_VLAN_OFOS, 14},
825 { ICE_IPV4_OFOS, 26 },
826 { ICE_UDP_ILOS, 46 },
827 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for PPPoE session carrying IPv4 + UDP */
830 static const u8 dummy_pppoe_ipv4_udp_packet[] = {
831 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
832 0x00, 0x00, 0x00, 0x00,
833 0x00, 0x00, 0x00, 0x00,
835 0x81, 0x00, /* ICE_ETYPE_OL 12 */
837 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
839 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
842 0x00, 0x21, /* PPP Link Layer 24 */
844 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 26 */
845 0x00, 0x01, 0x00, 0x00,
846 0x00, 0x11, 0x00, 0x00,
847 0x00, 0x00, 0x00, 0x00,
848 0x00, 0x00, 0x00, 0x00,
850 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 46 */
851 0x00, 0x08, 0x00, 0x00,
853 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* offset info for PPPoE + IPv6 dummy packet */
856 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_offsets[] = {
858 { ICE_ETYPE_OL, 12 },
859 { ICE_VLAN_OFOS, 14},
861 { ICE_IPV6_OFOS, 26 },
862 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for PPPoE session carrying IPv6 */
865 static const u8 dummy_pppoe_ipv6_packet[] = {
866 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
867 0x00, 0x00, 0x00, 0x00,
868 0x00, 0x00, 0x00, 0x00,
870 0x81, 0x00, /* ICE_ETYPE_OL 12 */
872 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
874 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
877 0x00, 0x57, /* PPP Link Layer 24 */
879 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
880 0x00, 0x00, 0x3b, 0x00,
881 0x00, 0x00, 0x00, 0x00,
882 0x00, 0x00, 0x00, 0x00,
883 0x00, 0x00, 0x00, 0x00,
884 0x00, 0x00, 0x00, 0x00,
885 0x00, 0x00, 0x00, 0x00,
886 0x00, 0x00, 0x00, 0x00,
887 0x00, 0x00, 0x00, 0x00,
888 0x00, 0x00, 0x00, 0x00,
890 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* offset info for PPPoE + IPv6 + TCP dummy packet */
894 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_tcp_offsets[] = {
896 { ICE_ETYPE_OL, 12 },
897 { ICE_VLAN_OFOS, 14},
899 { ICE_IPV6_OFOS, 26 },
901 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for PPPoE session carrying IPv6 + TCP */
904 static const u8 dummy_pppoe_ipv6_tcp_packet[] = {
905 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
906 0x00, 0x00, 0x00, 0x00,
907 0x00, 0x00, 0x00, 0x00,
909 0x81, 0x00, /* ICE_ETYPE_OL 12 */
911 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
913 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
916 0x00, 0x57, /* PPP Link Layer 24 */
918 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
919 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
920 0x00, 0x00, 0x00, 0x00,
921 0x00, 0x00, 0x00, 0x00,
922 0x00, 0x00, 0x00, 0x00,
923 0x00, 0x00, 0x00, 0x00,
924 0x00, 0x00, 0x00, 0x00,
925 0x00, 0x00, 0x00, 0x00,
926 0x00, 0x00, 0x00, 0x00,
927 0x00, 0x00, 0x00, 0x00,
929 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 66 */
930 0x00, 0x00, 0x00, 0x00,
931 0x00, 0x00, 0x00, 0x00,
932 0x50, 0x00, 0x00, 0x00,
933 0x00, 0x00, 0x00, 0x00,
935 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* offset info for PPPoE + IPv6 + UDP dummy packet */
939 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_udp_offsets[] = {
941 { ICE_ETYPE_OL, 12 },
942 { ICE_VLAN_OFOS, 14},
944 { ICE_IPV6_OFOS, 26 },
945 { ICE_UDP_ILOS, 66 },
946 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for PPPoE session carrying IPv6 + UDP */
949 static const u8 dummy_pppoe_ipv6_udp_packet[] = {
950 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
951 0x00, 0x00, 0x00, 0x00,
952 0x00, 0x00, 0x00, 0x00,
954 0x81, 0x00, /* ICE_ETYPE_OL 12 */
956 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
958 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
961 0x00, 0x57, /* PPP Link Layer 24 */
963 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
964 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
965 0x00, 0x00, 0x00, 0x00,
966 0x00, 0x00, 0x00, 0x00,
967 0x00, 0x00, 0x00, 0x00,
968 0x00, 0x00, 0x00, 0x00,
969 0x00, 0x00, 0x00, 0x00,
970 0x00, 0x00, 0x00, 0x00,
971 0x00, 0x00, 0x00, 0x00,
972 0x00, 0x00, 0x00, 0x00,
974 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 66 */
975 0x00, 0x08, 0x00, 0x00,
977 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* offset info for MAC + IPv4 + ESP dummy packet */
980 static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
982 { ICE_IPV4_OFOS, 14 },
984 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv4 + ESP */
987 static const u8 dummy_ipv4_esp_pkt[] = {
988 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
989 0x00, 0x00, 0x00, 0x00,
990 0x00, 0x00, 0x00, 0x00,
993 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 14 */
994 0x00, 0x00, 0x40, 0x00,
995 0x40, 0x32, 0x00, 0x00,
996 0x00, 0x00, 0x00, 0x00,
997 0x00, 0x00, 0x00, 0x00,
999 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
1000 0x00, 0x00, 0x00, 0x00,
1001 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* offset info for MAC + IPv6 + ESP dummy packet */
1004 static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
1005 { ICE_MAC_OFOS, 0 },
1006 { ICE_IPV6_OFOS, 14 },
1008 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv6 + ESP */
1011 static const u8 dummy_ipv6_esp_pkt[] = {
1012 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1013 0x00, 0x00, 0x00, 0x00,
1014 0x00, 0x00, 0x00, 0x00,
1017 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1018 0x00, 0x08, 0x32, 0x00, /* Next header ESP */
1019 0x00, 0x00, 0x00, 0x00,
1020 0x00, 0x00, 0x00, 0x00,
1021 0x00, 0x00, 0x00, 0x00,
1022 0x00, 0x00, 0x00, 0x00,
1023 0x00, 0x00, 0x00, 0x00,
1024 0x00, 0x00, 0x00, 0x00,
1025 0x00, 0x00, 0x00, 0x00,
1026 0x00, 0x00, 0x00, 0x00,
1028 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
1029 0x00, 0x00, 0x00, 0x00,
1030 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* offset info for MAC + IPv4 + AH dummy packet */
1033 static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
1034 { ICE_MAC_OFOS, 0 },
1035 { ICE_IPV4_OFOS, 14 },
1037 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv4 + AH */
1040 static const u8 dummy_ipv4_ah_pkt[] = {
1041 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1042 0x00, 0x00, 0x00, 0x00,
1043 0x00, 0x00, 0x00, 0x00,
1046 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1047 0x00, 0x00, 0x40, 0x00,
1048 0x40, 0x33, 0x00, 0x00,
1049 0x00, 0x00, 0x00, 0x00,
1050 0x00, 0x00, 0x00, 0x00,
1052 0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
1053 0x00, 0x00, 0x00, 0x00,
1054 0x00, 0x00, 0x00, 0x00,
1055 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* offset info for MAC + IPv6 + AH dummy packet */
1058 static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
1059 { ICE_MAC_OFOS, 0 },
1060 { ICE_IPV6_OFOS, 14 },
1062 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv6 + AH */
1065 static const u8 dummy_ipv6_ah_pkt[] = {
1066 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1067 0x00, 0x00, 0x00, 0x00,
1068 0x00, 0x00, 0x00, 0x00,
1071 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1072 0x00, 0x0c, 0x33, 0x00, /* Next header AH */
1073 0x00, 0x00, 0x00, 0x00,
1074 0x00, 0x00, 0x00, 0x00,
1075 0x00, 0x00, 0x00, 0x00,
1076 0x00, 0x00, 0x00, 0x00,
1077 0x00, 0x00, 0x00, 0x00,
1078 0x00, 0x00, 0x00, 0x00,
1079 0x00, 0x00, 0x00, 0x00,
1080 0x00, 0x00, 0x00, 0x00,
1082 0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
1083 0x00, 0x00, 0x00, 0x00,
1084 0x00, 0x00, 0x00, 0x00,
1085 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* offset info for MAC + IPv4 + NAT-T (UDP-encapsulated ESP) dummy packet */
1088 static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
1089 { ICE_MAC_OFOS, 0 },
1090 { ICE_IPV4_OFOS, 14 },
1091 { ICE_UDP_ILOS, 34 },
1093 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv4 + NAT-T */
1096 static const u8 dummy_ipv4_nat_pkt[] = {
1097 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1098 0x00, 0x00, 0x00, 0x00,
1099 0x00, 0x00, 0x00, 0x00,
1102 0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_IL 14 */
1103 0x00, 0x00, 0x40, 0x00,
1104 0x40, 0x11, 0x00, 0x00,
1105 0x00, 0x00, 0x00, 0x00,
1106 0x00, 0x00, 0x00, 0x00,
1108 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34 */
1109 0x00, 0x00, 0x00, 0x00,
1111 0x00, 0x00, 0x00, 0x00,
1112 0x00, 0x00, 0x00, 0x00,
1113 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* offset info for MAC + IPv6 + NAT-T (UDP-encapsulated ESP) dummy packet */
1116 static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
1117 { ICE_MAC_OFOS, 0 },
1118 { ICE_IPV6_OFOS, 14 },
1119 { ICE_UDP_ILOS, 54 },
1121 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv6 + NAT-T */
1124 static const u8 dummy_ipv6_nat_pkt[] = {
1125 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1126 0x00, 0x00, 0x00, 0x00,
1127 0x00, 0x00, 0x00, 0x00,
1130 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1131 0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */
1132 0x00, 0x00, 0x00, 0x00,
1133 0x00, 0x00, 0x00, 0x00,
1134 0x00, 0x00, 0x00, 0x00,
1135 0x00, 0x00, 0x00, 0x00,
1136 0x00, 0x00, 0x00, 0x00,
1137 0x00, 0x00, 0x00, 0x00,
1138 0x00, 0x00, 0x00, 0x00,
1139 0x00, 0x00, 0x00, 0x00,
1141 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54 */
1142 0x00, 0x00, 0x00, 0x00,
1144 0x00, 0x00, 0x00, 0x00,
1145 0x00, 0x00, 0x00, 0x00,
1146 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* offset info for MAC + IPv4 + L2TPv3 dummy packet */
1150 static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
1151 { ICE_MAC_OFOS, 0 },
1152 { ICE_IPV4_OFOS, 14 },
1154 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv4 + L2TPv3 */
1157 static const u8 dummy_ipv4_l2tpv3_pkt[] = {
1158 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1159 0x00, 0x00, 0x00, 0x00,
1160 0x00, 0x00, 0x00, 0x00,
1163 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1164 0x00, 0x00, 0x40, 0x00,
1165 0x40, 0x73, 0x00, 0x00,
1166 0x00, 0x00, 0x00, 0x00,
1167 0x00, 0x00, 0x00, 0x00,
1169 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
1170 0x00, 0x00, 0x00, 0x00,
1171 0x00, 0x00, 0x00, 0x00,
1172 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* offset info for MAC + IPv6 + L2TPv3 dummy packet */
1175 static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
1176 { ICE_MAC_OFOS, 0 },
1177 { ICE_IPV6_OFOS, 14 },
1179 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv6 + L2TPv3 */
1182 static const u8 dummy_ipv6_l2tpv3_pkt[] = {
1183 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1184 0x00, 0x00, 0x00, 0x00,
1185 0x00, 0x00, 0x00, 0x00,
1188 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
1189 0x00, 0x0c, 0x73, 0x40,
1190 0x00, 0x00, 0x00, 0x00,
1191 0x00, 0x00, 0x00, 0x00,
1192 0x00, 0x00, 0x00, 0x00,
1193 0x00, 0x00, 0x00, 0x00,
1194 0x00, 0x00, 0x00, 0x00,
1195 0x00, 0x00, 0x00, 0x00,
1196 0x00, 0x00, 0x00, 0x00,
1197 0x00, 0x00, 0x00, 0x00,
1199 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
1200 0x00, 0x00, 0x00, 0x00,
1201 0x00, 0x00, 0x00, 0x00,
1202 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* offset info for double-VLAN (QinQ) + IPv4 + UDP dummy packet */
1205 static const struct ice_dummy_pkt_offsets dummy_qinq_ipv4_packet_offsets[] = {
1206 { ICE_MAC_OFOS, 0 },
1207 { ICE_VLAN_EX, 14 },
1208 { ICE_VLAN_OFOS, 18 },
1209 { ICE_IPV4_OFOS, 22 },
1210 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for QinQ + IPv4 + UDP */
1213 static const u8 dummy_qinq_ipv4_pkt[] = {
1214 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1215 0x00, 0x00, 0x00, 0x00,
1216 0x00, 0x00, 0x00, 0x00,
1219 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1220 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 18 */
1222 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
1223 0x00, 0x01, 0x00, 0x00,
1224 0x00, 0x11, 0x00, 0x00,
1225 0x00, 0x00, 0x00, 0x00,
1226 0x00, 0x00, 0x00, 0x00,
1228 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
1229 0x00, 0x08, 0x00, 0x00,
1231 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offset info for double-VLAN (QinQ) + IPv6 + UDP dummy packet */
1234 static const struct ice_dummy_pkt_offsets dummy_qinq_ipv6_packet_offsets[] = {
1235 { ICE_MAC_OFOS, 0 },
1236 { ICE_VLAN_EX, 14 },
1237 { ICE_VLAN_OFOS, 18 },
1238 { ICE_IPV6_OFOS, 22 },
1239 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for QinQ + IPv6 + UDP */
1242 static const u8 dummy_qinq_ipv6_pkt[] = {
1243 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1244 0x00, 0x00, 0x00, 0x00,
1245 0x00, 0x00, 0x00, 0x00,
1248 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1249 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 18 */
1251 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
1252 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
1253 0x00, 0x00, 0x00, 0x00,
1254 0x00, 0x00, 0x00, 0x00,
1255 0x00, 0x00, 0x00, 0x00,
1256 0x00, 0x00, 0x00, 0x00,
1257 0x00, 0x00, 0x00, 0x00,
1258 0x00, 0x00, 0x00, 0x00,
1259 0x00, 0x00, 0x00, 0x00,
1260 0x00, 0x00, 0x00, 0x00,
1262 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
1263 0x00, 0x10, 0x00, 0x00,
1265 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
1266 0x00, 0x00, 0x00, 0x00,
1268 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offset info for QinQ + PPPoE dummy packet (no network-layer payload) */
1271 static const struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_offsets[] = {
1272 { ICE_MAC_OFOS, 0 },
1273 { ICE_VLAN_EX, 14 },
1274 { ICE_VLAN_OFOS, 18 },
1276 { ICE_PROTOCOL_LAST, 0 },
/* offset info for QinQ + PPPoE + IPv4 dummy packet */
1280 struct ice_dummy_pkt_offsets dummy_qinq_pppoe_ipv4_packet_offsets[] = {
1281 { ICE_MAC_OFOS, 0 },
1282 { ICE_VLAN_EX, 14 },
1283 { ICE_VLAN_OFOS, 18 },
1285 { ICE_IPV4_OFOS, 30 },
1286 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for QinQ + PPPoE session carrying IPv4 */
1289 static const u8 dummy_qinq_pppoe_ipv4_pkt[] = {
1290 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1291 0x00, 0x00, 0x00, 0x00,
1292 0x00, 0x00, 0x00, 0x00,
1295 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1296 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 18 */
1298 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
1301 0x00, 0x21, /* PPP Link Layer 28 */
1303 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 30 */
1304 0x00, 0x00, 0x00, 0x00,
1305 0x00, 0x00, 0x00, 0x00,
1306 0x00, 0x00, 0x00, 0x00,
1307 0x00, 0x00, 0x00, 0x00,
1309 0x00, 0x00, /* 2 bytes for 4 byte alignment */
1313 struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_ipv6_offsets[] = {
1314 { ICE_MAC_OFOS, 0 },
1315 { ICE_ETYPE_OL, 12 },
1317 { ICE_VLAN_OFOS, 18 },
1319 { ICE_IPV6_OFOS, 30 },
1320 { ICE_PROTOCOL_LAST, 0 },
/* Dummy QinQ PPPoE/IPv6 training packet. All match fields are left zero
 * here and are patched in when a switch rule is programmed.
 */
static const u8 dummy_qinq_pppoe_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x91, 0x00,		/* ICE_ETYPE_OL 12: outer tag ethertype 0x9100 */
	0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14: TCI 0, inner tag 0x8100 */
	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 18: etype PPPoE session */
	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22: ver/type 0x11, code 0 */
	0x00, 0x57,		/* PPP Link Layer 28: protocol IPv6 (0x0057) */
	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 30: version 6 */
	0x00, 0x00, 0x3b, 0x00, /* payload len 0, next header 59 (no next) */
	0x00, 0x00, 0x00, 0x00, /* src/dst IPv6 addresses (32 zero bytes) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* Recipe -> profile association bitmap: bit j of recipe_to_profile[r] is
 * set when recipe r is associated with field-vector profile j.
 */
static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
			  ICE_MAX_NUM_PROFILES);
/* Profile -> recipe association bitmap: the transpose of the table above,
 * indexed by profile ID.
 */
static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
			  ICE_MAX_NUM_RECIPES);
/* Forward declaration; defined later in this file. */
static void ice_get_recp_to_prof_map(struct ice_hw *hw);
1363 * ice_collect_result_idx - copy result index values
1364 * @buf: buffer that contains the result index
1365 * @recp: the recipe struct to copy data into
1367 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
1368 struct ice_sw_recipe *recp)
1370 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1371 ice_set_bit(buf->content.result_indx &
1372 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
/**
 * ice_get_tun_type_for_recipe - get tunnel type for the recipe
 * @rid: recipe ID that we are populating
 * @vlan: true when the recipe carries a double-VLAN (QinQ) match
 *
 * Derive the software tunnel type of a recipe from the set of field-vector
 * profiles it is associated with (recipe_to_profile[rid]).
 *
 * NOTE(review): several short lines (continue/break statements, validity
 * flag assignments, profile counters, closing braces) are not visible in
 * this excerpt; verify the control flow against the complete source file.
 */
static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid, bool vlan)
{
	/* Hard-coded profile-ID groups used to classify the recipe */
	u8 vxlan_profile[12] = {10, 11, 12, 16, 17, 18, 22, 23, 24, 25, 26, 27};
	u8 gre_profile[12] = {13, 14, 15, 19, 20, 21, 28, 29, 30, 31, 32, 33};
	u8 pppoe_profile[7] = {34, 35, 36, 37, 38, 39, 40};
	u8 non_tun_profile[6] = {4, 5, 6, 7, 8, 9};
	enum ice_sw_tunnel_type tun_type;
	u16 i, j, profile_num = 0;
	bool non_tun_valid = false;
	bool pppoe_valid = false;
	bool vxlan_valid = false;
	bool gre_valid = false;
	bool gtp_valid = false;
	bool flag_valid = false;
	/* Scan every profile associated with this recipe and record which of
	 * the classification groups it belongs to.
	 */
	for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
		if (!ice_is_bit_set(recipe_to_profile[rid], j))
		for (i = 0; i < 12; i++) {
			if (gre_profile[i] == j)
		for (i = 0; i < 12; i++) {
			if (vxlan_profile[i] == j)
		for (i = 0; i < 7; i++) {
			if (pppoe_profile[i] == j)
		for (i = 0; i < 6; i++) {
			if (non_tun_profile[i] == j)
				non_tun_valid = true;
		/* GTPU-EH profiles form one contiguous ID range */
		if (j >= ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER &&
		    j <= ICE_PROFID_IPV6_GTPU_IPV6_TCP)
		/* ESP/AH/NAT-T/PFCP and GTPC/GTPU-TEID profile ranges */
		if ((j >= ICE_PROFID_IPV4_ESP &&
		     j <= ICE_PROFID_IPV6_PFCP_SESSION) ||
		    (j >= ICE_PROFID_IPV4_GTPC_TEID &&
		     j <= ICE_PROFID_IPV6_GTPU_TEID))
	/* Resolve the tunnel type from the recorded validity flags */
	if (!non_tun_valid && vxlan_valid)
		tun_type = ICE_SW_TUN_VXLAN;
	else if (!non_tun_valid && gre_valid)
		tun_type = ICE_SW_TUN_NVGRE;
	else if (!non_tun_valid && pppoe_valid)
		tun_type = ICE_SW_TUN_PPPOE;
	else if (!non_tun_valid && gtp_valid)
		tun_type = ICE_SW_TUN_GTP;
	else if (non_tun_valid &&
		 (vxlan_valid || gre_valid || gtp_valid || pppoe_valid))
		tun_type = ICE_SW_TUN_AND_NON_TUN;
	else if (non_tun_valid && !vxlan_valid && !gre_valid && !gtp_valid &&
		tun_type = ICE_NON_TUN;
		tun_type = ICE_NON_TUN;
	/* Multiple PPPoE profiles: pick the IPv4 vs IPv6 flavor */
	if (profile_num > 1 && tun_type == ICE_SW_TUN_PPPOE) {
		i = ice_is_bit_set(recipe_to_profile[rid],
				   ICE_PROFID_PPPOE_IPV4_OTHER);
		j = ice_is_bit_set(recipe_to_profile[rid],
				   ICE_PROFID_PPPOE_IPV6_OTHER);
			tun_type = ICE_SW_TUN_PPPOE_IPV4;
			tun_type = ICE_SW_TUN_PPPOE_IPV6;
	/* Narrow a generic GTP match to the inner IPv4/IPv6 flavor */
	if (tun_type == ICE_SW_TUN_GTP) {
		if (ice_is_bit_set(recipe_to_profile[rid],
				   ICE_PROFID_IPV4_GTPU_IPV4_OTHER))
			tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4;
		else if (ice_is_bit_set(recipe_to_profile[rid],
					ICE_PROFID_IPV4_GTPU_IPV6_OTHER))
			tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6;
		else if (ice_is_bit_set(recipe_to_profile[rid],
					ICE_PROFID_IPV6_GTPU_IPV4_OTHER))
			tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4;
		else if (ice_is_bit_set(recipe_to_profile[rid],
					ICE_PROFID_IPV6_GTPU_IPV6_OTHER))
			tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6;
	/* Exactly one profile: map the profile ID directly to a type */
	if (profile_num == 1 && (flag_valid || non_tun_valid || pppoe_valid)) {
		for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
			if (ice_is_bit_set(recipe_to_profile[rid], j)) {
				case ICE_PROFID_IPV4_TCP:
					tun_type = ICE_SW_IPV4_TCP;
				case ICE_PROFID_IPV4_UDP:
					tun_type = ICE_SW_IPV4_UDP;
				case ICE_PROFID_IPV6_TCP:
					tun_type = ICE_SW_IPV6_TCP;
				case ICE_PROFID_IPV6_UDP:
					tun_type = ICE_SW_IPV6_UDP;
				case ICE_PROFID_PPPOE_PAY:
					tun_type = ICE_SW_TUN_PPPOE_PAY;
				case ICE_PROFID_PPPOE_IPV4_TCP:
					tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
				case ICE_PROFID_PPPOE_IPV4_UDP:
					tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
				case ICE_PROFID_PPPOE_IPV4_OTHER:
					tun_type = ICE_SW_TUN_PPPOE_IPV4;
				case ICE_PROFID_PPPOE_IPV6_TCP:
					tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
				case ICE_PROFID_PPPOE_IPV6_UDP:
					tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
				case ICE_PROFID_PPPOE_IPV6_OTHER:
					tun_type = ICE_SW_TUN_PPPOE_IPV6;
				case ICE_PROFID_IPV4_ESP:
					tun_type = ICE_SW_TUN_IPV4_ESP;
				case ICE_PROFID_IPV6_ESP:
					tun_type = ICE_SW_TUN_IPV6_ESP;
				case ICE_PROFID_IPV4_AH:
					tun_type = ICE_SW_TUN_IPV4_AH;
				case ICE_PROFID_IPV6_AH:
					tun_type = ICE_SW_TUN_IPV6_AH;
				case ICE_PROFID_IPV4_NAT_T:
					tun_type = ICE_SW_TUN_IPV4_NAT_T;
				case ICE_PROFID_IPV6_NAT_T:
					tun_type = ICE_SW_TUN_IPV6_NAT_T;
				case ICE_PROFID_IPV4_PFCP_NODE:
					ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
				case ICE_PROFID_IPV6_PFCP_NODE:
					ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
				case ICE_PROFID_IPV4_PFCP_SESSION:
					ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
				case ICE_PROFID_IPV6_PFCP_SESSION:
					ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
				case ICE_PROFID_MAC_IPV4_L2TPV3:
					tun_type = ICE_SW_TUN_IPV4_L2TPV3;
				case ICE_PROFID_MAC_IPV6_L2TPV3:
					tun_type = ICE_SW_TUN_IPV6_L2TPV3;
				case ICE_PROFID_IPV4_GTPU_TEID:
					tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
				case ICE_PROFID_IPV6_GTPU_TEID:
					tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
	/* QinQ recipes are promoted to the corresponding _QINQ type */
	if (vlan && tun_type == ICE_SW_TUN_PPPOE)
		tun_type = ICE_SW_TUN_PPPOE_QINQ;
	else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV6)
		tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
	else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV4)
		tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
	else if (vlan && tun_type == ICE_SW_TUN_PPPOE_PAY)
		tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
	else if (vlan && tun_type == ICE_SW_TUN_AND_NON_TUN)
		tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
	else if (vlan && tun_type == ICE_NON_TUN)
		tun_type = ICE_NON_TUN_QINQ;
/**
 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
 * @hw: pointer to hardware structure
 * @recps: struct that we need to populate
 * @rid: recipe ID that we are populating
 * @refresh_required: true if we should get recipe to profile mapping from FW
 *
 * This function is used to populate all the necessary entries into our
 * bookkeeping so that we have a current list of all the recipes that are
 * programmed in the firmware.
 *
 * NOTE(review): some short lines (error checks, loop-local declarations,
 * continue/goto statements, closing braces) are not visible in this
 * excerpt; verify the control flow against the complete source file.
 */
static enum ice_status
ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
		    bool *refresh_required)
{
	ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	u16 num_recps = ICE_MAX_NUM_RECIPES;
	struct ice_prot_lkup_ext *lkup_exts;
	enum ice_status status;
	/* result_bm accumulates every result index used by this chain */
	ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
	/* we need a buffer big enough to accommodate all the recipes */
	tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
		ICE_MAX_NUM_RECIPES, sizeof(*tmp));
		return ICE_ERR_NO_MEMORY;
	/* seed the query with the root recipe index */
	tmp[0].recipe_indx = rid;
	status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
	/* non-zero status meaning recipe doesn't exist */
	/* Get recipe to profile map so that we can get the fv from lkups that
	 * we read for a recipe from FW. Since we want to minimize the number of
	 * times we make this FW call, just make one call and cache the copy
	 * until a new recipe is added. This operation is only required the
	 * first time to get the changes from FW. Then to search existing
	 * entries we don't need to update the cache again until another recipe
	 * gets added.
	 */
	if (*refresh_required) {
		ice_get_recp_to_prof_map(hw);
		*refresh_required = false;
	/* Start populating all the entries for recps[rid] based on lkups from
	 * firmware. Note that we are only creating the root recipe in our
	 * database.
	 */
	lkup_exts = &recps[rid].lkup_exts;
	/* one iteration per (sub-)recipe returned by FW */
	for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
		struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
		struct ice_recp_grp_entry *rg_entry;
		u8 i, prof, idx, prot = 0;
		rg_entry = (struct ice_recp_grp_entry *)
			ice_malloc(hw, sizeof(*rg_entry));
			status = ICE_ERR_NO_MEMORY;
		idx = root_bufs.recipe_indx;
		is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
		/* Mark all result indices in this chain */
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
			ice_set_bit(root_bufs.content.result_indx &
				    ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
		/* get the first profile that is associated with rid */
		prof = ice_find_first_bit(recipe_to_profile[idx],
					  ICE_MAX_NUM_PROFILES);
		for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
			u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
			rg_entry->fv_idx[i] = lkup_indx;
			rg_entry->fv_mask[i] =
				LE16_TO_CPU(root_bufs.content.mask[i + 1]);
			/* If the recipe is a chained recipe then all its
			 * child recipe's result will have a result index.
			 * To fill fv_words we should not use those result
			 * index, we only need the protocol ids and offsets.
			 * We will skip all the fv_idx which stores result
			 * index in them. We also need to skip any fv_idx which
			 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
			 * valid offset value.
			 */
			if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
					   rg_entry->fv_idx[i]) ||
			    rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
			    rg_entry->fv_idx[i] == 0)
			/* translate fv index to protocol ID and offset */
			ice_find_prot_off(hw, ICE_BLK_SW, prof,
					  rg_entry->fv_idx[i], &prot, &off);
			lkup_exts->fv_words[fv_word_idx].prot_id = prot;
			lkup_exts->fv_words[fv_word_idx].off = off;
			lkup_exts->field_mask[fv_word_idx] =
				rg_entry->fv_mask[i];
			/* metadata tunnel-flag word marks a VLAN/tunnel recipe */
			if (prot == ICE_META_DATA_ID_HW &&
			    off == ICE_TUN_FLAG_MDID_OFF)
		/* populate rg_list with the data from the child entry of this
		 * recipe
		 */
		LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
		/* Propagate some data to the recipe database */
		recps[idx].is_root = !!is_root;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
		ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
			recps[idx].chain_idx = root_bufs.content.result_indx &
				~ICE_AQ_RECIPE_RESULT_EN;
			ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
			recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
		/* Only do the following for root recipes entries */
		ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
			   sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
		recps[idx].root_rid = root_bufs.content.rid &
			~ICE_AQ_RECIPE_ID_IS_ROOT;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
	/* Complete initialization of the root recipe entry */
	lkup_exts->n_val_words = fv_word_idx;
	recps[rid].big_recp = (num_recps > 1);
	recps[rid].n_grp_count = (u8)num_recps;
	recps[rid].tun_type = ice_get_tun_type_for_recipe(rid, vlan);
	/* keep a private copy of the raw FW recipe buffers */
	recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
		ice_memdup(hw, tmp, recps[rid].n_grp_count *
			   sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
	if (!recps[rid].root_buf)
	/* Copy result indexes */
	ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
	recps[rid].recp_created = true;
/**
 * ice_get_recp_to_prof_map - updates recipe to profile mapping
 * @hw: pointer to hardware structure
 *
 * This function is used to populate recipe_to_profile matrix where index to
 * this array is the recipe ID and the element is the mapping of which profiles
 * is this recipe mapped to.
 */
static void ice_get_recp_to_prof_map(struct ice_hw *hw)
{
	ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
	/* Query FW once per used profile and mirror the result into both
	 * profile_to_recipe and the transposed recipe_to_profile.
	 */
	for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
		ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
		ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
		if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
		ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
			      ICE_MAX_NUM_RECIPES);
		ice_for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
			ice_set_bit(i, recipe_to_profile[j]);
/**
 * ice_init_def_sw_recp - initialize the recipe book keeping tables
 * @hw: pointer to the HW struct
 * @recp_list: pointer to sw recipe list
 *
 * Allocate memory for the entire recipe table and initialize the structures/
 * entries corresponding to basic recipes.
 */
ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
{
	struct ice_sw_recipe *recps;
	recps = (struct ice_sw_recipe *)
		ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
		return ICE_ERR_NO_MEMORY;
	/* seed each recipe entry with its index and empty rule lists */
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		recps[i].root_rid = i;
		INIT_LIST_HEAD(&recps[i].filt_rules);
		INIT_LIST_HEAD(&recps[i].filt_replay_rules);
		INIT_LIST_HEAD(&recps[i].rg_list);
		ice_init_lock(&recps[i].filt_rule_lock);
/**
 * ice_aq_get_sw_cfg - get switch configuration
 * @hw: pointer to the hardware structure
 * @buf: pointer to the result buffer
 * @buf_size: length of the buffer available for response
 * @req_desc: pointer to requested descriptor
 * @num_elems: pointer to number of elements
 * @cd: pointer to command details structure or NULL
 *
 * Get switch configuration (0x0200) to be placed in buf.
 * This admin command returns information such as initial VSI/port number
 * and switch ID it belongs to.
 *
 * NOTE: *req_desc is both an input/output parameter.
 * The caller of this function first calls this function with *request_desc set
 * to 0. If the response from f/w has *req_desc set to 0, all the switch
 * configuration information has been returned; if non-zero (meaning not all
 * the information was returned), the caller should call this function again
 * with *req_desc set to the previous value returned by f/w to get the
 * next block of switch configuration information.
 *
 * *num_elems is output only parameter. This reflects the number of elements
 * in response buffer. The caller of this function to use *num_elems while
 * parsing the response buffer.
 */
static enum ice_status
ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
		  u16 buf_size, u16 *req_desc, u16 *num_elems,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_get_sw_cfg *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
	cmd = &desc.params.get_sw_conf;
	/* pass the caller's continuation cookie (little-endian on the wire) */
	cmd->element = CPU_TO_LE16(*req_desc);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	/* FW writes the next-descriptor cookie and the element count back
	 * into the command structure
	 */
		*req_desc = LE16_TO_CPU(cmd->element);
		*num_elems = LE16_TO_CPU(cmd->num_elems);
/**
 * ice_alloc_rss_global_lut - allocate a RSS global LUT
 * @hw: pointer to the HW struct
 * @shared_res: true to allocate as a shared resource and false to allocate as a dedicated resource
 * @global_lut_id: output parameter for the RSS global LUT's ID
 */
enum ice_status ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id)
{
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	enum ice_status status;
	buf_len = ice_struct_size(sw_buf, elem, 1);
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
		return ICE_ERR_NO_MEMORY;
	/* request one global RSS LUT; shared vs dedicated per caller */
	sw_buf->num_elems = CPU_TO_LE16(1);
	sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH |
				       (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
					ICE_AQC_RES_TYPE_FLAG_DEDICATED));
	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, ice_aqc_opc_alloc_res, NULL);
		ice_debug(hw, ICE_DBG_RES, "Failed to allocate %s RSS global LUT, status %d\n",
			  shared_res ? "shared" : "dedicated", status);
		goto ice_alloc_global_lut_exit;
	/* FW returns the allocated LUT ID in the first response element */
	*global_lut_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
ice_alloc_global_lut_exit:
	ice_free(hw, sw_buf);
/**
 * ice_free_rss_global_lut - free a RSS global LUT
 * @hw: pointer to the HW struct
 * @global_lut_id: ID of the RSS global LUT to free
 */
enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id)
{
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	u16 buf_len, num_elems = 1;
	enum ice_status status;
	buf_len = ice_struct_size(sw_buf, elem, num_elems);
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
		return ICE_ERR_NO_MEMORY;
	/* release the single LUT identified by global_lut_id */
	sw_buf->num_elems = CPU_TO_LE16(num_elems);
	sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH);
	sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(global_lut_id);
	status = ice_aq_alloc_free_res(hw, num_elems, sw_buf, buf_len, ice_aqc_opc_free_res, NULL);
		ice_debug(hw, ICE_DBG_RES, "Failed to free RSS global LUT %d, status %d\n",
			  global_lut_id, status);
	ice_free(hw, sw_buf);
/**
 * ice_alloc_sw - allocate resources specific to switch
 * @hw: pointer to the HW struct
 * @ena_stats: true to turn on VEB stats
 * @shared_res: true for shared resource, false for dedicated resource
 * @sw_id: switch ID returned
 * @counter_id: VEB counter ID returned
 *
 * allocates switch resources (SWID and VEB counter) (0x0208)
 */
ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
{
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	struct ice_aqc_res_elem *sw_ele;
	enum ice_status status;
	buf_len = ice_struct_size(sw_buf, elem, 1);
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
		return ICE_ERR_NO_MEMORY;
	/* Prepare buffer for switch ID.
	 * The number of resource entries in buffer is passed as 1 since only a
	 * single switch/VEB instance is allocated, and hence a single sw_id
	 * is requested.
	 */
	sw_buf->num_elems = CPU_TO_LE16(1);
		CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
			    (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
			     ICE_AQC_RES_TYPE_FLAG_DEDICATED));
	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
		goto ice_alloc_sw_exit;
	sw_ele = &sw_buf->elem[0];
	*sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
	/* Prepare buffer for VEB Counter */
		enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
		struct ice_aqc_alloc_free_res_elem *counter_buf;
		struct ice_aqc_res_elem *counter_ele;
		counter_buf = (struct ice_aqc_alloc_free_res_elem *)
			ice_malloc(hw, buf_len);
			status = ICE_ERR_NO_MEMORY;
			goto ice_alloc_sw_exit;
		/* The number of resource entries in buffer is passed as 1 since
		 * only a single switch/VEB instance is allocated, and hence a
		 * single VEB counter is requested.
		 */
		counter_buf->num_elems = CPU_TO_LE16(1);
		counter_buf->res_type =
			CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
				    ICE_AQC_RES_TYPE_FLAG_DEDICATED);
		status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
			ice_free(hw, counter_buf);
			goto ice_alloc_sw_exit;
		counter_ele = &counter_buf->elem[0];
		*counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
		ice_free(hw, counter_buf);
	ice_free(hw, sw_buf);
/**
 * ice_free_sw - free resources specific to switch
 * @hw: pointer to the HW struct
 * @sw_id: switch ID returned
 * @counter_id: VEB counter ID returned
 *
 * free switch resources (SWID and VEB counter) (0x0209)
 *
 * NOTE: This function frees multiple resources. It continues
 * releasing other resources even after it encounters error.
 * The error code returned is the last error it encountered.
 */
enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
{
	struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
	enum ice_status status, ret_status;
	buf_len = ice_struct_size(sw_buf, elem, 1);
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
		return ICE_ERR_NO_MEMORY;
	/* Prepare buffer to free for switch ID res.
	 * The number of resource entries in buffer is passed as 1 since only a
	 * single switch/VEB instance is freed, and hence a single sw_id
	 * is released.
	 */
	sw_buf->num_elems = CPU_TO_LE16(1);
	sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
	sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
	ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
					   ice_aqc_opc_free_res, NULL);
		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
	/* Prepare buffer to free for VEB Counter resource */
	counter_buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
		ice_free(hw, sw_buf);
		return ICE_ERR_NO_MEMORY;
	/* The number of resource entries in buffer is passed as 1 since only a
	 * single switch/VEB instance is freed, and hence a single VEB counter
	 * is released.
	 */
	counter_buf->num_elems = CPU_TO_LE16(1);
	counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
	counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
	status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
				       ice_aqc_opc_free_res, NULL);
		ice_debug(hw, ICE_DBG_SW, "VEB counter resource could not be freed\n");
		ret_status = status;
	ice_free(hw, counter_buf);
	ice_free(hw, sw_buf);
/**
 * ice_aq_add_vsi - add a VSI context to hardware
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware (0x0210)
 */
ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *res;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	cmd = &desc.params.vsi_cmd;
	res = &desc.params.add_update_free_vsi_res;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
	/* caller supplies an explicit VSI number when not pool-allocated */
	if (!vsi_ctx->alloc_from_pool)
		cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
					   ICE_AQ_VSI_IS_VALID);
	cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
	/* command carries the VSI info payload (read flag) */
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);
	/* on success FW reports the VSI number and pool usage counters */
		vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
		vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
		vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
/**
 * ice_aq_free_vsi - free a VSI context in hardware
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware (0x0213)
 */
ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
	cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
	/* optionally keep the allocation accounted to this PF */
		cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	/* on success FW reports updated pool usage counters */
		vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
		vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
/**
 * ice_aq_update_vsi - update a VSI context in hardware
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Update VSI context in the hardware (0x0211)
 */
ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
	cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
	/* command carries the VSI info payload (read flag) */
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);
	/* on success FW reports updated pool usage counters */
		vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
		vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2180 * ice_is_vsi_valid - check whether the VSI is valid or not
2181 * @hw: pointer to the HW struct
2182 * @vsi_handle: VSI handle
2184 * check whether the VSI is valid or not
2186 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
2188 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
2192 * ice_get_hw_vsi_num - return the HW VSI number
2193 * @hw: pointer to the HW struct
2194 * @vsi_handle: VSI handle
2196 * return the HW VSI number
2197 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
2199 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
2201 return hw->vsi_ctx[vsi_handle]->vsi_num;
2205 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
2206 * @hw: pointer to the HW struct
2207 * @vsi_handle: VSI handle
2209 * return the VSI context entry for a given VSI handle
2211 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2213 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
/**
 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @vsi: VSI context pointer
 *
 * Save the VSI context entry for a given VSI handle. The pointer is stored,
 * not copied; any previous entry for this handle is overwritten.
 */
ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
{
	hw->vsi_ctx[vsi_handle] = vsi;
/**
 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 */
static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_vsi_ctx *vsi;
	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	/* free the per-TC LAN queue context arrays, if allocated */
	ice_for_each_traffic_class(i) {
		if (vsi->lan_q_ctx[i]) {
			ice_free(hw, vsi->lan_q_ctx[i]);
			vsi->lan_q_ctx[i] = NULL;
/**
 * ice_clear_vsi_ctx - clear the VSI context entry
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * clear the VSI context entry
 */
static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_vsi_ctx *vsi;
	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	/* release queue contexts first, then drop the saved pointer */
		ice_clear_vsi_q_ctx(hw, vsi_handle);
		hw->vsi_ctx[vsi_handle] = NULL;
/**
 * ice_clear_all_vsi_ctx - clear all the VSI context entries
 * @hw: pointer to the HW struct
 */
void ice_clear_all_vsi_ctx(struct ice_hw *hw)
{
	/* walk every possible handle; ice_clear_vsi_ctx handles empty slots */
	for (i = 0; i < ICE_MAX_VSI; i++)
		ice_clear_vsi_ctx(hw, i);
/**
 * ice_add_vsi - add VSI context to the hardware and VSI handle list
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle provided by drivers
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware also add it into the VSI handle list.
 * If this function gets called after reset for existing VSIs then update
 * with the new HW VSI number in the corresponding VSI handle list entry.
 */
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	    struct ice_sq_cd *cd)
{
	struct ice_vsi_ctx *tmp_vsi_ctx;
	enum ice_status status;
	if (vsi_handle >= ICE_MAX_VSI)
		return ICE_ERR_PARAM;
	status = ice_aq_add_vsi(hw, vsi_ctx, cd);
	tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
		/* Create a new VSI context */
		tmp_vsi_ctx = (struct ice_vsi_ctx *)
			ice_malloc(hw, sizeof(*tmp_vsi_ctx));
			/* roll back the HW allocation when we cannot track it */
			ice_aq_free_vsi(hw, vsi_ctx, false, cd);
			return ICE_ERR_NO_MEMORY;
		*tmp_vsi_ctx = *vsi_ctx;
		ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
		/* update with new HW VSI num */
		tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
/**
 * ice_free_vsi - free VSI context from hardware and VSI handle list
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware as well as from VSI handle list
 */
ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	     bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
	enum ice_status status;
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	/* translate the handle to the HW VSI number before the AQ call */
	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
	status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
		ice_clear_vsi_ctx(hw, vsi_handle);
/**
 * ice_update_vsi - update VSI context in the hardware
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Update VSI context in the hardware
 */
ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
{
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	/* translate the handle to the HW VSI number before the AQ call */
	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
	return ice_aq_update_vsi(hw, vsi_ctx, cd);
/**
 * ice_aq_get_vsi_params - get VSI context parameters from hardware
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Get VSI context info from hardware (0x0212)
 */
ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		      struct ice_sq_cd *cd)
{
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aqc_get_vsi_resp *resp;
	struct ice_aq_desc desc;
	enum ice_status status;
	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.get_vsi_resp;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
	cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);
	/* on success FW reports the VSI number and pool usage counters */
		vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
		vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
		vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2406 * ice_aq_add_update_mir_rule - add/update a mirror rule
2407 * @hw: pointer to the HW struct
2408 * @rule_type: Rule Type
2409 * @dest_vsi: VSI number to which packets will be mirrored
2410 * @count: length of the list
2411 * @mr_buf: buffer for list of mirrored VSI numbers
2412 * @cd: pointer to command details structure or NULL
2415 * Add/Update Mirror Rule (0x260).
2418 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
2419 u16 count, struct ice_mir_rule_buf *mr_buf,
2420 struct ice_sq_cd *cd, u16 *rule_id)
2422 struct ice_aqc_add_update_mir_rule *cmd;
2423 struct ice_aq_desc desc;
2424 enum ice_status status;
2425 __le16 *mr_list = NULL;
2428 switch (rule_type) {
2429 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
2430 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
2431 /* Make sure count and mr_buf are set for these rule_types */
2432 if (!(count && mr_buf))
2433 return ICE_ERR_PARAM;
2435 buf_size = count * sizeof(__le16);
2436 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
2438 return ICE_ERR_NO_MEMORY;
2440 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
2441 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
2442 /* Make sure count and mr_buf are not set for these
2445 if (count || mr_buf)
2446 return ICE_ERR_PARAM;
2449 ice_debug(hw, ICE_DBG_SW, "Error due to unsupported rule_type %u\n", rule_type);
2450 return ICE_ERR_OUT_OF_RANGE;
2453 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
2455 /* Pre-process 'mr_buf' items for add/update of virtual port
2456 * ingress/egress mirroring (but not physical port ingress/egress
2462 for (i = 0; i < count; i++) {
2465 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
2467 /* Validate specified VSI number, make sure it is less
2468 * than ICE_MAX_VSI, if not return with error.
2470 if (id >= ICE_MAX_VSI) {
2471 ice_debug(hw, ICE_DBG_SW, "Error VSI index (%u) out-of-range\n",
2473 ice_free(hw, mr_list);
2474 return ICE_ERR_OUT_OF_RANGE;
2477 /* add VSI to mirror rule */
2480 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
2481 else /* remove VSI from mirror rule */
2482 mr_list[i] = CPU_TO_LE16(id);
2486 cmd = &desc.params.add_update_rule;
2487 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
2488 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
2489 ICE_AQC_RULE_ID_VALID_M);
2490 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
2491 cmd->num_entries = CPU_TO_LE16(count);
2492 cmd->dest = CPU_TO_LE16(dest_vsi);
2494 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
2496 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
2498 ice_free(hw, mr_list);
2504 * ice_aq_delete_mir_rule - delete a mirror rule
2505 * @hw: pointer to the HW struct
2506 * @rule_id: Mirror rule ID (to be deleted)
2507 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
2508 * otherwise it is returned to the shared pool
2509 * @cd: pointer to command details structure or NULL
2511 * Delete Mirror Rule (0x261).
2514 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
2515 struct ice_sq_cd *cd)
2517 struct ice_aqc_delete_mir_rule *cmd;
2518 struct ice_aq_desc desc;
2520 /* rule_id should be in the range 0...63 */
2521 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
2522 return ICE_ERR_OUT_OF_RANGE;
2524 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
2526 cmd = &desc.params.del_rule;
2527 rule_id |= ICE_AQC_RULE_ID_VALID_M;
2528 cmd->rule_id = CPU_TO_LE16(rule_id);
2531 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
2533 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2537 * ice_aq_alloc_free_vsi_list
2538 * @hw: pointer to the HW struct
2539 * @vsi_list_id: VSI list ID returned or used for lookup
2540 * @lkup_type: switch rule filter lookup type
2541 * @opc: switch rules population command type - pass in the command opcode
2543 * allocates or free a VSI list resource
2545 static enum ice_status
2546 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
2547 enum ice_sw_lkup_type lkup_type,
2548 enum ice_adminq_opc opc)
2550 struct ice_aqc_alloc_free_res_elem *sw_buf;
2551 struct ice_aqc_res_elem *vsi_ele;
2552 enum ice_status status;
2555 buf_len = ice_struct_size(sw_buf, elem, 1);
2556 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2558 return ICE_ERR_NO_MEMORY;
2559 sw_buf->num_elems = CPU_TO_LE16(1);
2561 if (lkup_type == ICE_SW_LKUP_MAC ||
2562 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2563 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2564 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2565 lkup_type == ICE_SW_LKUP_PROMISC ||
2566 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2567 lkup_type == ICE_SW_LKUP_LAST) {
2568 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
2569 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
2571 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
2573 status = ICE_ERR_PARAM;
2574 goto ice_aq_alloc_free_vsi_list_exit;
2577 if (opc == ice_aqc_opc_free_res)
2578 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
2580 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
2582 goto ice_aq_alloc_free_vsi_list_exit;
2584 if (opc == ice_aqc_opc_alloc_res) {
2585 vsi_ele = &sw_buf->elem[0];
2586 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
2589 ice_aq_alloc_free_vsi_list_exit:
2590 ice_free(hw, sw_buf);
2595 * ice_aq_set_storm_ctrl - Sets storm control configuration
2596 * @hw: pointer to the HW struct
2597 * @bcast_thresh: represents the upper threshold for broadcast storm control
2598 * @mcast_thresh: represents the upper threshold for multicast storm control
2599 * @ctl_bitmask: storm control knobs
2601 * Sets the storm control configuration (0x0280)
2604 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
2607 struct ice_aqc_storm_cfg *cmd;
2608 struct ice_aq_desc desc;
2610 cmd = &desc.params.storm_conf;
2612 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
2614 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
2615 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
2616 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
2618 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2622 * ice_aq_get_storm_ctrl - gets storm control configuration
2623 * @hw: pointer to the HW struct
2624 * @bcast_thresh: represents the upper threshold for broadcast storm control
2625 * @mcast_thresh: represents the upper threshold for multicast storm control
2626 * @ctl_bitmask: storm control knobs
2628 * Gets the storm control configuration (0x0281)
2631 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
2634 enum ice_status status;
2635 struct ice_aq_desc desc;
2637 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
2639 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2641 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
2644 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
2647 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
2650 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
2657 * ice_aq_sw_rules - add/update/remove switch rules
2658 * @hw: pointer to the HW struct
2659 * @rule_list: pointer to switch rule population list
2660 * @rule_list_sz: total size of the rule list in bytes
2661 * @num_rules: number of switch rules in the rule_list
2662 * @opc: switch rules population command type - pass in the command opcode
2663 * @cd: pointer to command details structure or NULL
2665 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
2667 static enum ice_status
2668 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
2669 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2671 struct ice_aq_desc desc;
2672 enum ice_status status;
2674 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2676 if (opc != ice_aqc_opc_add_sw_rules &&
2677 opc != ice_aqc_opc_update_sw_rules &&
2678 opc != ice_aqc_opc_remove_sw_rules)
2679 return ICE_ERR_PARAM;
2681 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2683 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2684 desc.params.sw_rules.num_rules_fltr_entry_index =
2685 CPU_TO_LE16(num_rules);
2686 status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
2687 if (opc != ice_aqc_opc_add_sw_rules &&
2688 hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
2689 status = ICE_ERR_DOES_NOT_EXIST;
2695 * ice_aq_add_recipe - add switch recipe
2696 * @hw: pointer to the HW struct
2697 * @s_recipe_list: pointer to switch rule population list
2698 * @num_recipes: number of switch recipes in the list
2699 * @cd: pointer to command details structure or NULL
2704 ice_aq_add_recipe(struct ice_hw *hw,
2705 struct ice_aqc_recipe_data_elem *s_recipe_list,
2706 u16 num_recipes, struct ice_sq_cd *cd)
2708 struct ice_aqc_add_get_recipe *cmd;
2709 struct ice_aq_desc desc;
2712 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2713 cmd = &desc.params.add_get_recipe;
2714 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
2716 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
2717 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2719 buf_size = num_recipes * sizeof(*s_recipe_list);
2721 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
2725 * ice_aq_get_recipe - get switch recipe
2726 * @hw: pointer to the HW struct
2727 * @s_recipe_list: pointer to switch rule population list
2728 * @num_recipes: pointer to the number of recipes (input and output)
2729 * @recipe_root: root recipe number of recipe(s) to retrieve
2730 * @cd: pointer to command details structure or NULL
2734 * On input, *num_recipes should equal the number of entries in s_recipe_list.
2735 * On output, *num_recipes will equal the number of entries returned in
2738 * The caller must supply enough space in s_recipe_list to hold all possible
2739 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
2742 ice_aq_get_recipe(struct ice_hw *hw,
2743 struct ice_aqc_recipe_data_elem *s_recipe_list,
2744 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
2746 struct ice_aqc_add_get_recipe *cmd;
2747 struct ice_aq_desc desc;
2748 enum ice_status status;
2751 if (*num_recipes != ICE_MAX_NUM_RECIPES)
2752 return ICE_ERR_PARAM;
2754 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2755 cmd = &desc.params.add_get_recipe;
2756 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
2758 cmd->return_index = CPU_TO_LE16(recipe_root);
2759 cmd->num_sub_recipes = 0;
2761 buf_size = *num_recipes * sizeof(*s_recipe_list);
2763 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
2764 /* cppcheck-suppress constArgument */
2765 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
2771 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
2772 * @hw: pointer to the HW struct
2773 * @profile_id: package profile ID to associate the recipe with
2774 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2775 * @cd: pointer to command details structure or NULL
2776 * Recipe to profile association (0x0291)
2779 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2780 struct ice_sq_cd *cd)
2782 struct ice_aqc_recipe_to_profile *cmd;
2783 struct ice_aq_desc desc;
2785 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2786 cmd = &desc.params.recipe_to_profile;
2787 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
2788 cmd->profile_id = CPU_TO_LE16(profile_id);
2789 /* Set the recipe ID bit in the bitmask to let the device know which
2790 * profile we are associating the recipe to
2792 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
2793 ICE_NONDMA_TO_NONDMA);
2795 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2799 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
2800 * @hw: pointer to the HW struct
2801 * @profile_id: package profile ID to associate the recipe with
2802 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2803 * @cd: pointer to command details structure or NULL
2804 * Associate profile ID with given recipe (0x0293)
2807 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2808 struct ice_sq_cd *cd)
2810 struct ice_aqc_recipe_to_profile *cmd;
2811 struct ice_aq_desc desc;
2812 enum ice_status status;
2814 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2815 cmd = &desc.params.recipe_to_profile;
2816 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
2817 cmd->profile_id = CPU_TO_LE16(profile_id);
2819 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2821 ice_memcpy(r_bitmap, cmd->recipe_assoc,
2822 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
2828 * ice_alloc_recipe - add recipe resource
2829 * @hw: pointer to the hardware structure
2830 * @rid: recipe ID returned as response to AQ call
2832 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
2834 struct ice_aqc_alloc_free_res_elem *sw_buf;
2835 enum ice_status status;
2838 buf_len = ice_struct_size(sw_buf, elem, 1);
2839 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2841 return ICE_ERR_NO_MEMORY;
2843 sw_buf->num_elems = CPU_TO_LE16(1);
2844 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
2845 ICE_AQC_RES_TYPE_S) |
2846 ICE_AQC_RES_TYPE_FLAG_SHARED);
2847 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2848 ice_aqc_opc_alloc_res, NULL);
2850 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2851 ice_free(hw, sw_buf);
2856 /* ice_init_port_info - Initialize port_info with switch configuration data
2857 * @pi: pointer to port_info
2858 * @vsi_port_num: VSI number or port number
2859 * @type: Type of switch element (port or VSI)
2860 * @swid: switch ID of the switch the element is attached to
2861 * @pf_vf_num: PF or VF number
2862 * @is_vf: true if the element is a VF, false otherwise
2865 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2866 u16 swid, u16 pf_vf_num, bool is_vf)
2869 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2870 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2872 pi->pf_vf_num = pf_vf_num;
2874 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
2875 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
2878 ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
2883 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
2884 * @hw: pointer to the hardware structure
2886 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
2888 struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
2889 enum ice_status status;
2896 num_total_ports = 1;
2898 rbuf = (struct ice_aqc_get_sw_cfg_resp_elem *)
2899 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
2902 return ICE_ERR_NO_MEMORY;
2904 /* Multiple calls to ice_aq_get_sw_cfg may be required
2905 * to get all the switch configuration information. The need
2906 * for additional calls is indicated by ice_aq_get_sw_cfg
2907 * writing a non-zero value in req_desc
2910 struct ice_aqc_get_sw_cfg_resp_elem *ele;
2912 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
2913 &req_desc, &num_elems, NULL);
2918 for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
2919 u16 pf_vf_num, swid, vsi_port_num;
2923 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
2924 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
2926 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
2927 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
2929 swid = LE16_TO_CPU(ele->swid);
2931 if (LE16_TO_CPU(ele->pf_vf_num) &
2932 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
2935 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
2936 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
2939 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2940 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
2941 if (j == num_total_ports) {
2942 ice_debug(hw, ICE_DBG_SW, "more ports than expected\n");
2943 status = ICE_ERR_CFG;
2946 ice_init_port_info(hw->port_info,
2947 vsi_port_num, res_type, swid,
2955 } while (req_desc && !status);
2963 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2964 * @hw: pointer to the hardware structure
2965 * @fi: filter info structure to fill/update
2967 * This helper function populates the lb_en and lan_en elements of the provided
2968 * ice_fltr_info struct using the switch's type and characteristics of the
2969 * switch rule being configured.
2971 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
2973 if ((fi->flag & ICE_FLTR_RX) &&
2974 (fi->fltr_act == ICE_FWD_TO_VSI ||
2975 fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
2976 fi->lkup_type == ICE_SW_LKUP_LAST)
2980 if ((fi->flag & ICE_FLTR_TX) &&
2981 (fi->fltr_act == ICE_FWD_TO_VSI ||
2982 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2983 fi->fltr_act == ICE_FWD_TO_Q ||
2984 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2985 /* Setting LB for prune actions will result in replicated
2986 * packets to the internal switch that will be dropped.
2988 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2991 /* Set lan_en to TRUE if
2992 * 1. The switch is a VEB AND
2994 * 2.1 The lookup is a directional lookup like ethertype,
2995 * promiscuous, ethertype-MAC, promiscuous-VLAN
2996 * and default-port OR
2997 * 2.2 The lookup is VLAN, OR
2998 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2999 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
3003 * The switch is a VEPA.
3005 * In all other cases, the LAN enable has to be set to false.
3008 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3009 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
3010 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3011 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3012 fi->lkup_type == ICE_SW_LKUP_DFLT ||
3013 fi->lkup_type == ICE_SW_LKUP_VLAN ||
3014 (fi->lkup_type == ICE_SW_LKUP_MAC &&
3015 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
3016 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
3017 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
3026 * ice_fill_sw_rule - Helper function to fill switch rule structure
3027 * @hw: pointer to the hardware structure
3028 * @f_info: entry containing packet forwarding information
3029 * @s_rule: switch rule structure to be filled in based on mac_entry
3030 * @opc: switch rules population command type - pass in the command opcode
3033 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
3034 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
3036 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
3044 if (opc == ice_aqc_opc_remove_sw_rules) {
3045 s_rule->pdata.lkup_tx_rx.act = 0;
3046 s_rule->pdata.lkup_tx_rx.index =
3047 CPU_TO_LE16(f_info->fltr_rule_id);
3048 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
3052 eth_hdr_sz = sizeof(dummy_eth_header);
3053 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
3055 /* initialize the ether header with a dummy header */
3056 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
3057 ice_fill_sw_info(hw, f_info);
3059 switch (f_info->fltr_act) {
3060 case ICE_FWD_TO_VSI:
3061 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
3062 ICE_SINGLE_ACT_VSI_ID_M;
3063 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
3064 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3065 ICE_SINGLE_ACT_VALID_BIT;
3067 case ICE_FWD_TO_VSI_LIST:
3068 act |= ICE_SINGLE_ACT_VSI_LIST;
3069 act |= (f_info->fwd_id.vsi_list_id <<
3070 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
3071 ICE_SINGLE_ACT_VSI_LIST_ID_M;
3072 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
3073 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3074 ICE_SINGLE_ACT_VALID_BIT;
3077 act |= ICE_SINGLE_ACT_TO_Q;
3078 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3079 ICE_SINGLE_ACT_Q_INDEX_M;
3081 case ICE_DROP_PACKET:
3082 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
3083 ICE_SINGLE_ACT_VALID_BIT;
3085 case ICE_FWD_TO_QGRP:
3086 q_rgn = f_info->qgrp_size > 0 ?
3087 (u8)ice_ilog2(f_info->qgrp_size) : 0;
3088 act |= ICE_SINGLE_ACT_TO_Q;
3089 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3090 ICE_SINGLE_ACT_Q_INDEX_M;
3091 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
3092 ICE_SINGLE_ACT_Q_REGION_M;
3099 act |= ICE_SINGLE_ACT_LB_ENABLE;
3101 act |= ICE_SINGLE_ACT_LAN_ENABLE;
3103 switch (f_info->lkup_type) {
3104 case ICE_SW_LKUP_MAC:
3105 daddr = f_info->l_data.mac.mac_addr;
3107 case ICE_SW_LKUP_VLAN:
3108 vlan_id = f_info->l_data.vlan.vlan_id;
3109 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
3110 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
3111 act |= ICE_SINGLE_ACT_PRUNE;
3112 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
3115 case ICE_SW_LKUP_ETHERTYPE_MAC:
3116 daddr = f_info->l_data.ethertype_mac.mac_addr;
3118 case ICE_SW_LKUP_ETHERTYPE:
3119 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
3120 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
3122 case ICE_SW_LKUP_MAC_VLAN:
3123 daddr = f_info->l_data.mac_vlan.mac_addr;
3124 vlan_id = f_info->l_data.mac_vlan.vlan_id;
3126 case ICE_SW_LKUP_PROMISC_VLAN:
3127 vlan_id = f_info->l_data.mac_vlan.vlan_id;
3129 case ICE_SW_LKUP_PROMISC:
3130 daddr = f_info->l_data.mac_vlan.mac_addr;
3136 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
3137 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
3138 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
3140 /* Recipe set depending on lookup type */
3141 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
3142 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
3143 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
3146 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
3147 ICE_NONDMA_TO_NONDMA);
3149 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
3150 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
3151 *off = CPU_TO_BE16(vlan_id);
3154 /* Create the switch rule with the final dummy Ethernet header */
3155 if (opc != ice_aqc_opc_update_sw_rules)
3156 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
3160 * ice_add_marker_act
3161 * @hw: pointer to the hardware structure
3162 * @m_ent: the management entry for which sw marker needs to be added
3163 * @sw_marker: sw marker to tag the Rx descriptor with
3164 * @l_id: large action resource ID
3166 * Create a large action to hold software marker and update the switch rule
3167 * entry pointed by m_ent with newly created large action
3169 static enum ice_status
3170 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3171 u16 sw_marker, u16 l_id)
3173 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
3174 /* For software marker we need 3 large actions
3175 * 1. FWD action: FWD TO VSI or VSI LIST
3176 * 2. GENERIC VALUE action to hold the profile ID
3177 * 3. GENERIC VALUE action to hold the software marker ID
3179 const u16 num_lg_acts = 3;
3180 enum ice_status status;
3186 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3187 return ICE_ERR_PARAM;
3189 /* Create two back-to-back switch rules and submit them to the HW using
3190 * one memory buffer:
3194 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
3195 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3196 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3198 return ICE_ERR_NO_MEMORY;
3200 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3202 /* Fill in the first switch rule i.e. large action */
3203 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3204 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3205 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
3207 /* First action VSI forwarding or VSI list forwarding depending on how
3210 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
3211 m_ent->fltr_info.fwd_id.hw_vsi_id;
3213 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3214 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
3215 if (m_ent->vsi_count > 1)
3216 act |= ICE_LG_ACT_VSI_LIST;
3217 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3219 /* Second action descriptor type */
3220 act = ICE_LG_ACT_GENERIC;
3222 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
3223 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3225 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
3226 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
3228 /* Third action Marker value */
3229 act |= ICE_LG_ACT_GENERIC;
3230 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
3231 ICE_LG_ACT_GENERIC_VALUE_M;
3233 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
3235 /* call the fill switch rule to fill the lookup Tx Rx structure */
3236 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3237 ice_aqc_opc_update_sw_rules);
3239 /* Update the action to point to the large action ID */
3240 rx_tx->pdata.lkup_tx_rx.act =
3241 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
3242 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
3243 ICE_SINGLE_ACT_PTR_VAL_M));
3245 /* Use the filter rule ID of the previously created rule with single
3246 * act. Once the update happens, hardware will treat this as large
3249 rx_tx->pdata.lkup_tx_rx.index =
3250 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
3252 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3253 ice_aqc_opc_update_sw_rules, NULL);
3255 m_ent->lg_act_idx = l_id;
3256 m_ent->sw_marker_id = sw_marker;
3259 ice_free(hw, lg_act);
3264 * ice_add_counter_act - add/update filter rule with counter action
3265 * @hw: pointer to the hardware structure
3266 * @m_ent: the management entry for which counter needs to be added
3267 * @counter_id: VLAN counter ID returned as part of allocate resource
3268 * @l_id: large action resource ID
3270 static enum ice_status
3271 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3272 u16 counter_id, u16 l_id)
3274 struct ice_aqc_sw_rules_elem *lg_act;
3275 struct ice_aqc_sw_rules_elem *rx_tx;
3276 enum ice_status status;
3277 /* 2 actions will be added while adding a large action counter */
3278 const int num_acts = 2;
3285 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3286 return ICE_ERR_PARAM;
3288 /* Create two back-to-back switch rules and submit them to the HW using
3289 * one memory buffer:
3293 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
3294 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3295 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3297 return ICE_ERR_NO_MEMORY;
3299 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3301 /* Fill in the first switch rule i.e. large action */
3302 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3303 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3304 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
3306 /* First action VSI forwarding or VSI list forwarding depending on how
3309 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
3310 m_ent->fltr_info.fwd_id.hw_vsi_id;
3312 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3313 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
3314 ICE_LG_ACT_VSI_LIST_ID_M;
3315 if (m_ent->vsi_count > 1)
3316 act |= ICE_LG_ACT_VSI_LIST;
3317 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3319 /* Second action counter ID */
3320 act = ICE_LG_ACT_STAT_COUNT;
3321 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
3322 ICE_LG_ACT_STAT_COUNT_M;
3323 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3325 /* call the fill switch rule to fill the lookup Tx Rx structure */
3326 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3327 ice_aqc_opc_update_sw_rules);
3329 act = ICE_SINGLE_ACT_PTR;
3330 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
3331 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
3333 /* Use the filter rule ID of the previously created rule with single
3334 * act. Once the update happens, hardware will treat this as large
3337 f_rule_id = m_ent->fltr_info.fltr_rule_id;
3338 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
3340 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3341 ice_aqc_opc_update_sw_rules, NULL);
3343 m_ent->lg_act_idx = l_id;
3344 m_ent->counter_index = counter_id;
3347 ice_free(hw, lg_act);
3352 * ice_create_vsi_list_map
3353 * @hw: pointer to the hardware structure
3354 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
3355 * @num_vsi: number of VSI handles in the array
3356 * @vsi_list_id: VSI list ID generated as part of allocate resource
3358 * Helper function to create a new entry of VSI list ID to VSI mapping
3359 * using the given VSI list ID
3361 static struct ice_vsi_list_map_info *
3362 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3365 struct ice_switch_info *sw = hw->switch_info;
3366 struct ice_vsi_list_map_info *v_map;
3369 v_map = (struct ice_vsi_list_map_info *)ice_malloc(hw, sizeof(*v_map));
3373 v_map->vsi_list_id = vsi_list_id;
3375 for (i = 0; i < num_vsi; i++)
3376 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
3378 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
3383 * ice_update_vsi_list_rule
3384 * @hw: pointer to the hardware structure
3385 * @vsi_handle_arr: array of VSI handles to form a VSI list
3386 * @num_vsi: number of VSI handles in the array
3387 * @vsi_list_id: VSI list ID generated as part of allocate resource
3388 * @remove: Boolean value to indicate if this is a remove action
3389 * @opc: switch rules population command type - pass in the command opcode
3390 * @lkup_type: lookup type of the filter
3392 * Call AQ command to add a new switch rule or update existing switch rule
3393 * using the given VSI list ID
3395 static enum ice_status
3396 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3397 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
3398 enum ice_sw_lkup_type lkup_type)
3400 struct ice_aqc_sw_rules_elem *s_rule;
3401 enum ice_status status;
3407 return ICE_ERR_PARAM;
3409 if (lkup_type == ICE_SW_LKUP_MAC ||
3410 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
3411 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3412 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3413 lkup_type == ICE_SW_LKUP_PROMISC ||
3414 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3415 lkup_type == ICE_SW_LKUP_LAST)
3416 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
3417 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
3418 else if (lkup_type == ICE_SW_LKUP_VLAN)
3419 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
3420 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
3422 return ICE_ERR_PARAM;
3424 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
3425 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3427 return ICE_ERR_NO_MEMORY;
3428 for (i = 0; i < num_vsi; i++) {
3429 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
3430 status = ICE_ERR_PARAM;
3433 /* AQ call requires hw_vsi_id(s) */
3434 s_rule->pdata.vsi_list.vsi[i] =
3435 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
3438 s_rule->type = CPU_TO_LE16(rule_type);
3439 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
3440 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
3442 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
3445 ice_free(hw, s_rule);
3450 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
3451 * @hw: pointer to the HW struct
3452 * @vsi_handle_arr: array of VSI handles to form a VSI list
3453 * @num_vsi: number of VSI handles in the array
3454 * @vsi_list_id: stores the ID of the VSI list to be created
3455 * @lkup_type: switch rule filter's lookup type
3457 static enum ice_status
3458 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3459 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
3461 enum ice_status status;
3463 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
3464 ice_aqc_opc_alloc_res);
3468 /* Update the newly created VSI list to include the specified VSIs */
3469 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
3470 *vsi_list_id, false,
3471 ice_aqc_opc_add_sw_rules, lkup_type);
3475 * ice_create_pkt_fwd_rule
3476 * @hw: pointer to the hardware structure
3477 * @recp_list: corresponding filter management list
3478 * @f_entry: entry containing packet forwarding information
3480 * Create switch rule with given filter information and add an entry
3481 * to the corresponding filter management list to track this switch rule
3484 static enum ice_status
3485 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3486 struct ice_fltr_list_entry *f_entry)
3488 struct ice_fltr_mgmt_list_entry *fm_entry;
3489 struct ice_aqc_sw_rules_elem *s_rule;
3490 enum ice_status status;
/* Rule buffer is sized for an Rx/Tx lookup rule that carries the dummy
 * ethernet header (ICE_SW_RULE_RX_TX_ETH_HDR_SIZE).
 */
3492 s_rule = (struct ice_aqc_sw_rules_elem *)
3493 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
3495 return ICE_ERR_NO_MEMORY;
3496 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3497 ice_malloc(hw, sizeof(*fm_entry));
3499 status = ICE_ERR_NO_MEMORY;
3500 goto ice_create_pkt_fwd_rule_exit;
/* Track the caller's filter info in the new management entry */
3503 fm_entry->fltr_info = f_entry->fltr_info;
3505 /* Initialize all the fields for the management entry */
3506 fm_entry->vsi_count = 1;
3507 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
3508 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
3509 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
3511 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
3512 ice_aqc_opc_add_sw_rules);
/* Program the rule in HW via the admin queue */
3514 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3515 ice_aqc_opc_add_sw_rules, NULL);
/* On AQ failure the management entry is discarded; s_rule itself is
 * always freed at the exit label below.
 */
3517 ice_free(hw, fm_entry);
3518 goto ice_create_pkt_fwd_rule_exit;
/* FW returns the rule index; record it in both the caller's entry and
 * the book-keeping entry so later update/remove can reference it.
 */
3521 f_entry->fltr_info.fltr_rule_id =
3522 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3523 fm_entry->fltr_info.fltr_rule_id =
3524 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3526 /* The book keeping entries will get removed when base driver
3527 * calls remove filter AQ command
3529 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
3531 ice_create_pkt_fwd_rule_exit:
3532 ice_free(hw, s_rule);
3537 * ice_update_pkt_fwd_rule
3538 * @hw: pointer to the hardware structure
3539 * @f_info: filter information for switch rule
3541 * Call AQ command to update a previously created switch rule with a
3544 static enum ice_status
3545 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
3547 struct ice_aqc_sw_rules_elem *s_rule;
3548 enum ice_status status;
3550 s_rule = (struct ice_aqc_sw_rules_elem *)
3551 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE)
3553 return ICE_ERR_NO_MEMORY;
3555 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
/* The FW-assigned rule index identifies which existing rule to update */
3557 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
3559 /* Update switch rule with new rule set to forward VSI list */
3560 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3561 ice_aqc_opc_update_sw_rules, NULL);
3563 ice_free(hw, s_rule);
3568 * ice_update_sw_rule_bridge_mode
3569 * @hw: pointer to the HW struct
3571 * Updates unicast switch filter rules based on VEB/VEPA mode
3573 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
3575 struct ice_switch_info *sw = hw->switch_info;
3576 struct ice_fltr_mgmt_list_entry *fm_entry;
3577 enum ice_status status = ICE_SUCCESS;
3578 struct LIST_HEAD_TYPE *rule_head;
3579 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
/* Only MAC-lookup rules are affected by the bridge mode change */
3581 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3582 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3584 ice_acquire_lock(rule_lock);
3585 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
3587 struct ice_fltr_info *fi = &fm_entry->fltr_info;
3588 u8 *addr = fi->l_data.mac.mac_addr;
3590 /* Update unicast Tx rules to reflect the selected
3593 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
3594 (fi->fltr_act == ICE_FWD_TO_VSI ||
3595 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
3596 fi->fltr_act == ICE_FWD_TO_Q ||
3597 fi->fltr_act == ICE_FWD_TO_QGRP)) {
/* Re-program each matching rule in HW with current settings */
3598 status = ice_update_pkt_fwd_rule(hw, fi);
3604 ice_release_lock(rule_lock);
3610 * ice_add_update_vsi_list
3611 * @hw: pointer to the hardware structure
3612 * @m_entry: pointer to current filter management list entry
3613 * @cur_fltr: filter information from the book keeping entry
3614 * @new_fltr: filter information with the new VSI to be added
3616 * Call AQ command to add or update previously created VSI list with new VSI.
3618 * Helper function to do book keeping associated with adding filter information
3619 * The algorithm to do the book keeping is described below :
3620 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
3621 * if only one VSI has been added till now
3622 * Allocate a new VSI list and add two VSIs
3623 * to this list using switch rule command
3624 * Update the previously created switch rule with the
3625 * newly created VSI list ID
3626 * if a VSI list was previously created
3627 * Add the new VSI to the previously created VSI list set
3628 * using the update switch rule command
3630 static enum ice_status
3631 ice_add_update_vsi_list(struct ice_hw *hw,
3632 struct ice_fltr_mgmt_list_entry *m_entry,
3633 struct ice_fltr_info *cur_fltr,
3634 struct ice_fltr_info *new_fltr)
3636 enum ice_status status = ICE_SUCCESS;
3637 u16 vsi_list_id = 0;
/* Queue/queue-group actions cannot be combined into a VSI list */
3639 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
3640 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
3641 return ICE_ERR_NOT_IMPL;
3643 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
3644 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
3645 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
3646 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
3647 return ICE_ERR_NOT_IMPL;
/* Case 1: rule currently forwards to a single VSI (no list yet) --
 * promote it to a two-entry VSI list.
 */
3649 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
3650 /* Only one entry existed in the mapping and it was not already
3651 * a part of a VSI list. So, create a VSI list with the old and
3654 struct ice_fltr_info tmp_fltr;
3655 u16 vsi_handle_arr[2];
3657 /* A rule already exists with the new VSI being added */
3658 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
3659 return ICE_ERR_ALREADY_EXISTS;
3661 vsi_handle_arr[0] = cur_fltr->vsi_handle;
3662 vsi_handle_arr[1] = new_fltr->vsi_handle;
3663 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3665 new_fltr->lkup_type);
3669 tmp_fltr = *new_fltr;
3670 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
3671 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3672 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3673 /* Update the previous switch rule of "MAC forward to VSI" to
3674 * "MAC fwd to VSI list"
3676 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Keep book-keeping in sync with the rule just written to HW */
3680 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
3681 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3682 m_entry->vsi_list_info =
3683 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3686 /* If this entry was large action then the large action needs
3687 * to be updated to point to FWD to VSI list
3689 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
3691 ice_add_marker_act(hw, m_entry,
3692 m_entry->sw_marker_id,
3693 m_entry->lg_act_idx);
/* Case 2: a VSI list already exists -- append the new VSI to it */
3695 u16 vsi_handle = new_fltr->vsi_handle;
3696 enum ice_adminq_opc opcode;
3698 if (!m_entry->vsi_list_info)
3701 /* A rule already exists with the new VSI being added */
3702 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
3705 /* Update the previously created VSI list set with
3706 * the new VSI ID passed in
3708 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
3709 opcode = ice_aqc_opc_update_sw_rules;
3711 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
3712 vsi_list_id, false, opcode,
3713 new_fltr->lkup_type);
3714 /* update VSI list mapping info with new VSI ID */
3716 ice_set_bit(vsi_handle,
3717 m_entry->vsi_list_info->vsi_map);
/* One more VSI now subscribes to this filter */
3720 m_entry->vsi_count++;
3725 * ice_find_rule_entry - Search a rule entry
3726 * @list_head: head of rule list
3727 * @f_info: rule information
3729 * Helper function to search for a given rule entry
3730 * Returns pointer to entry storing the rule if found
3732 static struct ice_fltr_mgmt_list_entry *
3733 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
3734 struct ice_fltr_info *f_info)
3736 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
3738 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
/* Match on the full lookup data (l_data) plus the Rx/Tx flag */
3740 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3741 sizeof(f_info->l_data)) &&
3742 f_info->flag == list_itr->fltr_info.flag) {
3751 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
3752 * @recp_list: VSI lists needs to be searched
3753 * @vsi_handle: VSI handle to be found in VSI list
3754 * @vsi_list_id: VSI list ID found containing vsi_handle
3756 * Helper function to search a VSI list with single entry containing given VSI
3757 * handle element. This can be extended further to search VSI list with more
3758 * than 1 vsi_count. Returns pointer to VSI list entry if found.
3760 static struct ice_vsi_list_map_info *
3761 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
3764 struct ice_vsi_list_map_info *map_info = NULL;
3765 struct LIST_HEAD_TYPE *list_head;
3767 list_head = &recp_list->filt_rules;
/* Advanced-rule recipes store a different entry type, so the list must
 * be walked with the matching iterator type.
 */
3768 if (recp_list->adv_rule) {
3769 struct ice_adv_fltr_mgmt_list_entry *list_itr;
3771 LIST_FOR_EACH_ENTRY(list_itr, list_head,
3772 ice_adv_fltr_mgmt_list_entry,
3774 if (list_itr->vsi_list_info) {
3775 map_info = list_itr->vsi_list_info;
3776 if (ice_is_bit_set(map_info->vsi_map,
3778 *vsi_list_id = map_info->vsi_list_id;
/* Non-advanced recipes: only consider entries whose list holds exactly
 * one VSI (see function comment above).
 */
3784 struct ice_fltr_mgmt_list_entry *list_itr;
3786 LIST_FOR_EACH_ENTRY(list_itr, list_head,
3787 ice_fltr_mgmt_list_entry,
3789 if (list_itr->vsi_count == 1 &&
3790 list_itr->vsi_list_info) {
3791 map_info = list_itr->vsi_list_info;
3792 if (ice_is_bit_set(map_info->vsi_map,
3794 *vsi_list_id = map_info->vsi_list_id;
3804 * ice_add_rule_internal - add rule for a given lookup type
3805 * @hw: pointer to the hardware structure
3806 * @recp_list: recipe list for which rule has to be added
3807 * @lport: logic port number on which function add rule
3808 * @f_entry: structure containing MAC forwarding information
3810 * Adds or updates the rule lists for a given recipe
3812 static enum ice_status
3813 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3814 u8 lport, struct ice_fltr_list_entry *f_entry)
3816 struct ice_fltr_info *new_fltr, *cur_fltr;
3817 struct ice_fltr_mgmt_list_entry *m_entry;
3818 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3819 enum ice_status status = ICE_SUCCESS;
3821 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3822 return ICE_ERR_PARAM;
3824 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
3825 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
3826 f_entry->fltr_info.fwd_id.hw_vsi_id =
3827 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3829 rule_lock = &recp_list->filt_rule_lock;
3831 ice_acquire_lock(rule_lock);
3832 new_fltr = &f_entry->fltr_info;
/* Rule source: port for Rx rules, HW VSI number for Tx rules */
3833 if (new_fltr->flag & ICE_FLTR_RX)
3834 new_fltr->src = lport;
3835 else if (new_fltr->flag & ICE_FLTR_TX)
3837 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
/* First subscriber creates the rule; later ones join via VSI list */
3839 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
3841 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3842 goto exit_add_rule_internal;
3845 cur_fltr = &m_entry->fltr_info;
3846 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
3848 exit_add_rule_internal:
3849 ice_release_lock(rule_lock);
3854 * ice_remove_vsi_list_rule
3855 * @hw: pointer to the hardware structure
3856 * @vsi_list_id: VSI list ID generated as part of allocate resource
3857 * @lkup_type: switch rule filter lookup type
3859 * The VSI list should be emptied before this function is called to remove the
3862 static enum ice_status
3863 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
3864 enum ice_sw_lkup_type lkup_type)
3866 /* Free the vsi_list resource that we allocated. It is assumed that the
3867 * list is empty at this point.
3869 return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
3870 ice_aqc_opc_free_res)
3874 * ice_rem_update_vsi_list
3875 * @hw: pointer to the hardware structure
3876 * @vsi_handle: VSI handle of the VSI to remove
3877 * @fm_list: filter management entry for which the VSI list management needs to
3880 static enum ice_status
3881 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
3882 struct ice_fltr_mgmt_list_entry *fm_list)
3884 enum ice_sw_lkup_type lkup_type;
3885 enum ice_status status = ICE_SUCCESS;
/* Only applicable to rules that currently forward to a VSI list */
3888 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
3889 fm_list->vsi_count == 0)
3890 return ICE_ERR_PARAM;
3892 /* A rule with the VSI being removed does not exist */
3893 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
3894 return ICE_ERR_DOES_NOT_EXIST;
3896 lkup_type = fm_list->fltr_info.lkup_type;
3897 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
/* Remove this VSI from the HW VSI list (remove flag = true) */
3898 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
3899 ice_aqc_opc_update_sw_rules,
3904 fm_list->vsi_count--;
3905 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
/* With one VSI left (non-VLAN), collapse the list back to a plain
 * "forward to VSI" rule for the remaining VSI.
 */
3907 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
3908 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
3909 struct ice_vsi_list_map_info *vsi_list_info =
3910 fm_list->vsi_list_info;
3913 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
3915 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
3916 return ICE_ERR_OUT_OF_RANGE;
3918 /* Make sure VSI list is empty before removing it below */
3919 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
3921 ice_aqc_opc_update_sw_rules,
3926 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
3927 tmp_fltr_info.fwd_id.hw_vsi_id =
3928 ice_get_hw_vsi_num(hw, rem_vsi_handle);
3929 tmp_fltr_info.vsi_handle = rem_vsi_handle;
3930 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
3932 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
3933 tmp_fltr_info.fwd_id.hw_vsi_id, status);
3937 fm_list->fltr_info = tmp_fltr_info;
/* The list resource itself is freed once it is no longer referenced:
 * one VSI left for non-VLAN rules, zero left for VLAN rules.
 */
3940 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
3941 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
3942 struct ice_vsi_list_map_info *vsi_list_info =
3943 fm_list->vsi_list_info;
3945 /* Remove the VSI list since it is no longer used */
3946 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
3948 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
3949 vsi_list_id, status);
3953 LIST_DEL(&vsi_list_info->list_entry);
3954 ice_free(hw, vsi_list_info);
3955 fm_list->vsi_list_info = NULL;
3962 * ice_remove_rule_internal - Remove a filter rule of a given type
3964 * @hw: pointer to the hardware structure
3965 * @recp_list: recipe list for which the rule needs to removed
3966 * @f_entry: rule entry containing filter information
3968 static enum ice_status
3969 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3970 struct ice_fltr_list_entry *f_entry)
3972 struct ice_fltr_mgmt_list_entry *list_elem;
3973 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3974 enum ice_status status = ICE_SUCCESS;
3975 bool remove_rule = false;
3978 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3979 return ICE_ERR_PARAM;
3980 f_entry->fltr_info.fwd_id.hw_vsi_id =
3981 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3983 rule_lock = &recp_list->filt_rule_lock;
3984 ice_acquire_lock(rule_lock);
3985 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
3986 &f_entry->fltr_info);
3988 status = ICE_ERR_DOES_NOT_EXIST;
/* Decide how to tear down depending on whether the rule forwards to a
 * single VSI or to a (possibly shared) VSI list.
 */
3992 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
3994 } else if (!list_elem->vsi_list_info) {
3995 status = ICE_ERR_DOES_NOT_EXIST;
3997 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
3998 /* a ref_cnt > 1 indicates that the vsi_list is being
3999 * shared by multiple rules. Decrement the ref_cnt and
4000 * remove this rule, but do not modify the list, as it
4001 * is in-use by other rules.
4003 list_elem->vsi_list_info->ref_cnt--;
4006 /* a ref_cnt of 1 indicates the vsi_list is only used
4007 * by one rule. However, the original removal request is only
4008 * for a single VSI. Update the vsi_list first, and only
4009 * remove the rule if there are no further VSIs in this list.
4011 vsi_handle = f_entry->fltr_info.vsi_handle;
4012 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
4015 /* if VSI count goes to zero after updating the VSI list */
4016 if (list_elem->vsi_count == 0)
4021 /* Remove the lookup rule */
4022 struct ice_aqc_sw_rules_elem *s_rule;
/* Removal does not need the packet header payload, so the smaller
 * no-header rule buffer size is used here.
 */
4024 s_rule = (struct ice_aqc_sw_rules_elem *)
4025 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
4027 status = ICE_ERR_NO_MEMORY;
4031 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
4032 ice_aqc_opc_remove_sw_rules);
4034 status = ice_aq_sw_rules(hw, s_rule,
4035 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
4036 ice_aqc_opc_remove_sw_rules, NULL);
4038 /* Remove a book keeping from the list */
4039 ice_free(hw, s_rule);
4044 LIST_DEL(&list_elem->list_entry);
4045 ice_free(hw, list_elem);
4048 ice_release_lock(rule_lock);
4053 * ice_aq_get_res_alloc - get allocated resources
4054 * @hw: pointer to the HW struct
4055 * @num_entries: pointer to u16 to store the number of resource entries returned
4056 * @buf: pointer to buffer
4057 * @buf_size: size of buf
4058 * @cd: pointer to command details structure or NULL
4060 * The caller-supplied buffer must be large enough to store the resource
4061 * information for all resource types. Each resource type is an
4062 * ice_aqc_get_res_resp_elem structure.
4065 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
4066 struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
4067 struct ice_sq_cd *cd)
4069 struct ice_aqc_get_res_alloc *resp;
4070 enum ice_status status;
4071 struct ice_aq_desc desc;
4074 return ICE_ERR_BAD_PTR;
4076 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
4077 return ICE_ERR_INVAL_SIZE;
4079 resp = &desc.params.get_res;
4081 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
4082 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* num_entries is optional; only filled on AQ success */
4084 if (!status && num_entries)
4085 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
4091 * ice_aq_get_res_descs - get allocated resource descriptors
4092 * @hw: pointer to the hardware structure
4093 * @num_entries: number of resource entries in buffer
4094 * @buf: structure to hold response data buffer
4095 * @buf_size: size of buffer
4096 * @res_type: resource type
4097 * @res_shared: is resource shared
4098 * @desc_id: input - first desc ID to start; output - next desc ID
4099 * @cd: pointer to command details structure or NULL
4102 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
4103 struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
4104 bool res_shared, u16 *desc_id, struct ice_sq_cd *cd)
4106 struct ice_aqc_get_allocd_res_desc *cmd;
4107 struct ice_aq_desc desc;
4108 enum ice_status status;
4110 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4112 cmd = &desc.params.get_res_desc;
4115 return ICE_ERR_PARAM;
/* Buffer must hold exactly num_entries descriptors */
4117 if (buf_size != (num_entries * sizeof(*buf)))
4118 return ICE_ERR_PARAM;
4120 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
/* Encode resource type and the shared flag into the command word */
4122 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
4123 ICE_AQC_RES_TYPE_M) | (res_shared ?
4124 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
4125 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
4127 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* Return the continuation cursor for the caller's next invocation */
4129 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
4135 * ice_add_mac_rule - Add a MAC address based filter rule
4136 * @hw: pointer to the hardware structure
4137 * @m_list: list of MAC addresses and forwarding information
4138 * @sw: pointer to switch info struct for which function add rule
4139 * @lport: logic port number on which function add rule
4141 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
4142 * multiple unicast addresses, the function assumes that all the
4143 * addresses are unique in a given add_mac call. It doesn't
4144 * check for duplicates in this case, removing duplicates from a given
4145 * list should be taken care of in the caller of this function.
4147 static enum ice_status
4148 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4149 struct ice_switch_info *sw, u8 lport)
4151 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
4152 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
4153 struct ice_fltr_list_entry *m_list_itr;
4154 struct LIST_HEAD_TYPE *rule_head;
4155 u16 total_elem_left, s_rule_size;
4156 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4157 enum ice_status status = ICE_SUCCESS;
4158 u16 num_unicast = 0;
4162 rule_lock = &recp_list->filt_rule_lock;
4163 rule_head = &recp_list->filt_rules;
/* Phase 1: validate every entry; multicast (and shared-unicast)
 * addresses are added one-by-one, exclusive unicast addresses are
 * only counted here for a later bulk add.
 */
4165 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4167 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
4171 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
4172 vsi_handle = m_list_itr->fltr_info.vsi_handle;
4173 if (!ice_is_vsi_valid(hw, vsi_handle))
4174 return ICE_ERR_PARAM;
4175 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4176 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
4177 /* update the src in case it is VSI num */
4178 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
4179 return ICE_ERR_PARAM;
4180 m_list_itr->fltr_info.src = hw_vsi_id;
4181 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
4182 IS_ZERO_ETHER_ADDR(add))
4183 return ICE_ERR_PARAM;
4184 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4185 /* Don't overwrite the unicast address */
4186 ice_acquire_lock(rule_lock);
4187 if (ice_find_rule_entry(rule_head,
4188 &m_list_itr->fltr_info)) {
4189 ice_release_lock(rule_lock);
4190 return ICE_ERR_ALREADY_EXISTS;
4192 ice_release_lock(rule_lock);
4194 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
4195 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
4196 m_list_itr->status =
4197 ice_add_rule_internal(hw, recp_list, lport,
4199 if (m_list_itr->status)
4200 return m_list_itr->status;
/* Phase 2: build one contiguous buffer with a rule per unicast MAC */
4204 ice_acquire_lock(rule_lock);
4205 /* Exit if no suitable entries were found for adding bulk switch rule */
4207 status = ICE_SUCCESS;
4208 goto ice_add_mac_exit;
4211 /* Allocate switch rule buffer for the bulk update for unicast */
4212 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
4213 s_rule = (struct ice_aqc_sw_rules_elem *)
4214 ice_calloc(hw, num_unicast, s_rule_size);
4216 status = ICE_ERR_NO_MEMORY;
4217 goto ice_add_mac_exit;
4221 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4223 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4224 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4226 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4227 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
4228 ice_aqc_opc_add_sw_rules);
4229 r_iter = (struct ice_aqc_sw_rules_elem *)
4230 ((u8 *)r_iter + s_rule_size);
4234 /* Call AQ bulk switch rule update for all unicast addresses */
4236 /* Call AQ switch rule in AQ_MAX chunk */
/* Phase 3: submit the buffer in chunks that fit ICE_AQ_MAX_BUF_LEN */
4237 for (total_elem_left = num_unicast; total_elem_left > 0;
4238 total_elem_left -= elem_sent) {
4239 struct ice_aqc_sw_rules_elem *entry = r_iter;
4241 elem_sent = MIN_T(u8, total_elem_left,
4242 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
4243 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
4244 elem_sent, ice_aqc_opc_add_sw_rules,
4247 goto ice_add_mac_exit;
4248 r_iter = (struct ice_aqc_sw_rules_elem *)
4249 ((u8 *)r_iter + (elem_sent * s_rule_size));
4252 /* Fill up rule ID based on the value returned from FW */
/* Phase 4: walk the buffer again to harvest FW-assigned rule IDs and
 * create one book-keeping entry per unicast MAC.
 */
4254 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4256 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4257 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4258 struct ice_fltr_mgmt_list_entry *fm_entry;
4260 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4261 f_info->fltr_rule_id =
4262 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
4263 f_info->fltr_act = ICE_FWD_TO_VSI;
4264 /* Create an entry to track this MAC address */
4265 fm_entry = (struct ice_fltr_mgmt_list_entry *)
4266 ice_malloc(hw, sizeof(*fm_entry));
4268 status = ICE_ERR_NO_MEMORY;
4269 goto ice_add_mac_exit;
4271 fm_entry->fltr_info = *f_info;
4272 fm_entry->vsi_count = 1;
4273 /* The book keeping entries will get removed when
4274 * base driver calls remove filter AQ command
4277 LIST_ADD(&fm_entry->list_entry, rule_head);
4278 r_iter = (struct ice_aqc_sw_rules_elem *)
4279 ((u8 *)r_iter + s_rule_size);
4284 ice_release_lock(rule_lock);
4286 ice_free(hw, s_rule);
4291 * ice_add_mac - Add a MAC address based filter rule
4292 * @hw: pointer to the hardware structure
4293 * @m_list: list of MAC addresses and forwarding information
4295 * Function add MAC rule for logical port from HW struct
4297 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4300 return ICE_ERR_PARAM;
/* Thin wrapper: delegates to ice_add_mac_rule() for this port's lport */
4302 return ice_add_mac_rule(hw, m_list, hw->switch_info,
4303 hw->port_info->lport);
4307 * ice_add_vlan_internal - Add one VLAN based filter rule
4308 * @hw: pointer to the hardware structure
4309 * @recp_list: recipe list for which rule has to be added
4310 * @f_entry: filter entry containing one VLAN information
4312 static enum ice_status
4313 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4314 struct ice_fltr_list_entry *f_entry)
4316 struct ice_fltr_mgmt_list_entry *v_list_itr;
4317 struct ice_fltr_info *new_fltr, *cur_fltr;
4318 enum ice_sw_lkup_type lkup_type;
4319 u16 vsi_list_id = 0, vsi_handle;
4320 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4321 enum ice_status status = ICE_SUCCESS;
4323 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4324 return ICE_ERR_PARAM;
4326 f_entry->fltr_info.fwd_id.hw_vsi_id =
4327 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4328 new_fltr = &f_entry->fltr_info;
4330 /* VLAN ID should only be 12 bits */
4331 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
4332 return ICE_ERR_PARAM;
4334 if (new_fltr->src_id != ICE_SRC_ID_VSI)
4335 return ICE_ERR_PARAM;
4337 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
4338 lkup_type = new_fltr->lkup_type;
4339 vsi_handle = new_fltr->vsi_handle;
4340 rule_lock = &recp_list->filt_rule_lock;
4341 ice_acquire_lock(rule_lock);
4342 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
/* Case 1: first rule for this VLAN -- create or reuse a VSI list and
 * install a new switch rule forwarding to it.
 */
4344 struct ice_vsi_list_map_info *map_info = NULL;
4346 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
4347 /* All VLAN pruning rules use a VSI list. Check if
4348 * there is already a VSI list containing VSI that we
4349 * want to add. If found, use the same vsi_list_id for
4350 * this new VLAN rule or else create a new list.
4352 map_info = ice_find_vsi_list_entry(recp_list,
4356 status = ice_create_vsi_list_rule(hw,
4364 /* Convert the action to forwarding to a VSI list. */
4365 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
4366 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
4369 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
4371 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
4374 status = ICE_ERR_DOES_NOT_EXIST;
4377 /* reuse VSI list for new rule and increment ref_cnt */
4379 v_list_itr->vsi_list_info = map_info;
4380 map_info->ref_cnt++;
4382 v_list_itr->vsi_list_info =
4383 ice_create_vsi_list_map(hw, &vsi_handle,
/* Case 2: rule exists and its list is not shared -- append the VSI */
4387 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
4388 /* Update existing VSI list to add new VSI ID only if it used
4391 cur_fltr = &v_list_itr->fltr_info;
4392 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
4395 /* If VLAN rule exists and VSI list being used by this rule is
4396 * referenced by more than 1 VLAN rule. Then create a new VSI
4397 * list appending previous VSI with new VSI and update existing
4398 * VLAN rule to point to new VSI list ID
4400 struct ice_fltr_info tmp_fltr;
4401 u16 vsi_handle_arr[2];
4404 /* Current implementation only supports reusing VSI list with
4405 * one VSI count. We should never hit below condition
4407 if (v_list_itr->vsi_count > 1 &&
4408 v_list_itr->vsi_list_info->ref_cnt > 1) {
4409 ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
4410 status = ICE_ERR_CFG;
4415 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
4418 /* A rule already exists with the new VSI being added */
4419 if (cur_handle == vsi_handle) {
4420 status = ICE_ERR_ALREADY_EXISTS;
4424 vsi_handle_arr[0] = cur_handle;
4425 vsi_handle_arr[1] = vsi_handle;
4426 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
4427 &vsi_list_id, lkup_type);
4431 tmp_fltr = v_list_itr->fltr_info;
4432 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
4433 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
4434 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4435 /* Update the previous switch rule to a new VSI list which
4436 * includes current VSI that is requested
4438 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
4442 /* before overriding VSI list map info. decrement ref_cnt of
4445 v_list_itr->vsi_list_info->ref_cnt--;
4447 /* now update to newly created list */
4448 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
4449 v_list_itr->vsi_list_info =
4450 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4452 v_list_itr->vsi_count++;
4456 ice_release_lock(rule_lock);
4461 * ice_add_vlan_rule - Add VLAN based filter rule
4462 * @hw: pointer to the hardware structure
4463 * @v_list: list of VLAN entries and forwarding information
4464 * @sw: pointer to switch info struct for which function add rule
4466 static enum ice_status
4467 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4468 struct ice_switch_info *sw)
4470 struct ice_fltr_list_entry *v_list_itr;
4471 struct ice_sw_recipe *recp_list;
4473 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
4474 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
4476 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
4477 return ICE_ERR_PARAM;
4478 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
/* Per-entry status is stored; first failure aborts the whole list */
4479 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
4481 if (v_list_itr->status)
4482 return v_list_itr->status;
4488 * ice_add_vlan - Add a VLAN based filter rule
4489 * @hw: pointer to the hardware structure
4490 * @v_list: list of VLAN and forwarding information
4492 * Function add VLAN rule for logical port from HW struct
4494 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4497 return ICE_ERR_PARAM;
/* Thin wrapper around ice_add_vlan_rule() using the HW's switch info */
4499 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
4503 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
4504 * @hw: pointer to the hardware structure
4505 * @mv_list: list of MAC and VLAN filters
4506 * @sw: pointer to switch info struct for which function add rule
4507 * @lport: logic port number on which function add rule
4509 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
4510 * pruning bits enabled, then it is the responsibility of the caller to make
4511 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
4512 * VLAN won't be received on that VSI otherwise.
4514 static enum ice_status
4515 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
4516 struct ice_switch_info *sw, u8 lport)
4518 struct ice_fltr_list_entry *mv_list_itr;
4519 struct ice_sw_recipe *recp_list;
4521 if (!mv_list || !hw)
4522 return ICE_ERR_PARAM;
4524 recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
4525 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
4527 enum ice_sw_lkup_type l_type =
4528 mv_list_itr->fltr_info.lkup_type;
4530 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4531 return ICE_ERR_PARAM;
4532 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
/* Per-entry status is stored; first failure aborts the whole list */
4533 mv_list_itr->status =
4534 ice_add_rule_internal(hw, recp_list, lport,
4536 if (mv_list_itr->status)
4537 return mv_list_itr->status;
4543 * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
4544 * @hw: pointer to the hardware structure
4545 * @mv_list: list of MAC VLAN addresses and forwarding information
4547 * Function add MAC VLAN rule for logical port from HW struct
4550 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4552 if (!mv_list || !hw)
4553 return ICE_ERR_PARAM;
/* Thin wrapper: delegates to ice_add_mac_vlan_rule() for this lport */
4555 return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
4556 hw->port_info->lport);
4560 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
4561 * @hw: pointer to the hardware structure
4562 * @em_list: list of ether type MAC filter, MAC is optional
4563 * @sw: pointer to switch info struct for which function add rule
4564 * @lport: logic port number on which function add rule
4566 * This function requires the caller to populate the entries in
4567 * the filter list with the necessary fields (including flags to
4568 * indicate Tx or Rx rules).
4570 static enum ice_status
4571 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
4572 struct ice_switch_info *sw, u8 lport)
4574 struct ice_fltr_list_entry *em_list_itr;
4576 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
4578 struct ice_sw_recipe *recp_list;
4579 enum ice_sw_lkup_type l_type;
/* Recipe is selected by the entry's own lookup type (ethertype or
 * ethertype+MAC); anything else is rejected.
 */
4581 l_type = em_list_itr->fltr_info.lkup_type;
4582 recp_list = &sw->recp_list[l_type];
4584 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
4585 l_type != ICE_SW_LKUP_ETHERTYPE)
4586 return ICE_ERR_PARAM;
4588 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
4591 if (em_list_itr->status)
4592 return em_list_itr->status;
4598 * ice_add_eth_mac - Add a ethertype based filter rule
4599 * @hw: pointer to the hardware structure
4600 * @em_list: list of ethertype and forwarding information
4602 * Function add ethertype rule for logical port from HW struct
/* Thin public wrapper around ice_add_eth_mac_rule() using HW-stored
 * switch info and the port's logical port number.
 */
4605 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
4607 if (!em_list || !hw)
4608 return ICE_ERR_PARAM;
4610 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
4611 hw->port_info->lport);
4615 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
4616 * @hw: pointer to the hardware structure
4617 * @em_list: list of ethertype or ethertype MAC entries
4618 * @sw: pointer to switch info struct for which function add rule
4620 static enum ice_status
4621 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
4622 struct ice_switch_info *sw)
4624 struct ice_fltr_list_entry *em_list_itr, *tmp;
/* SAFE iteration: entries may be removed from em_list while walking */
4626 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
4628 struct ice_sw_recipe *recp_list;
4629 enum ice_sw_lkup_type l_type;
4631 l_type = em_list_itr->fltr_info.lkup_type;
/* Only ethertype and ethertype+MAC lookups can be removed here */
4633 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
4634 l_type != ICE_SW_LKUP_ETHERTYPE)
4635 return ICE_ERR_PARAM;
4637 recp_list = &sw->recp_list[l_type];
4638 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4640 if (em_list_itr->status)
4641 return em_list_itr->status;
4647 * ice_remove_eth_mac - remove a ethertype based filter rule
4648 * @hw: pointer to the hardware structure
4649 * @em_list: list of ethertype and forwarding information
/* Public wrapper: removes ethertype/ethertype-MAC rules via
 * ice_remove_eth_mac_rule() using HW-stored switch info.
 */
4653 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
4655 if (!em_list || !hw)
4656 return ICE_ERR_PARAM;
4658 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
4662 * ice_rem_sw_rule_info
4663 * @hw: pointer to the hardware structure
4664 * @rule_head: pointer to the switch list structure that we want to delete
/* Frees every ice_fltr_mgmt_list_entry on rule_head (software bookkeeping
 * only; no admin-queue calls are made here).
 */
4667 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
4669 if (!LIST_EMPTY(rule_head)) {
4670 struct ice_fltr_mgmt_list_entry *entry;
4671 struct ice_fltr_mgmt_list_entry *tmp;
/* SAFE variant: each node is unlinked then freed while iterating */
4673 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
4674 ice_fltr_mgmt_list_entry, list_entry) {
4675 LIST_DEL(&entry->list_entry);
4676 ice_free(hw, entry);
4682 * ice_rem_adv_rule_info
4683 * @hw: pointer to the hardware structure
4684 * @rule_head: pointer to the switch list structure that we want to delete
/* Frees every advanced-rule entry on rule_head, including the separately
 * allocated lkups array owned by each entry.
 */
4687 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
4689 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
4690 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
4692 if (LIST_EMPTY(rule_head))
4695 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
4696 ice_adv_fltr_mgmt_list_entry, list_entry) {
4697 LIST_DEL(&lst_itr->list_entry);
/* lkups is a per-entry heap allocation; free it before the entry */
4698 ice_free(hw, lst_itr->lkups);
4699 ice_free(hw, lst_itr);
4704 * ice_rem_all_sw_rules_info
4705 * @hw: pointer to the hardware structure
/* Walks every recipe and frees its filter-rule bookkeeping, choosing the
 * plain or advanced free routine based on the recipe's adv_rule flag.
 */
4707 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
4709 struct ice_switch_info *sw = hw->switch_info;
4712 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4713 struct LIST_HEAD_TYPE *rule_head;
4715 rule_head = &sw->recp_list[i].filt_rules;
4716 if (!sw->recp_list[i].adv_rule)
4717 ice_rem_sw_rule_info(hw, rule_head);
4719 ice_rem_adv_rule_info(hw, rule_head);
/* Once an advanced recipe's list is empty, clear its adv_rule marker */
4720 if (sw->recp_list[i].adv_rule &&
4721 LIST_EMPTY(&sw->recp_list[i].filt_rules))
4722 sw->recp_list[i].adv_rule = false;
4727 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
4728 * @pi: pointer to the port_info structure
4729 * @vsi_handle: VSI handle to set as default
4730 * @set: true to add the above mentioned switch rule, false to remove it
4731 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
4733 * add filter rule to set/unset given VSI as default VSI for the switch
4734 * (represented by swid)
4737 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
4740 struct ice_aqc_sw_rules_elem *s_rule;
4741 struct ice_fltr_info f_info;
4742 struct ice_hw *hw = pi->hw;
4743 enum ice_adminq_opc opcode;
4744 enum ice_status status;
4748 if (!ice_is_vsi_valid(hw, vsi_handle))
4749 return ICE_ERR_PARAM;
4750 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
/* Add-rules carry a dummy ethernet header; remove-rules have no header */
4752 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
4753 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
4755 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
4757 return ICE_ERR_NO_MEMORY;
4759 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
/* Build the default-VSI lookup: forward all traffic in the given
 * direction to hw_vsi_id.
 */
4761 f_info.lkup_type = ICE_SW_LKUP_DFLT;
4762 f_info.flag = direction;
4763 f_info.fltr_act = ICE_FWD_TO_VSI;
4764 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
/* Rx rules are sourced by logical port; Tx rules by the VSI itself */
4766 if (f_info.flag & ICE_FLTR_RX) {
4767 f_info.src = pi->lport;
4768 f_info.src_id = ICE_SRC_ID_LPORT;
4770 f_info.fltr_rule_id =
4771 pi->dflt_rx_vsi_rule_id;
4772 } else if (f_info.flag & ICE_FLTR_TX) {
4773 f_info.src_id = ICE_SRC_ID_VSI;
4774 f_info.src = hw_vsi_id;
4776 f_info.fltr_rule_id =
4777 pi->dflt_tx_vsi_rule_id;
4781 opcode = ice_aqc_opc_add_sw_rules;
4783 opcode = ice_aqc_opc_remove_sw_rules;
4785 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
4787 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
4788 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
/* On successful add, cache the FW-returned rule index in port_info */
4791 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4793 if (f_info.flag & ICE_FLTR_TX) {
4794 pi->dflt_tx_vsi_num = hw_vsi_id;
4795 pi->dflt_tx_vsi_rule_id = index;
4796 } else if (f_info.flag & ICE_FLTR_RX) {
4797 pi->dflt_rx_vsi_num = hw_vsi_id;
4798 pi->dflt_rx_vsi_rule_id = index;
/* On remove, invalidate the cached default-VSI bookkeeping */
4801 if (f_info.flag & ICE_FLTR_TX) {
4802 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
4803 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
4804 } else if (f_info.flag & ICE_FLTR_RX) {
4805 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
4806 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
4811 ice_free(hw, s_rule);
4816 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
4817 * @list_head: head of rule list
4818 * @f_info: rule information
4820 * Helper function to search for a unicast rule entry - this is to be used
4821 * to remove unicast MAC filter that is not shared with other VSIs on the
4824 * Returns pointer to entry storing the rule if found
4826 static struct ice_fltr_mgmt_list_entry *
4827 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
4828 struct ice_fltr_info *f_info)
4830 struct ice_fltr_mgmt_list_entry *list_itr;
4832 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
/* Match requires identical lookup data, same HW VSI, and same flag */
4834 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4835 sizeof(f_info->l_data)) &&
4836 f_info->fwd_id.hw_vsi_id ==
4837 list_itr->fltr_info.fwd_id.hw_vsi_id &&
4838 f_info->flag == list_itr->fltr_info.flag)
4845 * ice_remove_mac_rule - remove a MAC based filter rule
4846 * @hw: pointer to the hardware structure
4847 * @m_list: list of MAC addresses and forwarding information
4848 * @recp_list: list from which function remove MAC address
4850 * This function removes either a MAC filter rule or a specific VSI from a
4851 * VSI list for a multicast MAC address.
4853 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
4854 * ice_add_mac. Caller should be aware that this call will only work if all
4855 * the entries passed into m_list were added previously. It will not attempt to
4856 * do a partial remove of entries that were found.
4858 static enum ice_status
4859 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4860 struct ice_sw_recipe *recp_list)
4862 struct ice_fltr_list_entry *list_itr, *tmp;
4863 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4866 return ICE_ERR_PARAM;
4868 rule_lock = &recp_list->filt_rule_lock;
4869 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
4871 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
4872 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
4875 if (l_type != ICE_SW_LKUP_MAC)
4876 return ICE_ERR_PARAM;
4878 vsi_handle = list_itr->fltr_info.vsi_handle;
4879 if (!ice_is_vsi_valid(hw, vsi_handle))
4880 return ICE_ERR_PARAM;
/* Translate the software handle to the HW VSI number for matching */
4882 list_itr->fltr_info.fwd_id.hw_vsi_id =
4883 ice_get_hw_vsi_num(hw, vsi_handle);
4884 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4885 /* Don't remove the unicast address that belongs to
4886 * another VSI on the switch, since it is not being
/* Lookup under the rule lock; missing entry means it was never
 * added (or belongs to another VSI) -> DOES_NOT_EXIST.
 */
4889 ice_acquire_lock(rule_lock);
4890 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
4891 &list_itr->fltr_info)) {
4892 ice_release_lock(rule_lock);
4893 return ICE_ERR_DOES_NOT_EXIST;
4895 ice_release_lock(rule_lock);
4897 list_itr->status = ice_remove_rule_internal(hw, recp_list,
4899 if (list_itr->status)
4900 return list_itr->status;
4906 * ice_remove_mac - remove a MAC address based filter rule
4907 * @hw: pointer to the hardware structure
4908 * @m_list: list of MAC addresses and forwarding information
/* Public wrapper: selects the MAC recipe and defers to
 * ice_remove_mac_rule().
 */
4911 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4913 struct ice_sw_recipe *recp_list;
4915 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4916 return ice_remove_mac_rule(hw, m_list, recp_list);
4920 * ice_remove_vlan_rule - Remove VLAN based filter rule
4921 * @hw: pointer to the hardware structure
4922 * @v_list: list of VLAN entries and forwarding information
4923 * @recp_list: list from which function remove VLAN
4925 static enum ice_status
4926 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4927 struct ice_sw_recipe *recp_list)
4929 struct ice_fltr_list_entry *v_list_itr, *tmp;
4931 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4933 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
/* Only VLAN lookups may appear on v_list */
4935 if (l_type != ICE_SW_LKUP_VLAN)
4936 return ICE_ERR_PARAM;
4937 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4939 if (v_list_itr->status)
4940 return v_list_itr->status;
4946 * ice_remove_vlan - remove a VLAN address based filter rule
4947 * @hw: pointer to the hardware structure
4948 * @v_list: list of VLAN and forwarding information
/* Public wrapper: selects the VLAN recipe and defers to
 * ice_remove_vlan_rule().
 */
4952 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4954 struct ice_sw_recipe *recp_list;
4957 return ICE_ERR_PARAM;
4959 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
4960 return ice_remove_vlan_rule(hw, v_list, recp_list);
4964 * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
4965 * @hw: pointer to the hardware structure
4966 * @v_list: list of MAC VLAN entries and forwarding information
4967 * @recp_list: list from which function remove MAC VLAN
4969 static enum ice_status
4970 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4971 struct ice_sw_recipe *recp_list)
4973 struct ice_fltr_list_entry *v_list_itr, *tmp;
/* NOTE(review): the recp_list parameter is overwritten here with the
 * MAC_VLAN recipe from hw->switch_info — the passed-in value is unused.
 */
4975 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4976 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4978 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4980 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4981 return ICE_ERR_PARAM;
4982 v_list_itr->status =
4983 ice_remove_rule_internal(hw, recp_list,
4985 if (v_list_itr->status)
4986 return v_list_itr->status;
4992 * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
4993 * @hw: pointer to the hardware structure
4994 * @mv_list: list of MAC VLAN and forwarding information
/* Public wrapper: selects the MAC+VLAN recipe and defers to
 * ice_remove_mac_vlan_rule().
 */
4997 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4999 struct ice_sw_recipe *recp_list;
5001 if (!mv_list || !hw)
5002 return ICE_ERR_PARAM;
5004 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5005 return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
5009 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
5010 * @fm_entry: filter entry to inspect
5011 * @vsi_handle: VSI handle to compare with filter info
/* True when the filter forwards directly to this VSI, or forwards to a
 * VSI list whose bitmap includes this VSI handle.
 */
5014 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
5016 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
5017 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
5018 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
5019 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
5024 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
5025 * @hw: pointer to the hardware structure
5026 * @vsi_handle: VSI handle to remove filters from
5027 * @vsi_list_head: pointer to the list to add entry to
5028 * @fi: pointer to fltr_info of filter entry to copy & add
5030 * Helper function, used when creating a list of filters to remove from
5031 * a specific VSI. The entry added to vsi_list_head is a COPY of the
5032 * original filter entry, with the exception of fltr_info.fltr_act and
5033 * fltr_info.fwd_id fields. These are set such that later logic can
5034 * extract which VSI to remove the fltr from, and pass on that information.
5036 static enum ice_status
5037 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5038 struct LIST_HEAD_TYPE *vsi_list_head,
5039 struct ice_fltr_info *fi)
5041 struct ice_fltr_list_entry *tmp;
5043 /* this memory is freed up in the caller function
5044 * once filters for this VSI are removed
5046 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
5048 return ICE_ERR_NO_MEMORY;
/* Shallow struct copy of the original filter info */
5050 tmp->fltr_info = *fi;
5052 /* Overwrite these fields to indicate which VSI to remove filter from,
5053 * so find and remove logic can extract the information from the
5054 * list entries. Note that original entries will still have proper
5057 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
5058 tmp->fltr_info.vsi_handle = vsi_handle;
5059 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5061 LIST_ADD(&tmp->list_entry, vsi_list_head);
5067 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
5068 * @hw: pointer to the hardware structure
5069 * @vsi_handle: VSI handle to remove filters from
5070 * @lkup_list_head: pointer to the list that has certain lookup type filters
5071 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
5073 * Locates all filters in lkup_list_head that are used by the given VSI,
5074 * and adds COPIES of those entries to vsi_list_head (intended to be used
5075 * to remove the listed filters).
5076 * Note that this means all entries in vsi_list_head must be explicitly
5077 * deallocated by the caller when done with list.
5079 static enum ice_status
5080 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5081 struct LIST_HEAD_TYPE *lkup_list_head,
5082 struct LIST_HEAD_TYPE *vsi_list_head)
5084 struct ice_fltr_mgmt_list_entry *fm_entry;
5085 enum ice_status status = ICE_SUCCESS;
5087 /* check to make sure VSI ID is valid and within boundary */
5088 if (!ice_is_vsi_valid(hw, vsi_handle))
5089 return ICE_ERR_PARAM;
5091 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
5092 ice_fltr_mgmt_list_entry, list_entry) {
5093 struct ice_fltr_info *fi;
5095 fi = &fm_entry->fltr_info;
/* Skip filters that don't apply to this VSI */
5096 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
5099 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5108 * ice_determine_promisc_mask
5109 * @fi: filter info to parse
5111 * Helper function to determine which ICE_PROMISC_ mask corresponds
5112 * to given filter into.
5114 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
5116 u16 vid = fi->l_data.mac_vlan.vlan_id;
5117 u8 *macaddr = fi->l_data.mac.mac_addr;
5118 bool is_tx_fltr = false;
5119 u8 promisc_mask = 0;
/* Direction selects the Tx vs Rx variant of each promisc bit */
5121 if (fi->flag == ICE_FLTR_TX)
/* Classify the DA: broadcast, multicast, or unicast */
5124 if (IS_BROADCAST_ETHER_ADDR(macaddr))
5125 promisc_mask |= is_tx_fltr ?
5126 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
5127 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
5128 promisc_mask |= is_tx_fltr ?
5129 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
5130 else if (IS_UNICAST_ETHER_ADDR(macaddr))
5131 promisc_mask |= is_tx_fltr ?
5132 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
5134 promisc_mask |= is_tx_fltr ?
5135 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
5137 return promisc_mask;
5141 * _ice_get_vsi_promisc - get promiscuous mode of given VSI
5142 * @hw: pointer to the hardware structure
5143 * @vsi_handle: VSI handle to retrieve info from
5144 * @promisc_mask: pointer to mask to be filled in
5145 * @vid: VLAN ID of promisc VLAN VSI
5146 * @sw: pointer to switch info struct for which function add rule
5148 static enum ice_status
5149 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5150 u16 *vid, struct ice_switch_info *sw)
5152 struct ice_fltr_mgmt_list_entry *itr;
5153 struct LIST_HEAD_TYPE *rule_head;
5154 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5156 if (!ice_is_vsi_valid(hw, vsi_handle))
5157 return ICE_ERR_PARAM;
/* Accumulate promisc bits by scanning the PROMISC recipe's rules */
5161 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
5162 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
5164 ice_acquire_lock(rule_lock);
5165 LIST_FOR_EACH_ENTRY(itr, rule_head,
5166 ice_fltr_mgmt_list_entry, list_entry) {
5167 /* Continue if this filter doesn't apply to this VSI or the
5168 * VSI ID is not in the VSI map for this filter
5170 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5173 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5175 ice_release_lock(rule_lock);
5181 * ice_get_vsi_promisc - get promiscuous mode of given VSI
5182 * @hw: pointer to the hardware structure
5183 * @vsi_handle: VSI handle to retrieve info from
5184 * @promisc_mask: pointer to mask to be filled in
5185 * @vid: VLAN ID of promisc VLAN VSI
/* Public wrapper around _ice_get_vsi_promisc() using HW switch info */
5188 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5191 return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
5192 vid, hw->switch_info);
5196 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5197 * @hw: pointer to the hardware structure
5198 * @vsi_handle: VSI handle to retrieve info from
5199 * @promisc_mask: pointer to mask to be filled in
5200 * @vid: VLAN ID of promisc VLAN VSI
5201 * @sw: pointer to switch info struct for which function add rule
5203 static enum ice_status
5204 _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5205 u16 *vid, struct ice_switch_info *sw)
5207 struct ice_fltr_mgmt_list_entry *itr;
5208 struct LIST_HEAD_TYPE *rule_head;
5209 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5211 if (!ice_is_vsi_valid(hw, vsi_handle))
5212 return ICE_ERR_PARAM;
/* Same walk as _ice_get_vsi_promisc(), but over the PROMISC_VLAN recipe */
5216 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
5217 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
5219 ice_acquire_lock(rule_lock);
5220 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
5222 /* Continue if this filter doesn't apply to this VSI or the
5223 * VSI ID is not in the VSI map for this filter
5225 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5228 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5230 ice_release_lock(rule_lock);
5236 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5237 * @hw: pointer to the hardware structure
5238 * @vsi_handle: VSI handle to retrieve info from
5239 * @promisc_mask: pointer to mask to be filled in
5240 * @vid: VLAN ID of promisc VLAN VSI
/* Public wrapper around _ice_get_vsi_vlan_promisc() using HW switch info */
5243 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5246 return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
5247 vid, hw->switch_info);
5251 * ice_remove_promisc - Remove promisc based filter rules
5252 * @hw: pointer to the hardware structure
5253 * @recp_id: recipe ID for which the rule needs to removed
5254 * @v_list: list of promisc entries
5256 static enum ice_status
5257 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
5258 struct LIST_HEAD_TYPE *v_list)
5260 struct ice_fltr_list_entry *v_list_itr, *tmp;
5261 struct ice_sw_recipe *recp_list;
/* recp_id selects PROMISC or PROMISC_VLAN recipe (caller's choice) */
5263 recp_list = &hw->switch_info->recp_list[recp_id];
5264 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5266 v_list_itr->status =
5267 ice_remove_rule_internal(hw, recp_list, v_list_itr);
5268 if (v_list_itr->status)
5269 return v_list_itr->status;
5275 * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
5276 * @hw: pointer to the hardware structure
5277 * @vsi_handle: VSI handle to clear mode
5278 * @promisc_mask: mask of promiscuous config bits to clear
5279 * @vid: VLAN ID to clear VLAN promiscuous
5280 * @sw: pointer to switch info struct for which function add rule
5282 static enum ice_status
5283 _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5284 u16 vid, struct ice_switch_info *sw)
5286 struct ice_fltr_list_entry *fm_entry, *tmp;
5287 struct LIST_HEAD_TYPE remove_list_head;
5288 struct ice_fltr_mgmt_list_entry *itr;
5289 struct LIST_HEAD_TYPE *rule_head;
5290 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5291 enum ice_status status = ICE_SUCCESS;
5294 if (!ice_is_vsi_valid(hw, vsi_handle))
5295 return ICE_ERR_PARAM;
/* VLAN promisc bits select the PROMISC_VLAN recipe; otherwise PROMISC */
5297 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
5298 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
5300 recipe_id = ICE_SW_LKUP_PROMISC;
5302 rule_head = &sw->recp_list[recipe_id].filt_rules;
5303 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
5305 INIT_LIST_HEAD(&remove_list_head);
/* Pass 1 (under lock): collect COPIES of matching rules to remove */
5307 ice_acquire_lock(rule_lock);
5308 LIST_FOR_EACH_ENTRY(itr, rule_head,
5309 ice_fltr_mgmt_list_entry, list_entry) {
5310 struct ice_fltr_info *fltr_info;
5311 u8 fltr_promisc_mask = 0;
5313 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5315 fltr_info = &itr->fltr_info;
/* For VLAN promisc rules, only remove rules matching the given VID */
5317 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
5318 vid != fltr_info->l_data.mac_vlan.vlan_id)
5321 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
5323 /* Skip if filter is not completely specified by given mask */
5324 if (fltr_promisc_mask & ~promisc_mask)
5327 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5331 ice_release_lock(rule_lock);
5332 goto free_fltr_list;
5335 ice_release_lock(rule_lock);
/* Pass 2 (lock released): actually remove the collected rules */
5337 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
/* Free the temporary copies regardless of removal outcome */
5340 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
5341 ice_fltr_list_entry, list_entry) {
5342 LIST_DEL(&fm_entry->list_entry);
5343 ice_free(hw, fm_entry);
5350 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
5351 * @hw: pointer to the hardware structure
5352 * @vsi_handle: VSI handle to clear mode
5353 * @promisc_mask: mask of promiscuous config bits to clear
5354 * @vid: VLAN ID to clear VLAN promiscuous
/* Public wrapper around _ice_clear_vsi_promisc() using HW switch info */
5357 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
5358 u8 promisc_mask, u16 vid)
5360 return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
5361 vid, hw->switch_info);
5365 * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5366 * @hw: pointer to the hardware structure
5367 * @vsi_handle: VSI handle to configure
5368 * @promisc_mask: mask of promiscuous config bits
5369 * @vid: VLAN ID to set VLAN promiscuous
5370 * @lport: logical port number to configure promisc mode
5371 * @sw: pointer to switch info struct for which function add rule
5373 static enum ice_status
5374 _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5375 u16 vid, u8 lport, struct ice_switch_info *sw)
/* Internal packet-type tags for the DA chosen below */
5377 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
5378 struct ice_fltr_list_entry f_list_entry;
5379 struct ice_fltr_info new_fltr;
5380 enum ice_status status = ICE_SUCCESS;
5386 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
5388 if (!ice_is_vsi_valid(hw, vsi_handle))
5389 return ICE_ERR_PARAM;
5390 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5392 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
/* VLAN promisc uses the PROMISC_VLAN recipe keyed on the given VID */
5394 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
5395 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
5396 new_fltr.l_data.mac_vlan.vlan_id = vid;
5397 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
5399 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
5400 recipe_id = ICE_SW_LKUP_PROMISC;
5403 /* Separate filters must be set for each direction/packet type
5404 * combination, so we will loop over the mask value, store the
5405 * individual type, and clear it out in the input mask as it
5408 while (promisc_mask) {
5409 struct ice_sw_recipe *recp_list;
/* Consume exactly one direction/packet-type bit per iteration */
5415 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
5416 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
5417 pkt_type = UCAST_FLTR;
5418 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
5419 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
5420 pkt_type = UCAST_FLTR;
5422 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
5423 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
5424 pkt_type = MCAST_FLTR;
5425 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
5426 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
5427 pkt_type = MCAST_FLTR;
5429 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
5430 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
5431 pkt_type = BCAST_FLTR;
5432 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
5433 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
5434 pkt_type = BCAST_FLTR;
5438 /* Check for VLAN promiscuous flag */
5439 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
5440 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
5441 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
5442 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
5446 /* Set filter DA based on packet type */
5447 mac_addr = new_fltr.l_data.mac.mac_addr;
5448 if (pkt_type == BCAST_FLTR) {
5449 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
5450 } else if (pkt_type == MCAST_FLTR ||
5451 pkt_type == UCAST_FLTR) {
5452 /* Use the dummy ether header DA */
5453 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
5454 ICE_NONDMA_TO_NONDMA);
5455 if (pkt_type == MCAST_FLTR)
5456 mac_addr[0] |= 0x1; /* Set multicast bit */
5459 /* Need to reset this to zero for all iterations */
/* Tx rules are sourced by the VSI; Rx rules by the logical port */
5462 new_fltr.flag |= ICE_FLTR_TX;
5463 new_fltr.src = hw_vsi_id;
5465 new_fltr.flag |= ICE_FLTR_RX;
5466 new_fltr.src = lport;
5469 new_fltr.fltr_act = ICE_FWD_TO_VSI;
5470 new_fltr.vsi_handle = vsi_handle;
5471 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
5472 f_list_entry.fltr_info = new_fltr;
5473 recp_list = &sw->recp_list[recipe_id];
5475 status = ice_add_rule_internal(hw, recp_list, lport,
5477 if (status != ICE_SUCCESS)
5478 goto set_promisc_exit;
5486 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5487 * @hw: pointer to the hardware structure
5488 * @vsi_handle: VSI handle to configure
5489 * @promisc_mask: mask of promiscuous config bits
5490 * @vid: VLAN ID to set VLAN promiscuous
/* Public wrapper around _ice_set_vsi_promisc() using the HW port/switch */
5493 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5496 return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
5497 hw->port_info->lport,
5502 * _ice_set_vlan_vsi_promisc
5503 * @hw: pointer to the hardware structure
5504 * @vsi_handle: VSI handle to configure
5505 * @promisc_mask: mask of promiscuous config bits
5506 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5507 * @lport: logical port number to configure promisc mode
5508 * @sw: pointer to switch info struct for which function add rule
5510 * Configure VSI with all associated VLANs to given promiscuous mode(s)
5512 static enum ice_status
5513 _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5514 bool rm_vlan_promisc, u8 lport,
5515 struct ice_switch_info *sw)
5517 struct ice_fltr_list_entry *list_itr, *tmp;
5518 struct LIST_HEAD_TYPE vsi_list_head;
5519 struct LIST_HEAD_TYPE *vlan_head;
5520 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
5521 enum ice_status status;
/* Snapshot this VSI's VLAN filters under the VLAN rule lock */
5524 INIT_LIST_HEAD(&vsi_list_head);
5525 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
5526 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
5527 ice_acquire_lock(vlan_lock);
5528 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
5530 ice_release_lock(vlan_lock);
5532 goto free_fltr_list;
/* Apply (or clear) promisc mode per VLAN found on the VSI */
5534 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
5536 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
5537 if (rm_vlan_promisc)
5538 status = _ice_clear_vsi_promisc(hw, vsi_handle,
5542 status = _ice_set_vsi_promisc(hw, vsi_handle,
5543 promisc_mask, vlan_id,
/* Free the temporary snapshot entries */
5550 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
5551 ice_fltr_list_entry, list_entry) {
5552 LIST_DEL(&list_itr->list_entry);
5553 ice_free(hw, list_itr);
5559 * ice_set_vlan_vsi_promisc
5560 * @hw: pointer to the hardware structure
5561 * @vsi_handle: VSI handle to configure
5562 * @promisc_mask: mask of promiscuous config bits
5563 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5565 * Configure VSI with all associated VLANs to given promiscuous mode(s)
/* Public wrapper around _ice_set_vlan_vsi_promisc() using HW port/switch */
5568 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5569 bool rm_vlan_promisc)
5571 return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
5572 rm_vlan_promisc, hw->port_info->lport,
5577 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
5578 * @hw: pointer to the hardware structure
5579 * @vsi_handle: VSI handle to remove filters from
5580 * @recp_list: recipe list from which function remove fltr
5581 * @lkup: switch rule filter lookup type
5584 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
5585 struct ice_sw_recipe *recp_list,
5586 enum ice_sw_lkup_type lkup)
5588 struct ice_fltr_list_entry *fm_entry;
5589 struct LIST_HEAD_TYPE remove_list_head;
5590 struct LIST_HEAD_TYPE *rule_head;
5591 struct ice_fltr_list_entry *tmp;
5592 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5593 enum ice_status status;
/* Collect copies of this VSI's filters of the given lookup type */
5595 INIT_LIST_HEAD(&remove_list_head);
5596 rule_lock = &recp_list[lkup].filt_rule_lock;
5597 rule_head = &recp_list[lkup].filt_rules;
5598 ice_acquire_lock(rule_lock);
5599 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
5601 ice_release_lock(rule_lock);
/* Dispatch to the remover matching the lookup type */
5606 case ICE_SW_LKUP_MAC:
5607 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
5609 case ICE_SW_LKUP_VLAN:
5610 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
5612 case ICE_SW_LKUP_PROMISC:
5613 case ICE_SW_LKUP_PROMISC_VLAN:
5614 ice_remove_promisc(hw, lkup, &remove_list_head);
5616 case ICE_SW_LKUP_MAC_VLAN:
5617 ice_remove_mac_vlan(hw, &remove_list_head);
5619 case ICE_SW_LKUP_ETHERTYPE:
5620 case ICE_SW_LKUP_ETHERTYPE_MAC:
5621 ice_remove_eth_mac(hw, &remove_list_head);
5623 case ICE_SW_LKUP_DFLT:
5624 ice_debug(hw, ICE_DBG_SW, "Remove filters for this lookup type hasn't been implemented yet\n")&#59;
5626 case ICE_SW_LKUP_LAST:
5627 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
/* Free the temporary copies after removal */
5631 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
5632 ice_fltr_list_entry, list_entry) {
5633 LIST_DEL(&fm_entry->list_entry);
5634 ice_free(hw, fm_entry);
5639 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
5640 * @hw: pointer to the hardware structure
5641 * @vsi_handle: VSI handle to remove filters from
5642 * @sw: pointer to switch info struct
/* Removes this VSI's filters for every supported lookup type, one
 * ice_remove_vsi_lkup_fltr() call per type.
 */
5645 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
5646 struct ice_switch_info *sw)
5648 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
5650 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5651 sw->recp_list, ICE_SW_LKUP_MAC);
5652 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5653 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
5654 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5655 sw->recp_list, ICE_SW_LKUP_PROMISC);
5656 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5657 sw->recp_list, ICE_SW_LKUP_VLAN);
5658 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5659 sw->recp_list, ICE_SW_LKUP_DFLT);
5660 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5661 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
5662 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5663 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
5664 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5665 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
5669 * ice_remove_vsi_fltr - Remove all filters for a VSI
5670 * @hw: pointer to the hardware structure
5671 * @vsi_handle: VSI handle to remove filters from
/* Public wrapper around ice_remove_vsi_fltr_rule() using HW switch info */
5673 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
5675 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
5679 * ice_alloc_res_cntr - allocating resource counter
5680 * @hw: pointer to the hardware structure
5681 * @type: type of resource
5682 * @alloc_shared: if set it is shared else dedicated
5683 * @num_items: number of entries requested for FD resource type
5684 * @counter_id: counter index returned by AQ call
5687 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
5690 struct ice_aqc_alloc_free_res_elem *buf;
5691 enum ice_status status;
5694 /* Allocate resource */
5695 buf_len = ice_struct_size(buf, elem, 1);
5696 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
5698 return ICE_ERR_NO_MEMORY;
/* res_type encodes the resource type plus the shared/dedicated flag */
5700 buf->num_elems = CPU_TO_LE16(num_items);
5701 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
5702 ICE_AQC_RES_TYPE_M) | alloc_shared);
5704 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
5705 ice_aqc_opc_alloc_res, NULL);
/* On success, FW returns the allocated counter index in elem[0] */
5709 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
5717 * ice_free_res_cntr - free resource counter
5718 * @hw: pointer to the hardware structure
5719 * @type: type of resource
5720 * @alloc_shared: if set it is shared else dedicated
5721 * @num_items: number of entries to be freed for FD resource type
5722 * @counter_id: counter ID resource which needs to be freed
5725 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
5728 struct ice_aqc_alloc_free_res_elem *buf;
5729 enum ice_status status;
/* Mirror image of ice_alloc_res_cntr(): same one-element buffer layout,
 * but carries the counter_id to release and uses the free-res opcode.
 */
5733 buf_len = ice_struct_size(buf, elem, 1);
5734 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
5736 return ICE_ERR_NO_MEMORY;
5738 buf->num_elems = CPU_TO_LE16(num_items);
5739 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
5740 ICE_AQC_RES_TYPE_M) | alloc_shared);
5741 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
5743 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
5744 ice_aqc_opc_free_res, NULL);
5746 ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
5753 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
5754 * @hw: pointer to the hardware structure
5755 * @counter_id: returns counter index
/* Convenience wrapper: one dedicated VLAN counter via ice_alloc_res_cntr(). */
5757 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
5759 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
5760 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
5765 * ice_free_vlan_res_counter - Free counter resource for VLAN type
5766 * @hw: pointer to the hardware structure
5767 * @counter_id: counter index to be freed
/* Convenience wrapper: releases one dedicated VLAN counter. */
5769 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
5771 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
5772 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
5777 * ice_alloc_res_lg_act - add large action resource
5778 * @hw: pointer to the hardware structure
5779 * @l_id: large action ID to fill it in
5780 * @num_acts: number of actions to hold with a large action entry
5782 static enum ice_status
5783 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
5785 struct ice_aqc_alloc_free_res_elem *sw_buf;
5786 enum ice_status status;
5789 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
5790 return ICE_ERR_PARAM;
5792 /* Allocate resource for large action */
5793 buf_len = ice_struct_size(sw_buf, elem, 1);
5794 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
5796 return ICE_ERR_NO_MEMORY;
5798 sw_buf->num_elems = CPU_TO_LE16(1);
5800 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
/* Fixed below: comment previously said WIDE_TABLE_3 for num_acts == 2,
 * but the code allocates ICE_AQC_RES_TYPE_WIDE_TABLE_2 in that case.
 */
5801 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
5802 * If num_acts is greater than 2, then use
5803 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
5804 * The num_acts cannot exceed 4. This was ensured at the
5805 * beginning of the function.
5808 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
5809 else if (num_acts == 2)
5810 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
5812 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
5814 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
5815 ice_aqc_opc_alloc_res, NULL);
5817 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
5819 ice_free(hw, sw_buf);
5824 * ice_add_mac_with_sw_marker - add filter with sw marker
5825 * @hw: pointer to the hardware structure
5826 * @f_info: filter info structure containing the MAC filter information
5827 * @sw_marker: sw marker to tag the Rx descriptor with
5830 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
5833 struct ice_fltr_mgmt_list_entry *m_entry;
5834 struct ice_fltr_list_entry fl_info;
5835 struct ice_sw_recipe *recp_list;
5836 struct LIST_HEAD_TYPE l_head;
5837 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5838 enum ice_status ret;
/* Parameter validation: only FWD_TO_VSI MAC-lookup filters with a valid
 * marker ID and VSI handle are accepted.
 */
5842 if (f_info->fltr_act != ICE_FWD_TO_VSI)
5843 return ICE_ERR_PARAM;
5845 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5846 return ICE_ERR_PARAM;
5848 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
5849 return ICE_ERR_PARAM;
5851 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5852 return ICE_ERR_PARAM;
5853 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5855 /* Add filter if it doesn't exist so then the adding of large
5856 * action always results in update
5859 INIT_LIST_HEAD(&l_head);
5860 fl_info.fltr_info = *f_info;
5861 LIST_ADD(&fl_info.list_entry, &l_head);
5863 entry_exists = false;
5864 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5865 hw->port_info->lport);
5866 if (ret == ICE_ERR_ALREADY_EXISTS)
5867 entry_exists = true;
/* Look up the bookkeeping entry under the MAC recipe's rule lock; the
 * lock is held until after the marker action update below.
 */
5871 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5872 rule_lock = &recp_list->filt_rule_lock;
5873 ice_acquire_lock(rule_lock);
5874 /* Get the book keeping entry for the filter */
5875 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5879 /* If counter action was enabled for this rule then don't enable
5880 * sw marker large action
5882 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5883 ret = ICE_ERR_PARAM;
5887 /* if same marker was added before */
5888 if (m_entry->sw_marker_id == sw_marker) {
5889 ret = ICE_ERR_ALREADY_EXISTS;
5893 /* Allocate a hardware table entry to hold large act. Three actions
5894 * for marker based large action
5896 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
5900 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5903 /* Update the switch rule to add the marker action */
5904 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
5906 ice_release_lock(rule_lock);
5911 ice_release_lock(rule_lock);
5912 /* only remove entry if it did not exist previously */
5914 ret = ice_remove_mac(hw, &l_head);
5920 * ice_add_mac_with_counter - add filter with counter enabled
5921 * @hw: pointer to the hardware structure
5922 * @f_info: pointer to filter info structure containing the MAC filter
5926 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
5928 struct ice_fltr_mgmt_list_entry *m_entry;
5929 struct ice_fltr_list_entry fl_info;
5930 struct ice_sw_recipe *recp_list;
5931 struct LIST_HEAD_TYPE l_head;
5932 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5933 enum ice_status ret;
/* Same validation pattern as ice_add_mac_with_sw_marker(), minus the
 * marker-ID check: FWD_TO_VSI, MAC lookup, valid VSI.
 */
5938 if (f_info->fltr_act != ICE_FWD_TO_VSI)
5939 return ICE_ERR_PARAM;
5941 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5942 return ICE_ERR_PARAM;
5944 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5945 return ICE_ERR_PARAM;
5946 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5947 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5949 entry_exist = false;
5951 rule_lock = &recp_list->filt_rule_lock;
5953 /* Add filter if it doesn't exist so then the adding of large
5954 * action always results in update
5956 INIT_LIST_HEAD(&l_head);
5958 fl_info.fltr_info = *f_info;
5959 LIST_ADD(&fl_info.list_entry, &l_head);
5961 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5962 hw->port_info->lport);
5963 if (ret == ICE_ERR_ALREADY_EXISTS)
5968 ice_acquire_lock(rule_lock);
5969 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5971 ret = ICE_ERR_BAD_PTR;
5975 /* Don't enable counter for a filter for which sw marker was enabled */
5976 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
5977 ret = ICE_ERR_PARAM;
5981 /* If a counter was already enabled then don't need to add again */
5982 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5983 ret = ICE_ERR_ALREADY_EXISTS;
5987 /* Allocate a hardware table entry to VLAN counter */
5988 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
5992 /* Allocate a hardware table entry to hold large act. Two actions for
5993 * counter based large action
5995 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
5999 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
6002 /* Update the switch rule to add the counter action */
6003 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
6005 ice_release_lock(rule_lock);
6010 ice_release_lock(rule_lock);
6011 /* only remove entry if it did not exist previously */
6013 ret = ice_remove_mac(hw, &l_head);
6018 /* This is mapping table entry that maps every word within a given protocol
6019 * structure to the real byte offset as per the specification of that
6021 * for example dst address is 3 words in ethertype header and corresponding
6022 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
6023 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
6024 * matching entry describing its field. This needs to be updated if new
6025 * structure is added to that union.
6027 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
/* Each entry lists the byte offset of every 16-bit word of that protocol
 * header that can participate in an extraction sequence. Tunnel headers
 * (VXLAN/GENEVE/GTP/PFCP/NAT-T) start at offset 8, past their UDP header.
 */
6028 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
6029 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
6030 { ICE_ETYPE_OL, { 0 } },
6031 { ICE_VLAN_OFOS, { 0, 2 } },
6032 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
6033 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
6034 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
6035 26, 28, 30, 32, 34, 36, 38 } },
6036 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
6037 26, 28, 30, 32, 34, 36, 38 } },
6038 { ICE_TCP_IL, { 0, 2 } },
6039 { ICE_UDP_OF, { 0, 2 } },
6040 { ICE_UDP_ILOS, { 0, 2 } },
6041 { ICE_SCTP_IL, { 0, 2 } },
6042 { ICE_VXLAN, { 8, 10, 12, 14 } },
6043 { ICE_GENEVE, { 8, 10, 12, 14 } },
6044 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
6045 { ICE_NVGRE, { 0, 2, 4, 6 } },
6046 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
6047 { ICE_PPPOE, { 0, 2, 4, 6 } },
6048 { ICE_PFCP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
6049 { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
6050 { ICE_ESP, { 0, 2, 4, 6 } },
6051 { ICE_AH, { 0, 2, 4, 6, 8, 10 } },
6052 { ICE_NAT_T, { 8, 10, 12, 14 } },
6053 { ICE_GTP_NO_PAY, { 8, 10, 12, 14 } },
6054 { ICE_VLAN_EX, { 0, 2 } },
6057 /* The following table describes preferred grouping of recipes.
6058 * If a recipe that needs to be programmed is a superset or matches one of the
6059 * following combinations, then the recipe needs to be chained as per the
6063 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
/* Maps each software protocol type to the hardware protocol ID used in
 * field vectors. Several tunnel types deliberately share a carrier
 * protocol's HW ID (e.g. VXLAN/GENEVE/GTP -> ICE_UDP_OF_HW).
 */
6064 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
6065 { ICE_MAC_IL, ICE_MAC_IL_HW },
6066 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
6067 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
6068 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
6069 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
6070 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
6071 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
6072 { ICE_TCP_IL, ICE_TCP_IL_HW },
6073 { ICE_UDP_OF, ICE_UDP_OF_HW },
6074 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
6075 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
6076 { ICE_VXLAN, ICE_UDP_OF_HW },
6077 { ICE_GENEVE, ICE_UDP_OF_HW },
6078 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
6079 { ICE_NVGRE, ICE_GRE_OF_HW },
6080 { ICE_GTP, ICE_UDP_OF_HW },
6081 { ICE_PPPOE, ICE_PPPOE_HW },
6082 { ICE_PFCP, ICE_UDP_ILOS_HW },
6083 { ICE_L2TPV3, ICE_L2TPV3_HW },
6084 { ICE_ESP, ICE_ESP_HW },
6085 { ICE_AH, ICE_AH_HW },
6086 { ICE_NAT_T, ICE_UDP_ILOS_HW },
6087 { ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
6088 { ICE_VLAN_EX, ICE_VLAN_OF_HW },
6092 * ice_find_recp - find a recipe
6093 * @hw: pointer to the hardware structure
6094 * @lkup_exts: extension sequence to match
 * @tun_type: tunnel type of switch filter the recipe must also match
6096 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
6098 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
6099 enum ice_sw_tunnel_type tun_type)
6101 bool refresh_required = true;
6102 struct ice_sw_recipe *recp;
6105 /* Walk through existing recipes to find a match */
6106 recp = hw->switch_info->recp_list;
6107 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6108 /* If recipe was not created for this ID, in SW bookkeeping,
6109 * check if FW has an entry for this recipe. If the FW has an
6110 * entry update it in our SW bookkeeping and continue with the
6113 if (!recp[i].recp_created)
6114 if (ice_get_recp_frm_fw(hw,
6115 hw->switch_info->recp_list, i,
6119 /* Skip inverse action recipes */
6120 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
6121 ICE_AQ_RECIPE_ACT_INV_ACT)
6124 /* if number of words we are looking for match */
6125 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
6126 struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
6127 struct ice_fv_word *be = lkup_exts->fv_words;
6128 u16 *cr = recp[i].lkup_exts.field_mask;
6129 u16 *de = lkup_exts->field_mask;
6133 /* ar, cr, and qr are related to the recipe words, while
6134 * be, de, and pe are related to the lookup words
6136 for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
6137 for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
6139 if (ar[qr].off == be[pe].off &&
6140 ar[qr].prot_id == be[pe].prot_id &&
6142 /* Found the "pe"th word in the
6147 /* After walking through all the words in the
6148 * "i"th recipe if "p"th word was not found then
6149 * this recipe is not what we are looking for.
6150 * So break out from this loop and try the next
6153 if (qr >= recp[i].lkup_exts.n_val_words) {
6158 /* If for "i"th recipe the found was never set to false
6159 * then it means we found our match
6161 if (tun_type == recp[i].tun_type && found)
6162 return i; /* Return the recipe ID */
6165 return ICE_MAX_NUM_RECIPES;
6169 * ice_prot_type_to_id - get protocol ID from protocol type
6170 * @type: protocol type
6171 * @id: pointer to variable that will receive the ID
6173 * Returns true if found, false otherwise
6175 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
/* Linear scan of ice_prot_id_tbl; writes the HW protocol ID on match. */
6179 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
6180 if (ice_prot_id_tbl[i].type == type) {
6181 *id = ice_prot_id_tbl[i].protocol_id;
/* Fixed below: header previously named this function "ice_find_valid_words";
 * the actual definition is ice_fill_valid_words.
 */
6188 * ice_fill_valid_words - count valid words
6189 * @rule: advanced rule with lookup information
6190 * @lkup_exts: byte offset extractions of the words that are valid
6192 * calculate valid words in a lookup rule using mask value
6195 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
6196 struct ice_prot_lkup_ext *lkup_exts)
6198 u8 j, word, prot_id, ret_val;
6200 if (!ice_prot_type_to_id(rule->type, &prot_id))
6203 word = lkup_exts->n_val_words;
/* For every 16-bit word of the rule mask that is non-zero, append its
 * protocol/offset pair and mask to lkup_exts, bounded by
 * ICE_MAX_CHAIN_WORDS.
 */
6205 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
6206 if (((u16 *)&rule->m_u)[j] &&
6207 rule->type < ARRAY_SIZE(ice_prot_ext)) {
6208 /* No more space to accommodate */
6209 if (word >= ICE_MAX_CHAIN_WORDS)
6211 lkup_exts->fv_words[word].off =
6212 ice_prot_ext[rule->type].offs[j];
6213 lkup_exts->fv_words[word].prot_id =
6214 ice_prot_id_tbl[rule->type].protocol_id;
6215 lkup_exts->field_mask[word] =
6216 BE16_TO_CPU(((_FORCE_ __be16 *)&rule->m_u)[j]);
/* Return value is the number of words added by this call. */
6220 ret_val = word - lkup_exts->n_val_words;
6221 lkup_exts->n_val_words = word;
6227 * ice_create_first_fit_recp_def - Create a recipe grouping
6228 * @hw: pointer to the hardware structure
6229 * @lkup_exts: an array of protocol header extractions
6230 * @rg_list: pointer to a list that stores new recipe groups
6231 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
6233 * Using first fit algorithm, take all the words that are still not done
6234 * and start grouping them in 4-word groups. Each group makes up one
6237 static enum ice_status
6238 ice_create_first_fit_recp_def(struct ice_hw *hw,
6239 struct ice_prot_lkup_ext *lkup_exts,
6240 struct LIST_HEAD_TYPE *rg_list,
6243 struct ice_pref_recipe_group *grp = NULL;
/* Special case: no valid words still produces one (empty) group so a
 * recipe entry exists for the rule.
 */
6248 if (!lkup_exts->n_val_words) {
6249 struct ice_recp_grp_entry *entry;
6251 entry = (struct ice_recp_grp_entry *)
6252 ice_malloc(hw, sizeof(*entry));
6254 return ICE_ERR_NO_MEMORY;
6255 LIST_ADD(&entry->l_entry, rg_list);
6256 grp = &entry->r_group;
6258 grp->n_val_pairs = 0;
6261 /* Walk through every word in the rule to check if it is not done. If so
6262 * then this word needs to be part of a new recipe.
6264 for (j = 0; j < lkup_exts->n_val_words; j++)
6265 if (!ice_is_bit_set(lkup_exts->done, j)) {
/* Start a fresh group when none exists yet or the current one
 * is full (ICE_NUM_WORDS_RECIPE pairs).
 */
6267 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
6268 struct ice_recp_grp_entry *entry;
6270 entry = (struct ice_recp_grp_entry *)
6271 ice_malloc(hw, sizeof(*entry));
6273 return ICE_ERR_NO_MEMORY;
6274 LIST_ADD(&entry->l_entry, rg_list);
6275 grp = &entry->r_group;
6279 grp->pairs[grp->n_val_pairs].prot_id =
6280 lkup_exts->fv_words[j].prot_id;
6281 grp->pairs[grp->n_val_pairs].off =
6282 lkup_exts->fv_words[j].off;
6283 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
6291 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
6292 * @hw: pointer to the hardware structure
6293 * @fv_list: field vector with the extraction sequence information
6294 * @rg_list: recipe groupings with protocol-offset pairs
6296 * Helper function to fill in the field vector indices for protocol-offset
6297 * pairs. These indexes are then ultimately programmed into a recipe.
6299 static enum ice_status
6300 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
6301 struct LIST_HEAD_TYPE *rg_list)
6303 struct ice_sw_fv_list_entry *fv;
6304 struct ice_recp_grp_entry *rg;
6305 struct ice_fv_word *fv_ext;
6307 if (LIST_EMPTY(fv_list))
/* Only the first field vector in the list is consulted for indices. */
6310 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
6311 fv_ext = fv->fv_ptr->ew;
6313 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
6316 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
6317 struct ice_fv_word *pr;
6322 pr = &rg->r_group.pairs[i];
6323 mask = rg->r_group.mask[i];
6325 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
6326 if (fv_ext[j].prot_id == pr->prot_id &&
6327 fv_ext[j].off == pr->off) {
6330 /* Store index of field vector */
6332 rg->fv_mask[i] = mask;
6336 /* Protocol/offset could not be found, caller gave an
6340 return ICE_ERR_PARAM;
6348 * ice_find_free_recp_res_idx - find free result indexes for recipe
6349 * @hw: pointer to hardware structure
6350 * @profiles: bitmap of profiles that will be associated with the new recipe
6351 * @free_idx: pointer to variable to receive the free index bitmap
6353 * The algorithm used here is:
6354 * 1. When creating a new recipe, create a set P which contains all
6355 * Profiles that will be associated with our new recipe
6357 * 2. For each Profile p in set P:
6358 * a. Add all recipes associated with Profile p into set R
6359 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
6360 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
6361 * i. Or just assume they all have the same possible indexes:
6363 * i.e., PossibleIndexes = 0x0000F00000000000
6365 * 3. For each Recipe r in set R:
6366 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
6367 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
6369 * FreeIndexes will contain the bits indicating the indexes free for use,
6370 * then the code needs to update the recipe[r].used_result_idx_bits to
6371 * indicate which indexes were selected for use by this recipe.
6374 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
6375 ice_bitmap_t *free_idx)
6377 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6378 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6379 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
6382 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6383 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6384 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
6385 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
/* Start with all indexes possible; intersect per-profile below. */
6387 ice_bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
6389 /* For each profile we are going to associate the recipe with, add the
6390 * recipes that are associated with that profile. This will give us
6391 * the set of recipes that our recipe may collide with. Also, determine
6392 * what possible result indexes are usable given this set of profiles.
6394 ice_for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
6395 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
6396 ICE_MAX_NUM_RECIPES);
6397 ice_and_bitmap(possible_idx, possible_idx,
6398 hw->switch_info->prof_res_bm[bit],
6402 /* For each recipe that our new recipe may collide with, determine
6403 * which indexes have been used.
6405 ice_for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
6406 ice_or_bitmap(used_idx, used_idx,
6407 hw->switch_info->recp_list[bit].res_idxs,
/* free = possible XOR used (used is a subset of possible here). */
6410 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
6412 /* return number of free indexes */
6413 return (u16)ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS);
6417 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
6418 * @hw: pointer to hardware structure
6419 * @rm: recipe management list entry
6420 * @profiles: bitmap of profiles that will be associated.
6422 static enum ice_status
6423 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
6424 ice_bitmap_t *profiles)
6426 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6427 struct ice_aqc_recipe_data_elem *tmp;
6428 struct ice_aqc_recipe_data_elem *buf;
6429 struct ice_recp_grp_entry *entry;
6430 enum ice_status status;
6436 /* When more than one recipe are required, another recipe is needed to
6437 * chain them together. Matching a tunnel metadata ID takes up one of
6438 * the match fields in the chaining recipe reducing the number of
6439 * chained recipes by one.
6441 /* check number of free result indices */
6442 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6443 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
6445 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
6446 free_res_idx, rm->n_grp_count);
6448 if (rm->n_grp_count > 1) {
6449 if (rm->n_grp_count > free_res_idx)
6450 return ICE_ERR_MAX_LIMIT;
6455 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
6456 return ICE_ERR_MAX_LIMIT;
/* tmp holds the FW's current recipe table (template); buf is the set of
 * recipe entries this function programs.
 */
6458 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
6459 ICE_MAX_NUM_RECIPES,
6462 return ICE_ERR_NO_MEMORY;
6464 buf = (struct ice_aqc_recipe_data_elem *)
6465 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
6467 status = ICE_ERR_NO_MEMORY;
6471 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
6472 recipe_count = ICE_MAX_NUM_RECIPES;
6473 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
6475 if (status || recipe_count == 0)
6478 /* Allocate the recipe resources, and configure them according to the
6479 * match fields from protocol headers and extracted field vectors.
6481 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
6482 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
6485 status = ice_alloc_recipe(hw, &entry->rid);
6489 /* Clear the result index of the located recipe, as this will be
6490 * updated, if needed, later in the recipe creation process.
6492 tmp[0].content.result_indx = 0;
6494 buf[recps] = tmp[0];
6495 buf[recps].recipe_indx = (u8)entry->rid;
6496 /* if the recipe is a non-root recipe RID should be programmed
6497 * as 0 for the rules to be applied correctly.
6499 buf[recps].content.rid = 0;
6500 ice_memset(&buf[recps].content.lkup_indx, 0,
6501 sizeof(buf[recps].content.lkup_indx),
6504 /* All recipes use look-up index 0 to match switch ID. */
6505 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
6506 buf[recps].content.mask[0] =
6507 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
6508 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
6511 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
6512 buf[recps].content.lkup_indx[i] = 0x80;
6513 buf[recps].content.mask[i] = 0;
/* Now overwrite indexes 1..n with this group's real fv_idx/fv_mask. */
6516 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
6517 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
6518 buf[recps].content.mask[i + 1] =
6519 CPU_TO_LE16(entry->fv_mask[i]);
6522 if (rm->n_grp_count > 1) {
6523 /* Checks to see if there really is a valid result index
6526 if (chain_idx >= ICE_MAX_FV_WORDS) {
6527 ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
6528 status = ICE_ERR_MAX_LIMIT;
/* Consume one free result index per chained sub-recipe. */
6532 entry->chain_idx = chain_idx;
6533 buf[recps].content.result_indx =
6534 ICE_AQ_RECIPE_RESULT_EN |
6535 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
6536 ICE_AQ_RECIPE_RESULT_DATA_M);
6537 ice_clear_bit(chain_idx, result_idx_bm);
6538 chain_idx = ice_find_first_bit(result_idx_bm,
6542 /* fill recipe dependencies */
6543 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
6544 ICE_MAX_NUM_RECIPES);
6545 ice_set_bit(buf[recps].recipe_indx,
6546 (ice_bitmap_t *)buf[recps].recipe_bitmap);
6547 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
6551 if (rm->n_grp_count == 1) {
/* Single-group case: the lone recipe is its own root. */
6552 rm->root_rid = buf[0].recipe_indx;
6553 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
6554 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
6555 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
6556 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
6557 sizeof(buf[0].recipe_bitmap),
6558 ICE_NONDMA_TO_NONDMA);
6560 status = ICE_ERR_BAD_PTR;
6563 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
6564 * the recipe which is getting created if specified
6565 * by user. Usually any advanced switch filter, which results
6566 * into new extraction sequence, ended up creating a new recipe
6567 * of type ROOT and usually recipes are associated with profiles
6568 * Switch rule referreing newly created recipe, needs to have
6569 * either/or 'fwd' or 'join' priority, otherwise switch rule
6570 * evaluation will not happen correctly. In other words, if
6571 * switch rule to be evaluated on priority basis, then recipe
6572 * needs to have priority, otherwise it will be evaluated last.
6574 buf[0].content.act_ctrl_fwd_priority = rm->priority;
6576 struct ice_recp_grp_entry *last_chain_entry;
/* Multi-group case: allocate one extra root recipe whose lookup
 * indexes are the chain (result) indexes of the sub-recipes.
 */
6579 /* Allocate the last recipe that will chain the outcomes of the
6580 * other recipes together
6582 status = ice_alloc_recipe(hw, &rid);
6586 buf[recps].recipe_indx = (u8)rid;
6587 buf[recps].content.rid = (u8)rid;
6588 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
6589 /* the new entry created should also be part of rg_list to
6590 * make sure we have complete recipe
6592 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
6593 sizeof(*last_chain_entry));
6594 if (!last_chain_entry) {
6595 status = ICE_ERR_NO_MEMORY;
6598 last_chain_entry->rid = rid;
6599 ice_memset(&buf[recps].content.lkup_indx, 0,
6600 sizeof(buf[recps].content.lkup_indx),
6602 /* All recipes use look-up index 0 to match switch ID. */
6603 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
6604 buf[recps].content.mask[0] =
6605 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
6606 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
6607 buf[recps].content.lkup_indx[i] =
6608 ICE_AQ_RECIPE_LKUP_IGNORE;
6609 buf[recps].content.mask[i] = 0;
6613 /* update r_bitmap with the recp that is used for chaining */
6614 ice_set_bit(rid, rm->r_bitmap);
6615 /* this is the recipe that chains all the other recipes so it
6616 * should not have a chaining ID to indicate the same
6618 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
6619 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
6621 last_chain_entry->fv_idx[i] = entry->chain_idx;
6622 buf[recps].content.lkup_indx[i] = entry->chain_idx;
6623 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
6624 ice_set_bit(entry->rid, rm->r_bitmap);
6626 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
6627 if (sizeof(buf[recps].recipe_bitmap) >=
6628 sizeof(rm->r_bitmap)) {
6629 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
6630 sizeof(buf[recps].recipe_bitmap),
6631 ICE_NONDMA_TO_NONDMA);
6633 status = ICE_ERR_BAD_PTR;
6636 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
6639 rm->root_rid = (u8)rid;
/* Program the assembled recipes under the change lock. */
6641 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
6645 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
6646 ice_release_change_lock(hw);
6650 /* Every recipe that just got created add it to the recipe
6653 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
6654 struct ice_switch_info *sw = hw->switch_info;
6655 bool is_root, idx_found = false;
6656 struct ice_sw_recipe *recp;
6657 u16 idx, buf_idx = 0;
6659 /* find buffer index for copying some data */
6660 for (idx = 0; idx < rm->n_grp_count; idx++)
6661 if (buf[idx].recipe_indx == entry->rid) {
6667 status = ICE_ERR_OUT_OF_RANGE;
/* Mirror the programmed recipe into SW bookkeeping. */
6671 recp = &sw->recp_list[entry->rid];
6672 is_root = (rm->root_rid == entry->rid);
6673 recp->is_root = is_root;
6675 recp->root_rid = entry->rid;
6676 recp->big_recp = (is_root && rm->n_grp_count > 1);
6678 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
6679 entry->r_group.n_val_pairs *
6680 sizeof(struct ice_fv_word),
6681 ICE_NONDMA_TO_NONDMA);
6683 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
6684 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
6686 /* Copy non-result fv index values and masks to recipe. This
6687 * call will also update the result recipe bitmask.
6689 ice_collect_result_idx(&buf[buf_idx], recp);
6691 /* for non-root recipes, also copy to the root, this allows
6692 * easier matching of a complete chained recipe
6695 ice_collect_result_idx(&buf[buf_idx],
6696 &sw->recp_list[rm->root_rid]);
6698 recp->n_ext_words = entry->r_group.n_val_pairs;
6699 recp->chain_idx = entry->chain_idx;
6700 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
6701 recp->n_grp_count = rm->n_grp_count;
6702 recp->tun_type = rm->tun_type;
6703 recp->recp_created = true;
6717 * ice_create_recipe_group - creates recipe group
6718 * @hw: pointer to hardware structure
6719 * @rm: recipe management list entry
6720 * @lkup_exts: lookup elements
6722 static enum ice_status
6723 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
6724 struct ice_prot_lkup_ext *lkup_exts)
6726 enum ice_status status;
6729 rm->n_grp_count = 0;
6731 /* Create recipes for words that are marked not done by packing them
6734 status = ice_create_first_fit_recp_def(hw, lkup_exts,
6735 &rm->rg_list, &recp_count);
/* On success, cache the word count, extraction words and masks on rm. */
6737 rm->n_grp_count += recp_count;
6738 rm->n_ext_words = lkup_exts->n_val_words;
6739 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
6740 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
6741 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
6742 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
6749 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
6750 * @hw: pointer to hardware structure
6751 * @lkups: lookup elements or match criteria for the advanced recipe, one
6752 * structure per protocol header
6753 * @lkups_cnt: number of protocols
6754 * @bm: bitmap of field vectors to consider
6755 * @fv_list: pointer to a list that holds the returned field vectors
6757 static enum ice_status
6758 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6759 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
6761 enum ice_status status;
/* Build a temporary array of HW protocol IDs, one per lookup element,
 * then query the matching field vectors; prot_ids is freed on all paths.
 */
6768 prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
6770 return ICE_ERR_NO_MEMORY;
6772 for (i = 0; i < lkups_cnt; i++)
6773 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
6774 status = ICE_ERR_CFG;
6778 /* Find field vectors that include all specified protocol types */
6779 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
6782 ice_free(hw, prot_ids);
/* Fixed below: header previously named this function "ice_tun_type_match_mask";
 * the actual definition is ice_tun_type_match_word.
 */
6787 * ice_tun_type_match_word - determine if tun type needs a match mask
6788 * @tun_type: tunnel type
6789 * @mask: mask to be used for the tunnel
6791 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
6794 case ICE_SW_TUN_VXLAN_GPE:
6795 case ICE_SW_TUN_GENEVE:
6796 case ICE_SW_TUN_VXLAN:
6797 case ICE_SW_TUN_NVGRE:
6798 case ICE_SW_TUN_UDP:
6799 case ICE_ALL_TUNNELS:
6800 case ICE_SW_TUN_AND_NON_TUN_QINQ:
6801 case ICE_NON_TUN_QINQ:
6802 case ICE_SW_TUN_PPPOE_QINQ:
6803 case ICE_SW_TUN_PPPOE_PAY_QINQ:
6804 case ICE_SW_TUN_PPPOE_IPV4_QINQ:
6805 case ICE_SW_TUN_PPPOE_IPV6_QINQ:
6806 *mask = ICE_TUN_FLAG_MASK;
/* GENEVE/VXLAN-with-VLAN variants mask out the VLAN bit of the flag. */
6809 case ICE_SW_TUN_GENEVE_VLAN:
6810 case ICE_SW_TUN_VXLAN_VLAN:
6811 *mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
6821 * ice_add_special_words - Add words that are not protocols, such as metadata
6822 * @rinfo: other information regarding the rule e.g. priority and action info
6823 * @lkup_exts: lookup word structure
/* Appends a metadata lookup word (the tunnel-flag MDID) to @lkup_exts when
 * the rule's tunnel type requires matching the tunnel bits.  Returns
 * ICE_ERR_MAX_LIMIT when the lookup-word table is already full.
 * NOTE(review): extraction dropped the local mask declaration, the closing
 * else branch and the success return from this view.
 */
6825 static enum ice_status
6826 ice_add_special_words(struct ice_adv_rule_info *rinfo,
6827 struct ice_prot_lkup_ext *lkup_exts)
6831 /* If this is a tunneled packet, then add recipe index to match the
6832 * tunnel bit in the packet metadata flags.
6834 if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
6835 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
/* Claim the next free lookup-word slot. */
6836 u8 word = lkup_exts->n_val_words++;
/* Program the slot to extract the tunnel-flag metadata word. */
6838 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
6839 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
6840 lkup_exts->field_mask[word] = mask;
6842 return ICE_ERR_MAX_LIMIT;
6849 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
6850 * @hw: pointer to hardware structure
6851 * @rinfo: other information regarding the rule e.g. priority and action info
6852 * @bm: pointer to memory for returning the bitmap of field vectors
/* Builds the bitmap of field vectors (profiles) compatible with the rule's
 * tunnel type.  Tunnel types with a matching profile class set @prof_type
 * and fall through to ice_get_sw_fv_bitmap(); types tied to specific
 * profiles set the individual ICE_PROFID_* bits directly in @bm.
 * NOTE(review): extraction dropped break/return statements between cases
 * and the default label -- the case groupings below reflect only the
 * visible lines.
 */
6855 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
6858 enum ice_prof_type prof_type;
/* Start from an empty profile bitmap. */
6860 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
6862 switch (rinfo->tun_type) {
6864 case ICE_NON_TUN_QINQ:
6865 prof_type = ICE_PROF_NON_TUN;
6867 case ICE_ALL_TUNNELS:
6868 prof_type = ICE_PROF_TUN_ALL;
6870 case ICE_SW_TUN_VXLAN_GPE:
6871 case ICE_SW_TUN_GENEVE:
6872 case ICE_SW_TUN_GENEVE_VLAN:
6873 case ICE_SW_TUN_VXLAN:
6874 case ICE_SW_TUN_VXLAN_VLAN:
6875 case ICE_SW_TUN_UDP:
6876 case ICE_SW_TUN_GTP:
6877 prof_type = ICE_PROF_TUN_UDP;
6879 case ICE_SW_TUN_NVGRE:
6880 prof_type = ICE_PROF_TUN_GRE;
6882 case ICE_SW_TUN_PPPOE:
6883 case ICE_SW_TUN_PPPOE_QINQ:
6884 prof_type = ICE_PROF_TUN_PPPOE;
/* From here on, tunnel types select explicit profile IDs rather than a
 * whole profile class.
 */
6886 case ICE_SW_TUN_PPPOE_PAY:
6887 case ICE_SW_TUN_PPPOE_PAY_QINQ:
6888 ice_set_bit(ICE_PROFID_PPPOE_PAY, bm);
6890 case ICE_SW_TUN_PPPOE_IPV4:
6891 case ICE_SW_TUN_PPPOE_IPV4_QINQ:
6892 ice_set_bit(ICE_PROFID_PPPOE_IPV4_OTHER, bm);
6893 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
6894 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
6896 case ICE_SW_TUN_PPPOE_IPV4_TCP:
6897 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
6899 case ICE_SW_TUN_PPPOE_IPV4_UDP:
6900 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
6902 case ICE_SW_TUN_PPPOE_IPV6:
6903 case ICE_SW_TUN_PPPOE_IPV6_QINQ:
6904 ice_set_bit(ICE_PROFID_PPPOE_IPV6_OTHER, bm);
6905 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
6906 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
6908 case ICE_SW_TUN_PPPOE_IPV6_TCP:
6909 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
6911 case ICE_SW_TUN_PPPOE_IPV6_UDP:
6912 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
6914 case ICE_SW_TUN_PROFID_IPV6_ESP:
6915 case ICE_SW_TUN_IPV6_ESP:
6916 ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
6918 case ICE_SW_TUN_PROFID_IPV6_AH:
6919 case ICE_SW_TUN_IPV6_AH:
6920 ice_set_bit(ICE_PROFID_IPV6_AH, bm);
6922 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6923 case ICE_SW_TUN_IPV6_L2TPV3:
6924 ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
6926 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6927 case ICE_SW_TUN_IPV6_NAT_T:
6928 ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
6930 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6931 ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
6933 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6934 ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
6936 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6937 ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
6939 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6940 ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
6942 case ICE_SW_TUN_IPV4_NAT_T:
6943 ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
6945 case ICE_SW_TUN_IPV4_L2TPV3:
6946 ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
6948 case ICE_SW_TUN_IPV4_ESP:
6949 ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
6951 case ICE_SW_TUN_IPV4_AH:
6952 ice_set_bit(ICE_PROFID_IPV4_AH, bm);
6954 case ICE_SW_IPV4_TCP:
6955 ice_set_bit(ICE_PROFID_IPV4_TCP, bm);
6957 case ICE_SW_IPV4_UDP:
6958 ice_set_bit(ICE_PROFID_IPV4_UDP, bm);
6960 case ICE_SW_IPV6_TCP:
6961 ice_set_bit(ICE_PROFID_IPV6_TCP, bm);
6963 case ICE_SW_IPV6_UDP:
6964 ice_set_bit(ICE_PROFID_IPV6_UDP, bm);
/* GTP-U cases: set all inner-protocol variants (OTHER/UDP/TCP), both with
 * and without the GTP extension header (EH).
 */
6966 case ICE_SW_TUN_IPV4_GTPU_IPV4:
6967 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, bm);
6968 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_OTHER, bm);
6969 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
6970 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
6971 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
6972 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
6974 case ICE_SW_TUN_IPV6_GTPU_IPV4:
6975 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, bm);
6976 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_OTHER, bm);
6977 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
6978 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
6979 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
6980 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
6982 case ICE_SW_TUN_IPV4_GTPU_IPV6:
6983 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, bm);
6984 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_OTHER, bm);
6985 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
6986 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
6987 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
6988 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
6990 case ICE_SW_TUN_IPV6_GTPU_IPV6:
6991 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, bm);
6992 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_OTHER, bm);
6993 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
6994 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
6995 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
6996 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
6998 case ICE_SW_TUN_AND_NON_TUN:
6999 case ICE_SW_TUN_AND_NON_TUN_QINQ:
7001 prof_type = ICE_PROF_ALL;
/* For profile-class cases: expand the class into individual bits. */
7005 ice_get_sw_fv_bitmap(hw, prof_type, bm);
7009 * ice_is_prof_rule - determine if rule type is a profile rule
7010 * @type: the rule type
7012 * if the rule type is a profile rule, that means that there no field value
7013 * match required, in this case just a profile hit is required.
/* Predicate: the ICE_SW_TUN_PROFID_* types listed below are "profile
 * rules" -- the packet is selected by profile hit alone, with no field
 * value lookups.
 * NOTE(review): extraction dropped the switch header, the return
 * statements and the default case from this view.
 */
7015 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
7018 case ICE_SW_TUN_PROFID_IPV6_ESP:
7019 case ICE_SW_TUN_PROFID_IPV6_AH:
7020 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
7021 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
7022 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
7023 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
7024 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
7025 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
7035 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
7036 * @hw: pointer to hardware structure
7037 * @lkups: lookup elements or match criteria for the advanced recipe, one
7038 * structure per protocol header
7039 * @lkups_cnt: number of protocols
7040 * @rinfo: other information regarding the rule e.g. priority and action info
7041 * @rid: return the recipe ID of the recipe created
/* High-level flow (visible code): validate lookups -> collect valid match
 * words -> find compatible field vectors -> add special metadata words ->
 * group words into recipes -> reuse an existing matching recipe if found,
 * otherwise program a new one and associate it with all profiles in the
 * common field vector set.
 * NOTE(review): extraction dropped many lines here (gotos, error labels,
 * frees, closing braces); the cleanup structure cannot be fully verified
 * from this view.
 */
7043 static enum ice_status
7044 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7045 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
7047 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
7048 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7049 struct ice_prot_lkup_ext *lkup_exts;
7050 struct ice_recp_grp_entry *r_entry;
7051 struct ice_sw_fv_list_entry *fvit;
7052 struct ice_recp_grp_entry *r_tmp;
7053 struct ice_sw_fv_list_entry *tmp;
7054 enum ice_status status = ICE_SUCCESS;
7055 struct ice_sw_recipe *rm;
/* Non-profile rules must carry at least one lookup element. */
7058 if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
7059 return ICE_ERR_PARAM;
7061 lkup_exts = (struct ice_prot_lkup_ext *)
7062 ice_malloc(hw, sizeof(*lkup_exts))
7064 return ICE_ERR_NO_MEMORY;
7066 /* Determine the number of words to be matched and if it exceeds a
7067 * recipe's restrictions
7069 for (i = 0; i < lkups_cnt; i++) {
7072 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
7073 status = ICE_ERR_CFG;
7074 goto err_free_lkup_exts;
7077 count = ice_fill_valid_words(&lkups[i], lkup_exts);
7079 status = ICE_ERR_CFG;
7080 goto err_free_lkup_exts;
7084 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
7086 status = ICE_ERR_NO_MEMORY;
7087 goto err_free_lkup_exts;
7090 /* Get field vectors that contain fields extracted from all the protocol
7091 * headers being programmed.
7093 INIT_LIST_HEAD(&rm->fv_list);
7094 INIT_LIST_HEAD(&rm->rg_list);
7096 /* Get bitmap of field vectors (profiles) that are compatible with the
7097 * rule request; only these will be searched in the subsequent call to
7100 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
7102 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
7106 /* Create any special protocol/offset pairs, such as looking at tunnel
7107 * bits by extracting metadata
7109 status = ice_add_special_words(rinfo, lkup_exts);
7111 goto err_free_lkup_exts;
7113 /* Group match words into recipes using preferred recipe grouping
7116 status = ice_create_recipe_group(hw, rm, lkup_exts);
7120 /* set the recipe priority if specified */
7121 rm->priority = (u8)rinfo->priority;
7123 /* Find offsets from the field vector. Pick the first one for all the
7126 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
7130 /* An empty FV list means to use all the profiles returned in the
7133 if (LIST_EMPTY(&rm->fv_list)) {
7136 ice_for_each_set_bit(j, fv_bitmap, ICE_MAX_NUM_PROFILES) {
7137 struct ice_sw_fv_list_entry *fvl;
7139 fvl = (struct ice_sw_fv_list_entry *)
7140 ice_malloc(hw, sizeof(*fvl));
7144 fvl->profile_id = j;
7145 LIST_ADD(&fvl->list_entry, &rm->fv_list);
7149 /* get bitmap of all profiles the recipe will be associated with */
7150 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7151 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7153 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
7154 ice_set_bit((u16)fvit->profile_id, profiles);
7157 /* Look for a recipe which matches our requested fv / mask list */
7158 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
7159 if (*rid < ICE_MAX_NUM_RECIPES)
7160 /* Success if found a recipe that match the existing criteria */
7163 rm->tun_type = rinfo->tun_type;
7164 /* Recipe we need does not exist, add a recipe */
7165 status = ice_add_sw_recipe(hw, rm, profiles);
7169 /* Associate all the recipes created with all the profiles in the
7170 * common field vector.
7172 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7174 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Read the profile's current recipe associations, OR in the new
 * recipes, then write the merged map back under the change lock.
 */
7177 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
7178 (u8 *)r_bitmap, NULL);
7182 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
7183 ICE_MAX_NUM_RECIPES);
7184 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7188 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
7191 ice_release_change_lock(hw);
7196 /* Update profile to recipe bitmap array */
7197 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
7198 ICE_MAX_NUM_RECIPES);
7200 /* Update recipe to profile bitmap array */
7201 ice_for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
7202 ice_set_bit((u16)fvit->profile_id,
7203 recipe_to_profile[j]);
/* Publish the resulting root recipe ID and cache its lookup words. */
7206 *rid = rm->root_rid;
7207 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
7208 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
/* Cleanup: release the recipe-group and field-vector lists. */
7210 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
7211 ice_recp_grp_entry, l_entry) {
7212 LIST_DEL(&r_entry->l_entry);
7213 ice_free(hw, r_entry);
7216 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
7218 LIST_DEL(&fvit->list_entry);
7223 ice_free(hw, rm->root_buf);
7228 ice_free(hw, lkup_exts);
7234 * ice_find_dummy_packet - find dummy packet by tunnel type
7236 * @lkups: lookup elements or match criteria for the advanced recipe, one
7237 * structure per protocol header
7238 * @lkups_cnt: number of protocols
7239 * @tun_type: tunnel type from the match criteria
7240 * @pkt: dummy packet to fill according to filter match criteria
7241 * @pkt_len: packet length of dummy packet
7242 * @offsets: pointer to receive the pointer to the offsets for the packet
/* Selects the dummy packet template (and its protocol-offset table) that
 * best matches the tunnel type plus the protocols present in @lkups.
 * The first loop derives flags (tcp/udp/ipv6/vlan/gre/pppoe-ipv6/...) from
 * the lookup list; the cascade of ifs then picks a template, most specific
 * tunnel types first, generic ipv4/ipv6 tcp/udp (with or without VLAN)
 * last.
 * NOTE(review): extraction dropped the flag assignments inside the first
 * loop, most "return;" statements and closing braces; also the condition
 * "(tcp && ipv6) || ipv6" near the end is logically just "ipv6" --
 * presumably kept for readability upstream; confirm before changing.
 */
7245 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7246 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
7248 const struct ice_dummy_pkt_offsets **offsets)
7250 bool tcp = false, udp = false, ipv6 = false, vlan = false;
/* Scan lookups once to learn which protocol layers the caller matches. */
7254 for (i = 0; i < lkups_cnt; i++) {
7255 if (lkups[i].type == ICE_UDP_ILOS)
7257 else if (lkups[i].type == ICE_TCP_IL)
7259 else if (lkups[i].type == ICE_IPV6_OFOS)
7261 else if (lkups[i].type == ICE_VLAN_OFOS)
/* Outer IPv4 with protocol 0x2F (GRE) fully masked -> NVGRE. */
7263 else if (lkups[i].type == ICE_IPV4_OFOS &&
7264 lkups[i].h_u.ipv4_hdr.protocol ==
7265 ICE_IPV4_NVGRE_PROTO_ID &&
7266 lkups[i].m_u.ipv4_hdr.protocol ==
/* PPPoE carrying PPP protocol 0x0057 -> IPv6 over PPPoE. */
7269 else if (lkups[i].type == ICE_PPPOE &&
7270 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
7271 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
7272 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
/* Outer ethertype 0x86DD -> IPv6. */
7275 else if (lkups[i].type == ICE_ETYPE_OL &&
7276 lkups[i].h_u.ethertype.ethtype_id ==
7277 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
7278 lkups[i].m_u.ethertype.ethtype_id ==
/* Inner IPv4 with TCP protocol (0x06). */
7281 else if (lkups[i].type == ICE_IPV4_IL &&
7282 lkups[i].h_u.ipv4_hdr.protocol ==
7284 lkups[i].m_u.ipv4_hdr.protocol ==
/* QinQ (double VLAN) templates, IPv6 vs IPv4 payload. */
7289 if ((tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7290 tun_type == ICE_NON_TUN_QINQ) && ipv6) {
7291 *pkt = dummy_qinq_ipv6_pkt;
7292 *pkt_len = sizeof(dummy_qinq_ipv6_pkt);
7293 *offsets = dummy_qinq_ipv6_packet_offsets;
7295 } else if (tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7296 tun_type == ICE_NON_TUN_QINQ) {
7297 *pkt = dummy_qinq_ipv4_pkt;
7298 *pkt_len = sizeof(dummy_qinq_ipv4_pkt);
7299 *offsets = dummy_qinq_ipv4_packet_offsets;
/* QinQ + PPPoE templates. */
7303 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_QINQ) {
7304 *pkt = dummy_qinq_pppoe_ipv6_packet;
7305 *pkt_len = sizeof(dummy_qinq_pppoe_ipv6_packet);
7306 *offsets = dummy_qinq_pppoe_packet_ipv6_offsets;
7308 } else if (tun_type == ICE_SW_TUN_PPPOE_IPV4_QINQ) {
7309 *pkt = dummy_qinq_pppoe_ipv4_pkt;
7310 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7311 *offsets = dummy_qinq_pppoe_ipv4_packet_offsets;
7313 } else if (tun_type == ICE_SW_TUN_PPPOE_QINQ ||
7314 tun_type == ICE_SW_TUN_PPPOE_PAY_QINQ) {
7315 *pkt = dummy_qinq_pppoe_ipv4_pkt;
7316 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7317 *offsets = dummy_qinq_pppoe_packet_offsets;
/* GTP-U templates, including the no-payload variants. */
7321 if (tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY) {
7322 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7323 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7324 *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
7326 } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
7327 *pkt = dummy_ipv6_gtpu_ipv6_packet;
7328 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
7329 *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
7331 } else if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4) {
7332 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7333 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7334 *offsets = dummy_ipv4_gtpu_ipv4_packet_offsets;
7336 } else if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6) {
7337 *pkt = dummy_ipv4_gtpu_ipv6_packet;
7338 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_packet);
7339 *offsets = dummy_ipv4_gtpu_ipv6_packet_offsets;
7341 } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4) {
7342 *pkt = dummy_ipv6_gtpu_ipv4_packet;
7343 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_packet);
7344 *offsets = dummy_ipv6_gtpu_ipv4_packet_offsets;
7346 } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6) {
7347 *pkt = dummy_ipv6_gtpu_ipv6_packet;
7348 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
7349 *offsets = dummy_ipv6_gtpu_ipv6_packet_offsets;
/* IPsec (ESP/AH/NAT-T) and L2TPv3 templates. */
7353 if (tun_type == ICE_SW_TUN_IPV4_ESP) {
7354 *pkt = dummy_ipv4_esp_pkt;
7355 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
7356 *offsets = dummy_ipv4_esp_packet_offsets;
7360 if (tun_type == ICE_SW_TUN_IPV6_ESP) {
7361 *pkt = dummy_ipv6_esp_pkt;
7362 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
7363 *offsets = dummy_ipv6_esp_packet_offsets;
7367 if (tun_type == ICE_SW_TUN_IPV4_AH) {
7368 *pkt = dummy_ipv4_ah_pkt;
7369 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
7370 *offsets = dummy_ipv4_ah_packet_offsets;
7374 if (tun_type == ICE_SW_TUN_IPV6_AH) {
7375 *pkt = dummy_ipv6_ah_pkt;
7376 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
7377 *offsets = dummy_ipv6_ah_packet_offsets;
7381 if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
7382 *pkt = dummy_ipv4_nat_pkt;
7383 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
7384 *offsets = dummy_ipv4_nat_packet_offsets;
7388 if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
7389 *pkt = dummy_ipv6_nat_pkt;
7390 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
7391 *offsets = dummy_ipv6_nat_packet_offsets;
7395 if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
7396 *pkt = dummy_ipv4_l2tpv3_pkt;
7397 *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
7398 *offsets = dummy_ipv4_l2tpv3_packet_offsets;
7402 if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
7403 *pkt = dummy_ipv6_l2tpv3_pkt;
7404 *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
7405 *offsets = dummy_ipv6_l2tpv3_packet_offsets;
7409 if (tun_type == ICE_SW_TUN_GTP) {
7410 *pkt = dummy_udp_gtp_packet;
7411 *pkt_len = sizeof(dummy_udp_gtp_packet);
7412 *offsets = dummy_udp_gtp_packet_offsets;
/* PPPoE templates, selected by inner protocol flags. */
7416 if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
7417 *pkt = dummy_pppoe_ipv6_packet;
7418 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
7419 *offsets = dummy_pppoe_packet_offsets;
7421 } else if (tun_type == ICE_SW_TUN_PPPOE ||
7422 tun_type == ICE_SW_TUN_PPPOE_PAY) {
7423 *pkt = dummy_pppoe_ipv4_packet;
7424 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
7425 *offsets = dummy_pppoe_packet_offsets;
7429 if (tun_type == ICE_SW_TUN_PPPOE_IPV4) {
7430 *pkt = dummy_pppoe_ipv4_packet;
7431 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
7432 *offsets = dummy_pppoe_packet_ipv4_offsets;
7436 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_TCP) {
7437 *pkt = dummy_pppoe_ipv4_tcp_packet;
7438 *pkt_len = sizeof(dummy_pppoe_ipv4_tcp_packet);
7439 *offsets = dummy_pppoe_ipv4_tcp_packet_offsets;
7443 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_UDP) {
7444 *pkt = dummy_pppoe_ipv4_udp_packet;
7445 *pkt_len = sizeof(dummy_pppoe_ipv4_udp_packet);
7446 *offsets = dummy_pppoe_ipv4_udp_packet_offsets;
7450 if (tun_type == ICE_SW_TUN_PPPOE_IPV6) {
7451 *pkt = dummy_pppoe_ipv6_packet;
7452 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
7453 *offsets = dummy_pppoe_packet_ipv6_offsets;
7457 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_TCP) {
7458 *pkt = dummy_pppoe_ipv6_tcp_packet;
7459 *pkt_len = sizeof(dummy_pppoe_ipv6_tcp_packet);
7460 *offsets = dummy_pppoe_packet_ipv6_tcp_offsets;
7464 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_UDP) {
7465 *pkt = dummy_pppoe_ipv6_udp_packet;
7466 *pkt_len = sizeof(dummy_pppoe_ipv6_udp_packet);
7467 *offsets = dummy_pppoe_packet_ipv6_udp_offsets;
/* Plain (non-tunneled) IPv4/IPv6 TCP/UDP templates. */
7471 if (tun_type == ICE_SW_IPV4_TCP) {
7472 *pkt = dummy_tcp_packet;
7473 *pkt_len = sizeof(dummy_tcp_packet);
7474 *offsets = dummy_tcp_packet_offsets;
7478 if (tun_type == ICE_SW_IPV4_UDP) {
7479 *pkt = dummy_udp_packet;
7480 *pkt_len = sizeof(dummy_udp_packet);
7481 *offsets = dummy_udp_packet_offsets;
7485 if (tun_type == ICE_SW_IPV6_TCP) {
7486 *pkt = dummy_tcp_ipv6_packet;
7487 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
7488 *offsets = dummy_tcp_ipv6_packet_offsets;
7492 if (tun_type == ICE_SW_IPV6_UDP) {
7493 *pkt = dummy_udp_ipv6_packet;
7494 *pkt_len = sizeof(dummy_udp_ipv6_packet);
7495 *offsets = dummy_udp_ipv6_packet_offsets;
7499 if (tun_type == ICE_ALL_TUNNELS) {
7500 *pkt = dummy_gre_udp_packet;
7501 *pkt_len = sizeof(dummy_gre_udp_packet);
7502 *offsets = dummy_gre_udp_packet_offsets;
/* NVGRE: pick the TCP or UDP inner variant. */
7506 if (tun_type == ICE_SW_TUN_NVGRE || gre) {
7508 *pkt = dummy_gre_tcp_packet;
7509 *pkt_len = sizeof(dummy_gre_tcp_packet);
7510 *offsets = dummy_gre_tcp_packet_offsets;
7514 *pkt = dummy_gre_udp_packet;
7515 *pkt_len = sizeof(dummy_gre_udp_packet);
7516 *offsets = dummy_gre_udp_packet_offsets;
/* UDP-based tunnels (VXLAN/GENEVE/GPE): TCP or UDP inner variant. */
7520 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
7521 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
7522 tun_type == ICE_SW_TUN_GENEVE_VLAN ||
7523 tun_type == ICE_SW_TUN_VXLAN_VLAN) {
7525 *pkt = dummy_udp_tun_tcp_packet;
7526 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
7527 *offsets = dummy_udp_tun_tcp_packet_offsets;
7531 *pkt = dummy_udp_tun_udp_packet;
7532 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
7533 *offsets = dummy_udp_tun_udp_packet_offsets;
/* Fallback: plain templates chosen from udp/ipv6/vlan flags; TCP is the
 * final default.
 */
7539 *pkt = dummy_vlan_udp_packet;
7540 *pkt_len = sizeof(dummy_vlan_udp_packet);
7541 *offsets = dummy_vlan_udp_packet_offsets;
7544 *pkt = dummy_udp_packet;
7545 *pkt_len = sizeof(dummy_udp_packet);
7546 *offsets = dummy_udp_packet_offsets;
7548 } else if (udp && ipv6) {
7550 *pkt = dummy_vlan_udp_ipv6_packet;
7551 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
7552 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
7555 *pkt = dummy_udp_ipv6_packet;
7556 *pkt_len = sizeof(dummy_udp_ipv6_packet);
7557 *offsets = dummy_udp_ipv6_packet_offsets;
7559 } else if ((tcp && ipv6) || ipv6) {
7561 *pkt = dummy_vlan_tcp_ipv6_packet;
7562 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
7563 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
7566 *pkt = dummy_tcp_ipv6_packet;
7567 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
7568 *offsets = dummy_tcp_ipv6_packet_offsets;
7573 *pkt = dummy_vlan_tcp_packet;
7574 *pkt_len = sizeof(dummy_vlan_tcp_packet);
7575 *offsets = dummy_vlan_tcp_packet_offsets;
7577 *pkt = dummy_tcp_packet;
7578 *pkt_len = sizeof(dummy_tcp_packet);
7579 *offsets = dummy_tcp_packet_offsets;
7584 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
7586 * @lkups: lookup elements or match criteria for the advanced recipe, one
7587 * structure per protocol header
7588 * @lkups_cnt: number of protocols
7589 * @s_rule: stores rule information from the match criteria
7590 * @dummy_pkt: dummy packet to fill according to filter match criteria
7591 * @pkt_len: packet length of dummy packet
7592 * @offsets: offset info for the dummy packet
/* Copies the dummy template into the switch rule buffer, then overlays the
 * caller's header values word-by-word, writing only the 16-bit words whose
 * mask is non-zero so unrelated template bytes are preserved.
 * Returns ICE_ERR_PARAM when a lookup type has no entry in @offsets or an
 * unknown protocol type is seen.
 * NOTE(review): extraction dropped several case labels, break statements
 * and the final return from this view.
 */
7594 static enum ice_status
7595 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7596 struct ice_aqc_sw_rules_elem *s_rule,
7597 const u8 *dummy_pkt, u16 pkt_len,
7598 const struct ice_dummy_pkt_offsets *offsets)
7603 /* Start with a packet with a pre-defined/dummy content. Then, fill
7604 * in the header values to be looked up or matched.
7606 pkt = s_rule->pdata.lkup_tx_rx.hdr;
7608 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
7610 for (i = 0; i < lkups_cnt; i++) {
7611 enum ice_protocol_type type;
7612 u16 offset = 0, len = 0, j;
7615 /* find the start of this layer; it should be found since this
7616 * was already checked when search for the dummy packet
7618 type = lkups[i].type;
7619 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
7620 if (type == offsets[j].type) {
7621 offset = offsets[j].offset;
7626 /* this should never happen in a correct calling sequence */
7628 return ICE_ERR_PARAM;
/* Per-protocol header length used to bound the word copy below. */
7630 switch (lkups[i].type) {
7633 len = sizeof(struct ice_ether_hdr);
7636 len = sizeof(struct ice_ethtype_hdr);
7640 len = sizeof(struct ice_vlan_hdr);
7644 len = sizeof(struct ice_ipv4_hdr);
7648 len = sizeof(struct ice_ipv6_hdr);
7653 len = sizeof(struct ice_l4_hdr);
7656 len = sizeof(struct ice_sctp_hdr);
7659 len = sizeof(struct ice_nvgre);
7664 len = sizeof(struct ice_udp_tnl_hdr);
7668 case ICE_GTP_NO_PAY:
7669 len = sizeof(struct ice_udp_gtp_hdr);
7672 len = sizeof(struct ice_pppoe_hdr);
7675 len = sizeof(struct ice_esp_hdr);
7678 len = sizeof(struct ice_nat_t_hdr);
7681 len = sizeof(struct ice_ah_hdr);
7684 len = sizeof(struct ice_l2tpv3_sess_hdr);
7687 return ICE_ERR_PARAM;
7690 /* the length should be a word multiple */
7691 if (len % ICE_BYTES_PER_WORD)
7694 /* We have the offset to the header start, the length, the
7695 * caller's header values and mask. Use this information to
7696 * copy the data into the dummy packet appropriately based on
7697 * the mask. Note that we need to only write the bits as
7698 * indicated by the mask to make sure we don't improperly write
7699 * over any significant packet data.
7701 for (j = 0; j < len / sizeof(u16); j++)
7702 if (((u16 *)&lkups[i].m_u)[j])
7703 ((u16 *)(pkt + offset))[j] =
7704 (((u16 *)(pkt + offset))[j] &
7705 ~((u16 *)&lkups[i].m_u)[j]) |
7706 (((u16 *)&lkups[i].h_u)[j] &
7707 ((u16 *)&lkups[i].m_u)[j]);
/* Record the final header length in the rule (little-endian). */
7710 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
7716 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
7717 * @hw: pointer to the hardware structure
7718 * @tun_type: tunnel type
7719 * @pkt: dummy packet to fill in
7720 * @offsets: offset info for the dummy packet
/* For VXLAN/GENEVE-class tunnel types, looks up the currently open UDP
 * tunnel port and patches it into the outer UDP destination port of the
 * dummy packet.  Other tunnel types need no patching.
 * NOTE(review): extraction dropped the open_port declaration, some
 * break/return statements and the not-found error path from this view.
 */
7722 static enum ice_status
7723 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
7724 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
7729 case ICE_SW_TUN_AND_NON_TUN:
7730 case ICE_SW_TUN_VXLAN_GPE:
7731 case ICE_SW_TUN_VXLAN:
7732 case ICE_SW_TUN_VXLAN_VLAN:
7733 case ICE_SW_TUN_UDP:
/* Query the port currently bound to the VXLAN tunnel slot. */
7734 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
7738 case ICE_SW_TUN_GENEVE:
7739 case ICE_SW_TUN_GENEVE_VLAN:
7740 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
7745 /* Nothing needs to be done for this tunnel type */
7749 /* Find the outer UDP protocol header and insert the port number */
7750 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
7751 if (offsets[i].type == ICE_UDP_OF) {
7752 struct ice_l4_hdr *hdr;
7755 offset = offsets[i].offset;
7756 hdr = (struct ice_l4_hdr *)&pkt[offset];
/* Tunnel port goes in network byte order. */
7757 hdr->dst_port = CPU_TO_BE16(open_port);
7767 * ice_find_adv_rule_entry - Search a rule entry
7768 * @hw: pointer to the hardware structure
7769 * @lkups: lookup elements or match criteria for the advanced recipe, one
7770 * structure per protocol header
7771 * @lkups_cnt: number of protocols
7772 * @recp_id: recipe ID for which we are finding the rule
7773 * @rinfo: other information regarding the rule e.g. priority and action info
7775 * Helper function to search for a given advance rule entry
7776 * Returns pointer to entry storing the rule if found
/* Walks the filter list of recipe @recp_id and returns the entry whose
 * lookups (count and memcmp-equal contents) and rule info (sw_act flag and
 * tunnel type, per the visible comparison) match the request.
 * NOTE(review): extraction dropped the tail of the matching condition, the
 * return statements and the NULL fallthrough from this view.
 */
7778 static struct ice_adv_fltr_mgmt_list_entry *
7779 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7780 u16 lkups_cnt, u16 recp_id,
7781 struct ice_adv_rule_info *rinfo)
7783 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7784 struct ice_switch_info *sw = hw->switch_info;
7787 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
7788 ice_adv_fltr_mgmt_list_entry, list_entry) {
7789 bool lkups_matched = true;
/* A different lookup count can never match. */
7791 if (lkups_cnt != list_itr->lkups_cnt)
/* Element-wise binary comparison of the lookup arrays. */
7793 for (i = 0; i < list_itr->lkups_cnt; i++)
7794 if (memcmp(&list_itr->lkups[i], &lkups[i],
7796 lkups_matched = false;
7799 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
7800 rinfo->tun_type == list_itr->rule_info.tun_type &&
7808 * ice_adv_add_update_vsi_list
7809 * @hw: pointer to the hardware structure
7810 * @m_entry: pointer to current adv filter management list entry
7811 * @cur_fltr: filter information from the book keeping entry
7812 * @new_fltr: filter information with the new VSI to be added
7814 * Call AQ command to add or update previously created VSI list with new VSI.
7816 * Helper function to do book keeping associated with adding filter information
7817 * The algorithm to do the booking keeping is described below :
7818 * When a VSI needs to subscribe to a given advanced filter
7819 * if only one VSI has been added till now
7820 * Allocate a new VSI list and add two VSIs
7821 * to this list using switch rule command
7822 * Update the previously created switch rule with the
7823 * newly created VSI list ID
7824 * if a VSI list was previously created
7825 * Add the new VSI to the previously created VSI list set
7826 * using the update switch rule command
/* Returns ICE_ERR_NOT_IMPL for action combinations that cannot be merged
 * into a VSI list, ICE_ERR_ALREADY_EXISTS / ICE_SUCCESS when the VSI is
 * already subscribed (per the visible checks), otherwise the status of the
 * VSI-list create/update AQ operations.
 * NOTE(review): extraction dropped some error-path returns and closing
 * braces from this view.
 */
7828 static enum ice_status
7829 ice_adv_add_update_vsi_list(struct ice_hw *hw,
7830 struct ice_adv_fltr_mgmt_list_entry *m_entry,
7831 struct ice_adv_rule_info *cur_fltr,
7832 struct ice_adv_rule_info *new_fltr)
7834 enum ice_status status;
7835 u16 vsi_list_id = 0;
/* Queue/queue-group/drop actions cannot be converted to VSI lists. */
7837 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
7838 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
7839 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
7840 return ICE_ERR_NOT_IMPL;
/* Mixing a queue-forward request with an existing VSI forward is
 * likewise unsupported.
 */
7842 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
7843 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
7844 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
7845 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
7846 return ICE_ERR_NOT_IMPL;
7848 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
7849 /* Only one entry existed in the mapping and it was not already
7850 * a part of a VSI list. So, create a VSI list with the old and
7853 struct ice_fltr_info tmp_fltr;
7854 u16 vsi_handle_arr[2];
7856 /* A rule already exists with the new VSI being added */
7857 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
7858 new_fltr->sw_act.fwd_id.hw_vsi_id)
7859 return ICE_ERR_ALREADY_EXISTS;
7861 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
7862 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
7863 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
7869 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
7870 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
7871 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
7872 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
7873 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
7874 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
7876 /* Update the previous switch rule of "forward to VSI" to
7879 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Book-keep: the rule now forwards to the new VSI list. */
7883 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
7884 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
7885 m_entry->vsi_list_info =
7886 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
7889 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
/* A VSI count >= 2 without list info is an inconsistent state. */
7891 if (!m_entry->vsi_list_info)
7894 /* A rule already exists with the new VSI being added */
7895 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
7898 /* Update the previously created VSI list set with
7899 * the new VSI ID passed in
7901 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
7903 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
7905 ice_aqc_opc_update_sw_rules,
7907 /* update VSI list mapping info with new VSI ID */
7909 ice_set_bit(vsi_handle,
7910 m_entry->vsi_list_info->vsi_map);
/* Track the subscription regardless of which path was taken. */
7913 m_entry->vsi_count++;
7918 * ice_add_adv_rule - helper function to create an advanced switch rule
7919 * @hw: pointer to the hardware structure
7920 * @lkups: information on the words that needs to be looked up. All words
7921 * together makes one recipe
7922 * @lkups_cnt: num of entries in the lkups array
7923 * @rinfo: other information related to the rule that needs to be programmed
7924 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
7925 * ignored in case of error.
7927 * This function can program only 1 rule at a time. The lkups is used to
7928 * describe the all the words that forms the "lookup" portion of the recipe.
7929 * These words can span multiple protocols. Callers to this function need to
7930 * pass in a list of protocol headers with lookup information along and mask
7931 * that determines which words are valid from the given protocol header.
7932 * rinfo describes other information related to this rule such as forwarding
7933 * IDs, priority of this rule, etc.
7936 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7937 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
7938 struct ice_rule_query_data *added_entry)
7940 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
7941 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
7942 const struct ice_dummy_pkt_offsets *pkt_offsets;
7943 struct ice_aqc_sw_rules_elem *s_rule = NULL;
7944 struct LIST_HEAD_TYPE *rule_head;
7945 struct ice_switch_info *sw;
7946 enum ice_status status;
7947 const u8 *pkt = NULL;
7953 /* Initialize profile to result index bitmap */
7954 if (!hw->switch_info->prof_res_bm_init) {
7955 hw->switch_info->prof_res_bm_init = 1;
7956 ice_init_prof_result_bm(hw);
	/* only profile-based (tunnel-type) rules may omit lookup words */
7959 prof_rule = ice_is_prof_rule(rinfo->tun_type);
7960 if (!prof_rule && !lkups_cnt)
7961 return ICE_ERR_PARAM;
7963 /* get # of words we need to match */
7965 for (i = 0; i < lkups_cnt; i++) {
	/* scan each lookup mask as an array of 16-bit words; non-zero
	 * words count toward the total the recipe must match
	 */
7968 ptr = (u16 *)&lkups[i].m_u;
7969 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
7975 if (word_cnt > ICE_MAX_CHAIN_WORDS)
7976 return ICE_ERR_PARAM;
	/* a non-profile rule must match at least one word and no more
	 * than the HW chaining limit
	 */
7978 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
7979 return ICE_ERR_PARAM;
7982 /* make sure that we can locate a dummy packet */
7983 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
7986 status = ICE_ERR_PARAM;
7987 goto err_ice_add_adv_rule;
	/* advanced rules support only these four forwarding actions */
7990 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
7991 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
7992 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
7993 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
7996 vsi_handle = rinfo->sw_act.vsi_handle;
7997 if (!ice_is_vsi_valid(hw, vsi_handle))
7998 return ICE_ERR_PARAM;
	/* translate the driver VSI handle to the HW VSI number that the
	 * switch rule actually programs
	 */
8000 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
8001 rinfo->sw_act.fwd_id.hw_vsi_id =
8002 ice_get_hw_vsi_num(hw, vsi_handle);
8003 if (rinfo->sw_act.flag & ICE_FLTR_TX)
8004 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
8006 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
8009 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
8011 /* we have to add VSI to VSI_LIST and increment vsi_count.
8012 * Also Update VSI list so that we can change forwarding rule
8013 * if the rule already exists, we will check if it exists with
8014 * same vsi_id, if not then add it to the VSI list if it already
8015 * exists if not then create a VSI list and add the existing VSI
8016 * ID and the new VSI ID to the list
8017 * We will add that VSI to the list
8019 status = ice_adv_add_update_vsi_list(hw, m_entry,
8020 &m_entry->rule_info,
8023 added_entry->rid = rid;
8024 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
8025 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
	/* rule buffer carries the dummy packet header after the fixed part */
8029 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
8030 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
8032 return ICE_ERR_NO_MEMORY;
8033 act |= ICE_SINGLE_ACT_LAN_ENABLE;
	/* encode the forwarding action into the rule's 32-bit act word */
8034 switch (rinfo->sw_act.fltr_act) {
8035 case ICE_FWD_TO_VSI:
8036 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
8037 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
8038 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
8041 act |= ICE_SINGLE_ACT_TO_Q;
8042 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
8043 ICE_SINGLE_ACT_Q_INDEX_M;
8045 case ICE_FWD_TO_QGRP:
	/* queue group size is programmed as log2; 0 when unspecified */
8046 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
8047 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
8048 act |= ICE_SINGLE_ACT_TO_Q;
8049 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
8050 ICE_SINGLE_ACT_Q_INDEX_M;
8051 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
8052 ICE_SINGLE_ACT_Q_REGION_M;
8054 case ICE_DROP_PACKET:
8055 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
8056 ICE_SINGLE_ACT_VALID_BIT;
8059 status = ICE_ERR_CFG;
8060 goto err_ice_add_adv_rule;
8063 /* set the rule LOOKUP type based on caller specified 'RX'
8064 * instead of hardcoding it to be either LOOKUP_TX/RX
8066 * for 'RX' set the source to be the port number
8067 * for 'TX' set the source to be the source HW VSI number (determined
8071 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
8072 s_rule->pdata.lkup_tx_rx.src =
8073 CPU_TO_LE16(hw->port_info->lport);
8075 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
8076 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
8079 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
8080 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
	/* overlay the caller's lookup values/masks onto the dummy packet
	 * at the per-protocol offsets (see ice_fill_adv_dummy_packet)
	 */
8082 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
8083 pkt_len, pkt_offsets);
8085 goto err_ice_add_adv_rule;
	/* for tunneled rule types, additionally patch the tunnel header
	 * fields in the dummy packet
	 */
8087 if (rinfo->tun_type != ICE_NON_TUN &&
8088 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
8089 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
8090 s_rule->pdata.lkup_tx_rx.hdr,
8093 goto err_ice_add_adv_rule;
8096 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
8097 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
8100 goto err_ice_add_adv_rule;
8101 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
8102 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
8104 status = ICE_ERR_NO_MEMORY;
8105 goto err_ice_add_adv_rule;
8108 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
8109 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
8110 ICE_NONDMA_TO_NONDMA);
	/* a profile rule legitimately has zero lookups, so a NULL copy
	 * is only fatal for non-profile rules
	 */
8111 if (!adv_fltr->lkups && !prof_rule) {
8112 status = ICE_ERR_NO_MEMORY;
8113 goto err_ice_add_adv_rule;
8116 adv_fltr->lkups_cnt = lkups_cnt;
8117 adv_fltr->rule_info = *rinfo;
	/* FW returned the rule index it assigned; keep it for removal */
8118 adv_fltr->rule_info.fltr_rule_id =
8119 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
8120 sw = hw->switch_info;
8121 sw->recp_list[rid].adv_rule = true;
8122 rule_head = &sw->recp_list[rid].filt_rules;
8124 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
8125 adv_fltr->vsi_count = 1;
8127 /* Add rule entry to book keeping list */
8128 LIST_ADD(&adv_fltr->list_entry, rule_head);
8130 added_entry->rid = rid;
8131 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
8132 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
8134 err_ice_add_adv_rule:
	/* common unwind: the bookkeeping entry is freed only on failure,
	 * the transient rule buffer unconditionally
	 */
8135 if (status && adv_fltr) {
8136 ice_free(hw, adv_fltr->lkups);
8137 ice_free(hw, adv_fltr);
8140 ice_free(hw, s_rule);
8146 * ice_adv_rem_update_vsi_list
8147 * @hw: pointer to the hardware structure
8148 * @vsi_handle: VSI handle of the VSI to remove
8149 * @fm_list: filter management entry for which the VSI list management needs to
 * be done (must currently forward to a VSI list)
8152 static enum ice_status
8153 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
8154 struct ice_adv_fltr_mgmt_list_entry *fm_list)
8156 struct ice_vsi_list_map_info *vsi_list_info;
8157 enum ice_sw_lkup_type lkup_type;
8158 enum ice_status status;
8161 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
8162 fm_list->vsi_count == 0)
8163 return ICE_ERR_PARAM;
8165 /* A rule with the VSI being removed does not exist */
8166 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
8167 return ICE_ERR_DOES_NOT_EXIST;
8169 lkup_type = ICE_SW_LKUP_LAST;
8170 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
	/* remove=true drops this VSI from the shared HW VSI list */
8171 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
8172 ice_aqc_opc_update_sw_rules,
8177 fm_list->vsi_count--;
8178 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
8179 vsi_list_info = fm_list->vsi_list_info;
	/* only one VSI left after removal: collapse the VSI-list rule back
	 * into a direct forward-to-VSI rule and retire the list
	 */
8180 if (fm_list->vsi_count == 1) {
8181 struct ice_fltr_info tmp_fltr;
8184 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
8186 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
8187 return ICE_ERR_OUT_OF_RANGE;
8189 /* Make sure VSI list is empty before removing it below */
8190 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
8192 ice_aqc_opc_update_sw_rules,
8197 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
8198 tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
8199 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
8200 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
8201 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
8202 tmp_fltr.fwd_id.hw_vsi_id =
8203 ice_get_hw_vsi_num(hw, rem_vsi_handle);
8204 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
8205 ice_get_hw_vsi_num(hw, rem_vsi_handle);
8206 fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
8208 /* Update the existing switch rule from "fwd to VSI list" to
8209 * "fwd to the last remaining VSI"
8211 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
8213 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
8214 tmp_fltr.fwd_id.hw_vsi_id, status);
8217 fm_list->vsi_list_info->ref_cnt--;
8219 /* Remove the VSI list since it is no longer used */
8220 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
8222 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
8223 vsi_list_id, status);
	/* drop the list's bookkeeping entry and clear the dangling ref */
8227 LIST_DEL(&vsi_list_info->list_entry);
8228 ice_free(hw, vsi_list_info);
8229 fm_list->vsi_list_info = NULL;
8236 * ice_rem_adv_rule - removes existing advanced switch rule
8237 * @hw: pointer to the hardware structure
8238 * @lkups: information on the words that needs to be looked up. All words
8239 * together makes one recipe
8240 * @lkups_cnt: num of entries in the lkups array
8241 * @rinfo: Its the pointer to the rule information for the rule
8243 * This function can be used to remove 1 rule at a time. The lkups is
8244 * used to describe all the words that forms the "lookup" portion of the
8245 * rule. These words can span multiple protocols. Callers to this function
8246 * need to pass in a list of protocol headers with lookup information along
8247 * and mask that determines which words are valid from the given protocol
8248 * header. rinfo describes other information related to this rule such as
8249 * forwarding IDs, priority of this rule, etc.
8252 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8253 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
8255 struct ice_adv_fltr_mgmt_list_entry *list_elem;
8256 struct ice_prot_lkup_ext lkup_exts;
8257 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
8258 enum ice_status status = ICE_SUCCESS;
8259 bool remove_rule = false;
8260 u16 i, rid, vsi_handle;
	/* rebuild the protocol/offset word list from the caller's lookups
	 * so the matching recipe can be located
	 */
8262 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
8263 for (i = 0; i < lkups_cnt; i++) {
8266 if (lkups[i].type >= ICE_PROTOCOL_LAST)
8269 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
8274 /* Create any special protocol/offset pairs, such as looking at tunnel
8275 * bits by extracting metadata
8277 status = ice_add_special_words(rinfo, &lkup_exts);
8281 rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
8282 /* If did not find a recipe that match the existing criteria */
8283 if (rid == ICE_MAX_NUM_RECIPES)
8284 return ICE_ERR_PARAM;
8286 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
8287 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
8288 /* the rule is already removed */
8291 ice_acquire_lock(rule_lock);
	/* a rule forwarding to a VSI list is only torn down when its last
	 * VSI drops off; otherwise just update the list membership
	 */
8292 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
8294 } else if (list_elem->vsi_count > 1) {
8295 remove_rule = false;
8296 vsi_handle = rinfo->sw_act.vsi_handle;
8297 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
8299 vsi_handle = rinfo->sw_act.vsi_handle;
8300 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
8302 ice_release_lock(rule_lock);
8305 if (list_elem->vsi_count == 0)
8308 ice_release_lock(rule_lock);
8310 struct ice_aqc_sw_rules_elem *s_rule;
8313 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
8314 s_rule = (struct ice_aqc_sw_rules_elem *)
8315 ice_malloc(hw, rule_buf_sz);
8317 return ICE_ERR_NO_MEMORY;
	/* removal is keyed by the FW rule index; no packet header needed */
8318 s_rule->pdata.lkup_tx_rx.act = 0;
8319 s_rule->pdata.lkup_tx_rx.index =
8320 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
8321 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
8322 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
8324 ice_aqc_opc_remove_sw_rules, NULL);
	/* treat DOES_NOT_EXIST as success so SW bookkeeping stays in sync
	 * with HW even if the rule was already gone
	 */
8325 if (status == ICE_SUCCESS || status == ICE_ERR_DOES_NOT_EXIST) {
8326 struct ice_switch_info *sw = hw->switch_info;
8328 ice_acquire_lock(rule_lock);
8329 LIST_DEL(&list_elem->list_entry);
8330 ice_free(hw, list_elem->lkups);
8331 ice_free(hw, list_elem);
8332 ice_release_lock(rule_lock);
	/* last rule gone: the recipe no longer hosts advanced rules */
8333 if (LIST_EMPTY(&sw->recp_list[rid].filt_rules))
8334 sw->recp_list[rid].adv_rule = false;
8336 ice_free(hw, s_rule);
8342 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
8343 * @hw: pointer to the hardware structure
8344 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
8346 * This function is used to remove 1 rule at a time. The removal is based on
8347 * the remove_entry parameter. This function will remove rule for a given
8348 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
8351 ice_rem_adv_rule_by_id(struct ice_hw *hw,
8352 struct ice_rule_query_data *remove_entry)
8354 struct ice_adv_fltr_mgmt_list_entry *list_itr;
8355 struct LIST_HEAD_TYPE *list_head;
8356 struct ice_adv_rule_info rinfo;
8357 struct ice_switch_info *sw;
8359 sw = hw->switch_info;
	/* the recipe must have been created before any of its rules */
8360 if (!sw->recp_list[remove_entry->rid].recp_created)
8361 return ICE_ERR_PARAM;
8362 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
8363 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
	/* match on FW rule ID, then delegate to ice_rem_adv_rule with
	 * the stored lookups and the caller's VSI handle
	 */
8365 if (list_itr->rule_info.fltr_rule_id ==
8366 remove_entry->rule_id) {
8367 rinfo = list_itr->rule_info;
8368 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
8369 return ice_rem_adv_rule(hw, list_itr->lkups,
8370 list_itr->lkups_cnt, &rinfo);
8373 /* either list is empty or unable to find rule */
8374 return ICE_ERR_DOES_NOT_EXIST;
8378 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
8380 * @hw: pointer to the hardware structure
8381 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
8383 * This function is used to remove all the rules for a given VSI and as soon
8384 * as removing a rule fails, it will return immediately with the error code,
8385 * else it will return ICE_SUCCESS
8387 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
8389 struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
8390 struct ice_vsi_list_map_info *map_info;
8391 struct LIST_HEAD_TYPE *list_head;
8392 struct ice_adv_rule_info rinfo;
8393 struct ice_switch_info *sw;
8394 enum ice_status status;
8397 sw = hw->switch_info;
	/* walk every created recipe that hosts advanced rules */
8398 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
8399 if (!sw->recp_list[rid].recp_created)
8401 if (!sw->recp_list[rid].adv_rule)
8404 list_head = &sw->recp_list[rid].filt_rules;
	/* safe iteration: ice_rem_adv_rule unlinks entries as it goes */
8405 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp_entry, list_head,
8406 ice_adv_fltr_mgmt_list_entry,
8408 rinfo = list_itr->rule_info;
	/* a VSI-list rule applies to this VSI only if its bit is set
	 * in the list's VSI map
	 */
8410 if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
8411 map_info = list_itr->vsi_list_info;
8415 if (!ice_is_bit_set(map_info->vsi_map,
8418 } else if (rinfo.sw_act.vsi_handle != vsi_handle) {
8422 rinfo.sw_act.vsi_handle = vsi_handle;
8423 status = ice_rem_adv_rule(hw, list_itr->lkups,
8424 list_itr->lkups_cnt, &rinfo);
8434 * ice_replay_fltr - Replay all the filters stored by a specific list head
8435 * @hw: pointer to the hardware structure
8436 * @list_head: list for which filters needs to be replayed
8437 * @recp_id: Recipe ID for which rules need to be replayed
8439 static enum ice_status
8440 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
8442 struct ice_fltr_mgmt_list_entry *itr;
8443 enum ice_status status = ICE_SUCCESS;
8444 struct ice_sw_recipe *recp_list;
8445 u8 lport = hw->port_info->lport;
8446 struct LIST_HEAD_TYPE l_head;
8448 if (LIST_EMPTY(list_head))
8451 recp_list = &hw->switch_info->recp_list[recp_id];
8452 /* Move entries from the given list_head to a temporary l_head so that
8453 * they can be replayed. Otherwise when trying to re-add the same
8454 * filter, the function will return already exists
8456 LIST_REPLACE_INIT(list_head, &l_head);
8458 /* Mark the given list_head empty by reinitializing it so filters
8459 * could be added again by *handler
8461 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
8463 struct ice_fltr_list_entry f_entry;
8466 f_entry.fltr_info = itr->fltr_info;
	/* single-VSI non-VLAN filters are re-added directly */
8467 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
8468 status = ice_add_rule_internal(hw, recp_list, lport,
8470 if (status != ICE_SUCCESS)
8475 /* Add a filter per VSI separately */
8476 ice_for_each_set_bit(vsi_handle, itr->vsi_list_info->vsi_map,
8478 if (!ice_is_vsi_valid(hw, vsi_handle))
	/* clear the bit so the re-add logic rebuilds the VSI list */
8481 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
8482 f_entry.fltr_info.vsi_handle = vsi_handle;
8483 f_entry.fltr_info.fwd_id.hw_vsi_id =
8484 ice_get_hw_vsi_num(hw, vsi_handle);
8485 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
	/* VLAN filters use a dedicated add path */
8486 if (recp_id == ICE_SW_LKUP_VLAN)
8487 status = ice_add_vlan_internal(hw, recp_list,
8490 status = ice_add_rule_internal(hw, recp_list,
8493 if (status != ICE_SUCCESS)
8498 /* Clear the filter management list */
8499 ice_rem_sw_rule_info(hw, &l_head);
8504 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
8505 * @hw: pointer to the hardware structure
8507 * NOTE: This function does not clean up partially added filters on error.
8508 * It is up to caller of the function to issue a reset or fail early.
8510 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
8512 struct ice_switch_info *sw = hw->switch_info;
8513 enum ice_status status = ICE_SUCCESS;
8516 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8517 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
	/* replay each recipe's rule list; stop at the first failure */
8519 status = ice_replay_fltr(hw, i, head);
8520 if (status != ICE_SUCCESS)
8527 * ice_replay_vsi_fltr - Replay filters for requested VSI
8528 * @hw: pointer to the hardware structure
8529 * @pi: pointer to port information structure
8530 * @sw: pointer to switch info struct for which function replays filters
8531 * @vsi_handle: driver VSI handle
8532 * @recp_id: Recipe ID for which rules need to be replayed
8533 * @list_head: list for which filters need to be replayed
8535 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
8536 * It is required to pass valid VSI handle.
8538 static enum ice_status
8539 ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
8540 struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
8541 struct LIST_HEAD_TYPE *list_head)
8543 struct ice_fltr_mgmt_list_entry *itr;
8544 enum ice_status status = ICE_SUCCESS;
8545 struct ice_sw_recipe *recp_list;
8548 if (LIST_EMPTY(list_head))
8550 recp_list = &sw->recp_list[recp_id];
8551 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
8553 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
8555 struct ice_fltr_list_entry f_entry;
8557 f_entry.fltr_info = itr->fltr_info;
	/* single-VSI non-VLAN filters that belong to this VSI are
	 * re-added directly
	 */
8558 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
8559 itr->fltr_info.vsi_handle == vsi_handle) {
8560 /* update the src in case it is VSI num */
8561 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
8562 f_entry.fltr_info.src = hw_vsi_id;
8563 status = ice_add_rule_internal(hw, recp_list,
8566 if (status != ICE_SUCCESS)
	/* VSI-list filters: only replay if this VSI is on the list */
8570 if (!itr->vsi_list_info ||
8571 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
8573 /* Clearing it so that the logic can add it back */
8574 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
8575 f_entry.fltr_info.vsi_handle = vsi_handle;
8576 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
8577 /* update the src in case it is VSI num */
8578 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
8579 f_entry.fltr_info.src = hw_vsi_id;
	/* VLAN filters use a dedicated add path */
8580 if (recp_id == ICE_SW_LKUP_VLAN)
8581 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
8583 status = ice_add_rule_internal(hw, recp_list,
8586 if (status != ICE_SUCCESS)
8594 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
8595 * @hw: pointer to the hardware structure
8596 * @vsi_handle: driver VSI handle
8597 * @list_head: list for which filters need to be replayed
8599 * Replay the advanced rule for the given VSI.
8601 static enum ice_status
8602 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
8603 struct LIST_HEAD_TYPE *list_head)
8605 struct ice_rule_query_data added_entry = { 0 };
8606 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
8607 enum ice_status status = ICE_SUCCESS;
8609 if (LIST_EMPTY(list_head))
8611 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
8613 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
8614 u16 lk_cnt = adv_fltr->lkups_cnt;
	/* replay only the rules that were owned by this VSI */
8616 if (vsi_handle != rinfo->sw_act.vsi_handle)
	/* re-program the rule from its stored lookups; the resulting
	 * IDs land in added_entry and are not used here
	 */
8618 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
8627 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
8628 * @hw: pointer to the hardware structure
8629 * @pi: pointer to port information structure
8630 * @vsi_handle: driver VSI handle
8632 * Replays filters for requested VSI via vsi_handle.
8635 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
8638 struct ice_switch_info *sw = hw->switch_info;
8639 enum ice_status status;
8642 /* Update the recipes that were created */
8643 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8644 struct LIST_HEAD_TYPE *head;
8646 head = &sw->recp_list[i].filt_replay_rules;
	/* advanced-rule recipes replay via the adv path; all others via
	 * the legacy per-recipe replay
	 */
8647 if (!sw->recp_list[i].adv_rule)
8648 status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
8651 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
8652 if (status != ICE_SUCCESS)
8660 * ice_rm_sw_replay_rule_info - helper function to delete filter replay rules
8661 * @hw: pointer to the HW struct
8662 * @sw: pointer to switch info struct for which function removes filters
8664 * Deletes the filter replay rules for given switch
8666 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
8673 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8674 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
8675 struct LIST_HEAD_TYPE *l_head;
8677 l_head = &sw->recp_list[i].filt_replay_rules;
	/* legacy and advanced rule entries have different layouts,
	 * so each list type has its own teardown helper
	 */
8678 if (!sw->recp_list[i].adv_rule)
8679 ice_rem_sw_rule_info(hw, l_head);
8681 ice_rem_adv_rule_info(hw, l_head);
8687 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
8688 * @hw: pointer to the HW struct
8690 * Deletes the filter replay rules.
8692 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
	/* thin wrapper: remove replay rules for the HW's own switch info */
8694 ice_rm_sw_replay_rule_info(hw, hw->switch_info);