1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020 Intel Corporation
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
/* Byte offsets and protocol IDs used when building switch filter rules.
 * Offsets index into the dummy Ethernet header; IDs are wire-format values.
 */
#define ICE_ETH_DA_OFFSET		0	/* destination MAC within Ethernet header */
#define ICE_ETH_ETHTYPE_OFFSET		12	/* EtherType field (no VLAN tag present) */
#define ICE_ETH_VLAN_TCI_OFFSET	14	/* VLAN TCI when an 802.1Q tag is present */
#define ICE_MAX_VLAN_ID		0xFFF	/* VLAN ID is a 12-bit field */
#define ICE_IPV4_NVGRE_PROTO_ID	0x002F	/* IP protocol 47 = GRE (NVGRE) */
#define ICE_PPP_IPV6_PROTO_ID		0x0057	/* PPP protocol number for IPv6 */
#define ICE_IPV6_ETHER_ID		0x86DD	/* EtherType for IPv6 */
#define ICE_TCP_PROTO_ID		0x06	/* IP protocol 6 = TCP */
18 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
19 * struct to configure any switch filter rules.
20 * {DA (6 bytes), SA(6 bytes),
21 * Ether type (2 bytes for header without VLAN tag) OR
22 * VLAN tag (4 bytes for header with VLAN tag) }
24 * Word on Hardcoded values
25 * byte 0 = 0x2: to identify it as locally administered DA MAC
26 * byte 6 = 0x2: to identify it as locally administered SA MAC
27 * byte 12 = 0x81 & byte 13 = 0x00:
28 * In case of VLAN filter first two bytes defines ether type (0x8100)
29 * and remaining two bytes are placeholder for programming a given VLAN ID
30 * In case of Ether type filter it is treated as header without VLAN tag
31 * and byte 12 and 13 is used to program a given Ether type instead
33 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
37 struct ice_dummy_pkt_offsets {
38 enum ice_protocol_type type;
39 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
42 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
45 { ICE_IPV4_OFOS, 14 },
50 { ICE_PROTOCOL_LAST, 0 },
53 static const u8 dummy_gre_tcp_packet[] = {
54 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
55 0x00, 0x00, 0x00, 0x00,
56 0x00, 0x00, 0x00, 0x00,
58 0x08, 0x00, /* ICE_ETYPE_OL 12 */
60 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
61 0x00, 0x00, 0x00, 0x00,
62 0x00, 0x2F, 0x00, 0x00,
63 0x00, 0x00, 0x00, 0x00,
64 0x00, 0x00, 0x00, 0x00,
66 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
67 0x00, 0x00, 0x00, 0x00,
69 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
70 0x00, 0x00, 0x00, 0x00,
71 0x00, 0x00, 0x00, 0x00,
74 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
75 0x00, 0x00, 0x00, 0x00,
76 0x00, 0x06, 0x00, 0x00,
77 0x00, 0x00, 0x00, 0x00,
78 0x00, 0x00, 0x00, 0x00,
80 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
81 0x00, 0x00, 0x00, 0x00,
82 0x00, 0x00, 0x00, 0x00,
83 0x50, 0x02, 0x20, 0x00,
84 0x00, 0x00, 0x00, 0x00
87 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
90 { ICE_IPV4_OFOS, 14 },
95 { ICE_PROTOCOL_LAST, 0 },
98 static const u8 dummy_gre_udp_packet[] = {
99 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
100 0x00, 0x00, 0x00, 0x00,
101 0x00, 0x00, 0x00, 0x00,
103 0x08, 0x00, /* ICE_ETYPE_OL 12 */
105 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
106 0x00, 0x00, 0x00, 0x00,
107 0x00, 0x2F, 0x00, 0x00,
108 0x00, 0x00, 0x00, 0x00,
109 0x00, 0x00, 0x00, 0x00,
111 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
112 0x00, 0x00, 0x00, 0x00,
114 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
115 0x00, 0x00, 0x00, 0x00,
116 0x00, 0x00, 0x00, 0x00,
119 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
120 0x00, 0x00, 0x00, 0x00,
121 0x00, 0x11, 0x00, 0x00,
122 0x00, 0x00, 0x00, 0x00,
123 0x00, 0x00, 0x00, 0x00,
125 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
126 0x00, 0x08, 0x00, 0x00,
129 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
131 { ICE_ETYPE_OL, 12 },
132 { ICE_IPV4_OFOS, 14 },
136 { ICE_VXLAN_GPE, 42 },
140 { ICE_PROTOCOL_LAST, 0 },
143 static const u8 dummy_udp_tun_tcp_packet[] = {
144 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
145 0x00, 0x00, 0x00, 0x00,
146 0x00, 0x00, 0x00, 0x00,
148 0x08, 0x00, /* ICE_ETYPE_OL 12 */
150 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
151 0x00, 0x01, 0x00, 0x00,
152 0x40, 0x11, 0x00, 0x00,
153 0x00, 0x00, 0x00, 0x00,
154 0x00, 0x00, 0x00, 0x00,
156 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
157 0x00, 0x46, 0x00, 0x00,
159 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
160 0x00, 0x00, 0x00, 0x00,
162 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
163 0x00, 0x00, 0x00, 0x00,
164 0x00, 0x00, 0x00, 0x00,
167 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
168 0x00, 0x01, 0x00, 0x00,
169 0x40, 0x06, 0x00, 0x00,
170 0x00, 0x00, 0x00, 0x00,
171 0x00, 0x00, 0x00, 0x00,
173 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
174 0x00, 0x00, 0x00, 0x00,
175 0x00, 0x00, 0x00, 0x00,
176 0x50, 0x02, 0x20, 0x00,
177 0x00, 0x00, 0x00, 0x00
180 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
182 { ICE_ETYPE_OL, 12 },
183 { ICE_IPV4_OFOS, 14 },
187 { ICE_VXLAN_GPE, 42 },
190 { ICE_UDP_ILOS, 84 },
191 { ICE_PROTOCOL_LAST, 0 },
194 static const u8 dummy_udp_tun_udp_packet[] = {
195 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
196 0x00, 0x00, 0x00, 0x00,
197 0x00, 0x00, 0x00, 0x00,
199 0x08, 0x00, /* ICE_ETYPE_OL 12 */
201 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
202 0x00, 0x01, 0x00, 0x00,
203 0x00, 0x11, 0x00, 0x00,
204 0x00, 0x00, 0x00, 0x00,
205 0x00, 0x00, 0x00, 0x00,
207 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
208 0x00, 0x3a, 0x00, 0x00,
210 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
211 0x00, 0x00, 0x00, 0x00,
213 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
214 0x00, 0x00, 0x00, 0x00,
215 0x00, 0x00, 0x00, 0x00,
218 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
219 0x00, 0x01, 0x00, 0x00,
220 0x00, 0x11, 0x00, 0x00,
221 0x00, 0x00, 0x00, 0x00,
222 0x00, 0x00, 0x00, 0x00,
224 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
225 0x00, 0x08, 0x00, 0x00,
228 /* offset info for MAC + IPv4 + UDP dummy packet */
229 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
231 { ICE_ETYPE_OL, 12 },
232 { ICE_IPV4_OFOS, 14 },
233 { ICE_UDP_ILOS, 34 },
234 { ICE_PROTOCOL_LAST, 0 },
237 /* Dummy packet for MAC + IPv4 + UDP */
238 static const u8 dummy_udp_packet[] = {
239 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
240 0x00, 0x00, 0x00, 0x00,
241 0x00, 0x00, 0x00, 0x00,
243 0x08, 0x00, /* ICE_ETYPE_OL 12 */
245 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
246 0x00, 0x01, 0x00, 0x00,
247 0x00, 0x11, 0x00, 0x00,
248 0x00, 0x00, 0x00, 0x00,
249 0x00, 0x00, 0x00, 0x00,
251 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
252 0x00, 0x08, 0x00, 0x00,
254 0x00, 0x00, /* 2 bytes for 4 byte alignment */
257 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
258 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
260 { ICE_ETYPE_OL, 12 },
261 { ICE_VLAN_OFOS, 14 },
262 { ICE_IPV4_OFOS, 18 },
263 { ICE_UDP_ILOS, 38 },
264 { ICE_PROTOCOL_LAST, 0 },
/* C-tag (802.1Q), IPv4:UDP dummy packet */
268 static const u8 dummy_vlan_udp_packet[] = {
269 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
270 0x00, 0x00, 0x00, 0x00,
271 0x00, 0x00, 0x00, 0x00,
273 0x81, 0x00, /* ICE_ETYPE_OL 12 */
275 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
277 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
278 0x00, 0x01, 0x00, 0x00,
279 0x00, 0x11, 0x00, 0x00,
280 0x00, 0x00, 0x00, 0x00,
281 0x00, 0x00, 0x00, 0x00,
283 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
284 0x00, 0x08, 0x00, 0x00,
286 0x00, 0x00, /* 2 bytes for 4 byte alignment */
289 /* offset info for MAC + IPv4 + TCP dummy packet */
290 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
292 { ICE_ETYPE_OL, 12 },
293 { ICE_IPV4_OFOS, 14 },
295 { ICE_PROTOCOL_LAST, 0 },
298 /* Dummy packet for MAC + IPv4 + TCP */
299 static const u8 dummy_tcp_packet[] = {
300 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
301 0x00, 0x00, 0x00, 0x00,
302 0x00, 0x00, 0x00, 0x00,
304 0x08, 0x00, /* ICE_ETYPE_OL 12 */
306 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
307 0x00, 0x01, 0x00, 0x00,
308 0x00, 0x06, 0x00, 0x00,
309 0x00, 0x00, 0x00, 0x00,
310 0x00, 0x00, 0x00, 0x00,
312 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
313 0x00, 0x00, 0x00, 0x00,
314 0x00, 0x00, 0x00, 0x00,
315 0x50, 0x00, 0x00, 0x00,
316 0x00, 0x00, 0x00, 0x00,
318 0x00, 0x00, /* 2 bytes for 4 byte alignment */
321 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
322 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
324 { ICE_ETYPE_OL, 12 },
325 { ICE_VLAN_OFOS, 14 },
326 { ICE_IPV4_OFOS, 18 },
328 { ICE_PROTOCOL_LAST, 0 },
/* C-tag (802.1Q), IPv4:TCP dummy packet */
332 static const u8 dummy_vlan_tcp_packet[] = {
333 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
334 0x00, 0x00, 0x00, 0x00,
335 0x00, 0x00, 0x00, 0x00,
337 0x81, 0x00, /* ICE_ETYPE_OL 12 */
339 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
341 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
342 0x00, 0x01, 0x00, 0x00,
343 0x00, 0x06, 0x00, 0x00,
344 0x00, 0x00, 0x00, 0x00,
345 0x00, 0x00, 0x00, 0x00,
347 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
348 0x00, 0x00, 0x00, 0x00,
349 0x00, 0x00, 0x00, 0x00,
350 0x50, 0x00, 0x00, 0x00,
351 0x00, 0x00, 0x00, 0x00,
353 0x00, 0x00, /* 2 bytes for 4 byte alignment */
356 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
358 { ICE_ETYPE_OL, 12 },
359 { ICE_IPV6_OFOS, 14 },
361 { ICE_PROTOCOL_LAST, 0 },
364 static const u8 dummy_tcp_ipv6_packet[] = {
365 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
366 0x00, 0x00, 0x00, 0x00,
367 0x00, 0x00, 0x00, 0x00,
369 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
371 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
372 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
373 0x00, 0x00, 0x00, 0x00,
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00,
379 0x00, 0x00, 0x00, 0x00,
380 0x00, 0x00, 0x00, 0x00,
382 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
383 0x00, 0x00, 0x00, 0x00,
384 0x00, 0x00, 0x00, 0x00,
385 0x50, 0x00, 0x00, 0x00,
386 0x00, 0x00, 0x00, 0x00,
388 0x00, 0x00, /* 2 bytes for 4 byte alignment */
391 /* C-tag (802.1Q): IPv6 + TCP */
392 static const struct ice_dummy_pkt_offsets
393 dummy_vlan_tcp_ipv6_packet_offsets[] = {
395 { ICE_ETYPE_OL, 12 },
396 { ICE_VLAN_OFOS, 14 },
397 { ICE_IPV6_OFOS, 18 },
399 { ICE_PROTOCOL_LAST, 0 },
402 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
403 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
404 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
405 0x00, 0x00, 0x00, 0x00,
406 0x00, 0x00, 0x00, 0x00,
408 0x81, 0x00, /* ICE_ETYPE_OL 12 */
410 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
412 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
413 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
414 0x00, 0x00, 0x00, 0x00,
415 0x00, 0x00, 0x00, 0x00,
416 0x00, 0x00, 0x00, 0x00,
417 0x00, 0x00, 0x00, 0x00,
418 0x00, 0x00, 0x00, 0x00,
419 0x00, 0x00, 0x00, 0x00,
420 0x00, 0x00, 0x00, 0x00,
421 0x00, 0x00, 0x00, 0x00,
423 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
424 0x00, 0x00, 0x00, 0x00,
425 0x00, 0x00, 0x00, 0x00,
426 0x50, 0x00, 0x00, 0x00,
427 0x00, 0x00, 0x00, 0x00,
429 0x00, 0x00, /* 2 bytes for 4 byte alignment */
433 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
435 { ICE_ETYPE_OL, 12 },
436 { ICE_IPV6_OFOS, 14 },
437 { ICE_UDP_ILOS, 54 },
438 { ICE_PROTOCOL_LAST, 0 },
441 /* IPv6 + UDP dummy packet */
442 static const u8 dummy_udp_ipv6_packet[] = {
443 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
444 0x00, 0x00, 0x00, 0x00,
445 0x00, 0x00, 0x00, 0x00,
447 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
449 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
450 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
451 0x00, 0x00, 0x00, 0x00,
452 0x00, 0x00, 0x00, 0x00,
453 0x00, 0x00, 0x00, 0x00,
454 0x00, 0x00, 0x00, 0x00,
455 0x00, 0x00, 0x00, 0x00,
456 0x00, 0x00, 0x00, 0x00,
457 0x00, 0x00, 0x00, 0x00,
458 0x00, 0x00, 0x00, 0x00,
460 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
461 0x00, 0x10, 0x00, 0x00,
463 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
464 0x00, 0x00, 0x00, 0x00,
466 0x00, 0x00, /* 2 bytes for 4 byte alignment */
469 /* C-tag (802.1Q): IPv6 + UDP */
470 static const struct ice_dummy_pkt_offsets
471 dummy_vlan_udp_ipv6_packet_offsets[] = {
473 { ICE_ETYPE_OL, 12 },
474 { ICE_VLAN_OFOS, 14 },
475 { ICE_IPV6_OFOS, 18 },
476 { ICE_UDP_ILOS, 58 },
477 { ICE_PROTOCOL_LAST, 0 },
480 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
481 static const u8 dummy_vlan_udp_ipv6_packet[] = {
482 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
483 0x00, 0x00, 0x00, 0x00,
484 0x00, 0x00, 0x00, 0x00,
486 0x81, 0x00, /* ICE_ETYPE_OL 12 */
488 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
490 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
491 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
492 0x00, 0x00, 0x00, 0x00,
493 0x00, 0x00, 0x00, 0x00,
494 0x00, 0x00, 0x00, 0x00,
495 0x00, 0x00, 0x00, 0x00,
496 0x00, 0x00, 0x00, 0x00,
497 0x00, 0x00, 0x00, 0x00,
498 0x00, 0x00, 0x00, 0x00,
499 0x00, 0x00, 0x00, 0x00,
501 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
502 0x00, 0x08, 0x00, 0x00,
504 0x00, 0x00, /* 2 bytes for 4 byte alignment */
507 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
509 { ICE_IPV4_OFOS, 14 },
512 { ICE_PROTOCOL_LAST, 0 },
515 static const u8 dummy_udp_gtp_packet[] = {
516 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
517 0x00, 0x00, 0x00, 0x00,
518 0x00, 0x00, 0x00, 0x00,
521 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
522 0x00, 0x00, 0x00, 0x00,
523 0x00, 0x11, 0x00, 0x00,
524 0x00, 0x00, 0x00, 0x00,
525 0x00, 0x00, 0x00, 0x00,
527 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
528 0x00, 0x1c, 0x00, 0x00,
530 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
531 0x00, 0x00, 0x00, 0x00,
532 0x00, 0x00, 0x00, 0x85,
534 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
535 0x00, 0x00, 0x00, 0x00,
539 struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_packet_offsets[] = {
541 { ICE_IPV4_OFOS, 14 },
545 { ICE_PROTOCOL_LAST, 0 },
548 static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
549 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
550 0x00, 0x00, 0x00, 0x00,
551 0x00, 0x00, 0x00, 0x00,
554 0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
555 0x00, 0x00, 0x40, 0x00,
556 0x40, 0x11, 0x00, 0x00,
557 0x00, 0x00, 0x00, 0x00,
558 0x00, 0x00, 0x00, 0x00,
560 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
561 0x00, 0x00, 0x00, 0x00,
563 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
564 0x00, 0x00, 0x00, 0x00,
565 0x00, 0x00, 0x00, 0x85,
567 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
568 0x00, 0x00, 0x00, 0x00,
570 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
571 0x00, 0x00, 0x40, 0x00,
572 0x40, 0x00, 0x00, 0x00,
573 0x00, 0x00, 0x00, 0x00,
574 0x00, 0x00, 0x00, 0x00,
579 struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_packet_offsets[] = {
581 { ICE_IPV4_OFOS, 14 },
585 { ICE_PROTOCOL_LAST, 0 },
588 static const u8 dummy_ipv4_gtpu_ipv6_packet[] = {
589 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
590 0x00, 0x00, 0x00, 0x00,
591 0x00, 0x00, 0x00, 0x00,
594 0x45, 0x00, 0x00, 0x58, /* ICE_IPV4_OFOS 14 */
595 0x00, 0x00, 0x40, 0x00,
596 0x40, 0x11, 0x00, 0x00,
597 0x00, 0x00, 0x00, 0x00,
598 0x00, 0x00, 0x00, 0x00,
600 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
601 0x00, 0x00, 0x00, 0x00,
603 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
604 0x00, 0x00, 0x00, 0x00,
605 0x00, 0x00, 0x00, 0x85,
607 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
608 0x00, 0x00, 0x00, 0x00,
610 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 62 */
611 0x00, 0x00, 0x3b, 0x00,
612 0x00, 0x00, 0x00, 0x00,
613 0x00, 0x00, 0x00, 0x00,
614 0x00, 0x00, 0x00, 0x00,
615 0x00, 0x00, 0x00, 0x00,
616 0x00, 0x00, 0x00, 0x00,
617 0x00, 0x00, 0x00, 0x00,
618 0x00, 0x00, 0x00, 0x00,
619 0x00, 0x00, 0x00, 0x00,
625 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_packet_offsets[] = {
627 { ICE_IPV6_OFOS, 14 },
631 { ICE_PROTOCOL_LAST, 0 },
634 static const u8 dummy_ipv6_gtpu_ipv4_packet[] = {
635 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
636 0x00, 0x00, 0x00, 0x00,
637 0x00, 0x00, 0x00, 0x00,
640 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
641 0x00, 0x58, 0x11, 0x00, /* Next header UDP*/
642 0x00, 0x00, 0x00, 0x00,
643 0x00, 0x00, 0x00, 0x00,
644 0x00, 0x00, 0x00, 0x00,
645 0x00, 0x00, 0x00, 0x00,
646 0x00, 0x00, 0x00, 0x00,
647 0x00, 0x00, 0x00, 0x00,
648 0x00, 0x00, 0x00, 0x00,
649 0x00, 0x00, 0x00, 0x00,
651 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
652 0x00, 0x00, 0x00, 0x00,
654 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 62 */
655 0x00, 0x00, 0x00, 0x00,
656 0x00, 0x00, 0x00, 0x85,
658 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
659 0x00, 0x00, 0x00, 0x00,
661 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 82 */
662 0x00, 0x00, 0x40, 0x00,
663 0x40, 0x00, 0x00, 0x00,
664 0x00, 0x00, 0x00, 0x00,
665 0x00, 0x00, 0x00, 0x00,
671 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_packet_offsets[] = {
673 { ICE_IPV6_OFOS, 14 },
677 { ICE_PROTOCOL_LAST, 0 },
680 static const u8 dummy_ipv6_gtpu_ipv6_packet[] = {
681 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
682 0x00, 0x00, 0x00, 0x00,
683 0x00, 0x00, 0x00, 0x00,
686 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
687 0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
688 0x00, 0x00, 0x00, 0x00,
689 0x00, 0x00, 0x00, 0x00,
690 0x00, 0x00, 0x00, 0x00,
691 0x00, 0x00, 0x00, 0x00,
692 0x00, 0x00, 0x00, 0x00,
693 0x00, 0x00, 0x00, 0x00,
694 0x00, 0x00, 0x00, 0x00,
695 0x00, 0x00, 0x00, 0x00,
697 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
698 0x00, 0x00, 0x00, 0x00,
700 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 62 */
701 0x00, 0x00, 0x00, 0x00,
702 0x00, 0x00, 0x00, 0x85,
704 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
705 0x00, 0x00, 0x00, 0x00,
707 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFIL 82 */
708 0x00, 0x00, 0x3b, 0x00,
709 0x00, 0x00, 0x00, 0x00,
710 0x00, 0x00, 0x00, 0x00,
711 0x00, 0x00, 0x00, 0x00,
712 0x00, 0x00, 0x00, 0x00,
713 0x00, 0x00, 0x00, 0x00,
714 0x00, 0x00, 0x00, 0x00,
715 0x00, 0x00, 0x00, 0x00,
716 0x00, 0x00, 0x00, 0x00,
722 struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
724 { ICE_IPV4_OFOS, 14 },
726 { ICE_GTP_NO_PAY, 42 },
727 { ICE_PROTOCOL_LAST, 0 },
731 struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
733 { ICE_IPV6_OFOS, 14 },
735 { ICE_GTP_NO_PAY, 62 },
736 { ICE_PROTOCOL_LAST, 0 },
739 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
741 { ICE_ETYPE_OL, 12 },
742 { ICE_VLAN_OFOS, 14},
744 { ICE_PROTOCOL_LAST, 0 },
747 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv4_offsets[] = {
749 { ICE_ETYPE_OL, 12 },
750 { ICE_VLAN_OFOS, 14},
752 { ICE_IPV4_OFOS, 26 },
753 { ICE_PROTOCOL_LAST, 0 },
756 static const u8 dummy_pppoe_ipv4_packet[] = {
757 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
758 0x00, 0x00, 0x00, 0x00,
759 0x00, 0x00, 0x00, 0x00,
761 0x81, 0x00, /* ICE_ETYPE_OL 12 */
763 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
765 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
768 0x00, 0x21, /* PPP Link Layer 24 */
770 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
771 0x00, 0x00, 0x00, 0x00,
772 0x00, 0x00, 0x00, 0x00,
773 0x00, 0x00, 0x00, 0x00,
774 0x00, 0x00, 0x00, 0x00,
776 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
780 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_tcp_packet_offsets[] = {
782 { ICE_ETYPE_OL, 12 },
783 { ICE_VLAN_OFOS, 14},
785 { ICE_IPV4_OFOS, 26 },
787 { ICE_PROTOCOL_LAST, 0 },
790 static const u8 dummy_pppoe_ipv4_tcp_packet[] = {
791 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
792 0x00, 0x00, 0x00, 0x00,
793 0x00, 0x00, 0x00, 0x00,
795 0x81, 0x00, /* ICE_ETYPE_OL 12 */
797 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
799 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
802 0x00, 0x21, /* PPP Link Layer 24 */
804 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 26 */
805 0x00, 0x01, 0x00, 0x00,
806 0x00, 0x06, 0x00, 0x00,
807 0x00, 0x00, 0x00, 0x00,
808 0x00, 0x00, 0x00, 0x00,
810 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 46 */
811 0x00, 0x00, 0x00, 0x00,
812 0x00, 0x00, 0x00, 0x00,
813 0x50, 0x00, 0x00, 0x00,
814 0x00, 0x00, 0x00, 0x00,
816 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
820 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_udp_packet_offsets[] = {
822 { ICE_ETYPE_OL, 12 },
823 { ICE_VLAN_OFOS, 14},
825 { ICE_IPV4_OFOS, 26 },
826 { ICE_UDP_ILOS, 46 },
827 { ICE_PROTOCOL_LAST, 0 },
830 static const u8 dummy_pppoe_ipv4_udp_packet[] = {
831 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
832 0x00, 0x00, 0x00, 0x00,
833 0x00, 0x00, 0x00, 0x00,
835 0x81, 0x00, /* ICE_ETYPE_OL 12 */
837 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
839 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
842 0x00, 0x21, /* PPP Link Layer 24 */
844 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 26 */
845 0x00, 0x01, 0x00, 0x00,
846 0x00, 0x11, 0x00, 0x00,
847 0x00, 0x00, 0x00, 0x00,
848 0x00, 0x00, 0x00, 0x00,
850 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 46 */
851 0x00, 0x08, 0x00, 0x00,
853 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
856 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_offsets[] = {
858 { ICE_ETYPE_OL, 12 },
859 { ICE_VLAN_OFOS, 14},
861 { ICE_IPV6_OFOS, 26 },
862 { ICE_PROTOCOL_LAST, 0 },
865 static const u8 dummy_pppoe_ipv6_packet[] = {
866 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
867 0x00, 0x00, 0x00, 0x00,
868 0x00, 0x00, 0x00, 0x00,
870 0x81, 0x00, /* ICE_ETYPE_OL 12 */
872 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
874 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
877 0x00, 0x57, /* PPP Link Layer 24 */
879 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
880 0x00, 0x00, 0x3b, 0x00,
881 0x00, 0x00, 0x00, 0x00,
882 0x00, 0x00, 0x00, 0x00,
883 0x00, 0x00, 0x00, 0x00,
884 0x00, 0x00, 0x00, 0x00,
885 0x00, 0x00, 0x00, 0x00,
886 0x00, 0x00, 0x00, 0x00,
887 0x00, 0x00, 0x00, 0x00,
888 0x00, 0x00, 0x00, 0x00,
890 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
894 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_tcp_offsets[] = {
896 { ICE_ETYPE_OL, 12 },
897 { ICE_VLAN_OFOS, 14},
899 { ICE_IPV6_OFOS, 26 },
901 { ICE_PROTOCOL_LAST, 0 },
904 static const u8 dummy_pppoe_ipv6_tcp_packet[] = {
905 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
906 0x00, 0x00, 0x00, 0x00,
907 0x00, 0x00, 0x00, 0x00,
909 0x81, 0x00, /* ICE_ETYPE_OL 12 */
911 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
913 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
916 0x00, 0x57, /* PPP Link Layer 24 */
918 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
919 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
920 0x00, 0x00, 0x00, 0x00,
921 0x00, 0x00, 0x00, 0x00,
922 0x00, 0x00, 0x00, 0x00,
923 0x00, 0x00, 0x00, 0x00,
924 0x00, 0x00, 0x00, 0x00,
925 0x00, 0x00, 0x00, 0x00,
926 0x00, 0x00, 0x00, 0x00,
927 0x00, 0x00, 0x00, 0x00,
929 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 66 */
930 0x00, 0x00, 0x00, 0x00,
931 0x00, 0x00, 0x00, 0x00,
932 0x50, 0x00, 0x00, 0x00,
933 0x00, 0x00, 0x00, 0x00,
935 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
939 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_udp_offsets[] = {
941 { ICE_ETYPE_OL, 12 },
942 { ICE_VLAN_OFOS, 14},
944 { ICE_IPV6_OFOS, 26 },
945 { ICE_UDP_ILOS, 66 },
946 { ICE_PROTOCOL_LAST, 0 },
949 static const u8 dummy_pppoe_ipv6_udp_packet[] = {
950 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
951 0x00, 0x00, 0x00, 0x00,
952 0x00, 0x00, 0x00, 0x00,
954 0x81, 0x00, /* ICE_ETYPE_OL 12 */
956 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
958 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
961 0x00, 0x57, /* PPP Link Layer 24 */
963 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
964 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
965 0x00, 0x00, 0x00, 0x00,
966 0x00, 0x00, 0x00, 0x00,
967 0x00, 0x00, 0x00, 0x00,
968 0x00, 0x00, 0x00, 0x00,
969 0x00, 0x00, 0x00, 0x00,
970 0x00, 0x00, 0x00, 0x00,
971 0x00, 0x00, 0x00, 0x00,
972 0x00, 0x00, 0x00, 0x00,
974 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 66 */
975 0x00, 0x08, 0x00, 0x00,
977 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
980 static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
982 { ICE_IPV4_OFOS, 14 },
984 { ICE_PROTOCOL_LAST, 0 },
987 static const u8 dummy_ipv4_esp_pkt[] = {
988 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
989 0x00, 0x00, 0x00, 0x00,
990 0x00, 0x00, 0x00, 0x00,
993 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 14 */
994 0x00, 0x00, 0x40, 0x00,
995 0x40, 0x32, 0x00, 0x00,
996 0x00, 0x00, 0x00, 0x00,
997 0x00, 0x00, 0x00, 0x00,
999 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
1000 0x00, 0x00, 0x00, 0x00,
1001 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1004 static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
1005 { ICE_MAC_OFOS, 0 },
1006 { ICE_IPV6_OFOS, 14 },
1008 { ICE_PROTOCOL_LAST, 0 },
1011 static const u8 dummy_ipv6_esp_pkt[] = {
1012 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1013 0x00, 0x00, 0x00, 0x00,
1014 0x00, 0x00, 0x00, 0x00,
1017 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1018 0x00, 0x08, 0x32, 0x00, /* Next header ESP */
1019 0x00, 0x00, 0x00, 0x00,
1020 0x00, 0x00, 0x00, 0x00,
1021 0x00, 0x00, 0x00, 0x00,
1022 0x00, 0x00, 0x00, 0x00,
1023 0x00, 0x00, 0x00, 0x00,
1024 0x00, 0x00, 0x00, 0x00,
1025 0x00, 0x00, 0x00, 0x00,
1026 0x00, 0x00, 0x00, 0x00,
1028 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
1029 0x00, 0x00, 0x00, 0x00,
1030 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1033 static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
1034 { ICE_MAC_OFOS, 0 },
1035 { ICE_IPV4_OFOS, 14 },
1037 { ICE_PROTOCOL_LAST, 0 },
1040 static const u8 dummy_ipv4_ah_pkt[] = {
1041 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1042 0x00, 0x00, 0x00, 0x00,
1043 0x00, 0x00, 0x00, 0x00,
1046 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1047 0x00, 0x00, 0x40, 0x00,
1048 0x40, 0x33, 0x00, 0x00,
1049 0x00, 0x00, 0x00, 0x00,
1050 0x00, 0x00, 0x00, 0x00,
1052 0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
1053 0x00, 0x00, 0x00, 0x00,
1054 0x00, 0x00, 0x00, 0x00,
1055 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1058 static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
1059 { ICE_MAC_OFOS, 0 },
1060 { ICE_IPV6_OFOS, 14 },
1062 { ICE_PROTOCOL_LAST, 0 },
1065 static const u8 dummy_ipv6_ah_pkt[] = {
1066 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1067 0x00, 0x00, 0x00, 0x00,
1068 0x00, 0x00, 0x00, 0x00,
1071 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1072 0x00, 0x0c, 0x33, 0x00, /* Next header AH */
1073 0x00, 0x00, 0x00, 0x00,
1074 0x00, 0x00, 0x00, 0x00,
1075 0x00, 0x00, 0x00, 0x00,
1076 0x00, 0x00, 0x00, 0x00,
1077 0x00, 0x00, 0x00, 0x00,
1078 0x00, 0x00, 0x00, 0x00,
1079 0x00, 0x00, 0x00, 0x00,
1080 0x00, 0x00, 0x00, 0x00,
1082 0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
1083 0x00, 0x00, 0x00, 0x00,
1084 0x00, 0x00, 0x00, 0x00,
1085 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1088 static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
1089 { ICE_MAC_OFOS, 0 },
1090 { ICE_IPV4_OFOS, 14 },
1091 { ICE_UDP_ILOS, 34 },
1093 { ICE_PROTOCOL_LAST, 0 },
1096 static const u8 dummy_ipv4_nat_pkt[] = {
1097 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1098 0x00, 0x00, 0x00, 0x00,
1099 0x00, 0x00, 0x00, 0x00,
1102 0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_IL 14 */
1103 0x00, 0x00, 0x40, 0x00,
1104 0x40, 0x11, 0x00, 0x00,
1105 0x00, 0x00, 0x00, 0x00,
1106 0x00, 0x00, 0x00, 0x00,
1108 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34 */
1109 0x00, 0x00, 0x00, 0x00,
1111 0x00, 0x00, 0x00, 0x00,
1112 0x00, 0x00, 0x00, 0x00,
1113 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1116 static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
1117 { ICE_MAC_OFOS, 0 },
1118 { ICE_IPV6_OFOS, 14 },
1119 { ICE_UDP_ILOS, 54 },
1121 { ICE_PROTOCOL_LAST, 0 },
1124 static const u8 dummy_ipv6_nat_pkt[] = {
1125 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1126 0x00, 0x00, 0x00, 0x00,
1127 0x00, 0x00, 0x00, 0x00,
1130 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1131 0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */
1132 0x00, 0x00, 0x00, 0x00,
1133 0x00, 0x00, 0x00, 0x00,
1134 0x00, 0x00, 0x00, 0x00,
1135 0x00, 0x00, 0x00, 0x00,
1136 0x00, 0x00, 0x00, 0x00,
1137 0x00, 0x00, 0x00, 0x00,
1138 0x00, 0x00, 0x00, 0x00,
1139 0x00, 0x00, 0x00, 0x00,
1141 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54 */
1142 0x00, 0x00, 0x00, 0x00,
1144 0x00, 0x00, 0x00, 0x00,
1145 0x00, 0x00, 0x00, 0x00,
1146 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1150 static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
1151 { ICE_MAC_OFOS, 0 },
1152 { ICE_IPV4_OFOS, 14 },
1154 { ICE_PROTOCOL_LAST, 0 },
1157 static const u8 dummy_ipv4_l2tpv3_pkt[] = {
1158 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1159 0x00, 0x00, 0x00, 0x00,
1160 0x00, 0x00, 0x00, 0x00,
1163 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1164 0x00, 0x00, 0x40, 0x00,
1165 0x40, 0x73, 0x00, 0x00,
1166 0x00, 0x00, 0x00, 0x00,
1167 0x00, 0x00, 0x00, 0x00,
1169 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
1170 0x00, 0x00, 0x00, 0x00,
1171 0x00, 0x00, 0x00, 0x00,
1172 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1175 static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
1176 { ICE_MAC_OFOS, 0 },
1177 { ICE_IPV6_OFOS, 14 },
1179 { ICE_PROTOCOL_LAST, 0 },
1182 static const u8 dummy_ipv6_l2tpv3_pkt[] = {
1183 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1184 0x00, 0x00, 0x00, 0x00,
1185 0x00, 0x00, 0x00, 0x00,
1188 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
1189 0x00, 0x0c, 0x73, 0x40,
1190 0x00, 0x00, 0x00, 0x00,
1191 0x00, 0x00, 0x00, 0x00,
1192 0x00, 0x00, 0x00, 0x00,
1193 0x00, 0x00, 0x00, 0x00,
1194 0x00, 0x00, 0x00, 0x00,
1195 0x00, 0x00, 0x00, 0x00,
1196 0x00, 0x00, 0x00, 0x00,
1197 0x00, 0x00, 0x00, 0x00,
1199 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
1200 0x00, 0x00, 0x00, 0x00,
1201 0x00, 0x00, 0x00, 0x00,
1202 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1205 static const struct ice_dummy_pkt_offsets dummy_qinq_ipv4_packet_offsets[] = {
1206 { ICE_MAC_OFOS, 0 },
1207 { ICE_VLAN_EX, 14 },
1208 { ICE_VLAN_OFOS, 18 },
1209 { ICE_IPV4_OFOS, 22 },
1210 { ICE_PROTOCOL_LAST, 0 },
1213 static const u8 dummy_qinq_ipv4_pkt[] = {
1214 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1215 0x00, 0x00, 0x00, 0x00,
1216 0x00, 0x00, 0x00, 0x00,
1219 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1220 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 18 */
1222 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
1223 0x00, 0x01, 0x00, 0x00,
1224 0x00, 0x11, 0x00, 0x00,
1225 0x00, 0x00, 0x00, 0x00,
1226 0x00, 0x00, 0x00, 0x00,
1228 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
1229 0x00, 0x08, 0x00, 0x00,
1231 0x00, 0x00, /* 2 bytes for 4 byte alignment */
1234 static const struct ice_dummy_pkt_offsets dummy_qinq_ipv6_packet_offsets[] = {
1235 { ICE_MAC_OFOS, 0 },
1236 { ICE_VLAN_EX, 14 },
1237 { ICE_VLAN_OFOS, 18 },
1238 { ICE_IPV6_OFOS, 22 },
1239 { ICE_PROTOCOL_LAST, 0 },
1242 static const u8 dummy_qinq_ipv6_pkt[] = {
1243 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1244 0x00, 0x00, 0x00, 0x00,
1245 0x00, 0x00, 0x00, 0x00,
1248 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1249 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 18 */
1251 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
1252 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
1253 0x00, 0x00, 0x00, 0x00,
1254 0x00, 0x00, 0x00, 0x00,
1255 0x00, 0x00, 0x00, 0x00,
1256 0x00, 0x00, 0x00, 0x00,
1257 0x00, 0x00, 0x00, 0x00,
1258 0x00, 0x00, 0x00, 0x00,
1259 0x00, 0x00, 0x00, 0x00,
1260 0x00, 0x00, 0x00, 0x00,
1262 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
1263 0x00, 0x10, 0x00, 0x00,
1265 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
1266 0x00, 0x00, 0x00, 0x00,
1268 0x00, 0x00, /* 2 bytes for 4 byte alignment */
1271 static const struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_offsets[] = {
1272 { ICE_MAC_OFOS, 0 },
1273 { ICE_VLAN_EX, 14 },
1274 { ICE_VLAN_OFOS, 18 },
1276 { ICE_PROTOCOL_LAST, 0 },
1280 struct ice_dummy_pkt_offsets dummy_qinq_pppoe_ipv4_packet_offsets[] = {
1281 { ICE_MAC_OFOS, 0 },
1282 { ICE_VLAN_EX, 14 },
1283 { ICE_VLAN_OFOS, 18 },
1285 { ICE_IPV4_OFOS, 30 },
1286 { ICE_PROTOCOL_LAST, 0 },
/* QinQ PPPoE IPv4 dummy training packet. PPP protocol 0x0021 = IPv4.
 * NOTE(review): the byte-array comment labels offset 30 as ICE_IPV4_IL
 * while the offsets table above calls it ICE_IPV4_OFOS — verify which
 * label upstream uses; the offset itself (30) agrees in both.
 */
1289 static const u8 dummy_qinq_pppoe_ipv4_pkt[] = {
1290 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1291 0x00, 0x00, 0x00, 0x00,
1292 0x00, 0x00, 0x00, 0x00,
1295 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1296 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 18 */
1298 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
1301 0x00, 0x21, /* PPP Link Layer 28 */
1303 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 30 */
1304 0x00, 0x00, 0x00, 0x00,
1305 0x00, 0x00, 0x00, 0x00,
1306 0x00, 0x00, 0x00, 0x00,
1307 0x00, 0x00, 0x00, 0x00,
1309 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Header-offset table for the QinQ PPPoE IPv6 dummy packet.
 * NOTE(review): gaps in this extract (lines 1316/1318) very likely held
 * the ICE_VLAN_EX (offset 14) and ICE_PPPOE (offset 22) rows; the
 * "static const" qualifiers may also have been dropped — confirm
 * against upstream before relying on this view.
 */
1313 struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_ipv6_offsets[] = {
1314 { ICE_MAC_OFOS, 0 },
1315 { ICE_ETYPE_OL, 12 },
1317 { ICE_VLAN_OFOS, 18 },
1319 { ICE_IPV6_OFOS, 30 },
1320 { ICE_PROTOCOL_LAST, 0 },
/* QinQ PPPoE IPv6 dummy training packet.
 * 0x9100 at offset 12 is the outer (S-tag) EtherType; 0x8864 at 20 is
 * PPPoE session; PPP protocol 0x0057 = IPv6. The IPv6 header's next
 * header byte is 0x3b (No Next Header).
 */
1323 static const u8 dummy_qinq_pppoe_ipv6_packet[] = {
1324 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1325 0x00, 0x00, 0x00, 0x00,
1326 0x00, 0x00, 0x00, 0x00,
1328 0x91, 0x00, /* ICE_ETYPE_OL 12 */
1330 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1331 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 18 */
1333 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
1336 0x00, 0x57, /* PPP Link Layer 28*/
1338 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 30 */
1339 0x00, 0x00, 0x3b, 0x00,
1340 0x00, 0x00, 0x00, 0x00,
1341 0x00, 0x00, 0x00, 0x00,
1342 0x00, 0x00, 0x00, 0x00,
1343 0x00, 0x00, 0x00, 0x00,
1344 0x00, 0x00, 0x00, 0x00,
1345 0x00, 0x00, 0x00, 0x00,
1346 0x00, 0x00, 0x00, 0x00,
1347 0x00, 0x00, 0x00, 0x00,
1349 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1352 /* this is a recipe to profile association bitmap */
/* recipe_to_profile[rid] has bit p set when recipe rid is associated with
 * field-vector profile p; filled by ice_get_recp_to_prof_map() below.
 */
1353 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
1354 ICE_MAX_NUM_PROFILES);
1356 /* this is a profile to recipe association bitmap */
/* Inverse of recipe_to_profile: profile_to_recipe[p] has bit rid set when
 * profile p maps to recipe rid; both tables are refreshed together.
 */
1357 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
1358 ICE_MAX_NUM_RECIPES);
/* Forward declaration; defined later in this file and called from
 * ice_get_recp_frm_fw() to refresh the recipe/profile bitmaps.
 */
1360 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
1363 * ice_collect_result_idx - copy result index values
1364 * @buf: buffer that contains the result index
1365 * @recp: the recipe struct to copy data into
1367 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
1368 struct ice_sw_recipe *recp)
/* When the RESULT_EN flag is set, the remaining bits of result_indx are a
 * valid result-slot number; record it in the recipe's res_idxs bitmap.
 */
1370 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1371 ice_set_bit(buf->content.result_indx &
1372 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
1376 * ice_get_tun_type_for_recipe - get tunnel type for the recipe
1377 * @rid: recipe ID that we are populating
/* Derives a tunnel type by classifying every profile associated with the
 * recipe (via recipe_to_profile[rid]) into hardcoded profile-ID sets for
 * VXLAN, GRE, PPPoE, non-tunnel and GTP/flag ranges, then refining with
 * per-profile checks. @vlan further maps the result to a QinQ variant.
 * NOTE(review): this extract has gaps — the statements that set
 * gre/vxlan/pppoe/gtp/flag validity, bump profile_num, and the switch's
 * break statements are missing from this view; do not restyle from here.
 */
1379 static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid, bool vlan)
/* Profile-ID membership tables; values match the DDP package layout. */
1381 u8 vxlan_profile[12] = {10, 11, 12, 16, 17, 18, 22, 23, 24, 25, 26, 27};
1382 u8 gre_profile[12] = {13, 14, 15, 19, 20, 21, 28, 29, 30, 31, 32, 33};
1383 u8 pppoe_profile[7] = {34, 35, 36, 37, 38, 39, 40};
1384 u8 non_tun_profile[6] = {4, 5, 6, 7, 8, 9};
1385 enum ice_sw_tunnel_type tun_type;
1386 u16 i, j, profile_num = 0;
1387 bool non_tun_valid = false;
1388 bool pppoe_valid = false;
1389 bool vxlan_valid = false;
1390 bool gre_valid = false;
1391 bool gtp_valid = false;
1392 bool flag_valid = false;
/* Scan every profile bit set for this recipe and classify it. */
1394 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1395 if (!ice_is_bit_set(recipe_to_profile[rid], j))
1400 for (i = 0; i < 12; i++) {
1401 if (gre_profile[i] == j)
1405 for (i = 0; i < 12; i++) {
1406 if (vxlan_profile[i] == j)
1410 for (i = 0; i < 7; i++) {
1411 if (pppoe_profile[i] == j)
1415 for (i = 0; i < 6; i++) {
1416 if (non_tun_profile[i] == j)
1417 non_tun_valid = true;
1420 if (j >= ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER &&
1421 j <= ICE_PROFID_IPV6_GTPU_IPV6_TCP)
1424 if ((j >= ICE_PROFID_IPV4_ESP &&
1425 j <= ICE_PROFID_IPV6_PFCP_SESSION) ||
1426 (j >= ICE_PROFID_IPV4_GTPC_TEID &&
1427 j <= ICE_PROFID_IPV6_GTPU_TEID))
/* Coarse classification: tunnel-only, mixed, or plain non-tunnel. */
1431 if (!non_tun_valid && vxlan_valid)
1432 tun_type = ICE_SW_TUN_VXLAN;
1433 else if (!non_tun_valid && gre_valid)
1434 tun_type = ICE_SW_TUN_NVGRE;
1435 else if (!non_tun_valid && pppoe_valid)
1436 tun_type = ICE_SW_TUN_PPPOE;
1437 else if (!non_tun_valid && gtp_valid)
1438 tun_type = ICE_SW_TUN_GTP;
1439 else if (non_tun_valid &&
1440 (vxlan_valid || gre_valid || gtp_valid || pppoe_valid))
1441 tun_type = ICE_SW_TUN_AND_NON_TUN;
1442 else if (non_tun_valid && !vxlan_valid && !gre_valid && !gtp_valid &&
1444 tun_type = ICE_NON_TUN;
1446 tun_type = ICE_NON_TUN;
/* Multiple PPPoE profiles: narrow to IPv4 or IPv6 PPPoE if exactly one
 * of the two "other" profiles is present (i/j reused as flags here).
 */
1448 if (profile_num > 1 && tun_type == ICE_SW_TUN_PPPOE) {
1449 i = ice_is_bit_set(recipe_to_profile[rid],
1450 ICE_PROFID_PPPOE_IPV4_OTHER);
1451 j = ice_is_bit_set(recipe_to_profile[rid],
1452 ICE_PROFID_PPPOE_IPV6_OTHER);
1454 tun_type = ICE_SW_TUN_PPPOE_IPV4;
1456 tun_type = ICE_SW_TUN_PPPOE_IPV6;
/* GTP: pick the specific inner/outer IP combination. */
1459 if (tun_type == ICE_SW_TUN_GTP) {
1460 if (ice_is_bit_set(recipe_to_profile[rid],
1461 ICE_PROFID_IPV4_GTPU_IPV4_OTHER))
1462 tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4;
1463 else if (ice_is_bit_set(recipe_to_profile[rid],
1464 ICE_PROFID_IPV4_GTPU_IPV6_OTHER))
1465 tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6;
1466 else if (ice_is_bit_set(recipe_to_profile[rid],
1467 ICE_PROFID_IPV6_GTPU_IPV4_OTHER))
1468 tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4;
1469 else if (ice_is_bit_set(recipe_to_profile[rid],
1470 ICE_PROFID_IPV6_GTPU_IPV6_OTHER))
1471 tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6;
/* Exactly one profile: map that profile ID directly to a tunnel type. */
1474 if (profile_num == 1 && (flag_valid || non_tun_valid || pppoe_valid)) {
1475 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1476 if (ice_is_bit_set(recipe_to_profile[rid], j)) {
1478 case ICE_PROFID_IPV4_TCP:
1479 tun_type = ICE_SW_IPV4_TCP;
1481 case ICE_PROFID_IPV4_UDP:
1482 tun_type = ICE_SW_IPV4_UDP;
1484 case ICE_PROFID_IPV6_TCP:
1485 tun_type = ICE_SW_IPV6_TCP;
1487 case ICE_PROFID_IPV6_UDP:
1488 tun_type = ICE_SW_IPV6_UDP;
1490 case ICE_PROFID_PPPOE_PAY:
1491 tun_type = ICE_SW_TUN_PPPOE_PAY;
1493 case ICE_PROFID_PPPOE_IPV4_TCP:
1494 tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1496 case ICE_PROFID_PPPOE_IPV4_UDP:
1497 tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1499 case ICE_PROFID_PPPOE_IPV4_OTHER:
1500 tun_type = ICE_SW_TUN_PPPOE_IPV4;
1502 case ICE_PROFID_PPPOE_IPV6_TCP:
1503 tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1505 case ICE_PROFID_PPPOE_IPV6_UDP:
1506 tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1508 case ICE_PROFID_PPPOE_IPV6_OTHER:
1509 tun_type = ICE_SW_TUN_PPPOE_IPV6;
1511 case ICE_PROFID_IPV4_ESP:
1512 tun_type = ICE_SW_TUN_IPV4_ESP;
1514 case ICE_PROFID_IPV6_ESP:
1515 tun_type = ICE_SW_TUN_IPV6_ESP;
1517 case ICE_PROFID_IPV4_AH:
1518 tun_type = ICE_SW_TUN_IPV4_AH;
1520 case ICE_PROFID_IPV6_AH:
1521 tun_type = ICE_SW_TUN_IPV6_AH;
1523 case ICE_PROFID_IPV4_NAT_T:
1524 tun_type = ICE_SW_TUN_IPV4_NAT_T;
1526 case ICE_PROFID_IPV6_NAT_T:
1527 tun_type = ICE_SW_TUN_IPV6_NAT_T;
1529 case ICE_PROFID_IPV4_PFCP_NODE:
1531 ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1533 case ICE_PROFID_IPV6_PFCP_NODE:
1535 ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1537 case ICE_PROFID_IPV4_PFCP_SESSION:
1539 ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1541 case ICE_PROFID_IPV6_PFCP_SESSION:
1543 ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1545 case ICE_PROFID_MAC_IPV4_L2TPV3:
1546 tun_type = ICE_SW_TUN_IPV4_L2TPV3;
1548 case ICE_PROFID_MAC_IPV6_L2TPV3:
1549 tun_type = ICE_SW_TUN_IPV6_L2TPV3;
1551 case ICE_PROFID_IPV4_GTPU_TEID:
1552 tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1554 case ICE_PROFID_IPV6_GTPU_TEID:
1555 tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
/* Finally, promote to the QinQ variant when an outer VLAN is present. */
1566 if (vlan && tun_type == ICE_SW_TUN_PPPOE)
1567 tun_type = ICE_SW_TUN_PPPOE_QINQ;
1568 else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV6)
1569 tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
1570 else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV4)
1571 tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
1572 else if (vlan && tun_type == ICE_SW_TUN_PPPOE_PAY)
1573 tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
1574 else if (vlan && tun_type == ICE_SW_TUN_AND_NON_TUN)
1575 tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1576 else if (vlan && tun_type == ICE_NON_TUN)
1577 tun_type = ICE_NON_TUN_QINQ;
1583 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
1584 * @hw: pointer to hardware structure
1585 * @recps: struct that we need to populate
1586 * @rid: recipe ID that we are populating
1587 * @refresh_required: true if we should get recipe to profile mapping from FW
1589 * This function is used to populate all the necessary entries into our
1590 * bookkeeping so that we have a current list of all the recipes that are
1591 * programmed in the firmware.
/* NOTE(review): gaps in this extract hide several lines (NULL checks,
 * loop/branch bodies, the "vlan" flag declaration and setting, error
 * labels, the final return) — treat this view as partial.
 */
1593 static enum ice_status
1594 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
1595 bool *refresh_required)
1597 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
1598 struct ice_aqc_recipe_data_elem *tmp;
1599 u16 num_recps = ICE_MAX_NUM_RECIPES;
1600 struct ice_prot_lkup_ext *lkup_exts;
1601 enum ice_status status;
1606 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
1608 /* we need a buffer big enough to accommodate all the recipes */
1609 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
1610 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
1612 return ICE_ERR_NO_MEMORY;
1614 tmp[0].recipe_indx = rid;
1615 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
1616 /* non-zero status meaning recipe doesn't exist */
1620 /* Get recipe to profile map so that we can get the fv from lkups that
1621 * we read for a recipe from FW. Since we want to minimize the number of
1622 * times we make this FW call, just make one call and cache the copy
1623 * until a new recipe is added. This operation is only required the
1624 * first time to get the changes from FW. Then to search existing
1625 * entries we don't need to update the cache again until another recipe
1628 if (*refresh_required) {
1629 ice_get_recp_to_prof_map(hw);
1630 *refresh_required = false;
1633 /* Start populating all the entries for recps[rid] based on lkups from
1634 * firmware. Note that we are only creating the root recipe in our
1637 lkup_exts = &recps[rid].lkup_exts;
/* Walk each sub-recipe (chained recipes return more than one element). */
1639 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
1640 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
1641 struct ice_recp_grp_entry *rg_entry;
1642 u8 i, prof, idx, prot = 0;
1646 rg_entry = (struct ice_recp_grp_entry *)
1647 ice_malloc(hw, sizeof(*rg_entry));
1649 status = ICE_ERR_NO_MEMORY;
1653 idx = root_bufs.recipe_indx;
1654 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
1656 /* Mark all result indices in this chain */
1657 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1658 ice_set_bit(root_bufs.content.result_indx &
1659 ~ICE_AQ_RECIPE_RESULT_EN, result_bm)
1661 /* get the first profile that is associated with rid */
1662 prof = ice_find_first_bit(recipe_to_profile[idx],
1663 ICE_MAX_NUM_PROFILES);
1664 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
1665 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
1667 rg_entry->fv_idx[i] = lkup_indx;
1668 rg_entry->fv_mask[i] =
1669 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
1671 /* If the recipe is a chained recipe then all its
1672 * child recipe's result will have a result index.
1673 * To fill fv_words we should not use those result
1674 * index, we only need the protocol ids and offsets.
1675 * We will skip all the fv_idx which stores result
1676 * index in them. We also need to skip any fv_idx which
1677 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
1678 * valid offset value.
1680 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
1681 rg_entry->fv_idx[i]) ||
1682 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
1683 rg_entry->fv_idx[i] == 0)
1686 ice_find_prot_off(hw, ICE_BLK_SW, prof,
1687 rg_entry->fv_idx[i], &prot, &off);
1688 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
1689 lkup_exts->fv_words[fv_word_idx].off = off;
1690 lkup_exts->field_mask[fv_word_idx] =
1691 rg_entry->fv_mask[i];
1692 if (prot == ICE_META_DATA_ID_HW &&
1693 off == ICE_TUN_FLAG_MDID_OFF)
1697 /* populate rg_list with the data from the child entry of this
1700 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
1702 /* Propagate some data to the recipe database */
1703 recps[idx].is_root = !!is_root;
1704 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1705 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
1706 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
1707 recps[idx].chain_idx = root_bufs.content.result_indx &
1708 ~ICE_AQ_RECIPE_RESULT_EN;
1709 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
1711 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
1717 /* Only do the following for root recipes entries */
1718 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
1719 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
1720 recps[idx].root_rid = root_bufs.content.rid &
1721 ~ICE_AQ_RECIPE_ID_IS_ROOT;
1722 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1725 /* Complete initialization of the root recipe entry */
1726 lkup_exts->n_val_words = fv_word_idx;
1727 recps[rid].big_recp = (num_recps > 1);
1728 recps[rid].n_grp_count = (u8)num_recps;
1729 recps[rid].tun_type = ice_get_tun_type_for_recipe(rid, vlan);
/* Keep a private copy of the raw FW recipe data for later re-use. */
1730 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
1731 ice_memdup(hw, tmp, recps[rid].n_grp_count *
1732 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
1733 if (!recps[rid].root_buf)
1736 /* Copy result indexes */
1737 ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
1738 recps[rid].recp_created = true;
1746 * ice_get_recp_to_prof_map - updates recipe to profile mapping
1747 * @hw: pointer to hardware structure
1749 * This function is used to populate recipe_to_profile matrix where index to
1750 * this array is the recipe ID and the element is the mapping of which profiles
1751 * is this recipe mapped to.
1753 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
1755 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Query FW once per profile and rebuild both direction bitmaps. */
1758 for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
1761 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
1762 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
1763 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
1765 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
1766 ICE_MAX_NUM_RECIPES);
/* Mirror each profile->recipe bit into the inverse table. */
1767 ice_for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
1768 ice_set_bit(i, recipe_to_profile[j]);
1773 * ice_init_def_sw_recp - initialize the recipe book keeping tables
1774 * @hw: pointer to the HW struct
1775 * @recp_list: pointer to sw recipe list
1777 * Allocate memory for the entire recipe table and initialize the structures/
1778 * entries corresponding to basic recipes.
/* NOTE(review): the function's tail (assignment of the allocated table to
 * *recp_list and the return) is outside this extract's visible lines.
 */
1781 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
1783 struct ice_sw_recipe *recps;
1786 recps = (struct ice_sw_recipe *)
1787 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
1789 return ICE_ERR_NO_MEMORY;
/* Pre-initialize every recipe slot: lists empty, lock ready. */
1791 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
1792 recps[i].root_rid = i;
1793 INIT_LIST_HEAD(&recps[i].filt_rules);
1794 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
1795 INIT_LIST_HEAD(&recps[i].rg_list);
1796 ice_init_lock(&recps[i].filt_rule_lock);
1805 * ice_aq_get_sw_cfg - get switch configuration
1806 * @hw: pointer to the hardware structure
1807 * @buf: pointer to the result buffer
1808 * @buf_size: length of the buffer available for response
1809 * @req_desc: pointer to requested descriptor
1810 * @num_elems: pointer to number of elements
1811 * @cd: pointer to command details structure or NULL
1813 * Get switch configuration (0x0200) to be placed in buf.
1814 * This admin command returns information such as initial VSI/port number
1815 * and switch ID it belongs to.
1817 * NOTE: *req_desc is both an input/output parameter.
1818 * The caller of this function first calls this function with *request_desc set
1819 * to 0. If the response from f/w has *req_desc set to 0, all the switch
1820 * configuration information has been returned; if non-zero (meaning not all
1821 * the information was returned), the caller should call this function again
1822 * with *req_desc set to the previous value returned by f/w to get the
1823 * next block of switch configuration information.
1825 * *num_elems is output only parameter. This reflects the number of elements
1826 * in response buffer. The caller of this function to use *num_elems while
1827 * parsing the response buffer.
1829 static enum ice_status
1830 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
1831 u16 buf_size, u16 *req_desc, u16 *num_elems,
1832 struct ice_sq_cd *cd)
1834 struct ice_aqc_get_sw_cfg *cmd;
1835 struct ice_aq_desc desc;
1836 enum ice_status status;
1838 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
1839 cmd = &desc.params.get_sw_conf;
/* Continuation cookie: tell FW where to resume the element stream. */
1840 cmd->element = CPU_TO_LE16(*req_desc);
1842 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* FW echoes back the next cookie and the element count in the response. */
1844 *req_desc = LE16_TO_CPU(cmd->element);
1845 *num_elems = LE16_TO_CPU(cmd->num_elems);
1852 * ice_alloc_rss_global_lut - allocate a RSS global LUT
1853 * @hw: pointer to the HW struct
1854 * @shared_res: true to allocate as a shared resource and false to allocate as a dedicated resource
1855 * @global_lut_id: output parameter for the RSS global LUT's ID
1857 enum ice_status ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id)
1859 struct ice_aqc_alloc_free_res_elem *sw_buf;
1860 enum ice_status status;
1863 buf_len = ice_struct_size(sw_buf, elem, 1);
1864 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1866 return ICE_ERR_NO_MEMORY;
/* Request exactly one RSS-hash LUT, shared or dedicated per caller. */
1868 sw_buf->num_elems = CPU_TO_LE16(1);
1869 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH |
1870 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
1871 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
1873 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, ice_aqc_opc_alloc_res, NULL);
1875 ice_debug(hw, ICE_DBG_RES, "Failed to allocate %s RSS global LUT, status %d\n",
1876 shared_res ? "shared" : "dedicated", status);
1877 goto ice_alloc_global_lut_exit;
/* FW returns the allocated LUT ID in the first response element. */
1880 *global_lut_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
1882 ice_alloc_global_lut_exit:
1883 ice_free(hw, sw_buf);
1888 * ice_free_global_lut - free a RSS global LUT
/* NOTE(review): kernel-doc name above says "ice_free_global_lut" but the
 * function is ice_free_rss_global_lut — the comment name is stale.
 */
1889 * @hw: pointer to the HW struct
1890 * @global_lut_id: ID of the RSS global LUT to free
1892 enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id)
1894 struct ice_aqc_alloc_free_res_elem *sw_buf;
1895 u16 buf_len, num_elems = 1;
1896 enum ice_status status;
1898 buf_len = ice_struct_size(sw_buf, elem, num_elems);
1899 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1901 return ICE_ERR_NO_MEMORY;
/* Identify the single LUT to release by its ID in the element list. */
1903 sw_buf->num_elems = CPU_TO_LE16(num_elems);
1904 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH);
1905 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(global_lut_id);
1907 status = ice_aq_alloc_free_res(hw, num_elems, sw_buf, buf_len, ice_aqc_opc_free_res, NULL);
1909 ice_debug(hw, ICE_DBG_RES, "Failed to free RSS global LUT %d, status %d\n",
1910 global_lut_id, status);
1912 ice_free(hw, sw_buf);
1917 * ice_alloc_sw - allocate resources specific to switch
1918 * @hw: pointer to the HW struct
1919 * @ena_stats: true to turn on VEB stats
1920 * @shared_res: true for shared resource, false for dedicated resource
1921 * @sw_id: switch ID returned
1922 * @counter_id: VEB counter ID returned
1924 * allocates switch resources (SWID and VEB counter) (0x0208)
1927 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
1930 struct ice_aqc_alloc_free_res_elem *sw_buf;
1931 struct ice_aqc_res_elem *sw_ele;
1932 enum ice_status status;
1935 buf_len = ice_struct_size(sw_buf, elem, 1);
1936 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1938 return ICE_ERR_NO_MEMORY;
1940 /* Prepare buffer for switch ID.
1941 * The number of resource entries in buffer is passed as 1 since only a
1942 * single switch/VEB instance is allocated, and hence a single sw_id
1945 sw_buf->num_elems = CPU_TO_LE16(1);
1947 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
1948 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
1949 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
1951 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1952 ice_aqc_opc_alloc_res, NULL);
1955 goto ice_alloc_sw_exit;
1957 sw_ele = &sw_buf->elem[0];
1958 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
/* Second allocation (guarded by ena_stats — guard line is in an
 * extraction gap): a dedicated VEB counter for this switch instance.
 */
1961 /* Prepare buffer for VEB Counter */
1962 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
1963 struct ice_aqc_alloc_free_res_elem *counter_buf;
1964 struct ice_aqc_res_elem *counter_ele;
1966 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1967 ice_malloc(hw, buf_len);
1969 status = ICE_ERR_NO_MEMORY;
1970 goto ice_alloc_sw_exit;
1973 /* The number of resource entries in buffer is passed as 1 since
1974 * only a single switch/VEB instance is allocated, and hence a
1975 * single VEB counter is requested.
1977 counter_buf->num_elems = CPU_TO_LE16(1);
1978 counter_buf->res_type =
1979 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
1980 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
1981 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1985 ice_free(hw, counter_buf);
1986 goto ice_alloc_sw_exit;
1988 counter_ele = &counter_buf->elem[0];
1989 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
1990 ice_free(hw, counter_buf);
1994 ice_free(hw, sw_buf);
1999 * ice_free_sw - free resources specific to switch
2000 * @hw: pointer to the HW struct
2001 * @sw_id: switch ID returned
2002 * @counter_id: VEB counter ID returned
2004 * free switch resources (SWID and VEB counter) (0x0209)
2006 * NOTE: This function frees multiple resources. It continues
2007 * releasing other resources even after it encounters error.
2008 * The error code returned is the last error it encountered.
2010 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
2012 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
2013 enum ice_status status, ret_status;
2016 buf_len = ice_struct_size(sw_buf, elem, 1);
2017 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2019 return ICE_ERR_NO_MEMORY;
2021 /* Prepare buffer to free for switch ID res.
2022 * The number of resource entries in buffer is passed as 1 since only a
2023 * single switch/VEB instance is freed, and hence a single sw_id
2026 sw_buf->num_elems = CPU_TO_LE16(1);
2027 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
2028 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
2030 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2031 ice_aqc_opc_free_res, NULL);
2034 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
2036 /* Prepare buffer to free for VEB Counter resource */
2037 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
2038 ice_malloc(hw, buf_len);
2040 ice_free(hw, sw_buf);
2041 return ICE_ERR_NO_MEMORY;
2044 /* The number of resource entries in buffer is passed as 1 since only a
2045 * single switch/VEB instance is freed, and hence a single VEB counter
2048 counter_buf->num_elems = CPU_TO_LE16(1);
2049 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
2050 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
2052 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
2053 ice_aqc_opc_free_res, NULL);
/* Best-effort semantics: record the counter-free failure but still
 * release both local buffers and return the last error seen.
 */
2055 ice_debug(hw, ICE_DBG_SW, "VEB counter resource could not be freed\n");
2056 ret_status = status;
2059 ice_free(hw, counter_buf);
2060 ice_free(hw, sw_buf);
2066 * @hw: pointer to the HW struct
2067 * @vsi_ctx: pointer to a VSI context struct
2068 * @cd: pointer to command details structure or NULL
2070 * Add a VSI context to the hardware (0x0210)
2073 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2074 struct ice_sq_cd *cd)
2076 struct ice_aqc_add_update_free_vsi_resp *res;
2077 struct ice_aqc_add_get_update_free_vsi *cmd;
2078 struct ice_aq_desc desc;
2079 enum ice_status status;
2081 cmd = &desc.params.vsi_cmd;
2082 res = &desc.params.add_update_free_vsi_res;
2084 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
/* When the caller supplies a fixed VSI number (not pool-allocated),
 * pass it to FW with the IS_VALID flag so FW honors it.
 */
2086 if (!vsi_ctx->alloc_from_pool)
2087 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
2088 ICE_AQ_VSI_IS_VALID);
2090 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
/* RD flag: this command carries a read/write data buffer (the info). */
2092 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2094 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2095 sizeof(vsi_ctx->info), cd);
/* On success FW reports the assigned VSI number and pool usage. */
2098 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
2099 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
2100 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
2108 * @hw: pointer to the HW struct
2109 * @vsi_ctx: pointer to a VSI context struct
2110 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2111 * @cd: pointer to command details structure or NULL
2113 * Free VSI context info from hardware (0x0213)
2116 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2117 bool keep_vsi_alloc, struct ice_sq_cd *cd)
2119 struct ice_aqc_add_update_free_vsi_resp *resp;
2120 struct ice_aqc_add_get_update_free_vsi *cmd;
2121 struct ice_aq_desc desc;
2122 enum ice_status status;
2124 cmd = &desc.params.vsi_cmd;
2125 resp = &desc.params.add_update_free_vsi_res;
2127 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
2129 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* KEEP_ALLOC (set when keep_vsi_alloc — guard line is in a gap) leaves
 * the VSI allocation with this PF while freeing the context.
 */
2131 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
2133 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2135 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2136 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2144 * @hw: pointer to the HW struct
2145 * @vsi_ctx: pointer to a VSI context struct
2146 * @cd: pointer to command details structure or NULL
2148 * Update VSI context in the hardware (0x0211)
2151 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2152 struct ice_sq_cd *cd)
2154 struct ice_aqc_add_update_free_vsi_resp *resp;
2155 struct ice_aqc_add_get_update_free_vsi *cmd;
2156 struct ice_aq_desc desc;
2157 enum ice_status status;
2159 cmd = &desc.params.vsi_cmd;
2160 resp = &desc.params.add_update_free_vsi_res;
2162 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
2164 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* RD flag: the updated VSI info struct travels as the data buffer. */
2166 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2168 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2169 sizeof(vsi_ctx->info), cd);
2172 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2173 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2180 * ice_is_vsi_valid - check whether the VSI is valid or not
2181 * @hw: pointer to the HW struct
2182 * @vsi_handle: VSI handle
2184 * check whether the VSI is valid or not
2186 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
/* Valid iff the handle is in range and a context has been saved for it. */
2188 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
2192 * ice_get_hw_vsi_num - return the HW VSI number
2193 * @hw: pointer to the HW struct
2194 * @vsi_handle: VSI handle
2196 * return the HW VSI number
2197 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
2199 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
/* No bounds/NULL check here by design — caller must validate first. */
2201 return hw->vsi_ctx[vsi_handle]->vsi_num;
2205 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
2206 * @hw: pointer to the HW struct
2207 * @vsi_handle: VSI handle
2209 * return the VSI context entry for a given VSI handle
2211 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
/* Returns NULL for out-of-range handles; may also return NULL when no
 * context was ever saved for an in-range handle.
 */
2213 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
2217 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
2218 * @hw: pointer to the HW struct
2219 * @vsi_handle: VSI handle
2220 * @vsi: VSI context pointer
2222 * save the VSI context entry for a given VSI handle
/* Ownership note: hw->vsi_ctx[] takes the pointer as-is; the entry is
 * released via ice_clear_vsi_ctx().
 */
2225 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
2227 hw->vsi_ctx[vsi_handle] = vsi;
2231 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
2232 * @hw: pointer to the HW struct
2233 * @vsi_handle: VSI handle
2235 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
2237 struct ice_vsi_ctx *vsi;
2240 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* Free and NULL each per-TC LAN queue context (NULL pointer defends
 * against double-free on repeated clears).
 */
2243 ice_for_each_traffic_class(i) {
2244 if (vsi->lan_q_ctx[i]) {
2245 ice_free(hw, vsi->lan_q_ctx[i]);
2246 vsi->lan_q_ctx[i] = NULL;
2252 * ice_clear_vsi_ctx - clear the VSI context entry
2253 * @hw: pointer to the HW struct
2254 * @vsi_handle: VSI handle
2256 * clear the VSI context entry
2258 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2260 struct ice_vsi_ctx *vsi;
2262 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* Release queue contexts first, then (in lines hidden by an extraction
 * gap) the context itself, before clearing the table slot.
 */
2264 ice_clear_vsi_q_ctx(hw, vsi_handle);
2266 hw->vsi_ctx[vsi_handle] = NULL;
2271 * ice_clear_all_vsi_ctx - clear all the VSI context entries
2272 * @hw: pointer to the HW struct
2274 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
/* ice_clear_vsi_ctx() is a no-op for handles with no saved context. */
2278 for (i = 0; i < ICE_MAX_VSI; i++)
2279 ice_clear_vsi_ctx(hw, i);
2283 * ice_add_vsi - add VSI context to the hardware and VSI handle list
2284 * @hw: pointer to the HW struct
2285 * @vsi_handle: unique VSI handle provided by drivers
2286 * @vsi_ctx: pointer to a VSI context struct
2287 * @cd: pointer to command details structure or NULL
2289 * Add a VSI context to the hardware also add it into the VSI handle list.
2290 * If this function gets called after reset for existing VSIs then update
2291 * with the new HW VSI number in the corresponding VSI handle list entry.
2294 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2295 struct ice_sq_cd *cd)
2297 struct ice_vsi_ctx *tmp_vsi_ctx;
2298 enum ice_status status;
2300 if (vsi_handle >= ICE_MAX_VSI)
2301 return ICE_ERR_PARAM;
2302 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
2305 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
2307 /* Create a new VSI context */
2308 tmp_vsi_ctx = (struct ice_vsi_ctx *)
2309 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
/* Roll back the FW-side add if we cannot track it locally. */
2311 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
2312 return ICE_ERR_NO_MEMORY;
2314 *tmp_vsi_ctx = *vsi_ctx;
2316 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
2318 /* update with new HW VSI num */
2319 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
2326 * ice_free_vsi- free VSI context from hardware and VSI handle list
2327 * @hw: pointer to the HW struct
2328 * @vsi_handle: unique VSI handle
2329 * @vsi_ctx: pointer to a VSI context struct
2330 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2331 * @cd: pointer to command details structure or NULL
2333 * Free VSI context info from hardware as well as from VSI handle list
2336 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2337 bool keep_vsi_alloc, struct ice_sq_cd *cd)
2339 enum ice_status status;
2341 if (!ice_is_vsi_valid(hw, vsi_handle))
2342 return ICE_ERR_PARAM;
/* Translate the driver handle to the HW VSI number before the AQ call. */
2343 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2344 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
/* Local bookkeeping cleared only after FW accepted the free. */
2346 ice_clear_vsi_ctx(hw, vsi_handle);
2352 * @hw: pointer to the HW struct
2353 * @vsi_handle: unique VSI handle
2354 * @vsi_ctx: pointer to a VSI context struct
2355 * @cd: pointer to command details structure or NULL
2357 * Update VSI context in the hardware
2360 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2361 struct ice_sq_cd *cd)
2363 if (!ice_is_vsi_valid(hw, vsi_handle))
2364 return ICE_ERR_PARAM;
/* Thin wrapper: validate the handle, fill in the HW VSI number, then
 * delegate to the AQ command helper.
 */
2365 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2366 return ice_aq_update_vsi(hw, vsi_ctx, cd);
2370 * ice_aq_get_vsi_params
2371 * @hw: pointer to the HW struct
2372 * @vsi_ctx: pointer to a VSI context struct
2373 * @cd: pointer to command details structure or NULL
2375 * Get VSI context info from hardware (0x0212)
2378 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2379 struct ice_sq_cd *cd)
2381 struct ice_aqc_add_get_update_free_vsi *cmd;
2382 struct ice_aqc_get_vsi_resp *resp;
2383 struct ice_aq_desc desc;
2384 enum ice_status status;
2386 cmd = &desc.params.vsi_cmd;
2387 resp = &desc.params.get_vsi_resp;
2389 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
2391 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* FW fills vsi_ctx->info in place; the descriptor carries the counts. */
2393 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2394 sizeof(vsi_ctx->info), cd);
2396 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
2398 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2399 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2406 * ice_aq_add_update_mir_rule - add/update a mirror rule
2407 * @hw: pointer to the HW struct
2408 * @rule_type: Rule Type
2409 * @dest_vsi: VSI number to which packets will be mirrored
2410 * @count: length of the list
2411 * @mr_buf: buffer for list of mirrored VSI numbers
2412 * @cd: pointer to command details structure or NULL
2415 * Add/Update Mirror Rule (0x260).
 *
 * @rule_id is both input (ICE_INVAL_MIRROR_RULE_ID means "allocate new") and
 * output (the rule ID reported by firmware).
2418 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
2419 u16 count, struct ice_mir_rule_buf *mr_buf,
2420 struct ice_sq_cd *cd, u16 *rule_id)
2422 struct ice_aqc_add_update_mir_rule *cmd;
2423 struct ice_aq_desc desc;
2424 enum ice_status status;
2425 __le16 *mr_list = NULL;
 /* vport rules carry a VSI list buffer; pport rules must not */
2428 switch (rule_type) {
2429 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
2430 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
2431 /* Make sure count and mr_buf are set for these rule_types */
2432 if (!(count && mr_buf))
2433 return ICE_ERR_PARAM;
2435 buf_size = count * sizeof(__le16);
2436 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
2438 return ICE_ERR_NO_MEMORY;
2440 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
2441 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
2442 /* Make sure count and mr_buf are not set for these
2445 if (count || mr_buf)
2446 return ICE_ERR_PARAM;
2449 ice_debug(hw, ICE_DBG_SW, "Error due to unsupported rule_type %u\n", rule_type);
2450 return ICE_ERR_OUT_OF_RANGE;
2453 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
2455 /* Pre-process 'mr_buf' items for add/update of virtual port
2456 * ingress/egress mirroring (but not physical port ingress/egress
2462 for (i = 0; i < count; i++) {
2465 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
2467 /* Validate specified VSI number, make sure it is less
2468 * than ICE_MAX_VSI, if not return with error.
2470 if (id >= ICE_MAX_VSI) {
2471 ice_debug(hw, ICE_DBG_SW, "Error VSI index (%u) out-of-range\n",
 /* mr_list is released on every error path after allocation */
2473 ice_free(hw, mr_list);
2474 return ICE_ERR_OUT_OF_RANGE;
2477 /* add VSI to mirror rule */
2480 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
2481 else /* remove VSI from mirror rule */
2482 mr_list[i] = CPU_TO_LE16(id);
2486 cmd = &desc.params.add_update_rule;
 /* a valid caller-supplied rule ID means "update existing rule" */
2487 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
2488 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
2489 ICE_AQC_RULE_ID_VALID_M);
2490 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
2491 cmd->num_entries = CPU_TO_LE16(count);
2492 cmd->dest = CPU_TO_LE16(dest_vsi);
2494 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
2496 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
2498 ice_free(hw, mr_list);
2504 * ice_aq_delete_mir_rule - delete a mirror rule
2505 * @hw: pointer to the HW struct
2506 * @rule_id: Mirror rule ID (to be deleted)
2507 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
2508 * otherwise it is returned to the shared pool
2509 * @cd: pointer to command details structure or NULL
2511 * Delete Mirror Rule (0x261).
2514 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
2515 struct ice_sq_cd *cd)
2517 struct ice_aqc_delete_mir_rule *cmd;
2518 struct ice_aq_desc desc;
2520 /* rule_id should be in the range 0...63 */
2521 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
2522 return ICE_ERR_OUT_OF_RANGE;
2524 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
2526 cmd = &desc.params.del_rule;
 /* firmware requires the VALID flag set alongside the rule ID */
2527 rule_id |= ICE_AQC_RULE_ID_VALID_M;
2528 cmd->rule_id = CPU_TO_LE16(rule_id);
2531 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
2533 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2537 * ice_aq_alloc_free_vsi_list
2538 * @hw: pointer to the HW struct
2539 * @vsi_list_id: VSI list ID returned or used for lookup
2540 * @lkup_type: switch rule filter lookup type
2541 * @opc: switch rules population command type - pass in the command opcode
2543 * allocates or free a VSI list resource
 *
 * For an alloc, *vsi_list_id receives the newly allocated list ID; for a
 * free, *vsi_list_id supplies the ID of the resource to release.
2545 static enum ice_status
2546 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
2547 enum ice_sw_lkup_type lkup_type,
2548 enum ice_adminq_opc opc)
2550 struct ice_aqc_alloc_free_res_elem *sw_buf;
2551 struct ice_aqc_res_elem *vsi_ele;
2552 enum ice_status status;
2555 buf_len = ice_struct_size(sw_buf, elem, 1);
2556 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2558 return ICE_ERR_NO_MEMORY;
2559 sw_buf->num_elems = CPU_TO_LE16(1);
 /* only VLAN lookups use the prune-list resource; all other supported
  * lookup types use the replication VSI list resource
  */
2561 if (lkup_type == ICE_SW_LKUP_MAC ||
2562 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2563 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2564 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2565 lkup_type == ICE_SW_LKUP_PROMISC ||
2566 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2567 lkup_type == ICE_SW_LKUP_LAST) {
2568 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
2569 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
2571 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
2573 status = ICE_ERR_PARAM;
2574 goto ice_aq_alloc_free_vsi_list_exit;
2577 if (opc == ice_aqc_opc_free_res)
2578 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
2580 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
2582 goto ice_aq_alloc_free_vsi_list_exit;
2584 if (opc == ice_aqc_opc_alloc_res) {
2585 vsi_ele = &sw_buf->elem[0];
2586 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
2589 ice_aq_alloc_free_vsi_list_exit:
2590 ice_free(hw, sw_buf);
2595 * ice_aq_set_storm_ctrl - Sets storm control configuration
2596 * @hw: pointer to the HW struct
2597 * @bcast_thresh: represents the upper threshold for broadcast storm control
2598 * @mcast_thresh: represents the upper threshold for multicast storm control
2599 * @ctl_bitmask: storm control knobs
2601 * Sets the storm control configuration (0x0280)
2604 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
2607 struct ice_aqc_storm_cfg *cmd;
2608 struct ice_aq_desc desc;
2610 cmd = &desc.params.storm_conf;
2612 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
 /* thresholds are masked to the field width accepted by firmware */
2614 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
2615 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
2616 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
2618 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2622 * ice_aq_get_storm_ctrl - gets storm control configuration
2623 * @hw: pointer to the HW struct
2624 * @bcast_thresh: represents the upper threshold for broadcast storm control
2625 * @mcast_thresh: represents the upper threshold for multicast storm control
2626 * @ctl_bitmask: storm control knobs
2628 * Gets the storm control configuration (0x0281)
 *
 * Output pointers are optional: each is only written when non-NULL.
2631 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
2634 enum ice_status status;
2635 struct ice_aq_desc desc;
2637 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
2639 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2641 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
2644 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
2647 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
2650 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
2657 * ice_aq_sw_rules - add/update/remove switch rules
2658 * @hw: pointer to the HW struct
2659 * @rule_list: pointer to switch rule population list
2660 * @rule_list_sz: total size of the rule list in bytes
2661 * @num_rules: number of switch rules in the rule_list
2662 * @opc: switch rules population command type - pass in the command opcode
2663 * @cd: pointer to command details structure or NULL
2665 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
2667 static enum ice_status
2668 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
2669 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2671 struct ice_aq_desc desc;
2672 enum ice_status status;
2674 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
 /* reject opcodes other than the three switch-rule commands */
2676 if (opc != ice_aqc_opc_add_sw_rules &&
2677 opc != ice_aqc_opc_update_sw_rules &&
2678 opc != ice_aqc_opc_remove_sw_rules)
2679 return ICE_ERR_PARAM;
2681 ice_fill_dflt_direct_cmd_desc(&desc, opc);
 /* RD flag: the buffer carries data for firmware to read */
2683 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2684 desc.params.sw_rules.num_rules_fltr_entry_index =
2685 CPU_TO_LE16(num_rules);
2686 status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
 /* ENOENT on update/remove maps to "rule does not exist" */
2687 if (opc != ice_aqc_opc_add_sw_rules &&
2688 hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
2689 status = ICE_ERR_DOES_NOT_EXIST;
2695 * ice_aq_add_recipe - add switch recipe
2696 * @hw: pointer to the HW struct
2697 * @s_recipe_list: pointer to switch rule population list
2698 * @num_recipes: number of switch recipes in the list
2699 * @cd: pointer to command details structure or NULL
 *
 * Add Recipe AQ command; the recipe data elements are sent as an
 * indirect buffer that firmware reads (RD flag).
2704 ice_aq_add_recipe(struct ice_hw *hw,
2705 struct ice_aqc_recipe_data_elem *s_recipe_list,
2706 u16 num_recipes, struct ice_sq_cd *cd)
2708 struct ice_aqc_add_get_recipe *cmd;
2709 struct ice_aq_desc desc;
2712 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2713 cmd = &desc.params.add_get_recipe;
2714 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
2716 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
2717 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2719 buf_size = num_recipes * sizeof(*s_recipe_list);
2721 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
2725 * ice_aq_get_recipe - get switch recipe
2726 * @hw: pointer to the HW struct
2727 * @s_recipe_list: pointer to switch rule population list
2728 * @num_recipes: pointer to the number of recipes (input and output)
2729 * @recipe_root: root recipe number of recipe(s) to retrieve
2730 * @cd: pointer to command details structure or NULL
2734 * On input, *num_recipes should equal the number of entries in s_recipe_list.
2735 * On output, *num_recipes will equal the number of entries returned in
2738 * The caller must supply enough space in s_recipe_list to hold all possible
2739 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
2742 ice_aq_get_recipe(struct ice_hw *hw,
2743 struct ice_aqc_recipe_data_elem *s_recipe_list,
2744 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
2746 struct ice_aqc_add_get_recipe *cmd;
2747 struct ice_aq_desc desc;
2748 enum ice_status status;
 /* enforce the documented precondition on the caller's buffer size */
2751 if (*num_recipes != ICE_MAX_NUM_RECIPES)
2752 return ICE_ERR_PARAM;
2754 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2755 cmd = &desc.params.add_get_recipe;
2756 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
2758 cmd->return_index = CPU_TO_LE16(recipe_root);
2759 cmd->num_sub_recipes = 0;
2761 buf_size = *num_recipes * sizeof(*s_recipe_list);
2763 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
2764 /* cppcheck-suppress constArgument */
 /* firmware writes back the actual count into the descriptor */
2765 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
2771 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
2772 * @hw: pointer to the HW struct
2773 * @profile_id: package profile ID to associate the recipe with
2774 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2775 * @cd: pointer to command details structure or NULL
2776 * Recipe to profile association (0x0291)
2779 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2780 struct ice_sq_cd *cd)
2782 struct ice_aqc_recipe_to_profile *cmd;
2783 struct ice_aq_desc desc;
2785 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2786 cmd = &desc.params.recipe_to_profile;
2787 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
 /* NOTE(review): profile_id is u32 but the AQ field is 16-bit */
2788 cmd->profile_id = CPU_TO_LE16(profile_id);
2789 /* Set the recipe ID bit in the bitmask to let the device know which
2790 * profile we are associating the recipe to
2792 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
2793 ICE_NONDMA_TO_NONDMA);
2795 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2799 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
2800 * @hw: pointer to the HW struct
2801 * @profile_id: package profile ID to associate the recipe with
2802 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2803 * @cd: pointer to command details structure or NULL
2804 * Associate profile ID with given recipe (0x0293)
2807 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2808 struct ice_sq_cd *cd)
2810 struct ice_aqc_recipe_to_profile *cmd;
2811 struct ice_aq_desc desc;
2812 enum ice_status status;
2814 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2815 cmd = &desc.params.recipe_to_profile;
2816 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
2817 cmd->profile_id = CPU_TO_LE16(profile_id);
2819 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
 /* on success, firmware returns the association bitmap in the descriptor */
2821 ice_memcpy(r_bitmap, cmd->recipe_assoc,
2822 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
2828 * ice_alloc_recipe - add recipe resource
2829 * @hw: pointer to the hardware structure
2830 * @rid: recipe ID returned as response to AQ call
2832 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
2834 struct ice_aqc_alloc_free_res_elem *sw_buf;
2835 enum ice_status status;
2838 buf_len = ice_struct_size(sw_buf, elem, 1);
2839 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2841 return ICE_ERR_NO_MEMORY;
2843 sw_buf->num_elems = CPU_TO_LE16(1);
 /* request a shared recipe resource so multiple owners may reference it */
2844 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
2845 ICE_AQC_RES_TYPE_S) |
2846 ICE_AQC_RES_TYPE_FLAG_SHARED);
2847 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2848 ice_aqc_opc_alloc_res, NULL);
2850 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2851 ice_free(hw, sw_buf);
2856 /* ice_init_port_info - Initialize port_info with switch configuration data
2857 * @pi: pointer to port_info
2858 * @vsi_port_num: VSI number or port number
2859 * @type: Type of switch element (port or VSI)
2860 * @swid: switch ID of the switch the element is attached to
2861 * @pf_vf_num: PF or VF number
2862 * @is_vf: true if the element is a VF, false otherwise
2865 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2866 u16 swid, u16 pf_vf_num, bool is_vf)
2869 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2870 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2872 pi->pf_vf_num = pf_vf_num;
 /* no default Tx/Rx VSI configured yet */
2874 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
2875 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
2878 ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
2883 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
2884 * @hw: pointer to the hardware structure
2886 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
2888 struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
2889 enum ice_status status;
 /* this function only expects to initialize a single port */
2896 num_total_ports = 1;
2898 rbuf = (struct ice_aqc_get_sw_cfg_resp_elem *)
2899 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
2902 return ICE_ERR_NO_MEMORY;
2904 /* Multiple calls to ice_aq_get_sw_cfg may be required
2905 * to get all the switch configuration information. The need
2906 * for additional calls is indicated by ice_aq_get_sw_cfg
2907 * writing a non-zero value in req_desc
2910 struct ice_aqc_get_sw_cfg_resp_elem *ele;
2912 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
2913 &req_desc, &num_elems, NULL);
2918 for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
2919 u16 pf_vf_num, swid, vsi_port_num;
 /* decode the packed response element fields */
2923 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
2924 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
2926 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
2927 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
2929 swid = LE16_TO_CPU(ele->swid);
2931 if (LE16_TO_CPU(ele->pf_vf_num) &
2932 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
2935 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
2936 ICE_AQC_GET_SW_CONF_RESP_TYPE_S)
2939 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2940 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
2941 if (j == num_total_ports) {
2942 ice_debug(hw, ICE_DBG_SW, "more ports than expected\n");
2943 status = ICE_ERR_CFG;
2946 ice_init_port_info(hw->port_info,
2947 vsi_port_num, res_type, swid,
2955 } while (req_desc && !status);
2963 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2964 * @hw: pointer to the hardware structure
2965 * @fi: filter info structure to fill/update
2967 * This helper function populates the lb_en and lan_en elements of the provided
2968 * ice_fltr_info struct using the switch's type and characteristics of the
2969 * switch rule being configured.
2971 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
 /* Rx forwarding rules with a default-port lookup are handled first */
2973 if ((fi->flag & ICE_FLTR_RX) &&
2974 (fi->fltr_act == ICE_FWD_TO_VSI ||
2975 fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
2976 fi->lkup_type == ICE_SW_LKUP_LAST)
2980 if ((fi->flag & ICE_FLTR_TX) &&
2981 (fi->fltr_act == ICE_FWD_TO_VSI ||
2982 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2983 fi->fltr_act == ICE_FWD_TO_Q ||
2984 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2985 /* Setting LB for prune actions will result in replicated
2986 * packets to the internal switch that will be dropped.
2988 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2991 /* Set lan_en to TRUE if
2992 * 1. The switch is a VEB AND
2994 * 2.1 The lookup is a directional lookup like ethertype,
2995 * promiscuous, ethertype-MAC, promiscuous-VLAN
2996 * and default-port OR
2997 * 2.2 The lookup is VLAN, OR
2998 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2999 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
3003 * The switch is a VEPA.
3005 * In all other cases, the LAN enable has to be set to false.
3008 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3009 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
3010 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3011 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3012 fi->lkup_type == ICE_SW_LKUP_DFLT ||
3013 fi->lkup_type == ICE_SW_LKUP_VLAN ||
3014 (fi->lkup_type == ICE_SW_LKUP_MAC &&
3015 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
3016 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
3017 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
3026 * ice_fill_sw_rule - Helper function to fill switch rule structure
3027 * @hw: pointer to the hardware structure
3028 * @f_info: entry containing packet forwarding information
3029 * @s_rule: switch rule structure to be filled in based on mac_entry
3030 * @opc: switch rules population command type - pass in the command opcode
 *
 * Builds the lookup Tx/Rx rule: action word from fltr_act, recipe from
 * lkup_type, and a dummy Ethernet header patched with DA/ethertype/VLAN.
3033 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
3034 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
 /* sentinel: any value > ICE_MAX_VLAN_ID means "no VLAN to program" */
3036 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
 /* a remove only needs the rule index; no header or action required */
3044 if (opc == ice_aqc_opc_remove_sw_rules) {
3045 s_rule->pdata.lkup_tx_rx.act = 0;
3046 s_rule->pdata.lkup_tx_rx.index =
3047 CPU_TO_LE16(f_info->fltr_rule_id);
3048 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
3052 eth_hdr_sz = sizeof(dummy_eth_header);
3053 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
3055 /* initialize the ether header with a dummy header */
3056 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
3057 ice_fill_sw_info(hw, f_info);
3059 switch (f_info->fltr_act) {
3060 case ICE_FWD_TO_VSI:
3061 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
3062 ICE_SINGLE_ACT_VSI_ID_M;
3063 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
3064 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3065 ICE_SINGLE_ACT_VALID_BIT;
3067 case ICE_FWD_TO_VSI_LIST:
3068 act |= ICE_SINGLE_ACT_VSI_LIST;
3069 act |= (f_info->fwd_id.vsi_list_id <<
3070 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
3071 ICE_SINGLE_ACT_VSI_LIST_ID_M;
3072 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
3073 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3074 ICE_SINGLE_ACT_VALID_BIT;
3077 act |= ICE_SINGLE_ACT_TO_Q;
3078 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3079 ICE_SINGLE_ACT_Q_INDEX_M;
3081 case ICE_DROP_PACKET:
3082 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
3083 ICE_SINGLE_ACT_VALID_BIT;
3085 case ICE_FWD_TO_QGRP:
 /* queue region size is encoded as log2 of the group size */
3086 q_rgn = f_info->qgrp_size > 0 ?
3087 (u8)ice_ilog2(f_info->qgrp_size) : 0;
3088 act |= ICE_SINGLE_ACT_TO_Q;
3089 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3090 ICE_SINGLE_ACT_Q_INDEX_M;
3091 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
3092 ICE_SINGLE_ACT_Q_REGION_M;
3099 act |= ICE_SINGLE_ACT_LB_ENABLE;
3101 act |= ICE_SINGLE_ACT_LAN_ENABLE;
 /* pick DA / VLAN / ethertype to patch based on lookup type */
3103 switch (f_info->lkup_type) {
3104 case ICE_SW_LKUP_MAC:
3105 daddr = f_info->l_data.mac.mac_addr;
3107 case ICE_SW_LKUP_VLAN:
3108 vlan_id = f_info->l_data.vlan.vlan_id;
3109 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
3110 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
3111 act |= ICE_SINGLE_ACT_PRUNE;
3112 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
3115 case ICE_SW_LKUP_ETHERTYPE_MAC:
3116 daddr = f_info->l_data.ethertype_mac.mac_addr;
3118 case ICE_SW_LKUP_ETHERTYPE:
3119 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
3120 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
3122 case ICE_SW_LKUP_MAC_VLAN:
3123 daddr = f_info->l_data.mac_vlan.mac_addr;
3124 vlan_id = f_info->l_data.mac_vlan.vlan_id;
3126 case ICE_SW_LKUP_PROMISC_VLAN:
3127 vlan_id = f_info->l_data.mac_vlan.vlan_id;
3129 case ICE_SW_LKUP_PROMISC:
3130 daddr = f_info->l_data.mac_vlan.mac_addr;
3136 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
3137 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
3138 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
3140 /* Recipe set depending on lookup type */
3141 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
3142 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
3143 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
3146 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
3147 ICE_NONDMA_TO_NONDMA);
 /* only program the TCI when a real VLAN ID (<= 0xFFF) was selected */
3149 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
3150 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
3151 *off = CPU_TO_BE16(vlan_id);
3154 /* Create the switch rule with the final dummy Ethernet header */
3155 if (opc != ice_aqc_opc_update_sw_rules)
3156 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
3160 * ice_add_marker_act
3161 * @hw: pointer to the hardware structure
3162 * @m_ent: the management entry for which sw marker needs to be added
3163 * @sw_marker: sw marker to tag the Rx descriptor with
3164 * @l_id: large action resource ID
3166 * Create a large action to hold software marker and update the switch rule
3167 * entry pointed by m_ent with newly created large action
3169 static enum ice_status
3170 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3171 u16 sw_marker, u16 l_id)
3173 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
3174 /* For software marker we need 3 large actions
3175 * 1. FWD action: FWD TO VSI or VSI LIST
3176 * 2. GENERIC VALUE action to hold the profile ID
3177 * 3. GENERIC VALUE action to hold the software marker ID
3179 const u16 num_lg_acts = 3;
3180 enum ice_status status;
 /* only MAC lookup rules support marker large actions here */
3186 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3187 return ICE_ERR_PARAM;
3189 /* Create two back-to-back switch rules and submit them to the HW using
3190 * one memory buffer:
3194 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
3195 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3196 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3198 return ICE_ERR_NO_MEMORY;
 /* second rule lives immediately after the large action in the buffer */
3200 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3202 /* Fill in the first switch rule i.e. large action */
3203 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3204 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3205 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
3207 /* First action VSI forwarding or VSI list forwarding depending on how
3210 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
3211 m_ent->fltr_info.fwd_id.hw_vsi_id;
3213 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3214 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
3215 if (m_ent->vsi_count > 1)
3216 act |= ICE_LG_ACT_VSI_LIST;
3217 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3219 /* Second action descriptor type */
3220 act = ICE_LG_ACT_GENERIC;
3222 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
3223 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3225 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
3226 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
3228 /* Third action Marker value */
3229 act |= ICE_LG_ACT_GENERIC;
3230 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
3231 ICE_LG_ACT_GENERIC_VALUE_M;
3233 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
3235 /* call the fill switch rule to fill the lookup Tx Rx structure */
3236 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3237 ice_aqc_opc_update_sw_rules);
3239 /* Update the action to point to the large action ID */
3240 rx_tx->pdata.lkup_tx_rx.act =
3241 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
3242 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
3243 ICE_SINGLE_ACT_PTR_VAL_M));
3245 /* Use the filter rule ID of the previously created rule with single
3246 * act. Once the update happens, hardware will treat this as large
3249 rx_tx->pdata.lkup_tx_rx.index =
3250 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
3252 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3253 ice_aqc_opc_update_sw_rules, NULL);
 /* record marker bookkeeping on the management entry on success */
3255 m_ent->lg_act_idx = l_id;
3256 m_ent->sw_marker_id = sw_marker;
3259 ice_free(hw, lg_act);
3264 * ice_add_counter_act - add/update filter rule with counter action
3265 * @hw: pointer to the hardware structure
3266 * @m_ent: the management entry for which counter needs to be added
3267 * @counter_id: VLAN counter ID returned as part of allocate resource
3268 * @l_id: large action resource ID
3270 static enum ice_status
3271 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3272 u16 counter_id, u16 l_id)
3274 struct ice_aqc_sw_rules_elem *lg_act;
3275 struct ice_aqc_sw_rules_elem *rx_tx;
3276 enum ice_status status;
3277 /* 2 actions will be added while adding a large action counter */
3278 const int num_acts = 2;
 /* only MAC lookup rules support counter large actions here */
3285 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3286 return ICE_ERR_PARAM;
3288 /* Create two back-to-back switch rules and submit them to the HW using
3289 * one memory buffer:
3293 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
3294 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3295 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3297 return ICE_ERR_NO_MEMORY;
3299 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3301 /* Fill in the first switch rule i.e. large action */
3302 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3303 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3304 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
3306 /* First action VSI forwarding or VSI list forwarding depending on how
3309 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
3310 m_ent->fltr_info.fwd_id.hw_vsi_id;
3312 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3313 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
3314 ICE_LG_ACT_VSI_LIST_ID_M;
3315 if (m_ent->vsi_count > 1)
3316 act |= ICE_LG_ACT_VSI_LIST;
3317 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3319 /* Second action counter ID */
3320 act = ICE_LG_ACT_STAT_COUNT;
3321 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
3322 ICE_LG_ACT_STAT_COUNT_M;
3323 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3325 /* call the fill switch rule to fill the lookup Tx Rx structure */
3326 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3327 ice_aqc_opc_update_sw_rules);
 /* point the base rule at the large action just built */
3329 act = ICE_SINGLE_ACT_PTR;
3330 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
3331 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
3333 /* Use the filter rule ID of the previously created rule with single
3334 * act. Once the update happens, hardware will treat this as large
3337 f_rule_id = m_ent->fltr_info.fltr_rule_id;
3338 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
3340 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3341 ice_aqc_opc_update_sw_rules, NULL);
3343 m_ent->lg_act_idx = l_id;
3344 m_ent->counter_index = counter_id;
3347 ice_free(hw, lg_act);
3352 * ice_create_vsi_list_map
3353 * @hw: pointer to the hardware structure
3354 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
3355 * @num_vsi: number of VSI handles in the array
3356 * @vsi_list_id: VSI list ID generated as part of allocate resource
3358 * Helper function to create a new entry of VSI list ID to VSI mapping
3359 * using the given VSI list ID
 *
 * Returns the new map entry (linked onto sw->vsi_list_map_head), or NULL
 * presumably on allocation failure.
3361 static struct ice_vsi_list_map_info *
3362 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3365 struct ice_switch_info *sw = hw->switch_info;
3366 struct ice_vsi_list_map_info *v_map;
3369 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
3374 v_map->vsi_list_id = vsi_list_id;
 /* record each member handle in the map's VSI bitmap */
3376 for (i = 0; i < num_vsi; i++)
3377 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
3379 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
3384 * ice_update_vsi_list_rule
3385 * @hw: pointer to the hardware structure
3386 * @vsi_handle_arr: array of VSI handles to form a VSI list
3387 * @num_vsi: number of VSI handles in the array
3388 * @vsi_list_id: VSI list ID generated as part of allocate resource
3389 * @remove: Boolean value to indicate if this is a remove action
3390 * @opc: switch rules population command type - pass in the command opcode
3391 * @lkup_type: lookup type of the filter
3393 * Call AQ command to add a new switch rule or update existing switch rule
3394 * using the given VSI list ID
3396 static enum ice_status
3397 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3398 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
3399 enum ice_sw_lkup_type lkup_type)
3401 struct ice_aqc_sw_rules_elem *s_rule;
3402 enum ice_status status;
3408 return ICE_ERR_PARAM;
 /* VLAN lookups manipulate a prune list; the other supported lookup
  * types manipulate a replication VSI list
  */
3410 if (lkup_type == ICE_SW_LKUP_MAC ||
3411 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
3412 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3413 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3414 lkup_type == ICE_SW_LKUP_PROMISC ||
3415 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3416 lkup_type == ICE_SW_LKUP_LAST)
3417 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
3418 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
3419 else if (lkup_type == ICE_SW_LKUP_VLAN)
3420 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
3421 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
3423 return ICE_ERR_PARAM;
3425 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
3426 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3428 return ICE_ERR_NO_MEMORY;
3429 for (i = 0; i < num_vsi; i++) {
3430 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
3431 status = ICE_ERR_PARAM;
3434 /* AQ call requires hw_vsi_id(s) */
3435 s_rule->pdata.vsi_list.vsi[i] =
3436 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
3439 s_rule->type = CPU_TO_LE16(rule_type);
3440 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
3441 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
3443 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
3446 ice_free(hw, s_rule);
3451 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
3452 * @hw: pointer to the HW struct
3453 * @vsi_handle_arr: array of VSI handles to form a VSI list
3454 * @num_vsi: number of VSI handles in the array
3455 * @vsi_list_id: stores the ID of the VSI list to be created
3456 * @lkup_type: switch rule filter's lookup type
 *
 * Allocates a VSI list resource, then populates it with the given VSIs.
3458 static enum ice_status
3459 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3460 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
3462 enum ice_status status;
3464 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
3465 ice_aqc_opc_alloc_res);
3469 /* Update the newly created VSI list to include the specified VSIs */
3470 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
3471 *vsi_list_id, false,
3472 ice_aqc_opc_add_sw_rules, lkup_type);
3476 * ice_create_pkt_fwd_rule
3477 * @hw: pointer to the hardware structure
3478 * @recp_list: corresponding filter management list
3479 * @f_entry: entry containing packet forwarding information
3481 * Create switch rule with given filter information and add an entry
3482 * to the corresponding filter management list to track this switch rule
3485 static enum ice_status
3486 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3487 struct ice_fltr_list_entry *f_entry)
3489 struct ice_fltr_mgmt_list_entry *fm_entry;
3490 struct ice_aqc_sw_rules_elem *s_rule;
3491 enum ice_status status;
3493 s_rule = (struct ice_aqc_sw_rules_elem *)
3494 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
3496 return ICE_ERR_NO_MEMORY;
3497 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3498 ice_malloc(hw, sizeof(*fm_entry));
3500 status = ICE_ERR_NO_MEMORY;
3501 goto ice_create_pkt_fwd_rule_exit;
3504 fm_entry->fltr_info = f_entry->fltr_info;
3506 /* Initialize all the fields for the management entry */
3507 fm_entry->vsi_count = 1;
3508 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
3509 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
3510 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
3512 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
3513 ice_aqc_opc_add_sw_rules);
3515 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3516 ice_aqc_opc_add_sw_rules, NULL);
 /* on AQ failure the management entry is discarded, not tracked */
3518 ice_free(hw, fm_entry);
3519 goto ice_create_pkt_fwd_rule_exit;
 /* propagate the firmware-assigned rule ID to caller and tracker */
3522 f_entry->fltr_info.fltr_rule_id =
3523 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3524 fm_entry->fltr_info.fltr_rule_id =
3525 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3527 /* The book keeping entries will get removed when base driver
3528 * calls remove filter AQ command
3530 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
3532 ice_create_pkt_fwd_rule_exit:
3533 ice_free(hw, s_rule);
3538 * ice_update_pkt_fwd_rule
3539 * @hw: pointer to the hardware structure
3540 * @f_info: filter information for switch rule
3542 * Call AQ command to update a previously created switch rule with a
3545 static enum ice_status
3546 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
3548 struct ice_aqc_sw_rules_elem *s_rule;
3549 enum ice_status status;
3551 s_rule = (struct ice_aqc_sw_rules_elem *)
3552 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE)
/* NOTE(review): dump elides the `if (!s_rule)` line before this return */
3554 return ICE_ERR_NO_MEMORY;
3556 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
/* Updates address an existing rule, so the rule index must be supplied */
3558 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
3560 /* Update switch rule with new rule set to forward VSI list */
3561 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3562 ice_aqc_opc_update_sw_rules, NULL);
3564 ice_free(hw, s_rule);
3569 * ice_update_sw_rule_bridge_mode
3570 * @hw: pointer to the HW struct
3572 * Updates unicast switch filter rules based on VEB/VEPA mode
3574 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
3576 struct ice_switch_info *sw = hw->switch_info;
3577 struct ice_fltr_mgmt_list_entry *fm_entry;
3578 enum ice_status status = ICE_SUCCESS;
3579 struct LIST_HEAD_TYPE *rule_head;
3580 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
/* Only the MAC lookup recipe's rule list is walked here */
3582 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3583 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3585 ice_acquire_lock(rule_lock);
3586 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
3588 struct ice_fltr_info *fi = &fm_entry->fltr_info;
3589 u8 *addr = fi->l_data.mac.mac_addr;
3591 /* Update unicast Tx rules to reflect the selected
3594 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
3595 (fi->fltr_act == ICE_FWD_TO_VSI ||
3596 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
3597 fi->fltr_act == ICE_FWD_TO_Q ||
3598 fi->fltr_act == ICE_FWD_TO_QGRP)) {
3599 status = ice_update_pkt_fwd_rule(hw, fi);
/* NOTE(review): lines 3600-3604 are elided in this dump; presumably a break
 * on failure before the lock is released -- confirm against the full source.
 */
3605 ice_release_lock(rule_lock);
3611 * ice_add_update_vsi_list
3612 * @hw: pointer to the hardware structure
3613 * @m_entry: pointer to current filter management list entry
3614 * @cur_fltr: filter information from the book keeping entry
3615 * @new_fltr: filter information with the new VSI to be added
3617 * Call AQ command to add or update previously created VSI list with new VSI.
3619 * Helper function to do book keeping associated with adding filter information
3620 * The algorithm to do the book keeping is described below :
3621 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
3622 * if only one VSI has been added till now
3623 * Allocate a new VSI list and add two VSIs
3624 * to this list using switch rule command
3625 * Update the previously created switch rule with the
3626 * newly created VSI list ID
3627 * if a VSI list was previously created
3628 * Add the new VSI to the previously created VSI list set
3629 * using the update switch rule command
3631 static enum ice_status
3632 ice_add_update_vsi_list(struct ice_hw *hw,
3633 struct ice_fltr_mgmt_list_entry *m_entry,
3634 struct ice_fltr_info *cur_fltr,
3635 struct ice_fltr_info *new_fltr)
3637 enum ice_status status = ICE_SUCCESS;
3638 u16 vsi_list_id = 0;
/* Queue/queue-group actions cannot be combined into a VSI list */
3640 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
3641 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
3642 return ICE_ERR_NOT_IMPL;
3644 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
3645 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
3646 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
3647 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
3648 return ICE_ERR_NOT_IMPL;
3650 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
3651 /* Only one entry existed in the mapping and it was not already
3652 * a part of a VSI list. So, create a VSI list with the old and
3655 struct ice_fltr_info tmp_fltr;
3656 u16 vsi_handle_arr[2];
3658 /* A rule already exists with the new VSI being added */
3659 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
3660 return ICE_ERR_ALREADY_EXISTS;
3662 vsi_handle_arr[0] = cur_fltr->vsi_handle;
3663 vsi_handle_arr[1] = new_fltr->vsi_handle;
3664 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3666 new_fltr->lkup_type);
/* NOTE(review): error-return lines (3667-3669) are elided in this dump */
3670 tmp_fltr = *new_fltr;
3671 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
3672 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3673 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3674 /* Update the previous switch rule of "MAC forward to VSI" to
3675 * "MAC fwd to VSI list"
3677 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Keep the bookkeeping entry in sync with the HW rule just updated */
3681 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
3682 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3683 m_entry->vsi_list_info =
3684 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3687 /* If this entry was large action then the large action needs
3688 * to be updated to point to FWD to VSI list
3690 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
3692 ice_add_marker_act(hw, m_entry,
3693 m_entry->sw_marker_id,
3694 m_entry->lg_act_idx);
/* else branch: a VSI list already exists for this filter */
3696 u16 vsi_handle = new_fltr->vsi_handle;
3697 enum ice_adminq_opc opcode;
3699 if (!m_entry->vsi_list_info)
/* NOTE(review): the return for the NULL vsi_list_info case (lines 3700-3701)
 * is elided in this dump.
 */
3702 /* A rule already exists with the new VSI being added */
3703 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
3706 /* Update the previously created VSI list set with
3707 * the new VSI ID passed in
3709 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
3710 opcode = ice_aqc_opc_update_sw_rules;
3712 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
3713 vsi_list_id, false, opcode,
3714 new_fltr->lkup_type);
3715 /* update VSI list mapping info with new VSI ID */
3717 ice_set_bit(vsi_handle,
3718 m_entry->vsi_list_info->vsi_map);
/* On success the subscriber count for this filter grows by one */
3721 m_entry->vsi_count++;
3726 * ice_find_rule_entry - Search a rule entry
3727 * @list_head: head of rule list
3728 * @f_info: rule information
3730 * Helper function to search for a given rule entry
3731 * Returns pointer to entry storing the rule if found
3733 static struct ice_fltr_mgmt_list_entry *
3734 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
3735 struct ice_fltr_info *f_info)
3737 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
/* Match on the lookup data (MAC/VLAN/etc.) plus the Rx/Tx flag; callers must
 * hold the corresponding filt_rule_lock.
 */
3739 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3741 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3742 sizeof(f_info->l_data)) &&
3743 f_info->flag == list_itr->fltr_info.flag) {
/* NOTE(review): lines 3744-3749 are elided in this dump; presumably `ret =
 * list_itr;` plus a break and the final `return ret;` -- confirm upstream.
 */
3752 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
3753 * @recp_list: VSI lists needs to be searched
3754 * @vsi_handle: VSI handle to be found in VSI list
3755 * @vsi_list_id: VSI list ID found containing vsi_handle
3757 * Helper function to search a VSI list with single entry containing given VSI
3758 * handle element. This can be extended further to search VSI list with more
3759 * than 1 vsi_count. Returns pointer to VSI list entry if found.
3761 static struct ice_vsi_list_map_info *
3762 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
3765 struct ice_vsi_list_map_info *map_info = NULL;
3766 struct LIST_HEAD_TYPE *list_head;
3768 list_head = &recp_list->filt_rules;
/* Advanced-rule recipes keep a different entry type in the same list head */
3769 if (recp_list->adv_rule) {
3770 struct ice_adv_fltr_mgmt_list_entry *list_itr;
3772 LIST_FOR_EACH_ENTRY(list_itr, list_head,
3773 ice_adv_fltr_mgmt_list_entry,
3775 if (list_itr->vsi_list_info) {
3776 map_info = list_itr->vsi_list_info;
3777 if (ice_is_bit_set(map_info->vsi_map,
3779 *vsi_list_id = map_info->vsi_list_id;
/* NOTE(review): the return-on-match lines (3780-3784) are elided here */
3785 struct ice_fltr_mgmt_list_entry *list_itr;
3787 LIST_FOR_EACH_ENTRY(list_itr, list_head,
3788 ice_fltr_mgmt_list_entry,
/* Non-advanced path only matches single-VSI lists, per the header comment */
3790 if (list_itr->vsi_count == 1 &&
3791 list_itr->vsi_list_info) {
3792 map_info = list_itr->vsi_list_info;
3793 if (ice_is_bit_set(map_info->vsi_map,
3795 *vsi_list_id = map_info->vsi_list_id;
3805 * ice_add_rule_internal - add rule for a given lookup type
3806 * @hw: pointer to the hardware structure
3807 * @recp_list: recipe list for which rule has to be added
3808 * @lport: logic port number on which function add rule
3809 * @f_entry: structure containing MAC forwarding information
3811 * Adds or updates the rule lists for a given recipe
3813 static enum ice_status
3814 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3815 u8 lport, struct ice_fltr_list_entry *f_entry)
3817 struct ice_fltr_info *new_fltr, *cur_fltr;
3818 struct ice_fltr_mgmt_list_entry *m_entry;
3819 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3820 enum ice_status status = ICE_SUCCESS;
3822 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3823 return ICE_ERR_PARAM;
3825 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
3826 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
3827 f_entry->fltr_info.fwd_id.hw_vsi_id =
3828 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3830 rule_lock = &recp_list->filt_rule_lock;
3832 ice_acquire_lock(rule_lock);
3833 new_fltr = &f_entry->fltr_info;
/* Rx rules are sourced by the logical port; Tx rules by the HW VSI number */
3834 if (new_fltr->flag & ICE_FLTR_RX)
3835 new_fltr->src = lport;
3836 else if (new_fltr->flag & ICE_FLTR_TX)
3838 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3840 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
/* NOTE(review): dump elides line 3841 (presumably `if (!m_entry)`): no
 * existing entry means a brand new forwarding rule is created.
 */
3842 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3843 goto exit_add_rule_internal;
/* Entry already exists: fold the new VSI into the existing rule/VSI list */
3846 cur_fltr = &m_entry->fltr_info;
3847 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
3849 exit_add_rule_internal:
3850 ice_release_lock(rule_lock);
3855 * ice_remove_vsi_list_rule
3856 * @hw: pointer to the hardware structure
3857 * @vsi_list_id: VSI list ID generated as part of allocate resource
3858 * @lkup_type: switch rule filter lookup type
3860 * The VSI list should be emptied before this function is called to remove the
3863 static enum ice_status
3864 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
3865 enum ice_sw_lkup_type lkup_type)
3867 /* Free the vsi_list resource that we allocated. It is assumed that the
3868 * list is empty at this point.
/* vsi_list_id is passed by value; its address is taken only because the
 * alloc/free AQ helper uses the same in/out parameter for both directions.
 */
3870 return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
3871 ice_aqc_opc_free_res);
3875 * ice_rem_update_vsi_list
3876 * @hw: pointer to the hardware structure
3877 * @vsi_handle: VSI handle of the VSI to remove
3878 * @fm_list: filter management entry for which the VSI list management needs to
3881 static enum ice_status
3882 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
3883 struct ice_fltr_mgmt_list_entry *fm_list)
3885 enum ice_sw_lkup_type lkup_type;
3886 enum ice_status status = ICE_SUCCESS;
/* Only valid for rules currently forwarding to a non-empty VSI list */
3889 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
3890 fm_list->vsi_count == 0)
3891 return ICE_ERR_PARAM;
3893 /* A rule with the VSI being removed does not exist */
3894 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
3895 return ICE_ERR_DOES_NOT_EXIST;
3897 lkup_type = fm_list->fltr_info.lkup_type;
3898 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
/* remove_vsi_list=true drops this one VSI from the HW VSI list */
3899 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
3900 ice_aqc_opc_update_sw_rules,
/* NOTE(review): the trailing argument and error-return lines (3901-3904) are
 * elided in this dump.
 */
3905 fm_list->vsi_count--;
3906 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
/* One VSI left (non-VLAN): collapse the VSI-list rule back to FWD_TO_VSI */
3908 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
3909 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
3910 struct ice_vsi_list_map_info *vsi_list_info =
3911 fm_list->vsi_list_info;
3914 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
3916 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
3917 return ICE_ERR_OUT_OF_RANGE;
3919 /* Make sure VSI list is empty before removing it below */
3920 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
3922 ice_aqc_opc_update_sw_rules,
3927 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
3928 tmp_fltr_info.fwd_id.hw_vsi_id =
3929 ice_get_hw_vsi_num(hw, rem_vsi_handle);
3930 tmp_fltr_info.vsi_handle = rem_vsi_handle;
3931 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
3933 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
3934 tmp_fltr_info.fwd_id.hw_vsi_id, status);
3938 fm_list->fltr_info = tmp_fltr_info;
/* VLAN lists are torn down only when fully empty; others when one VSI left */
3941 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
3942 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
3943 struct ice_vsi_list_map_info *vsi_list_info =
3944 fm_list->vsi_list_info;
3946 /* Remove the VSI list since it is no longer used */
3947 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
3949 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
3950 vsi_list_id, status);
3954 LIST_DEL(&vsi_list_info->list_entry);
3955 ice_free(hw, vsi_list_info);
3956 fm_list->vsi_list_info = NULL;
3963 * ice_remove_rule_internal - Remove a filter rule of a given type
3965 * @hw: pointer to the hardware structure
3966 * @recp_list: recipe list for which the rule needs to removed
3967 * @f_entry: rule entry containing filter information
3969 static enum ice_status
3970 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3971 struct ice_fltr_list_entry *f_entry)
3973 struct ice_fltr_mgmt_list_entry *list_elem;
3974 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3975 enum ice_status status = ICE_SUCCESS;
3976 bool remove_rule = false;
3979 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3980 return ICE_ERR_PARAM;
3981 f_entry->fltr_info.fwd_id.hw_vsi_id =
3982 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3984 rule_lock = &recp_list->filt_rule_lock;
3985 ice_acquire_lock(rule_lock);
3986 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
3987 &f_entry->fltr_info);
/* NOTE(review): dump elides line 3988 (presumably `if (!list_elem)`) */
3989 status = ICE_ERR_DOES_NOT_EXIST;
/* Rule forwarding to a single VSI: remove the lookup rule outright */
3993 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
3995 } else if (!list_elem->vsi_list_info) {
3996 status = ICE_ERR_DOES_NOT_EXIST;
3998 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
3999 /* a ref_cnt > 1 indicates that the vsi_list is being
4000 * shared by multiple rules. Decrement the ref_cnt and
4001 * remove this rule, but do not modify the list, as it
4002 * is in-use by other rules.
4004 list_elem->vsi_list_info->ref_cnt--;
4007 /* a ref_cnt of 1 indicates the vsi_list is only used
4008 * by one rule. However, the original removal request is only
4009 * for a single VSI. Update the vsi_list first, and only
4010 * remove the rule if there are no further VSIs in this list.
4012 vsi_handle = f_entry->fltr_info.vsi_handle;
4013 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
4016 /* if VSI count goes to zero after updating the VSI list */
4017 if (list_elem->vsi_count == 0)
4022 /* Remove the lookup rule */
4023 struct ice_aqc_sw_rules_elem *s_rule;
/* Removal needs no packet header payload, hence the NO_HDR buffer size */
4025 s_rule = (struct ice_aqc_sw_rules_elem *)
4026 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
4028 status = ICE_ERR_NO_MEMORY;
4032 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
4033 ice_aqc_opc_remove_sw_rules);
4035 status = ice_aq_sw_rules(hw, s_rule,
4036 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
4037 ice_aqc_opc_remove_sw_rules, NULL);
4039 /* Remove a book keeping from the list */
4040 ice_free(hw, s_rule);
/* NOTE(review): status-check lines (4041-4044) elided before the LIST_DEL */
4045 LIST_DEL(&list_elem->list_entry);
4046 ice_free(hw, list_elem);
4049 ice_release_lock(rule_lock);
4054 * ice_aq_get_res_alloc - get allocated resources
4055 * @hw: pointer to the HW struct
4056 * @num_entries: pointer to u16 to store the number of resource entries returned
4057 * @buf: pointer to buffer
4058 * @buf_size: size of buf
4059 * @cd: pointer to command details structure or NULL
4061 * The caller-supplied buffer must be large enough to store the resource
4062 * information for all resource types. Each resource type is an
4063 * ice_aqc_get_res_resp_elem structure.
4066 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
4067 struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
4068 struct ice_sq_cd *cd)
4070 struct ice_aqc_get_res_alloc *resp;
4071 enum ice_status status;
4072 struct ice_aq_desc desc;
/* NOTE(review): dump elides line 4074 (presumably `if (!buf)`) */
4075 return ICE_ERR_BAD_PTR;
4077 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
4078 return ICE_ERR_INVAL_SIZE;
4080 resp = &desc.params.get_res;
4082 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
4083 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* num_entries is optional; only filled on AQ success */
4085 if (!status && num_entries)
4086 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
4092 * ice_aq_get_res_descs - get allocated resource descriptors
4093 * @hw: pointer to the hardware structure
4094 * @num_entries: number of resource entries in buffer
4095 * @buf: structure to hold response data buffer
4096 * @buf_size: size of buffer
4097 * @res_type: resource type
4098 * @res_shared: is resource shared
4099 * @desc_id: input - first desc ID to start; output - next desc ID
4100 * @cd: pointer to command details structure or NULL
4103 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
4104 struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
4105 bool res_shared, u16 *desc_id, struct ice_sq_cd *cd)
4107 struct ice_aqc_get_allocd_res_desc *cmd;
4108 struct ice_aq_desc desc;
4109 enum ice_status status;
4111 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4113 cmd = &desc.params.get_res_desc;
/* NOTE(review): dump elides line 4115 (presumably `if (!buf)`) */
4116 return ICE_ERR_PARAM;
/* Buffer must hold exactly num_entries descriptors */
4118 if (buf_size != (num_entries * sizeof(*buf)))
4119 return ICE_ERR_PARAM;
4121 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
/* Encode resource type and the shared flag into the command word */
4123 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
4124 ICE_AQC_RES_TYPE_M) | (res_shared ?
4125 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
4126 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
4128 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* desc_id doubles as the continuation cursor for paged retrieval */
4130 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
4136 * ice_add_mac_rule - Add a MAC address based filter rule
4137 * @hw: pointer to the hardware structure
4138 * @m_list: list of MAC addresses and forwarding information
4139 * @sw: pointer to switch info struct for which function add rule
4140 * @lport: logic port number on which function add rule
4142 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
4143 * multiple unicast addresses, the function assumes that all the
4144 * addresses are unique in a given add_mac call. It doesn't
4145 * check for duplicates in this case, removing duplicates from a given
4146 * list should be taken care of in the caller of this function.
4148 static enum ice_status
4149 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4150 struct ice_switch_info *sw, u8 lport)
4152 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
4153 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
4154 struct ice_fltr_list_entry *m_list_itr;
4155 struct LIST_HEAD_TYPE *rule_head;
4156 u16 total_elem_left, s_rule_size;
4157 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4158 enum ice_status status = ICE_SUCCESS;
4159 u16 num_unicast = 0;
4163 rule_lock = &recp_list->filt_rule_lock;
4164 rule_head = &recp_list->filt_rules;
/* Pass 1: validate each entry; multicast (and shared unicast) entries are
 * added one-by-one, non-shared unicast entries are counted for a bulk add.
 */
4166 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4168 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
4172 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
4173 vsi_handle = m_list_itr->fltr_info.vsi_handle;
4174 if (!ice_is_vsi_valid(hw, vsi_handle))
4175 return ICE_ERR_PARAM;
4176 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4177 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
4178 /* update the src in case it is VSI num */
4179 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
4180 return ICE_ERR_PARAM;
4181 m_list_itr->fltr_info.src = hw_vsi_id;
4182 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
4183 IS_ZERO_ETHER_ADDR(add))
4184 return ICE_ERR_PARAM;
4185 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4186 /* Don't overwrite the unicast address */
4187 ice_acquire_lock(rule_lock);
4188 if (ice_find_rule_entry(rule_head,
4189 &m_list_itr->fltr_info)) {
4190 ice_release_lock(rule_lock);
4191 return ICE_ERR_ALREADY_EXISTS;
4193 ice_release_lock(rule_lock);
/* NOTE(review): the `num_unicast++` line (4194) is elided in this dump */
4195 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
4196 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
4197 m_list_itr->status =
4198 ice_add_rule_internal(hw, recp_list, lport,
4200 if (m_list_itr->status)
4201 return m_list_itr->status;
4205 ice_acquire_lock(rule_lock);
4206 /* Exit if no suitable entries were found for adding bulk switch rule */
4208 status = ICE_SUCCESS;
4209 goto ice_add_mac_exit;
4212 /* Allocate switch rule buffer for the bulk update for unicast */
4213 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
4214 s_rule = (struct ice_aqc_sw_rules_elem *)
4215 ice_calloc(hw, num_unicast, s_rule_size);
4217 status = ICE_ERR_NO_MEMORY;
4218 goto ice_add_mac_exit;
/* Pass 2: serialize one switch rule per unicast address into s_rule */
4222 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4224 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4225 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4227 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4228 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
4229 ice_aqc_opc_add_sw_rules);
4230 r_iter = (struct ice_aqc_sw_rules_elem *)
4231 ((u8 *)r_iter + s_rule_size);
4235 /* Call AQ bulk switch rule update for all unicast addresses */
4237 /* Call AQ switch rule in AQ_MAX chunk */
4238 for (total_elem_left = num_unicast; total_elem_left > 0;
4239 total_elem_left -= elem_sent) {
4240 struct ice_aqc_sw_rules_elem *entry = r_iter;
/* Cap each AQ call at what fits in ICE_AQ_MAX_BUF_LEN */
4242 elem_sent = MIN_T(u8, total_elem_left,
4243 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
4244 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
4245 elem_sent, ice_aqc_opc_add_sw_rules,
4248 goto ice_add_mac_exit;
4249 r_iter = (struct ice_aqc_sw_rules_elem *)
4250 ((u8 *)r_iter + (elem_sent * s_rule_size));
4253 /* Fill up rule ID based on the value returned from FW */
/* NOTE(review): the r_iter reset back to s_rule (line 4254) is elided here */
4255 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4257 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4258 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4259 struct ice_fltr_mgmt_list_entry *fm_entry;
4261 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4262 f_info->fltr_rule_id =
4263 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
4264 f_info->fltr_act = ICE_FWD_TO_VSI;
4265 /* Create an entry to track this MAC address */
4266 fm_entry = (struct ice_fltr_mgmt_list_entry *)
4267 ice_malloc(hw, sizeof(*fm_entry));
4269 status = ICE_ERR_NO_MEMORY;
4270 goto ice_add_mac_exit;
4272 fm_entry->fltr_info = *f_info;
4273 fm_entry->vsi_count = 1;
4274 /* The book keeping entries will get removed when
4275 * base driver calls remove filter AQ command
4278 LIST_ADD(&fm_entry->list_entry, rule_head);
4279 r_iter = (struct ice_aqc_sw_rules_elem *)
4280 ((u8 *)r_iter + s_rule_size);
4285 ice_release_lock(rule_lock);
4287 ice_free(hw, s_rule);
4292 * ice_add_mac - Add a MAC address based filter rule
4293 * @hw: pointer to the hardware structure
4294 * @m_list: list of MAC addresses and forwarding information
4296 * Function add MAC rule for logical port from HW struct
4298 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
/* NOTE(review): dump elides the parameter-check condition (lines 4299-4300,
 * presumably `if (!m_list || !hw)`) guarding this return.
 */
4301 return ICE_ERR_PARAM;
4303 return ice_add_mac_rule(hw, m_list, hw->switch_info,
4304 hw->port_info->lport);
4308 * ice_add_vlan_internal - Add one VLAN based filter rule
4309 * @hw: pointer to the hardware structure
4310 * @recp_list: recipe list for which rule has to be added
4311 * @f_entry: filter entry containing one VLAN information
4313 static enum ice_status
4314 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4315 struct ice_fltr_list_entry *f_entry)
4317 struct ice_fltr_mgmt_list_entry *v_list_itr;
4318 struct ice_fltr_info *new_fltr, *cur_fltr;
4319 enum ice_sw_lkup_type lkup_type;
4320 u16 vsi_list_id = 0, vsi_handle;
4321 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4322 enum ice_status status = ICE_SUCCESS;
4324 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4325 return ICE_ERR_PARAM;
4327 f_entry->fltr_info.fwd_id.hw_vsi_id =
4328 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4329 new_fltr = &f_entry->fltr_info;
4331 /* VLAN ID should only be 12 bits */
4332 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
4333 return ICE_ERR_PARAM;
4335 if (new_fltr->src_id != ICE_SRC_ID_VSI)
4336 return ICE_ERR_PARAM;
4338 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
4339 lkup_type = new_fltr->lkup_type;
4340 vsi_handle = new_fltr->vsi_handle;
4341 rule_lock = &recp_list->filt_rule_lock;
4342 ice_acquire_lock(rule_lock);
4343 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
/* NOTE(review): dump elides line 4344 (presumably `if (!v_list_itr)`): the
 * branch below handles the no-existing-rule case.
 */
4345 struct ice_vsi_list_map_info *map_info = NULL;
4347 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
4348 /* All VLAN pruning rules use a VSI list. Check if
4349 * there is already a VSI list containing VSI that we
4350 * want to add. If found, use the same vsi_list_id for
4351 * this new VLAN rule or else create a new list.
4353 map_info = ice_find_vsi_list_entry(recp_list,
4357 status = ice_create_vsi_list_rule(hw,
4365 /* Convert the action to forwarding to a VSI list. */
4366 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
4367 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
4370 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
/* Re-find the entry just created so its map info can be attached */
4372 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
4375 status = ICE_ERR_DOES_NOT_EXIST;
4378 /* reuse VSI list for new rule and increment ref_cnt */
4380 v_list_itr->vsi_list_info = map_info;
4381 map_info->ref_cnt++;
4383 v_list_itr->vsi_list_info =
4384 ice_create_vsi_list_map(hw, &vsi_handle,
4388 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
4389 /* Update existing VSI list to add new VSI ID only if it used
4392 cur_fltr = &v_list_itr->fltr_info;
4393 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
4396 /* If VLAN rule exists and VSI list being used by this rule is
4397 * referenced by more than 1 VLAN rule. Then create a new VSI
4398 * list appending previous VSI with new VSI and update existing
4399 * VLAN rule to point to new VSI list ID
4401 struct ice_fltr_info tmp_fltr;
4402 u16 vsi_handle_arr[2];
4405 /* Current implementation only supports reusing VSI list with
4406 * one VSI count. We should never hit below condition
4408 if (v_list_itr->vsi_count > 1 &&
4409 v_list_itr->vsi_list_info->ref_cnt > 1) {
4410 ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
4411 status = ICE_ERR_CFG;
4416 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
4419 /* A rule already exists with the new VSI being added */
4420 if (cur_handle == vsi_handle) {
4421 status = ICE_ERR_ALREADY_EXISTS;
4425 vsi_handle_arr[0] = cur_handle;
4426 vsi_handle_arr[1] = vsi_handle;
4427 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
4428 &vsi_list_id, lkup_type);
4432 tmp_fltr = v_list_itr->fltr_info;
4433 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
4434 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
4435 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4436 /* Update the previous switch rule to a new VSI list which
4437 * includes current VSI that is requested
4439 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
4443 /* before overriding VSI list map info. decrement ref_cnt of
4446 v_list_itr->vsi_list_info->ref_cnt--;
4448 /* now update to newly created list */
4449 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
4450 v_list_itr->vsi_list_info =
4451 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4453 v_list_itr->vsi_count++;
4457 ice_release_lock(rule_lock);
4462 * ice_add_vlan_rule - Add VLAN based filter rule
4463 * @hw: pointer to the hardware structure
4464 * @v_list: list of VLAN entries and forwarding information
4465 * @sw: pointer to switch info struct for which function add rule
4467 static enum ice_status
4468 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4469 struct ice_switch_info *sw)
4471 struct ice_fltr_list_entry *v_list_itr;
4472 struct ice_sw_recipe *recp_list;
4474 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
/* Stop at the first failing entry; per-entry status is kept in the entry */
4475 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
4477 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
4478 return ICE_ERR_PARAM;
4479 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
4480 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
4482 if (v_list_itr->status)
4483 return v_list_itr->status;
4489 * ice_add_vlan - Add a VLAN based filter rule
4490 * @hw: pointer to the hardware structure
4491 * @v_list: list of VLAN and forwarding information
4493 * Function add VLAN rule for logical port from HW struct
4495 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
/* NOTE(review): dump elides the parameter-check condition (lines 4496-4497,
 * presumably `if (!v_list || !hw)`) guarding this return.
 */
4498 return ICE_ERR_PARAM;
4500 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
4504 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
4505 * @hw: pointer to the hardware structure
4506 * @mv_list: list of MAC and VLAN filters
4507 * @sw: pointer to switch info struct for which function add rule
4508 * @lport: logic port number on which function add rule
4510 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
4511 * pruning bits enabled, then it is the responsibility of the caller to make
4512 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
4513 * VLAN won't be received on that VSI otherwise.
4515 static enum ice_status
4516 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
4517 struct ice_switch_info *sw, u8 lport)
4519 struct ice_fltr_list_entry *mv_list_itr;
4520 struct ice_sw_recipe *recp_list;
4522 if (!mv_list || !hw)
4523 return ICE_ERR_PARAM;
4525 recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
/* Each entry is added individually via the common internal helper */
4526 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
4528 enum ice_sw_lkup_type l_type =
4529 mv_list_itr->fltr_info.lkup_type;
4531 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4532 return ICE_ERR_PARAM;
4533 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
4534 mv_list_itr->status =
4535 ice_add_rule_internal(hw, recp_list, lport,
4537 if (mv_list_itr->status)
4538 return mv_list_itr->status;
4544 * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
4545 * @hw: pointer to the hardware structure
4546 * @mv_list: list of MAC VLAN addresses and forwarding information
4548 * Function add MAC VLAN rule for logical port from HW struct
4551 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
/* Thin public wrapper: validates pointers, then delegates with this port's
 * switch info and logical port number.
 */
4553 if (!mv_list || !hw)
4554 return ICE_ERR_PARAM;
4556 return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
4557 hw->port_info->lport);
4561 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
4562 * @hw: pointer to the hardware structure
4563 * @em_list: list of ether type MAC filter, MAC is optional
4564 * @sw: pointer to switch info struct for which function add rule
4565 * @lport: logic port number on which function add rule
4567 * This function requires the caller to populate the entries in
4568 * the filter list with the necessary fields (including flags to
4569 * indicate Tx or Rx rules).
4571 static enum ice_status
4572 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
4573 struct ice_switch_info *sw, u8 lport)
4575 struct ice_fltr_list_entry *em_list_itr;
4577 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
4579 struct ice_sw_recipe *recp_list;
4580 enum ice_sw_lkup_type l_type;
/* Recipe is selected per entry: ETHERTYPE or ETHERTYPE_MAC */
4582 l_type = em_list_itr->fltr_info.lkup_type;
4583 recp_list = &sw->recp_list[l_type];
4585 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
4586 l_type != ICE_SW_LKUP_ETHERTYPE)
4587 return ICE_ERR_PARAM;
4589 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
4592 if (em_list_itr->status)
4593 return em_list_itr->status;
4599 * ice_add_eth_mac - Add a ethertype based filter rule
4600 * @hw: pointer to the hardware structure
4601 * @em_list: list of ethertype and forwarding information
4603 * Function add ethertype rule for logical port from HW struct
4606 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
4608 if (!em_list || !hw)
4609 return ICE_ERR_PARAM;
/* thin wrapper: use this HW's switch info and the port_info logical port */
4611 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
4612 hw->port_info->lport);
4616 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
4617 * @hw: pointer to the hardware structure
4618 * @em_list: list of ethertype or ethertype MAC entries
4619 * @sw: pointer to switch info struct for which function add rule
4621 static enum ice_status
4622 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
4623 struct ice_switch_info *sw)
4625 struct ice_fltr_list_entry *em_list_itr, *tmp;
/* _SAFE iteration: ice_remove_rule_internal may unlink the current entry */
4627 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
4629 struct ice_sw_recipe *recp_list;
4630 enum ice_sw_lkup_type l_type;
4632 l_type = em_list_itr->fltr_info.lkup_type;
/* only ETHERTYPE and ETHERTYPE_MAC lookups are removable via this path */
4634 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
4635 l_type != ICE_SW_LKUP_ETHERTYPE)
4636 return ICE_ERR_PARAM;
4638 recp_list = &sw->recp_list[l_type];
4639 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4641 if (em_list_itr->status)
4642 return em_list_itr->status;
4648 * ice_remove_eth_mac - remove a ethertype based filter rule
4649 * @hw: pointer to the hardware structure
4650 * @em_list: list of ethertype and forwarding information
4654 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
4656 if (!em_list || !hw)
4657 return ICE_ERR_PARAM;
/* thin wrapper over the rule helper using this HW's switch info */
4659 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
4663 * ice_rem_sw_rule_info
4664 * @hw: pointer to the hardware structure
4665 * @rule_head: pointer to the switch list structure that we want to delete
4668 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
4670 if (!LIST_EMPTY(rule_head)) {
4671 struct ice_fltr_mgmt_list_entry *entry;
4672 struct ice_fltr_mgmt_list_entry *tmp;
/* unlink and free every management entry; _SAFE allows freeing in-loop */
4674 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
4675 ice_fltr_mgmt_list_entry, list_entry) {
4676 LIST_DEL(&entry->list_entry);
4677 ice_free(hw, entry);
4683 * ice_rem_adv_rule_info
4684 * @hw: pointer to the hardware structure
4685 * @rule_head: pointer to the switch list structure that we want to delete
4688 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
4690 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
4691 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
4693 if (LIST_EMPTY(rule_head))
4696 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
4697 ice_adv_fltr_mgmt_list_entry, list_entry) {
4698 LIST_DEL(&lst_itr->list_entry);
/* advanced entries own a separately allocated lkups array — free it first */
4699 ice_free(hw, lst_itr->lkups);
4700 ice_free(hw, lst_itr);
4705 * ice_rem_all_sw_rules_info
4706 * @hw: pointer to the hardware structure
4708 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
4710 struct ice_switch_info *sw = hw->switch_info;
4713 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4714 struct LIST_HEAD_TYPE *rule_head;
4716 rule_head = &sw->recp_list[i].filt_rules;
/* basic recipes hold ice_fltr_mgmt entries; advanced ones need the
 * adv variant so the per-entry lkups array is also freed
 */
4717 if (!sw->recp_list[i].adv_rule)
4718 ice_rem_sw_rule_info(hw, rule_head);
4720 ice_rem_adv_rule_info(hw, rule_head);
/* clear the adv flag once the recipe's rule list has been emptied */
4721 if (sw->recp_list[i].adv_rule &&
4722 LIST_EMPTY(&sw->recp_list[i].filt_rules))
4723 sw->recp_list[i].adv_rule = false;
4728 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
4729 * @pi: pointer to the port_info structure
4730 * @vsi_handle: VSI handle to set as default
4731 * @set: true to add the above mentioned switch rule, false to remove it
4732 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
4734 * add filter rule to set/unset given VSI as default VSI for the switch
4735 * (represented by swid)
4738 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
4741 struct ice_aqc_sw_rules_elem *s_rule;
4742 struct ice_fltr_info f_info;
4743 struct ice_hw *hw = pi->hw;
4744 enum ice_adminq_opc opcode;
4745 enum ice_status status;
4749 if (!ice_is_vsi_valid(hw, vsi_handle))
4750 return ICE_ERR_PARAM;
4751 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
/* add needs the full dummy-ether-header rule; remove needs no header */
4753 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
4754 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
4756 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
4758 return ICE_ERR_NO_MEMORY;
4760 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
4762 f_info.lkup_type = ICE_SW_LKUP_DFLT;
4763 f_info.flag = direction;
4764 f_info.fltr_act = ICE_FWD_TO_VSI;
4765 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
/* Rx default rules are sourced by the port; Tx ones by the VSI itself */
4767 if (f_info.flag & ICE_FLTR_RX) {
4768 f_info.src = pi->lport;
4769 f_info.src_id = ICE_SRC_ID_LPORT;
4771 f_info.fltr_rule_id =
4772 pi->dflt_rx_vsi_rule_id;
4773 } else if (f_info.flag & ICE_FLTR_TX) {
4774 f_info.src_id = ICE_SRC_ID_VSI;
4775 f_info.src = hw_vsi_id;
4777 f_info.fltr_rule_id =
4778 pi->dflt_tx_vsi_rule_id;
4782 opcode = ice_aqc_opc_add_sw_rules;
4784 opcode = ice_aqc_opc_remove_sw_rules;
4786 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
4788 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
4789 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
/* on success, cache the FW-assigned rule index in the port_info so the
 * rule can be referenced/removed later
 */
4792 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4794 if (f_info.flag & ICE_FLTR_TX) {
4795 pi->dflt_tx_vsi_num = hw_vsi_id;
4796 pi->dflt_tx_vsi_rule_id = index;
4797 } else if (f_info.flag & ICE_FLTR_RX) {
4798 pi->dflt_rx_vsi_num = hw_vsi_id;
4799 pi->dflt_rx_vsi_rule_id = index;
/* clear path: invalidate the cached default-VSI bookkeeping */
4802 if (f_info.flag & ICE_FLTR_TX) {
4803 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
4804 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
4805 } else if (f_info.flag & ICE_FLTR_RX) {
4806 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
4807 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
4812 ice_free(hw, s_rule);
4817 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
4818 * @list_head: head of rule list
4819 * @f_info: rule information
4821 * Helper function to search for a unicast rule entry - this is to be used
4822 * to remove unicast MAC filter that is not shared with other VSIs on the
4825 * Returns pointer to entry storing the rule if found
4827 static struct ice_fltr_mgmt_list_entry *
4828 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
4829 struct ice_fltr_info *f_info)
4831 struct ice_fltr_mgmt_list_entry *list_itr;
4833 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
/* match on lookup data (MAC), destination HW VSI, and direction flag */
4835 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4836 sizeof(f_info->l_data)) &&
4837 f_info->fwd_id.hw_vsi_id ==
4838 list_itr->fltr_info.fwd_id.hw_vsi_id &&
4839 f_info->flag == list_itr->fltr_info.flag)
4846 * ice_remove_mac_rule - remove a MAC based filter rule
4847 * @hw: pointer to the hardware structure
4848 * @m_list: list of MAC addresses and forwarding information
4849 * @recp_list: list from which function remove MAC address
4851 * This function removes either a MAC filter rule or a specific VSI from a
4852 * VSI list for a multicast MAC address.
4854 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
4855 * ice_add_mac. Caller should be aware that this call will only work if all
4856 * the entries passed into m_list were added previously. It will not attempt to
4857 * do a partial remove of entries that were found.
4859 static enum ice_status
4860 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4861 struct ice_sw_recipe *recp_list)
4863 struct ice_fltr_list_entry *list_itr, *tmp;
4864 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4867 return ICE_ERR_PARAM;
4869 rule_lock = &recp_list->filt_rule_lock;
4870 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
4872 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
4873 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
4876 if (l_type != ICE_SW_LKUP_MAC)
4877 return ICE_ERR_PARAM;
4879 vsi_handle = list_itr->fltr_info.vsi_handle;
4880 if (!ice_is_vsi_valid(hw, vsi_handle))
4881 return ICE_ERR_PARAM;
4883 list_itr->fltr_info.fwd_id.hw_vsi_id =
4884 ice_get_hw_vsi_num(hw, vsi_handle);
/* when unicast MACs are not shared across VSIs, confirm this exact
 * (MAC, VSI, flag) rule exists before attempting removal
 */
4885 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4886 /* Don't remove the unicast address that belongs to
4887 * another VSI on the switch, since it is not being
4890 ice_acquire_lock(rule_lock);
4891 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
4892 &list_itr->fltr_info)) {
4893 ice_release_lock(rule_lock);
4894 return ICE_ERR_DOES_NOT_EXIST;
4896 ice_release_lock(rule_lock);
4898 list_itr->status = ice_remove_rule_internal(hw, recp_list,
4900 if (list_itr->status)
4901 return list_itr->status;
4907 * ice_remove_mac - remove a MAC address based filter rule
4908 * @hw: pointer to the hardware structure
4909 * @m_list: list of MAC addresses and forwarding information
4912 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4914 struct ice_sw_recipe *recp_list;
/* public wrapper: resolve the MAC recipe list and delegate */
4916 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4917 return ice_remove_mac_rule(hw, m_list, recp_list);
4921 * ice_remove_vlan_rule - Remove VLAN based filter rule
4922 * @hw: pointer to the hardware structure
4923 * @v_list: list of VLAN entries and forwarding information
4924 * @recp_list: list from which function remove VLAN
4926 static enum ice_status
4927 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4928 struct ice_sw_recipe *recp_list)
4930 struct ice_fltr_list_entry *v_list_itr, *tmp;
4932 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4934 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
/* every entry must be a VLAN lookup */
4936 if (l_type != ICE_SW_LKUP_VLAN)
4937 return ICE_ERR_PARAM;
4938 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4940 if (v_list_itr->status)
4941 return v_list_itr->status;
4947 * ice_remove_vlan - remove a VLAN address based filter rule
4948 * @hw: pointer to the hardware structure
4949 * @v_list: list of VLAN and forwarding information
4953 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4955 struct ice_sw_recipe *recp_list;
4958 return ICE_ERR_PARAM;
/* public wrapper: resolve the VLAN recipe list and delegate */
4960 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
4961 return ice_remove_vlan_rule(hw, v_list, recp_list);
4965 * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
4966 * @hw: pointer to the hardware structure
4967 * @v_list: list of MAC VLAN entries and forwarding information
4968 * @recp_list: list from which function remove MAC VLAN
4970 static enum ice_status
4971 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4972 struct ice_sw_recipe *recp_list)
4974 struct ice_fltr_list_entry *v_list_itr, *tmp;
/* NOTE(review): the recp_list parameter is immediately overwritten here,
 * so the caller-supplied value is never used — looks redundant; confirm
 * against upstream before changing
 */
4976 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4977 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4979 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4981 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4982 return ICE_ERR_PARAM;
4983 v_list_itr->status =
4984 ice_remove_rule_internal(hw, recp_list,
4986 if (v_list_itr->status)
4987 return v_list_itr->status;
4993 * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
4994 * @hw: pointer to the hardware structure
4995 * @mv_list: list of MAC VLAN and forwarding information
4998 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
5000 struct ice_sw_recipe *recp_list;
5002 if (!mv_list || !hw)
5003 return ICE_ERR_PARAM;
/* public wrapper: resolve the MAC+VLAN recipe list and delegate */
5005 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5006 return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
5010 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
5011 * @fm_entry: filter entry to inspect
5012 * @vsi_handle: VSI handle to compare with filter info
5015 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
/* true when the rule forwards directly to this VSI, or forwards to a
 * VSI list whose bitmap includes this VSI
 */
5017 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
5018 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
5019 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
5020 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
5025 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
5026 * @hw: pointer to the hardware structure
5027 * @vsi_handle: VSI handle to remove filters from
5028 * @vsi_list_head: pointer to the list to add entry to
5029 * @fi: pointer to fltr_info of filter entry to copy & add
5031 * Helper function, used when creating a list of filters to remove from
5032 * a specific VSI. The entry added to vsi_list_head is a COPY of the
5033 * original filter entry, with the exception of fltr_info.fltr_act and
5034 * fltr_info.fwd_id fields. These are set such that later logic can
5035 * extract which VSI to remove the fltr from, and pass on that information.
5037 static enum ice_status
5038 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5039 struct LIST_HEAD_TYPE *vsi_list_head,
5040 struct ice_fltr_info *fi)
5042 struct ice_fltr_list_entry *tmp;
5044 /* this memory is freed up in the caller function
5045 * once filters for this VSI are removed
5047 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
5049 return ICE_ERR_NO_MEMORY;
/* struct copy of the original filter info */
5051 tmp->fltr_info = *fi;
5053 /* Overwrite these fields to indicate which VSI to remove filter from,
5054 * so find and remove logic can extract the information from the
5055 * list entries. Note that original entries will still have proper
5058 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
5059 tmp->fltr_info.vsi_handle = vsi_handle;
5060 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5062 LIST_ADD(&tmp->list_entry, vsi_list_head);
5068 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
5069 * @hw: pointer to the hardware structure
5070 * @vsi_handle: VSI handle to remove filters from
5071 * @lkup_list_head: pointer to the list that has certain lookup type filters
5072 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
5074 * Locates all filters in lkup_list_head that are used by the given VSI,
5075 * and adds COPIES of those entries to vsi_list_head (intended to be used
5076 * to remove the listed filters).
5077 * Note that this means all entries in vsi_list_head must be explicitly
5078 * deallocated by the caller when done with list.
5080 static enum ice_status
5081 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5082 struct LIST_HEAD_TYPE *lkup_list_head,
5083 struct LIST_HEAD_TYPE *vsi_list_head)
5085 struct ice_fltr_mgmt_list_entry *fm_entry;
5086 enum ice_status status = ICE_SUCCESS;
5088 /* check to make sure VSI ID is valid and within boundary */
5089 if (!ice_is_vsi_valid(hw, vsi_handle))
5090 return ICE_ERR_PARAM;
5092 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
5093 ice_fltr_mgmt_list_entry, list_entry) {
5094 struct ice_fltr_info *fi;
5096 fi = &fm_entry->fltr_info;
/* skip entries not used by this VSI */
5097 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
5100 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5109 * ice_determine_promisc_mask
5110 * @fi: filter info to parse
5112 * Helper function to determine which ICE_PROMISC_ mask corresponds
5113 * to given filter into.
5115 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
5117 u16 vid = fi->l_data.mac_vlan.vlan_id;
5118 u8 *macaddr = fi->l_data.mac.mac_addr;
5119 bool is_tx_fltr = false;
5120 u8 promisc_mask = 0;
/* direction of the filter selects the Tx vs Rx promisc bit below */
5122 if (fi->flag == ICE_FLTR_TX)
/* classify the DA: broadcast, multicast, or unicast */
5125 if (IS_BROADCAST_ETHER_ADDR(macaddr))
5126 promisc_mask |= is_tx_fltr ?
5127 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
5128 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
5129 promisc_mask |= is_tx_fltr ?
5130 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
5131 else if (IS_UNICAST_ETHER_ADDR(macaddr))
5132 promisc_mask |= is_tx_fltr ?
5133 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
/* a VLAN-qualified filter additionally sets the VLAN promisc bit */
5135 promisc_mask |= is_tx_fltr ?
5136 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
5138 return promisc_mask;
5142 * _ice_get_vsi_promisc - get promiscuous mode of given VSI
5143 * @hw: pointer to the hardware structure
5144 * @vsi_handle: VSI handle to retrieve info from
5145 * @promisc_mask: pointer to mask to be filled in
5146 * @vid: VLAN ID of promisc VLAN VSI
5147 * @sw: pointer to switch info struct for which function add rule
5149 static enum ice_status
5150 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5151 u16 *vid, struct ice_switch_info *sw)
5153 struct ice_fltr_mgmt_list_entry *itr;
5154 struct LIST_HEAD_TYPE *rule_head;
5155 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5157 if (!ice_is_vsi_valid(hw, vsi_handle))
5158 return ICE_ERR_PARAM;
/* scan the non-VLAN promisc recipe's rules under its lock */
5162 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
5163 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
5165 ice_acquire_lock(rule_lock);
5166 LIST_FOR_EACH_ENTRY(itr, rule_head,
5167 ice_fltr_mgmt_list_entry, list_entry) {
5168 /* Continue if this filter doesn't apply to this VSI or the
5169 * VSI ID is not in the VSI map for this filter
5171 if (!ice_vsi_uses_fltr(itr, vsi_handle))
/* accumulate the promisc bits contributed by each matching rule */
5174 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5176 ice_release_lock(rule_lock);
5182 * ice_get_vsi_promisc - get promiscuous mode of given VSI
5183 * @hw: pointer to the hardware structure
5184 * @vsi_handle: VSI handle to retrieve info from
5185 * @promisc_mask: pointer to mask to be filled in
5186 * @vid: VLAN ID of promisc VLAN VSI
5189 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
/* thin wrapper over the helper using this HW's switch info */
5192 return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
5193 vid, hw->switch_info);
5197 * _ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5198 * @hw: pointer to the hardware structure
5199 * @vsi_handle: VSI handle to retrieve info from
5200 * @promisc_mask: pointer to mask to be filled in
5201 * @vid: VLAN ID of promisc VLAN VSI
5202 * @sw: pointer to switch info struct for which function add rule
5204 static enum ice_status
5205 _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5206 u16 *vid, struct ice_switch_info *sw)
5208 struct ice_fltr_mgmt_list_entry *itr;
5209 struct LIST_HEAD_TYPE *rule_head;
5210 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5212 if (!ice_is_vsi_valid(hw, vsi_handle))
5213 return ICE_ERR_PARAM;
/* same scan as _ice_get_vsi_promisc, but over the PROMISC_VLAN recipe */
5217 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
5218 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
5220 ice_acquire_lock(rule_lock);
5221 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
5223 /* Continue if this filter doesn't apply to this VSI or the
5224 * VSI ID is not in the VSI map for this filter
5226 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5229 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5231 ice_release_lock(rule_lock);
5237 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5238 * @hw: pointer to the hardware structure
5239 * @vsi_handle: VSI handle to retrieve info from
5240 * @promisc_mask: pointer to mask to be filled in
5241 * @vid: VLAN ID of promisc VLAN VSI
5244 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
/* thin wrapper over the helper using this HW's switch info */
5247 return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
5248 vid, hw->switch_info);
5252 * ice_remove_promisc - Remove promisc based filter rules
5253 * @hw: pointer to the hardware structure
5254 * @recp_id: recipe ID for which the rule needs to removed
5255 * @v_list: list of promisc entries
5257 static enum ice_status
5258 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
5259 struct LIST_HEAD_TYPE *v_list)
5261 struct ice_fltr_list_entry *v_list_itr, *tmp;
5262 struct ice_sw_recipe *recp_list;
5264 recp_list = &hw->switch_info->recp_list[recp_id];
5265 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
/* remove each entry; stop at the first failure */
5267 v_list_itr->status =
5268 ice_remove_rule_internal(hw, recp_list, v_list_itr);
5269 if (v_list_itr->status)
5270 return v_list_itr->status;
5276 * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
5277 * @hw: pointer to the hardware structure
5278 * @vsi_handle: VSI handle to clear mode
5279 * @promisc_mask: mask of promiscuous config bits to clear
5280 * @vid: VLAN ID to clear VLAN promiscuous
5281 * @sw: pointer to switch info struct for which function add rule
5283 static enum ice_status
5284 _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5285 u16 vid, struct ice_switch_info *sw)
5287 struct ice_fltr_list_entry *fm_entry, *tmp;
5288 struct LIST_HEAD_TYPE remove_list_head;
5289 struct ice_fltr_mgmt_list_entry *itr;
5290 struct LIST_HEAD_TYPE *rule_head;
5291 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5292 enum ice_status status = ICE_SUCCESS;
5295 if (!ice_is_vsi_valid(hw, vsi_handle))
5296 return ICE_ERR_PARAM;
/* VLAN promisc bits live under the PROMISC_VLAN recipe; all others
 * under the plain PROMISC recipe
 */
5298 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
5299 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
5301 recipe_id = ICE_SW_LKUP_PROMISC;
5303 rule_head = &sw->recp_list[recipe_id].filt_rules;
5304 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
5306 INIT_LIST_HEAD(&remove_list_head);
/* phase 1 (under lock): collect COPIES of matching rules to remove */
5308 ice_acquire_lock(rule_lock);
5309 LIST_FOR_EACH_ENTRY(itr, rule_head,
5310 ice_fltr_mgmt_list_entry, list_entry) {
5311 struct ice_fltr_info *fltr_info;
5312 u8 fltr_promisc_mask = 0;
5314 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5316 fltr_info = &itr->fltr_info;
/* for VLAN promisc, only clear rules matching the requested VID */
5318 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
5319 vid != fltr_info->l_data.mac_vlan.vlan_id)
5322 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
5324 /* Skip if filter is not completely specified by given mask */
5325 if (fltr_promisc_mask & ~promisc_mask)
5328 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5332 ice_release_lock(rule_lock);
5333 goto free_fltr_list;
5336 ice_release_lock(rule_lock);
/* phase 2 (lock released): remove the collected rules */
5338 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
/* free the temporary copies regardless of removal status */
5341 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
5342 ice_fltr_list_entry, list_entry) {
5343 LIST_DEL(&fm_entry->list_entry);
5344 ice_free(hw, fm_entry);
5351 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
5352 * @hw: pointer to the hardware structure
5353 * @vsi_handle: VSI handle to clear mode
5354 * @promisc_mask: mask of promiscuous config bits to clear
5355 * @vid: VLAN ID to clear VLAN promiscuous
5358 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
5359 u8 promisc_mask, u16 vid)
/* thin wrapper over the helper using this HW's switch info */
5361 return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
5362 vid, hw->switch_info);
5366 * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5367 * @hw: pointer to the hardware structure
5368 * @vsi_handle: VSI handle to configure
5369 * @promisc_mask: mask of promiscuous config bits
5370 * @vid: VLAN ID to set VLAN promiscuous
5371 * @lport: logical port number to configure promisc mode
5372 * @sw: pointer to switch info struct for which function add rule
5374 static enum ice_status
5375 _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5376 u16 vid, u8 lport, struct ice_switch_info *sw)
5378 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
5379 struct ice_fltr_list_entry f_list_entry;
5380 struct ice_fltr_info new_fltr;
5381 enum ice_status status = ICE_SUCCESS;
5387 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
5389 if (!ice_is_vsi_valid(hw, vsi_handle))
5390 return ICE_ERR_PARAM;
5391 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5393 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
/* a VLAN promisc request uses the PROMISC_VLAN recipe with the VID */
5395 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
5396 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
5397 new_fltr.l_data.mac_vlan.vlan_id = vid;
5398 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
5400 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
5401 recipe_id = ICE_SW_LKUP_PROMISC;
5404 /* Separate filters must be set for each direction/packet type
5405 * combination, so we will loop over the mask value, store the
5406 * individual type, and clear it out in the input mask as it
5409 while (promisc_mask) {
5410 struct ice_sw_recipe *recp_list;
/* consume exactly one UCAST/MCAST/BCAST x RX/TX bit per iteration */
5416 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
5417 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
5418 pkt_type = UCAST_FLTR;
5419 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
5420 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
5421 pkt_type = UCAST_FLTR;
5423 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
5424 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
5425 pkt_type = MCAST_FLTR;
5426 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
5427 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
5428 pkt_type = MCAST_FLTR;
5430 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
5431 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
5432 pkt_type = BCAST_FLTR;
5433 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
5434 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
5435 pkt_type = BCAST_FLTR;
5439 /* Check for VLAN promiscuous flag */
5440 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
5441 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
5442 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
5443 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
5447 /* Set filter DA based on packet type */
5448 mac_addr = new_fltr.l_data.mac.mac_addr;
5449 if (pkt_type == BCAST_FLTR) {
5450 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
5451 } else if (pkt_type == MCAST_FLTR ||
5452 pkt_type == UCAST_FLTR) {
5453 /* Use the dummy ether header DA */
5454 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
5455 ICE_NONDMA_TO_NONDMA);
5456 if (pkt_type == MCAST_FLTR)
5457 mac_addr[0] |= 0x1; /* Set multicast bit */
5460 /* Need to reset this to zero for all iterations */
/* Tx filters are sourced by the VSI; Rx filters by the logical port */
5463 new_fltr.flag |= ICE_FLTR_TX;
5464 new_fltr.src = hw_vsi_id;
5466 new_fltr.flag |= ICE_FLTR_RX;
5467 new_fltr.src = lport;
5470 new_fltr.fltr_act = ICE_FWD_TO_VSI;
5471 new_fltr.vsi_handle = vsi_handle;
5472 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
/* f_list_entry is stack-allocated and reused each iteration */
5473 f_list_entry.fltr_info = new_fltr;
5474 recp_list = &sw->recp_list[recipe_id];
5476 status = ice_add_rule_internal(hw, recp_list, lport,
5478 if (status != ICE_SUCCESS)
5479 goto set_promisc_exit;
5487 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5488 * @hw: pointer to the hardware structure
5489 * @vsi_handle: VSI handle to configure
5490 * @promisc_mask: mask of promiscuous config bits
5491 * @vid: VLAN ID to set VLAN promiscuous
5494 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
/* thin wrapper: default to this HW's port_info lport and switch info */
5497 return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
5498 hw->port_info->lport,
5503 * _ice_set_vlan_vsi_promisc
5504 * @hw: pointer to the hardware structure
5505 * @vsi_handle: VSI handle to configure
5506 * @promisc_mask: mask of promiscuous config bits
5507 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5508 * @lport: logical port number to configure promisc mode
5509 * @sw: pointer to switch info struct for which function add rule
5511 * Configure VSI with all associated VLANs to given promiscuous mode(s)
5513 static enum ice_status
5514 _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5515 bool rm_vlan_promisc, u8 lport,
5516 struct ice_switch_info *sw)
5518 struct ice_fltr_list_entry *list_itr, *tmp;
5519 struct LIST_HEAD_TYPE vsi_list_head;
5520 struct LIST_HEAD_TYPE *vlan_head;
5521 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
5522 enum ice_status status;
/* snapshot (copy) this VSI's VLAN filters under the VLAN rule lock */
5525 INIT_LIST_HEAD(&vsi_list_head);
5526 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
5527 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
5528 ice_acquire_lock(vlan_lock);
5529 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
5531 ice_release_lock(vlan_lock);
5533 goto free_fltr_list;
/* apply (or clear) the promisc mode per associated VLAN ID */
5535 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
5537 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
5538 if (rm_vlan_promisc)
5539 status = _ice_clear_vsi_promisc(hw, vsi_handle,
5543 status = _ice_set_vsi_promisc(hw, vsi_handle,
5544 promisc_mask, vlan_id,
/* free the temporary VLAN filter copies in all cases */
5551 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
5552 ice_fltr_list_entry, list_entry) {
5553 LIST_DEL(&list_itr->list_entry);
5554 ice_free(hw, list_itr);
5560 * ice_set_vlan_vsi_promisc
5561 * @hw: pointer to the hardware structure
5562 * @vsi_handle: VSI handle to configure
5563 * @promisc_mask: mask of promiscuous config bits
5564 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5566 * Configure VSI with all associated VLANs to given promiscuous mode(s)
5569 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5570 bool rm_vlan_promisc)
/* thin wrapper: default to this HW's port_info lport and switch info */
5572 return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
5573 rm_vlan_promisc, hw->port_info->lport,
5578 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
5579 * @hw: pointer to the hardware structure
5580 * @vsi_handle: VSI handle to remove filters from
5581 * @recp_list: recipe list from which function remove fltr
5582 * @lkup: switch rule filter lookup type
5585 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
5586 struct ice_sw_recipe *recp_list,
5587 enum ice_sw_lkup_type lkup)
5589 struct ice_fltr_list_entry *fm_entry;
5590 struct LIST_HEAD_TYPE remove_list_head;
5591 struct LIST_HEAD_TYPE *rule_head;
5592 struct ice_fltr_list_entry *tmp;
5593 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5594 enum ice_status status;
/* collect copies of this VSI's filters for the given lookup type */
5596 INIT_LIST_HEAD(&remove_list_head);
5597 rule_lock = &recp_list[lkup].filt_rule_lock;
5598 rule_head = &recp_list[lkup].filt_rules;
5599 ice_acquire_lock(rule_lock);
5600 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
5602 ice_release_lock(rule_lock);
/* dispatch to the type-specific removal routine */
5607 case ICE_SW_LKUP_MAC:
5608 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
5610 case ICE_SW_LKUP_VLAN:
5611 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
5613 case ICE_SW_LKUP_PROMISC:
5614 case ICE_SW_LKUP_PROMISC_VLAN:
5615 ice_remove_promisc(hw, lkup, &remove_list_head);
5617 case ICE_SW_LKUP_MAC_VLAN:
5618 ice_remove_mac_vlan(hw, &remove_list_head);
5620 case ICE_SW_LKUP_ETHERTYPE:
5621 case ICE_SW_LKUP_ETHERTYPE_MAC:
5622 ice_remove_eth_mac(hw, &remove_list_head);
5624 case ICE_SW_LKUP_DFLT:
5625 ice_debug(hw, ICE_DBG_SW, "Remove filters for this lookup type hasn't been implemented yet\n");
5627 case ICE_SW_LKUP_LAST:
5628 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
/* free the temporary filter copies */
5632 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
5633 ice_fltr_list_entry, list_entry) {
5634 LIST_DEL(&fm_entry->list_entry);
5635 ice_free(hw, fm_entry);
5640 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
5641 * @hw: pointer to the hardware structure
5642 * @vsi_handle: VSI handle to remove filters from
5643 * @sw: pointer to switch info struct
5646 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
5647 struct ice_switch_info *sw)
5649 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
/* sweep every lookup type this VSI may have filters under */
5651 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5652 sw->recp_list, ICE_SW_LKUP_MAC);
5653 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5654 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
5655 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5656 sw->recp_list, ICE_SW_LKUP_PROMISC);
5657 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5658 sw->recp_list, ICE_SW_LKUP_VLAN);
5659 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5660 sw->recp_list, ICE_SW_LKUP_DFLT);
5661 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5662 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
5663 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5664 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
5665 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5666 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
5670 * ice_remove_vsi_fltr - Remove all filters for a VSI
5671 * @hw: pointer to the hardware structure
5672 * @vsi_handle: VSI handle to remove filters from
5674 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
/* thin wrapper over the rule helper using this HW's switch info */
5676 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
5680 * ice_alloc_res_cntr - allocating resource counter
5681 * @hw: pointer to the hardware structure
5682 * @type: type of resource
5683 * @alloc_shared: if set it is shared else dedicated
5684 * @num_items: number of entries requested for FD resource type
5685 * @counter_id: counter index returned by AQ call
5688 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
5691 struct ice_aqc_alloc_free_res_elem *buf;
5692 enum ice_status status;
5695 /* Allocate resource */
5696 buf_len = ice_struct_size(buf, elem, 1);
5697 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
5699 return ICE_ERR_NO_MEMORY;
5701 buf->num_elems = CPU_TO_LE16(num_items);
/* res_type encodes the resource type plus the shared/dedicated flag */
5702 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
5703 ICE_AQC_RES_TYPE_M) | alloc_shared);
5705 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
5706 ice_aqc_opc_alloc_res, NULL);
/* on success FW returns the allocated counter index in elem[0] */
5710 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
5718 * ice_free_res_cntr - free resource counter
5719 * @hw: pointer to the hardware structure
5720 * @type: type of resource
5721 * @alloc_shared: if set it is shared else dedicated
5722 * @num_items: number of entries to be freed for FD resource type
5723 * @counter_id: counter ID resource which needs to be freed
5726 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
5729 struct ice_aqc_alloc_free_res_elem *buf;
5730 enum ice_status status;
5734 buf_len = ice_struct_size(buf, elem, 1);
5735 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
5737 return ICE_ERR_NO_MEMORY;
5739 buf->num_elems = CPU_TO_LE16(num_items);
5740 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
5741 ICE_AQC_RES_TYPE_M) | alloc_shared);
5742 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
5744 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
5745 ice_aqc_opc_free_res, NULL);
5747 ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
5754 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
5755 * @hw: pointer to the hardware structure
5756 * @counter_id: returns counter index
5758 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
5760 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
5761 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
5766 * ice_free_vlan_res_counter - Free counter resource for VLAN type
5767 * @hw: pointer to the hardware structure
5768 * @counter_id: counter index to be freed
5770 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
5772 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
5773 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
5778 * ice_alloc_res_lg_act - add large action resource
5779 * @hw: pointer to the hardware structure
5780 * @l_id: large action ID to fill it in
5781 * @num_acts: number of actions to hold with a large action entry
5783 static enum ice_status
5784 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
5786 struct ice_aqc_alloc_free_res_elem *sw_buf;
5787 enum ice_status status;
5790 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
5791 return ICE_ERR_PARAM;
5793 /* Allocate resource for large action */
5794 buf_len = ice_struct_size(sw_buf, elem, 1);
5795 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
5797 return ICE_ERR_NO_MEMORY;
5799 sw_buf->num_elems = CPU_TO_LE16(1);
5801 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
5802 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
5803 * If num_acts is greater than 2, then use
5804 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
5805 * The num_acts cannot exceed 4. This was ensured at the
5806 * beginning of the function.
5809 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
5810 else if (num_acts == 2)
5811 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
5813 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
5815 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
5816 ice_aqc_opc_alloc_res, NULL);
5818 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
5820 ice_free(hw, sw_buf);
5825 * ice_add_mac_with_sw_marker - add filter with sw marker
5826 * @hw: pointer to the hardware structure
5827 * @f_info: filter info structure containing the MAC filter information
5828 * @sw_marker: sw marker to tag the Rx descriptor with
5831 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
5834 struct ice_fltr_mgmt_list_entry *m_entry;
5835 struct ice_fltr_list_entry fl_info;
5836 struct ice_sw_recipe *recp_list;
5837 struct LIST_HEAD_TYPE l_head;
5838 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5839 enum ice_status ret;
5843 if (f_info->fltr_act != ICE_FWD_TO_VSI)
5844 return ICE_ERR_PARAM;
5846 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5847 return ICE_ERR_PARAM;
5849 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
5850 return ICE_ERR_PARAM;
5852 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5853 return ICE_ERR_PARAM;
5854 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5856 /* Add filter if it doesn't exist so then the adding of large
5857 * action always results in update
5860 INIT_LIST_HEAD(&l_head);
5861 fl_info.fltr_info = *f_info;
5862 LIST_ADD(&fl_info.list_entry, &l_head);
5864 entry_exists = false;
5865 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5866 hw->port_info->lport);
5867 if (ret == ICE_ERR_ALREADY_EXISTS)
5868 entry_exists = true;
5872 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5873 rule_lock = &recp_list->filt_rule_lock;
5874 ice_acquire_lock(rule_lock);
5875 /* Get the book keeping entry for the filter */
5876 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5880 /* If counter action was enabled for this rule then don't enable
5881 * sw marker large action
5883 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5884 ret = ICE_ERR_PARAM;
5888 /* if same marker was added before */
5889 if (m_entry->sw_marker_id == sw_marker) {
5890 ret = ICE_ERR_ALREADY_EXISTS;
5894 /* Allocate a hardware table entry to hold large act. Three actions
5895 * for marker based large action
5897 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
5901 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5904 /* Update the switch rule to add the marker action */
5905 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
5907 ice_release_lock(rule_lock);
5912 ice_release_lock(rule_lock);
5913 /* only remove entry if it did not exist previously */
5915 ret = ice_remove_mac(hw, &l_head);
5921 * ice_add_mac_with_counter - add filter with counter enabled
5922 * @hw: pointer to the hardware structure
5923 * @f_info: pointer to filter info structure containing the MAC filter
5927 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
5929 struct ice_fltr_mgmt_list_entry *m_entry;
5930 struct ice_fltr_list_entry fl_info;
5931 struct ice_sw_recipe *recp_list;
5932 struct LIST_HEAD_TYPE l_head;
5933 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5934 enum ice_status ret;
5939 if (f_info->fltr_act != ICE_FWD_TO_VSI)
5940 return ICE_ERR_PARAM;
5942 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5943 return ICE_ERR_PARAM;
5945 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5946 return ICE_ERR_PARAM;
5947 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5948 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5950 entry_exist = false;
5952 rule_lock = &recp_list->filt_rule_lock;
5954 /* Add filter if it doesn't exist so then the adding of large
5955 * action always results in update
5957 INIT_LIST_HEAD(&l_head);
5959 fl_info.fltr_info = *f_info;
5960 LIST_ADD(&fl_info.list_entry, &l_head);
5962 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5963 hw->port_info->lport);
5964 if (ret == ICE_ERR_ALREADY_EXISTS)
5969 ice_acquire_lock(rule_lock);
5970 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5972 ret = ICE_ERR_BAD_PTR;
5976 /* Don't enable counter for a filter for which sw marker was enabled */
5977 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
5978 ret = ICE_ERR_PARAM;
5982 /* If a counter was already enabled then don't need to add again */
5983 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5984 ret = ICE_ERR_ALREADY_EXISTS;
5988 /* Allocate a hardware table entry to VLAN counter */
5989 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
5993 /* Allocate a hardware table entry to hold large act. Two actions for
5994 * counter based large action
5996 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
6000 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
6003 /* Update the switch rule to add the counter action */
6004 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
6006 ice_release_lock(rule_lock);
6011 ice_release_lock(rule_lock);
6012 /* only remove entry if it did not exist previously */
6014 ret = ice_remove_mac(hw, &l_head);
6019 /* This is mapping table entry that maps every word within a given protocol
6020 * structure to the real byte offset as per the specification of that
6022 * for example dst address is 3 words in ethertype header and corresponding
6023 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
6024 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
6025 * matching entry describing its field. This needs to be updated if new
6026 * structure is added to that union.
6028 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
6029 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
6030 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
6031 { ICE_ETYPE_OL, { 0 } },
6032 { ICE_VLAN_OFOS, { 0, 2 } },
6033 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
6034 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
6035 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
6036 26, 28, 30, 32, 34, 36, 38 } },
6037 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
6038 26, 28, 30, 32, 34, 36, 38 } },
6039 { ICE_TCP_IL, { 0, 2 } },
6040 { ICE_UDP_OF, { 0, 2 } },
6041 { ICE_UDP_ILOS, { 0, 2 } },
6042 { ICE_SCTP_IL, { 0, 2 } },
6043 { ICE_VXLAN, { 8, 10, 12, 14 } },
6044 { ICE_GENEVE, { 8, 10, 12, 14 } },
6045 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
6046 { ICE_NVGRE, { 0, 2, 4, 6 } },
6047 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
6048 { ICE_PPPOE, { 0, 2, 4, 6 } },
6049 { ICE_PFCP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
6050 { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
6051 { ICE_ESP, { 0, 2, 4, 6 } },
6052 { ICE_AH, { 0, 2, 4, 6, 8, 10 } },
6053 { ICE_NAT_T, { 8, 10, 12, 14 } },
6054 { ICE_GTP_NO_PAY, { 8, 10, 12, 14 } },
6055 { ICE_VLAN_EX, { 0, 2 } },
6058 /* The following table describes preferred grouping of recipes.
6059 * If a recipe that needs to be programmed is a superset or matches one of the
6060 * following combinations, then the recipe needs to be chained as per the
6064 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
6065 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
6066 { ICE_MAC_IL, ICE_MAC_IL_HW },
6067 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
6068 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
6069 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
6070 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
6071 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
6072 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
6073 { ICE_TCP_IL, ICE_TCP_IL_HW },
6074 { ICE_UDP_OF, ICE_UDP_OF_HW },
6075 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
6076 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
6077 { ICE_VXLAN, ICE_UDP_OF_HW },
6078 { ICE_GENEVE, ICE_UDP_OF_HW },
6079 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
6080 { ICE_NVGRE, ICE_GRE_OF_HW },
6081 { ICE_GTP, ICE_UDP_OF_HW },
6082 { ICE_PPPOE, ICE_PPPOE_HW },
6083 { ICE_PFCP, ICE_UDP_ILOS_HW },
6084 { ICE_L2TPV3, ICE_L2TPV3_HW },
6085 { ICE_ESP, ICE_ESP_HW },
6086 { ICE_AH, ICE_AH_HW },
6087 { ICE_NAT_T, ICE_UDP_ILOS_HW },
6088 { ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
6089 { ICE_VLAN_EX, ICE_VLAN_OF_HW },
6093 * ice_find_recp - find a recipe
6094 * @hw: pointer to the hardware structure
6095 * @lkup_exts: extension sequence to match
6097 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
6099 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
6100 enum ice_sw_tunnel_type tun_type)
6102 bool refresh_required = true;
6103 struct ice_sw_recipe *recp;
6106 /* Walk through existing recipes to find a match */
6107 recp = hw->switch_info->recp_list;
6108 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6109 /* If recipe was not created for this ID, in SW bookkeeping,
6110 * check if FW has an entry for this recipe. If the FW has an
6111 * entry update it in our SW bookkeeping and continue with the
6114 if (!recp[i].recp_created)
6115 if (ice_get_recp_frm_fw(hw,
6116 hw->switch_info->recp_list, i,
6120 /* Skip inverse action recipes */
6121 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
6122 ICE_AQ_RECIPE_ACT_INV_ACT)
6125 /* if number of words we are looking for match */
6126 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
6127 struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
6128 struct ice_fv_word *be = lkup_exts->fv_words;
6129 u16 *cr = recp[i].lkup_exts.field_mask;
6130 u16 *de = lkup_exts->field_mask;
6134 /* ar, cr, and qr are related to the recipe words, while
6135 * be, de, and pe are related to the lookup words
6137 for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
6138 for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
6140 if (ar[qr].off == be[pe].off &&
6141 ar[qr].prot_id == be[pe].prot_id &&
6143 /* Found the "pe"th word in the
6148 /* After walking through all the words in the
6149 * "i"th recipe if "p"th word was not found then
6150 * this recipe is not what we are looking for.
6151 * So break out from this loop and try the next
6154 if (qr >= recp[i].lkup_exts.n_val_words) {
6159 /* If for "i"th recipe the found was never set to false
6160 * then it means we found our match
6162 if (tun_type == recp[i].tun_type && found)
6163 return i; /* Return the recipe ID */
6166 return ICE_MAX_NUM_RECIPES;
6170 * ice_prot_type_to_id - get protocol ID from protocol type
6171 * @type: protocol type
6172 * @id: pointer to variable that will receive the ID
6174 * Returns true if found, false otherwise
6176 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
6180 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
6181 if (ice_prot_id_tbl[i].type == type) {
6182 *id = ice_prot_id_tbl[i].protocol_id;
6189 * ice_find_valid_words - count valid words
6190 * @rule: advanced rule with lookup information
6191 * @lkup_exts: byte offset extractions of the words that are valid
6193 * calculate valid words in a lookup rule using mask value
6196 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
6197 struct ice_prot_lkup_ext *lkup_exts)
6199 u8 j, word, prot_id, ret_val;
6201 if (!ice_prot_type_to_id(rule->type, &prot_id))
6204 word = lkup_exts->n_val_words;
6206 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
6207 if (((u16 *)&rule->m_u)[j] &&
6208 rule->type < ARRAY_SIZE(ice_prot_ext)) {
6209 /* No more space to accommodate */
6210 if (word >= ICE_MAX_CHAIN_WORDS)
6212 lkup_exts->fv_words[word].off =
6213 ice_prot_ext[rule->type].offs[j];
6214 lkup_exts->fv_words[word].prot_id =
6215 ice_prot_id_tbl[rule->type].protocol_id;
6216 lkup_exts->field_mask[word] =
6217 BE16_TO_CPU(((_FORCE_ __be16 *)&rule->m_u)[j]);
6221 ret_val = word - lkup_exts->n_val_words;
6222 lkup_exts->n_val_words = word;
6228 * ice_create_first_fit_recp_def - Create a recipe grouping
6229 * @hw: pointer to the hardware structure
6230 * @lkup_exts: an array of protocol header extractions
6231 * @rg_list: pointer to a list that stores new recipe groups
6232 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
6234 * Using first fit algorithm, take all the words that are still not done
6235 * and start grouping them in 4-word groups. Each group makes up one
6238 static enum ice_status
6239 ice_create_first_fit_recp_def(struct ice_hw *hw,
6240 struct ice_prot_lkup_ext *lkup_exts,
6241 struct LIST_HEAD_TYPE *rg_list,
6244 struct ice_pref_recipe_group *grp = NULL;
6249 if (!lkup_exts->n_val_words) {
6250 struct ice_recp_grp_entry *entry;
6252 entry = (struct ice_recp_grp_entry *)
6253 ice_malloc(hw, sizeof(*entry));
6255 return ICE_ERR_NO_MEMORY;
6256 LIST_ADD(&entry->l_entry, rg_list);
6257 grp = &entry->r_group;
6259 grp->n_val_pairs = 0;
6262 /* Walk through every word in the rule to check if it is not done. If so
6263 * then this word needs to be part of a new recipe.
6265 for (j = 0; j < lkup_exts->n_val_words; j++)
6266 if (!ice_is_bit_set(lkup_exts->done, j)) {
6268 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
6269 struct ice_recp_grp_entry *entry;
6271 entry = (struct ice_recp_grp_entry *)
6272 ice_malloc(hw, sizeof(*entry));
6274 return ICE_ERR_NO_MEMORY;
6275 LIST_ADD(&entry->l_entry, rg_list);
6276 grp = &entry->r_group;
6280 grp->pairs[grp->n_val_pairs].prot_id =
6281 lkup_exts->fv_words[j].prot_id;
6282 grp->pairs[grp->n_val_pairs].off =
6283 lkup_exts->fv_words[j].off;
6284 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
6292 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
6293 * @hw: pointer to the hardware structure
6294 * @fv_list: field vector with the extraction sequence information
6295 * @rg_list: recipe groupings with protocol-offset pairs
6297 * Helper function to fill in the field vector indices for protocol-offset
6298 * pairs. These indexes are then ultimately programmed into a recipe.
6300 static enum ice_status
6301 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
6302 struct LIST_HEAD_TYPE *rg_list)
6304 struct ice_sw_fv_list_entry *fv;
6305 struct ice_recp_grp_entry *rg;
6306 struct ice_fv_word *fv_ext;
6308 if (LIST_EMPTY(fv_list))
6311 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
6312 fv_ext = fv->fv_ptr->ew;
6314 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
6317 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
6318 struct ice_fv_word *pr;
6323 pr = &rg->r_group.pairs[i];
6324 mask = rg->r_group.mask[i];
6326 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
6327 if (fv_ext[j].prot_id == pr->prot_id &&
6328 fv_ext[j].off == pr->off) {
6331 /* Store index of field vector */
6333 rg->fv_mask[i] = mask;
6337 /* Protocol/offset could not be found, caller gave an
6341 return ICE_ERR_PARAM;
6349 * ice_find_free_recp_res_idx - find free result indexes for recipe
6350 * @hw: pointer to hardware structure
6351 * @profiles: bitmap of profiles that will be associated with the new recipe
6352 * @free_idx: pointer to variable to receive the free index bitmap
6354 * The algorithm used here is:
6355 * 1. When creating a new recipe, create a set P which contains all
6356 * Profiles that will be associated with our new recipe
6358 * 2. For each Profile p in set P:
6359 * a. Add all recipes associated with Profile p into set R
6360 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
6361 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
6362 * i. Or just assume they all have the same possible indexes:
6364 * i.e., PossibleIndexes = 0x0000F00000000000
6366 * 3. For each Recipe r in set R:
6367 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
6368 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
6370 * FreeIndexes will contain the bits indicating the indexes free for use,
6371 * then the code needs to update the recipe[r].used_result_idx_bits to
6372 * indicate which indexes were selected for use by this recipe.
6375 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
6376 ice_bitmap_t *free_idx)
6378 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6379 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6380 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
6383 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6384 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6385 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
6386 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
6388 ice_bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
6390 /* For each profile we are going to associate the recipe with, add the
6391 * recipes that are associated with that profile. This will give us
6392 * the set of recipes that our recipe may collide with. Also, determine
6393 * what possible result indexes are usable given this set of profiles.
6395 ice_for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
6396 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
6397 ICE_MAX_NUM_RECIPES);
6398 ice_and_bitmap(possible_idx, possible_idx,
6399 hw->switch_info->prof_res_bm[bit],
6403 /* For each recipe that our new recipe may collide with, determine
6404 * which indexes have been used.
6406 ice_for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
6407 ice_or_bitmap(used_idx, used_idx,
6408 hw->switch_info->recp_list[bit].res_idxs,
6411 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
6413 /* return number of free indexes */
6414 return (u16)ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS);
6418 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
6419 * @hw: pointer to hardware structure
6420 * @rm: recipe management list entry
6421 * @profiles: bitmap of profiles that will be associated.
6423 static enum ice_status
6424 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
6425 ice_bitmap_t *profiles)
6427 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6428 struct ice_aqc_recipe_data_elem *tmp;
6429 struct ice_aqc_recipe_data_elem *buf;
6430 struct ice_recp_grp_entry *entry;
6431 enum ice_status status;
6437 /* When more than one recipe are required, another recipe is needed to
6438 * chain them together. Matching a tunnel metadata ID takes up one of
6439 * the match fields in the chaining recipe reducing the number of
6440 * chained recipes by one.
6442 /* check number of free result indices */
6443 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6444 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
6446 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
6447 free_res_idx, rm->n_grp_count);
6449 if (rm->n_grp_count > 1) {
6450 if (rm->n_grp_count > free_res_idx)
6451 return ICE_ERR_MAX_LIMIT;
6456 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
6457 return ICE_ERR_MAX_LIMIT;
6459 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
6460 ICE_MAX_NUM_RECIPES,
6463 return ICE_ERR_NO_MEMORY;
6465 buf = (struct ice_aqc_recipe_data_elem *)
6466 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
6468 status = ICE_ERR_NO_MEMORY;
6472 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
6473 recipe_count = ICE_MAX_NUM_RECIPES;
6474 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
6476 if (status || recipe_count == 0)
6479 /* Allocate the recipe resources, and configure them according to the
6480 * match fields from protocol headers and extracted field vectors.
6482 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
6483 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
6486 status = ice_alloc_recipe(hw, &entry->rid);
6490 /* Clear the result index of the located recipe, as this will be
6491 * updated, if needed, later in the recipe creation process.
6493 tmp[0].content.result_indx = 0;
6495 buf[recps] = tmp[0];
6496 buf[recps].recipe_indx = (u8)entry->rid;
6497 /* if the recipe is a non-root recipe RID should be programmed
6498 * as 0 for the rules to be applied correctly.
6500 buf[recps].content.rid = 0;
6501 ice_memset(&buf[recps].content.lkup_indx, 0,
6502 sizeof(buf[recps].content.lkup_indx),
6505 /* All recipes use look-up index 0 to match switch ID. */
6506 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
6507 buf[recps].content.mask[0] =
6508 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
6509 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
6512 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
6513 buf[recps].content.lkup_indx[i] = 0x80;
6514 buf[recps].content.mask[i] = 0;
6517 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
6518 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
6519 buf[recps].content.mask[i + 1] =
6520 CPU_TO_LE16(entry->fv_mask[i]);
6523 if (rm->n_grp_count > 1) {
6524 /* Checks to see if there really is a valid result index
6527 if (chain_idx >= ICE_MAX_FV_WORDS) {
6528 ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
6529 status = ICE_ERR_MAX_LIMIT;
6533 entry->chain_idx = chain_idx;
6534 buf[recps].content.result_indx =
6535 ICE_AQ_RECIPE_RESULT_EN |
6536 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
6537 ICE_AQ_RECIPE_RESULT_DATA_M);
6538 ice_clear_bit(chain_idx, result_idx_bm);
6539 chain_idx = ice_find_first_bit(result_idx_bm,
6543 /* fill recipe dependencies */
6544 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
6545 ICE_MAX_NUM_RECIPES);
6546 ice_set_bit(buf[recps].recipe_indx,
6547 (ice_bitmap_t *)buf[recps].recipe_bitmap);
6548 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
6552 if (rm->n_grp_count == 1) {
6553 rm->root_rid = buf[0].recipe_indx;
6554 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
6555 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
6556 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
6557 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
6558 sizeof(buf[0].recipe_bitmap),
6559 ICE_NONDMA_TO_NONDMA);
6561 status = ICE_ERR_BAD_PTR;
6564 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
6565 * the recipe which is getting created if specified
6566 * by user. Usually any advanced switch filter, which results
6567 * into new extraction sequence, ended up creating a new recipe
6568 * of type ROOT and usually recipes are associated with profiles
6569 * Switch rule referreing newly created recipe, needs to have
6570 * either/or 'fwd' or 'join' priority, otherwise switch rule
6571 * evaluation will not happen correctly. In other words, if
6572 * switch rule to be evaluated on priority basis, then recipe
6573 * needs to have priority, otherwise it will be evaluated last.
6575 buf[0].content.act_ctrl_fwd_priority = rm->priority;
6577 struct ice_recp_grp_entry *last_chain_entry;
6580 /* Allocate the last recipe that will chain the outcomes of the
6581 * other recipes together
6583 status = ice_alloc_recipe(hw, &rid);
6587 buf[recps].recipe_indx = (u8)rid;
6588 buf[recps].content.rid = (u8)rid;
6589 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
6590 /* the new entry created should also be part of rg_list to
6591 * make sure we have complete recipe
6593 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
6594 sizeof(*last_chain_entry));
6595 if (!last_chain_entry) {
6596 status = ICE_ERR_NO_MEMORY;
6599 last_chain_entry->rid = rid;
6600 ice_memset(&buf[recps].content.lkup_indx, 0,
6601 sizeof(buf[recps].content.lkup_indx),
6603 /* All recipes use look-up index 0 to match switch ID. */
6604 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
6605 buf[recps].content.mask[0] =
6606 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
6607 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
6608 buf[recps].content.lkup_indx[i] =
6609 ICE_AQ_RECIPE_LKUP_IGNORE;
6610 buf[recps].content.mask[i] = 0;
6614 /* update r_bitmap with the recp that is used for chaining */
6615 ice_set_bit(rid, rm->r_bitmap);
6616 /* this is the recipe that chains all the other recipes so it
6617 * should not have a chaining ID to indicate the same
6619 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
6620 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
6622 last_chain_entry->fv_idx[i] = entry->chain_idx;
6623 buf[recps].content.lkup_indx[i] = entry->chain_idx;
6624 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
6625 ice_set_bit(entry->rid, rm->r_bitmap);
6627 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
6628 if (sizeof(buf[recps].recipe_bitmap) >=
6629 sizeof(rm->r_bitmap)) {
6630 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
6631 sizeof(buf[recps].recipe_bitmap),
6632 ICE_NONDMA_TO_NONDMA);
6634 status = ICE_ERR_BAD_PTR;
6637 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
6640 rm->root_rid = (u8)rid;
6642 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
6646 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
6647 ice_release_change_lock(hw);
6651 /* Every recipe that just got created add it to the recipe
6654 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
6655 struct ice_switch_info *sw = hw->switch_info;
6656 bool is_root, idx_found = false;
6657 struct ice_sw_recipe *recp;
6658 u16 idx, buf_idx = 0;
6660 /* find buffer index for copying some data */
6661 for (idx = 0; idx < rm->n_grp_count; idx++)
6662 if (buf[idx].recipe_indx == entry->rid) {
6668 status = ICE_ERR_OUT_OF_RANGE;
6672 recp = &sw->recp_list[entry->rid];
6673 is_root = (rm->root_rid == entry->rid);
6674 recp->is_root = is_root;
6676 recp->root_rid = entry->rid;
6677 recp->big_recp = (is_root && rm->n_grp_count > 1);
6679 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
6680 entry->r_group.n_val_pairs *
6681 sizeof(struct ice_fv_word),
6682 ICE_NONDMA_TO_NONDMA);
6684 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
6685 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
6687 /* Copy non-result fv index values and masks to recipe. This
6688 * call will also update the result recipe bitmask.
6690 ice_collect_result_idx(&buf[buf_idx], recp);
6692 /* for non-root recipes, also copy to the root, this allows
6693 * easier matching of a complete chained recipe
6696 ice_collect_result_idx(&buf[buf_idx],
6697 &sw->recp_list[rm->root_rid]);
6699 recp->n_ext_words = entry->r_group.n_val_pairs;
6700 recp->chain_idx = entry->chain_idx;
6701 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
6702 recp->n_grp_count = rm->n_grp_count;
6703 recp->tun_type = rm->tun_type;
6704 recp->recp_created = true;
6718 * ice_create_recipe_group - creates recipe group
6719 * @hw: pointer to hardware structure
6720 * @rm: recipe management list entry
6721 * @lkup_exts: lookup elements
6723 static enum ice_status
6724 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
6725 struct ice_prot_lkup_ext *lkup_exts)
6727 enum ice_status status;
6730 rm->n_grp_count = 0;
6732 /* Create recipes for words that are marked not done by packing them
6735 status = ice_create_first_fit_recp_def(hw, lkup_exts,
6736 &rm->rg_list, &recp_count);
6738 rm->n_grp_count += recp_count;
6739 rm->n_ext_words = lkup_exts->n_val_words;
6740 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
6741 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
6742 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
6743 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
6750 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
6751 * @hw: pointer to hardware structure
6752 * @lkups: lookup elements or match criteria for the advanced recipe, one
6753 * structure per protocol header
6754 * @lkups_cnt: number of protocols
6755 * @bm: bitmap of field vectors to consider
6756 * @fv_list: pointer to a list that holds the returned field vectors
6758 static enum ice_status
6759 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6760 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
6762 enum ice_status status;
6769 prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
6771 return ICE_ERR_NO_MEMORY;
6773 for (i = 0; i < lkups_cnt; i++)
6774 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
6775 status = ICE_ERR_CFG;
6779 /* Find field vectors that include all specified protocol types */
6780 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
6783 ice_free(hw, prot_ids);
/* NOTE(review): elided listing - the opening switch, the return statements
 * for each arm, the default case and the closing brace are missing from
 * this view. Returns whether the tunnel type requires a metadata match
 * word, writing the mask to use through @mask when it does.
 */
6788 * ice_tun_type_match_word - determine if tun type needs a match mask
6789 * @tun_type: tunnel type
6790 * @mask: mask to be used for the tunnel
6792 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
/* tunnel/QinQ types that match the full tunnel flag mask */
6795 case ICE_SW_TUN_VXLAN_GPE:
6796 case ICE_SW_TUN_GENEVE:
6797 case ICE_SW_TUN_VXLAN:
6798 case ICE_SW_TUN_NVGRE:
6799 case ICE_SW_TUN_UDP:
6800 case ICE_ALL_TUNNELS:
6801 case ICE_SW_TUN_AND_NON_TUN_QINQ:
6802 case ICE_NON_TUN_QINQ:
6803 case ICE_SW_TUN_PPPOE_QINQ:
6804 case ICE_SW_TUN_PPPOE_PAY_QINQ:
6805 case ICE_SW_TUN_PPPOE_IPV4_QINQ:
6806 case ICE_SW_TUN_PPPOE_IPV6_QINQ:
6807 *mask = ICE_TUN_FLAG_MASK;
/* VLAN-tagged tunnel variants: exclude the VLAN bit from the match */
6810 case ICE_SW_TUN_GENEVE_VLAN:
6811 case ICE_SW_TUN_VXLAN_VLAN:
6812 *mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
/* NOTE(review): elided listing - the declaration of 'mask', closing braces
 * and the success return are missing from this view.
 */
6822 * ice_add_special_words - Add words that are not protocols, such as metadata
6823 * @rinfo: other information regarding the rule e.g. priority and action info
6824 * @lkup_exts: lookup word structure
6826 static enum ice_status
6827 ice_add_special_words(struct ice_adv_rule_info *rinfo,
6828 struct ice_prot_lkup_ext *lkup_exts)
6832 /* If this is a tunneled packet, then add recipe index to match the
6833 * tunnel bit in the packet metadata flags.
6835 if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
6836 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
/* consume the next free lookup-word slot for the metadata match */
6837 u8 word = lkup_exts->n_val_words++;
6839 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
6840 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
6841 lkup_exts->field_mask[word] = mask;
/* no free word slots left to hold the metadata match */
6843 return ICE_ERR_MAX_LIMIT;
/* NOTE(review): elided listing - the 'break;' lines between switch arms,
 * braces, several case labels and early 'return' statements are missing
 * from this view. The visible structure is: tunnel types that map to a
 * profile *type* fall through to ice_get_sw_fv_bitmap() at the end, while
 * tunnel types with fixed profile IDs set bits directly in @bm (and,
 * presumably, return before reaching that call - confirm against the
 * unelided source).
 */
6850 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
6851 * @hw: pointer to hardware structure
6852 * @rinfo: other information regarding the rule e.g. priority and action info
6853 * @bm: pointer to memory for returning the bitmap of field vectors
6856 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
6859 enum ice_prof_type prof_type;
6861 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
6863 switch (rinfo->tun_type) {
6865 case ICE_NON_TUN_QINQ:
6866 prof_type = ICE_PROF_NON_TUN;
6868 case ICE_ALL_TUNNELS:
6869 prof_type = ICE_PROF_TUN_ALL;
6871 case ICE_SW_TUN_VXLAN_GPE:
6872 case ICE_SW_TUN_GENEVE:
6873 case ICE_SW_TUN_GENEVE_VLAN:
6874 case ICE_SW_TUN_VXLAN:
6875 case ICE_SW_TUN_VXLAN_VLAN:
6876 case ICE_SW_TUN_UDP:
6877 case ICE_SW_TUN_GTP:
6878 prof_type = ICE_PROF_TUN_UDP;
6880 case ICE_SW_TUN_NVGRE:
6881 prof_type = ICE_PROF_TUN_GRE;
6883 case ICE_SW_TUN_PPPOE:
6884 case ICE_SW_TUN_PPPOE_QINQ:
6885 prof_type = ICE_PROF_TUN_PPPOE;
/* from here down: tunnel types with fixed profile IDs set bits directly */
6887 case ICE_SW_TUN_PPPOE_PAY:
6888 case ICE_SW_TUN_PPPOE_PAY_QINQ:
6889 ice_set_bit(ICE_PROFID_PPPOE_PAY, bm);
6891 case ICE_SW_TUN_PPPOE_IPV4:
6892 case ICE_SW_TUN_PPPOE_IPV4_QINQ:
6893 ice_set_bit(ICE_PROFID_PPPOE_IPV4_OTHER, bm);
6894 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
6895 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
6897 case ICE_SW_TUN_PPPOE_IPV4_TCP:
6898 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
6900 case ICE_SW_TUN_PPPOE_IPV4_UDP:
6901 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
6903 case ICE_SW_TUN_PPPOE_IPV6:
6904 case ICE_SW_TUN_PPPOE_IPV6_QINQ:
6905 ice_set_bit(ICE_PROFID_PPPOE_IPV6_OTHER, bm);
6906 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
6907 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
6909 case ICE_SW_TUN_PPPOE_IPV6_TCP:
6910 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
6912 case ICE_SW_TUN_PPPOE_IPV6_UDP:
6913 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
6915 case ICE_SW_TUN_PROFID_IPV6_ESP:
6916 case ICE_SW_TUN_IPV6_ESP:
6917 ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
6919 case ICE_SW_TUN_PROFID_IPV6_AH:
6920 case ICE_SW_TUN_IPV6_AH:
6921 ice_set_bit(ICE_PROFID_IPV6_AH, bm);
6923 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6924 case ICE_SW_TUN_IPV6_L2TPV3:
6925 ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
6927 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6928 case ICE_SW_TUN_IPV6_NAT_T:
6929 ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
6931 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6932 ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
6934 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6935 ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
6937 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6938 ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
6940 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6941 ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
6943 case ICE_SW_TUN_IPV4_NAT_T:
6944 ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
6946 case ICE_SW_TUN_IPV4_L2TPV3:
6947 ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
6949 case ICE_SW_TUN_IPV4_ESP:
6950 ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
6952 case ICE_SW_TUN_IPV4_AH:
6953 ice_set_bit(ICE_PROFID_IPV4_AH, bm);
6955 case ICE_SW_IPV4_TCP:
6956 ice_set_bit(ICE_PROFID_IPV4_TCP, bm);
6958 case ICE_SW_IPV4_UDP:
6959 ice_set_bit(ICE_PROFID_IPV4_UDP, bm);
6961 case ICE_SW_IPV6_TCP:
6962 ice_set_bit(ICE_PROFID_IPV6_TCP, bm);
6964 case ICE_SW_IPV6_UDP:
6965 ice_set_bit(ICE_PROFID_IPV6_UDP, bm);
/* GTP-U cases select both EH and non-EH inner profiles for OTHER/UDP/TCP */
6967 case ICE_SW_TUN_IPV4_GTPU_IPV4:
6968 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, bm);
6969 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_OTHER, bm);
6970 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
6971 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
6972 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
6973 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
6975 case ICE_SW_TUN_IPV6_GTPU_IPV4:
6976 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, bm);
6977 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_OTHER, bm);
6978 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
6979 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
6980 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
6981 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
6983 case ICE_SW_TUN_IPV4_GTPU_IPV6:
6984 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, bm);
6985 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_OTHER, bm);
6986 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
6987 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
6988 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
6989 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
6991 case ICE_SW_TUN_IPV6_GTPU_IPV6:
6992 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, bm);
6993 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_OTHER, bm);
6994 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
6995 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
6996 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
6997 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
/* combined tunnel + non-tunnel rules search across every profile */
6999 case ICE_SW_TUN_AND_NON_TUN:
7000 case ICE_SW_TUN_AND_NON_TUN_QINQ:
7002 prof_type = ICE_PROF_ALL;
/* resolve the chosen profile *type* into a bitmap of field vectors */
7006 ice_get_sw_fv_bitmap(hw, prof_type, bm);
/* NOTE(review): elided listing - the switch keyword, 'return true;' for the
 * listed cases, the default arm and the final 'return false;' are missing
 * from this view.
 */
7010 * ice_is_prof_rule - determine if rule type is a profile rule
7011 * @type: the rule type
7013 * if the rule type is a profile rule, that means that there no field value
7014 * match required, in this case just a profile hit is required.
7016 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
/* only the ICE_SW_TUN_PROFID_* rule types are pure profile-hit rules */
7019 case ICE_SW_TUN_PROFID_IPV6_ESP:
7020 case ICE_SW_TUN_PROFID_IPV6_AH:
7021 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
7022 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
7023 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
7024 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
7025 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
7026 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
/* NOTE(review): elided listing - goto labels (err_free_lkup_exts and the
 * unwind labels), several NULL checks, loop-variable declarations and
 * intermediate 'goto' statements are missing from this view; the cleanup
 * ordering cannot be fully verified from the visible lines.
 */
7036 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
7037 * @hw: pointer to hardware structure
7038 * @lkups: lookup elements or match criteria for the advanced recipe, one
7039 * structure per protocol header
7040 * @lkups_cnt: number of protocols
7041 * @rinfo: other information regarding the rule e.g. priority and action info
7042 * @rid: return the recipe ID of the recipe created
7044 static enum ice_status
7045 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7046 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
7048 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
7049 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7050 struct ice_prot_lkup_ext *lkup_exts;
7051 struct ice_recp_grp_entry *r_entry;
7052 struct ice_sw_fv_list_entry *fvit;
7053 struct ice_recp_grp_entry *r_tmp;
7054 struct ice_sw_fv_list_entry *tmp;
7055 enum ice_status status = ICE_SUCCESS;
7056 struct ice_sw_recipe *rm;
/* non-profile rules must supply at least one lookup element */
7059 if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
7060 return ICE_ERR_PARAM;
7062 lkup_exts = (struct ice_prot_lkup_ext *)
7063 ice_malloc(hw, sizeof(*lkup_exts))
7065 return ICE_ERR_NO_MEMORY;
7067 /* Determine the number of words to be matched and if it exceeds a
7068 * recipe's restrictions
7070 for (i = 0; i < lkups_cnt; i++) {
7073 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
7074 status = ICE_ERR_CFG;
7075 goto err_free_lkup_exts;
7078 count = ice_fill_valid_words(&lkups[i], lkup_exts);
7080 status = ICE_ERR_CFG;
7081 goto err_free_lkup_exts;
7085 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
7087 status = ICE_ERR_NO_MEMORY;
7088 goto err_free_lkup_exts;
7091 /* Get field vectors that contain fields extracted from all the protocol
7092 * headers being programmed.
7094 INIT_LIST_HEAD(&rm->fv_list);
7095 INIT_LIST_HEAD(&rm->rg_list);
7097 /* Get bitmap of field vectors (profiles) that are compatible with the
7098 * rule request; only these will be searched in the subsequent call to
7101 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
7103 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
7107 /* Create any special protocol/offset pairs, such as looking at tunnel
7108 * bits by extracting metadata
7110 status = ice_add_special_words(rinfo, lkup_exts);
7112 goto err_free_lkup_exts;
7114 /* Group match words into recipes using preferred recipe grouping
7117 status = ice_create_recipe_group(hw, rm, lkup_exts);
7121 /* set the recipe priority if specified */
7122 rm->priority = (u8)rinfo->priority;
7124 /* Find offsets from the field vector. Pick the first one for all the
7127 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
7131 /* An empty FV list means to use all the profiles returned in the
7134 if (LIST_EMPTY(&rm->fv_list)) {
7137 ice_for_each_set_bit(j, fv_bitmap, ICE_MAX_NUM_PROFILES) {
7138 struct ice_sw_fv_list_entry *fvl;
7140 fvl = (struct ice_sw_fv_list_entry *)
7141 ice_malloc(hw, sizeof(*fvl));
7145 fvl->profile_id = j;
7146 LIST_ADD(&fvl->list_entry, &rm->fv_list);
7150 /* get bitmap of all profiles the recipe will be associated with */
7151 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7152 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7154 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
7155 ice_set_bit((u16)fvit->profile_id, profiles);
7158 /* Look for a recipe which matches our requested fv / mask list */
7159 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
7160 if (*rid < ICE_MAX_NUM_RECIPES)
7161 /* Success if found a recipe that match the existing criteria */
7164 rm->tun_type = rinfo->tun_type;
7165 /* Recipe we need does not exist, add a recipe */
7166 status = ice_add_sw_recipe(hw, rm, profiles);
7170 /* Associate all the recipes created with all the profiles in the
7171 * common field vector.
7173 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7175 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* read-modify-write of the profile's recipe association bitmap */
7178 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
7179 (u8 *)r_bitmap, NULL);
7183 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
7184 ICE_MAX_NUM_RECIPES);
/* hold the change lock while writing the mapping back to FW */
7185 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7189 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
7192 ice_release_change_lock(hw);
7197 /* Update profile to recipe bitmap array */
7198 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
7199 ICE_MAX_NUM_RECIPES);
7201 /* Update recipe to profile bitmap array */
7202 ice_for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
7203 ice_set_bit((u16)fvit->profile_id,
7204 recipe_to_profile[j]);
/* cache the lookup extraction info on the recipe we just rooted */
7207 *rid = rm->root_rid;
7208 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
7209 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
/* unwind: free the recipe group list and field-vector list entries */
7211 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
7212 ice_recp_grp_entry, l_entry) {
7213 LIST_DEL(&r_entry->l_entry);
7214 ice_free(hw, r_entry);
7217 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
7219 LIST_DEL(&fvit->list_entry);
7224 ice_free(hw, rm->root_buf);
7229 ice_free(hw, lkup_exts);
/* NOTE(review): elided listing - declarations (i, gre, pkt arrays used in
 * the flag loop), 'return' statements after each packet selection, braces
 * and the inner vlan/tcp branch keywords are missing from this view.
 */
7235 * ice_find_dummy_packet - find dummy packet by tunnel type
7237 * @lkups: lookup elements or match criteria for the advanced recipe, one
7238 * structure per protocol header
7239 * @lkups_cnt: number of protocols
7240 * @tun_type: tunnel type from the match criteria
7241 * @pkt: dummy packet to fill according to filter match criteria
7242 * @pkt_len: packet length of dummy packet
7243 * @offsets: pointer to receive the pointer to the offsets for the packet
7246 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7247 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
7249 const struct ice_dummy_pkt_offsets **offsets)
7251 bool tcp = false, udp = false, ipv6 = false, vlan = false;
/* scan the lookups once to derive protocol flags that pick the template */
7255 for (i = 0; i < lkups_cnt; i++) {
7256 if (lkups[i].type == ICE_UDP_ILOS)
7258 else if (lkups[i].type == ICE_TCP_IL)
7260 else if (lkups[i].type == ICE_IPV6_OFOS)
7262 else if (lkups[i].type == ICE_VLAN_OFOS)
/* outer IPv4 carrying protocol 0x2F (GRE) with an exact-protocol mask */
7264 else if (lkups[i].type == ICE_IPV4_OFOS &&
7265 lkups[i].h_u.ipv4_hdr.protocol ==
7266 ICE_IPV4_NVGRE_PROTO_ID &&
7267 lkups[i].m_u.ipv4_hdr.protocol ==
/* PPPoE session carrying IPv6 (PPP protocol 0x0057) */
7270 else if (lkups[i].type == ICE_PPPOE &&
7271 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
7272 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
7273 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
/* outer ethertype 0x86DD => IPv6 */
7276 else if (lkups[i].type == ICE_ETYPE_OL &&
7277 lkups[i].h_u.ethertype.ethtype_id ==
7278 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
7279 lkups[i].m_u.ethertype.ethtype_id ==
/* inner IPv4 carrying TCP (protocol 0x06) */
7282 else if (lkups[i].type == ICE_IPV4_IL &&
7283 lkups[i].h_u.ipv4_hdr.protocol ==
7285 lkups[i].m_u.ipv4_hdr.protocol ==
/* QinQ (double VLAN) templates take priority over the generic ones */
7290 if ((tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7291 tun_type == ICE_NON_TUN_QINQ) && ipv6) {
7292 *pkt = dummy_qinq_ipv6_pkt;
7293 *pkt_len = sizeof(dummy_qinq_ipv6_pkt);
7294 *offsets = dummy_qinq_ipv6_packet_offsets;
7296 } else if (tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7297 tun_type == ICE_NON_TUN_QINQ) {
7298 *pkt = dummy_qinq_ipv4_pkt;
7299 *pkt_len = sizeof(dummy_qinq_ipv4_pkt);
7300 *offsets = dummy_qinq_ipv4_packet_offsets;
7304 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_QINQ) {
7305 *pkt = dummy_qinq_pppoe_ipv6_packet;
7306 *pkt_len = sizeof(dummy_qinq_pppoe_ipv6_packet);
7307 *offsets = dummy_qinq_pppoe_packet_ipv6_offsets;
7309 } else if (tun_type == ICE_SW_TUN_PPPOE_IPV4_QINQ) {
7310 *pkt = dummy_qinq_pppoe_ipv4_pkt;
7311 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7312 *offsets = dummy_qinq_pppoe_ipv4_packet_offsets;
7314 } else if (tun_type == ICE_SW_TUN_PPPOE_QINQ ||
7315 tun_type == ICE_SW_TUN_PPPOE_PAY_QINQ) {
7316 *pkt = dummy_qinq_pppoe_ipv4_pkt;
7317 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7318 *offsets = dummy_qinq_pppoe_packet_offsets;
/* GTP-U templates: no-payload variants reuse the IPv4/IPv6 GTPU packets
 * but with the "no pay" offset tables
 */
7322 if (tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY) {
7323 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7324 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7325 *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
7327 } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
7328 *pkt = dummy_ipv6_gtpu_ipv6_packet;
7329 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
7330 *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
7332 } else if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4) {
7333 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7334 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7335 *offsets = dummy_ipv4_gtpu_ipv4_packet_offsets;
7337 } else if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6) {
7338 *pkt = dummy_ipv4_gtpu_ipv6_packet;
7339 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_packet);
7340 *offsets = dummy_ipv4_gtpu_ipv6_packet_offsets;
7342 } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4) {
7343 *pkt = dummy_ipv6_gtpu_ipv4_packet;
7344 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_packet);
7345 *offsets = dummy_ipv6_gtpu_ipv4_packet_offsets;
7347 } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6) {
7348 *pkt = dummy_ipv6_gtpu_ipv6_packet;
7349 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
7350 *offsets = dummy_ipv6_gtpu_ipv6_packet_offsets;
7354 if (tun_type == ICE_SW_TUN_IPV4_ESP) {
7355 *pkt = dummy_ipv4_esp_pkt;
7356 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
7357 *offsets = dummy_ipv4_esp_packet_offsets;
7361 if (tun_type == ICE_SW_TUN_IPV6_ESP) {
7362 *pkt = dummy_ipv6_esp_pkt;
7363 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
7364 *offsets = dummy_ipv6_esp_packet_offsets;
7368 if (tun_type == ICE_SW_TUN_IPV4_AH) {
7369 *pkt = dummy_ipv4_ah_pkt;
7370 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
7371 *offsets = dummy_ipv4_ah_packet_offsets;
7375 if (tun_type == ICE_SW_TUN_IPV6_AH) {
7376 *pkt = dummy_ipv6_ah_pkt;
7377 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
7378 *offsets = dummy_ipv6_ah_packet_offsets;
7382 if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
7383 *pkt = dummy_ipv4_nat_pkt;
7384 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
7385 *offsets = dummy_ipv4_nat_packet_offsets;
7389 if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
7390 *pkt = dummy_ipv6_nat_pkt;
7391 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
7392 *offsets = dummy_ipv6_nat_packet_offsets;
7396 if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
7397 *pkt = dummy_ipv4_l2tpv3_pkt;
7398 *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
7399 *offsets = dummy_ipv4_l2tpv3_packet_offsets;
7403 if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
7404 *pkt = dummy_ipv6_l2tpv3_pkt;
7405 *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
7406 *offsets = dummy_ipv6_l2tpv3_packet_offsets;
7410 if (tun_type == ICE_SW_TUN_GTP) {
7411 *pkt = dummy_udp_gtp_packet;
7412 *pkt_len = sizeof(dummy_udp_gtp_packet);
7413 *offsets = dummy_udp_gtp_packet_offsets;
7417 if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
7418 *pkt = dummy_pppoe_ipv6_packet;
7419 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
7420 *offsets = dummy_pppoe_packet_offsets;
7422 } else if (tun_type == ICE_SW_TUN_PPPOE ||
7423 tun_type == ICE_SW_TUN_PPPOE_PAY) {
7424 *pkt = dummy_pppoe_ipv4_packet;
7425 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
7426 *offsets = dummy_pppoe_packet_offsets;
7430 if (tun_type == ICE_SW_TUN_PPPOE_IPV4) {
7431 *pkt = dummy_pppoe_ipv4_packet;
7432 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
7433 *offsets = dummy_pppoe_packet_ipv4_offsets;
7437 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_TCP) {
7438 *pkt = dummy_pppoe_ipv4_tcp_packet;
7439 *pkt_len = sizeof(dummy_pppoe_ipv4_tcp_packet);
7440 *offsets = dummy_pppoe_ipv4_tcp_packet_offsets;
7444 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_UDP) {
7445 *pkt = dummy_pppoe_ipv4_udp_packet;
7446 *pkt_len = sizeof(dummy_pppoe_ipv4_udp_packet);
7447 *offsets = dummy_pppoe_ipv4_udp_packet_offsets;
7451 if (tun_type == ICE_SW_TUN_PPPOE_IPV6) {
7452 *pkt = dummy_pppoe_ipv6_packet;
7453 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
7454 *offsets = dummy_pppoe_packet_ipv6_offsets;
7458 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_TCP) {
7459 *pkt = dummy_pppoe_ipv6_tcp_packet;
7460 *pkt_len = sizeof(dummy_pppoe_ipv6_tcp_packet);
7461 *offsets = dummy_pppoe_packet_ipv6_tcp_offsets;
7465 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_UDP) {
7466 *pkt = dummy_pppoe_ipv6_udp_packet;
7467 *pkt_len = sizeof(dummy_pppoe_ipv6_udp_packet);
7468 *offsets = dummy_pppoe_packet_ipv6_udp_offsets;
7472 if (tun_type == ICE_SW_IPV4_TCP) {
7473 *pkt = dummy_tcp_packet;
7474 *pkt_len = sizeof(dummy_tcp_packet);
7475 *offsets = dummy_tcp_packet_offsets;
7479 if (tun_type == ICE_SW_IPV4_UDP) {
7480 *pkt = dummy_udp_packet;
7481 *pkt_len = sizeof(dummy_udp_packet);
7482 *offsets = dummy_udp_packet_offsets;
7486 if (tun_type == ICE_SW_IPV6_TCP) {
7487 *pkt = dummy_tcp_ipv6_packet;
7488 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
7489 *offsets = dummy_tcp_ipv6_packet_offsets;
7493 if (tun_type == ICE_SW_IPV6_UDP) {
7494 *pkt = dummy_udp_ipv6_packet;
7495 *pkt_len = sizeof(dummy_udp_ipv6_packet);
7496 *offsets = dummy_udp_ipv6_packet_offsets;
7500 if (tun_type == ICE_ALL_TUNNELS) {
7501 *pkt = dummy_gre_udp_packet;
7502 *pkt_len = sizeof(dummy_gre_udp_packet);
7503 *offsets = dummy_gre_udp_packet_offsets;
/* NVGRE (explicit type or inferred from the IPv4 GRE lookup above):
 * TCP inner payload picks the gre_tcp template, otherwise gre_udp
 */
7507 if (tun_type == ICE_SW_TUN_NVGRE || gre) {
7509 *pkt = dummy_gre_tcp_packet;
7510 *pkt_len = sizeof(dummy_gre_tcp_packet);
7511 *offsets = dummy_gre_tcp_packet_offsets;
7515 *pkt = dummy_gre_udp_packet;
7516 *pkt_len = sizeof(dummy_gre_udp_packet);
7517 *offsets = dummy_gre_udp_packet_offsets;
7521 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
7522 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
7523 tun_type == ICE_SW_TUN_GENEVE_VLAN ||
7524 tun_type == ICE_SW_TUN_VXLAN_VLAN) {
7526 *pkt = dummy_udp_tun_tcp_packet;
7527 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
7528 *offsets = dummy_udp_tun_tcp_packet_offsets;
7532 *pkt = dummy_udp_tun_udp_packet;
7533 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
7534 *offsets = dummy_udp_tun_udp_packet_offsets;
/* non-tunnel fallbacks: pick by udp/ipv6/vlan flags, defaulting to TCP */
7540 *pkt = dummy_vlan_udp_packet;
7541 *pkt_len = sizeof(dummy_vlan_udp_packet);
7542 *offsets = dummy_vlan_udp_packet_offsets;
7545 *pkt = dummy_udp_packet;
7546 *pkt_len = sizeof(dummy_udp_packet);
7547 *offsets = dummy_udp_packet_offsets;
7549 } else if (udp && ipv6) {
7551 *pkt = dummy_vlan_udp_ipv6_packet;
7552 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
7553 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
7556 *pkt = dummy_udp_ipv6_packet;
7557 *pkt_len = sizeof(dummy_udp_ipv6_packet);
7558 *offsets = dummy_udp_ipv6_packet_offsets;
/* NOTE(review): "(tcp && ipv6) || ipv6" is logically just "ipv6" - the
 * tcp term is redundant; harmless, but a candidate for simplification.
 */
7560 } else if ((tcp && ipv6) || ipv6) {
7562 *pkt = dummy_vlan_tcp_ipv6_packet;
7563 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
7564 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
7567 *pkt = dummy_tcp_ipv6_packet;
7568 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
7569 *offsets = dummy_tcp_ipv6_packet_offsets;
7574 *pkt = dummy_vlan_tcp_packet;
7575 *pkt_len = sizeof(dummy_vlan_tcp_packet);
7576 *offsets = dummy_vlan_tcp_packet_offsets;
7578 *pkt = dummy_tcp_packet;
7579 *pkt_len = sizeof(dummy_tcp_packet);
7580 *offsets = dummy_tcp_packet_offsets;
/* NOTE(review): elided listing - declarations (pkt, i), loop 'break',
 * closing braces, several switch 'break's/case labels and the final
 * success return are missing from this view.
 */
7585 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
7587 * @lkups: lookup elements or match criteria for the advanced recipe, one
7588 * structure per protocol header
7589 * @lkups_cnt: number of protocols
7590 * @s_rule: stores rule information from the match criteria
7591 * @dummy_pkt: dummy packet to fill according to filter match criteria
7592 * @pkt_len: packet length of dummy packet
7593 * @offsets: offset info for the dummy packet
7595 static enum ice_status
7596 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7597 struct ice_aqc_sw_rules_elem *s_rule,
7598 const u8 *dummy_pkt, u16 pkt_len,
7599 const struct ice_dummy_pkt_offsets *offsets)
7604 /* Start with a packet with a pre-defined/dummy content. Then, fill
7605 * in the header values to be looked up or matched.
7607 pkt = s_rule->pdata.lkup_tx_rx.hdr;
7609 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
7611 for (i = 0; i < lkups_cnt; i++) {
7612 enum ice_protocol_type type;
7613 u16 offset = 0, len = 0, j;
7616 /* find the start of this layer; it should be found since this
7617 * was already checked when search for the dummy packet
7619 type = lkups[i].type;
7620 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
7621 if (type == offsets[j].type) {
7622 offset = offsets[j].offset;
7627 /* this should never happen in a correct calling sequence */
7629 return ICE_ERR_PARAM;
/* map the lookup's protocol type to the header length to overwrite */
7631 switch (lkups[i].type) {
7634 len = sizeof(struct ice_ether_hdr);
7637 len = sizeof(struct ice_ethtype_hdr);
7641 len = sizeof(struct ice_vlan_hdr);
7645 len = sizeof(struct ice_ipv4_hdr);
7649 len = sizeof(struct ice_ipv6_hdr);
7654 len = sizeof(struct ice_l4_hdr);
7657 len = sizeof(struct ice_sctp_hdr);
7660 len = sizeof(struct ice_nvgre);
7665 len = sizeof(struct ice_udp_tnl_hdr);
7669 case ICE_GTP_NO_PAY:
7670 len = sizeof(struct ice_udp_gtp_hdr);
7673 len = sizeof(struct ice_pppoe_hdr);
7676 len = sizeof(struct ice_esp_hdr);
7679 len = sizeof(struct ice_nat_t_hdr);
7682 len = sizeof(struct ice_ah_hdr);
7685 len = sizeof(struct ice_l2tpv3_sess_hdr);
7688 return ICE_ERR_PARAM;
7691 /* the length should be a word multiple */
7692 if (len % ICE_BYTES_PER_WORD)
7695 /* We have the offset to the header start, the length, the
7696 * caller's header values and mask. Use this information to
7697 * copy the data into the dummy packet appropriately based on
7698 * the mask. Note that we need to only write the bits as
7699 * indicated by the mask to make sure we don't improperly write
7700 * over any significant packet data.
7702 for (j = 0; j < len / sizeof(u16); j++)
7703 if (((u16 *)&lkups[i].m_u)[j])
7704 ((u16 *)(pkt + offset))[j] =
7705 (((u16 *)(pkt + offset))[j] &
7706 ~((u16 *)&lkups[i].m_u)[j]) |
7707 (((u16 *)&lkups[i].h_u)[j] &
7708 ((u16 *)&lkups[i].m_u)[j]);
/* record the final header length in the switch rule */
7711 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
/* NOTE(review): elided listing - declarations (open_port, i, offset),
 * 'break'/'return' statements, the not-found error path and the success
 * return are missing from this view.
 */
7717 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
7718 * @hw: pointer to the hardware structure
7719 * @tun_type: tunnel type
7720 * @pkt: dummy packet to fill in
7721 * @offsets: offset info for the dummy packet
7723 static enum ice_status
7724 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
7725 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
/* look up the currently open UDP port for the tunnel flavor in use */
7730 case ICE_SW_TUN_AND_NON_TUN:
7731 case ICE_SW_TUN_VXLAN_GPE:
7732 case ICE_SW_TUN_VXLAN:
7733 case ICE_SW_TUN_VXLAN_VLAN:
7734 case ICE_SW_TUN_UDP:
7735 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
7739 case ICE_SW_TUN_GENEVE:
7740 case ICE_SW_TUN_GENEVE_VLAN:
7741 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
7746 /* Nothing needs to be done for this tunnel type */
7750 /* Find the outer UDP protocol header and insert the port number */
7751 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
7752 if (offsets[i].type == ICE_UDP_OF) {
7753 struct ice_l4_hdr *hdr;
7756 offset = offsets[i].offset;
7757 hdr = (struct ice_l4_hdr *)&pkt[offset];
7758 hdr->dst_port = CPU_TO_BE16(open_port);
/* NOTE(review): elided listing - the 'continue' after the count check, the
 * size argument line of memcmp(), braces, the successful 'return list_itr'
 * and the trailing 'return NULL' are missing from this view.
 */
7768 * ice_find_adv_rule_entry - Search a rule entry
7769 * @hw: pointer to the hardware structure
7770 * @lkups: lookup elements or match criteria for the advanced recipe, one
7771 * structure per protocol header
7772 * @lkups_cnt: number of protocols
7773 * @recp_id: recipe ID for which we are finding the rule
7774 * @rinfo: other information regarding the rule e.g. priority and action info
7776 * Helper function to search for a given advance rule entry
7777 * Returns pointer to entry storing the rule if found
7779 static struct ice_adv_fltr_mgmt_list_entry *
7780 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7781 u16 lkups_cnt, u16 recp_id,
7782 struct ice_adv_rule_info *rinfo)
7784 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7785 struct ice_switch_info *sw = hw->switch_info;
/* linear scan of the recipe's filter-rule list for an exact lookup match */
7788 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
7789 ice_adv_fltr_mgmt_list_entry, list_entry) {
7790 bool lkups_matched = true;
7792 if (lkups_cnt != list_itr->lkups_cnt)
7794 for (i = 0; i < list_itr->lkups_cnt; i++)
7795 if (memcmp(&list_itr->lkups[i], &lkups[i],
7797 lkups_matched = false;
/* lookups alone are not enough; action flag and tunnel type must agree */
7800 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
7801 rinfo->tun_type == list_itr->rule_info.tun_type &&
/* NOTE(review): elided listing - status checks after each helper call,
 * braces, a few error returns (e.g. for a missing vsi_list_info) and the
 * final return are missing from this view.
 */
7809 * ice_adv_add_update_vsi_list
7810 * @hw: pointer to the hardware structure
7811 * @m_entry: pointer to current adv filter management list entry
7812 * @cur_fltr: filter information from the book keeping entry
7813 * @new_fltr: filter information with the new VSI to be added
7815 * Call AQ command to add or update previously created VSI list with new VSI.
7817 * Helper function to do book keeping associated with adding filter information
7818 * The algorithm to do the booking keeping is described below :
7819 * When a VSI needs to subscribe to a given advanced filter
7820 * if only one VSI has been added till now
7821 * Allocate a new VSI list and add two VSIs
7822 * to this list using switch rule command
7823 * Update the previously created switch rule with the
7824 * newly created VSI list ID
7825 * if a VSI list was previously created
7826 * Add the new VSI to the previously created VSI list set
7827 * using the update switch rule command
7829 static enum ice_status
7830 ice_adv_add_update_vsi_list(struct ice_hw *hw,
7831 struct ice_adv_fltr_mgmt_list_entry *m_entry,
7832 struct ice_adv_rule_info *cur_fltr,
7833 struct ice_adv_rule_info *new_fltr)
7835 enum ice_status status;
7836 u16 vsi_list_id = 0;
/* only VSI-forwarding rules can be converted to VSI-list rules */
7838 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
7839 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
7840 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
7841 return ICE_ERR_NOT_IMPL;
7843 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
7844 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
7845 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
7846 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
7847 return ICE_ERR_NOT_IMPL;
7849 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
7850 /* Only one entry existed in the mapping and it was not already
7851 * a part of a VSI list. So, create a VSI list with the old and
7854 struct ice_fltr_info tmp_fltr;
7855 u16 vsi_handle_arr[2];
7857 /* A rule already exists with the new VSI being added */
7858 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
7859 new_fltr->sw_act.fwd_id.hw_vsi_id)
7860 return ICE_ERR_ALREADY_EXISTS;
7862 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
7863 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
7864 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
7870 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
7871 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
7872 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
7873 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
7874 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
7875 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
7877 /* Update the previous switch rule of "forward to VSI" to
7880 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* book-keeping: the entry now forwards to the freshly created list */
7884 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
7885 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
7886 m_entry->vsi_list_info =
7887 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
7890 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
7892 if (!m_entry->vsi_list_info)
7895 /* A rule already exists with the new VSI being added */
7896 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
7899 /* Update the previously created VSI list set with
7900 * the new VSI ID passed in
7902 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
7904 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
7906 ice_aqc_opc_update_sw_rules,
7908 /* update VSI list mapping info with new VSI ID */
7910 ice_set_bit(vsi_handle,
7911 m_entry->vsi_list_info->vsi_map);
7914 m_entry->vsi_count++;
/* NOTE(review): the numeric prefixes (79xx/80xx) on every line below are
 * line-number residue from a lossy extraction, not code; gaps in that
 * numbering show that statements (opening braces, null checks, break
 * lines) have been elided from this chunk -- TODO recover the full
 * source before building.
 */
7919 * ice_add_adv_rule - helper function to create an advanced switch rule
7920 * @hw: pointer to the hardware structure
7921 * @lkups: information on the words that needs to be looked up. All words
7922 * together makes one recipe
7923 * @lkups_cnt: num of entries in the lkups array
7924 * @rinfo: other information related to the rule that needs to be programmed
7925 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
7926 * ignored is case of error.
7928 * This function can program only 1 rule at a time. The lkups is used to
7929 * describe the all the words that forms the "lookup" portion of the recipe.
7930 * These words can span multiple protocols. Callers to this function need to
7931 * pass in a list of protocol headers with lookup information along and mask
7932 * that determines which words are valid from the given protocol header.
7933 * rinfo describes other information related to this rule such as forwarding
7934 * IDs, priority of this rule, etc.
7937 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7938 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
7939 struct ice_rule_query_data *added_entry)
7941 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
7942 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
7943 const struct ice_dummy_pkt_offsets *pkt_offsets;
7944 struct ice_aqc_sw_rules_elem *s_rule = NULL;
7945 struct LIST_HEAD_TYPE *rule_head;
7946 struct ice_switch_info *sw;
7947 enum ice_status status;
7948 const u8 *pkt = NULL;
/* Lazily build the profile-to-result-index bitmap on first use. */
7954 /* Initialize profile to result index bitmap */
7955 if (!hw->switch_info->prof_res_bm_init) {
7956 hw->switch_info->prof_res_bm_init = 1;
7957 ice_init_prof_result_bm(hw);
/* A profile rule (determined by tun_type) may legitimately carry zero
 * lookups; any other rule with an empty lookup list is invalid.
 */
7960 prof_rule = ice_is_prof_rule(rinfo->tun_type);
7961 if (!prof_rule && !lkups_cnt)
7962 return ICE_ERR_PARAM;
7964 /* get # of words we need to match */
/* Count the non-zero 16-bit mask words across all lookups. The two
 * range checks below look redundant as shown; presumably an elided
 * if (prof_rule) / else wrapper selects between them -- TODO confirm
 * against the complete source.
 */
7966 for (i = 0; i < lkups_cnt; i++) {
7969 ptr = (u16 *)&lkups[i].m_u;
7970 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
7976 if (word_cnt > ICE_MAX_CHAIN_WORDS)
7977 return ICE_ERR_PARAM;
7979 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
7980 return ICE_ERR_PARAM;
7983 /* make sure that we can locate a dummy packet */
7984 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
7987 status = ICE_ERR_PARAM;
7988 goto err_ice_add_adv_rule;
/* Only the four supported forwarding/drop actions may be programmed. */
7991 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
7992 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
7993 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
7994 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
7997 vsi_handle = rinfo->sw_act.vsi_handle;
7998 if (!ice_is_vsi_valid(hw, vsi_handle))
7999 return ICE_ERR_PARAM;
/* Resolve the software VSI handle to the HW VSI number where needed. */
8001 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
8002 rinfo->sw_act.fwd_id.hw_vsi_id =
8003 ice_get_hw_vsi_num(hw, vsi_handle);
8004 if (rinfo->sw_act.flag & ICE_FLTR_TX)
8005 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
/* Create (or reuse) the recipe matching this set of lookups. */
8007 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
8010 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
8012 /* we have to add VSI to VSI_LIST and increment vsi_count.
8013 * Also Update VSI list so that we can change forwarding rule
8014 * if the rule already exists, we will check if it exists with
8015 * same vsi_id, if not then add it to the VSI list if it already
8016 * exists if not then create a VSI list and add the existing VSI
8017 * ID and the new VSI ID to the list
8018 * We will add that VSI to the list
8020 status = ice_adv_add_update_vsi_list(hw, m_entry,
8021 &m_entry->rule_info,
/* Existing rule matched: report its IDs back to the caller. */
8024 added_entry->rid = rid;
8025 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
8026 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* No matching rule: build a new switch-rule AQ buffer sized for the
 * dummy packet. NOTE(review): the `if (!s_rule)` guard before the
 * ICE_ERR_NO_MEMORY return appears elided.
 */
8030 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
8031 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
8033 return ICE_ERR_NO_MEMORY;
8034 act |= ICE_SINGLE_ACT_LAN_ENABLE;
/* Encode the caller's filter action into the rule's action word.
 * NOTE(review): the break statements between cases appear elided.
 */
8035 switch (rinfo->sw_act.fltr_act) {
8036 case ICE_FWD_TO_VSI:
8037 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
8038 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
8039 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
8042 act |= ICE_SINGLE_ACT_TO_Q;
8043 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
8044 ICE_SINGLE_ACT_Q_INDEX_M;
8046 case ICE_FWD_TO_QGRP:
/* Queue-group size is encoded as its log2 in the Q_REGION field. */
8047 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
8048 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
8049 act |= ICE_SINGLE_ACT_TO_Q;
8050 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
8051 ICE_SINGLE_ACT_Q_INDEX_M;
8052 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
8053 ICE_SINGLE_ACT_Q_REGION_M;
8055 case ICE_DROP_PACKET:
8056 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
8057 ICE_SINGLE_ACT_VALID_BIT;
/* Unknown action: surface a configuration error (default case elided). */
8060 status = ICE_ERR_CFG;
8061 goto err_ice_add_adv_rule;
8064 /* set the rule LOOKUP type based on caller specified 'RX'
8065 * instead of hardcoding it to be either LOOKUP_TX/RX
8067 * for 'RX' set the source to be the port number
8068 * for 'TX' set the source to be the source HW VSI number (determined
8072 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
8073 s_rule->pdata.lkup_tx_rx.src =
8074 CPU_TO_LE16(hw->port_info->lport);
8076 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
8077 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
8080 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
8081 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* Copy the dummy packet into the rule and patch the lookup values in. */
8083 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
8084 pkt_len, pkt_offsets);
8086 goto err_ice_add_adv_rule;
/* For tunnel rules, also fix up tunnel-specific packet fields. */
8088 if (rinfo->tun_type != ICE_NON_TUN &&
8089 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
8090 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
8091 s_rule->pdata.lkup_tx_rx.hdr,
8094 goto err_ice_add_adv_rule;
/* Program the rule into HW via the admin queue. */
8097 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
8098 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
8101 goto err_ice_add_adv_rule;
/* Track the new rule in software book-keeping; lkups are deep-copied so
 * the caller's array may be released after this call.
 */
8102 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
8103 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
8105 status = ICE_ERR_NO_MEMORY;
8106 goto err_ice_add_adv_rule;
8109 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
8110 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
8111 ICE_NONDMA_TO_NONDMA);
/* A profile rule may have lkups_cnt == 0, so a NULL copy is only an
 * error for non-profile rules.
 */
8112 if (!adv_fltr->lkups && !prof_rule) {
8113 status = ICE_ERR_NO_MEMORY;
8114 goto err_ice_add_adv_rule;
8117 adv_fltr->lkups_cnt = lkups_cnt;
8118 adv_fltr->rule_info = *rinfo;
/* The HW-assigned rule index becomes the filter rule ID. */
8119 adv_fltr->rule_info.fltr_rule_id =
8120 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
8121 sw = hw->switch_info;
8122 sw->recp_list[rid].adv_rule = true;
8123 rule_head = &sw->recp_list[rid].filt_rules;
8125 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
8126 adv_fltr->vsi_count = 1;
8128 /* Add rule entry to book keeping list */
8129 LIST_ADD(&adv_fltr->list_entry, rule_head);
8131 added_entry->rid = rid;
8132 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
8133 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
8135 err_ice_add_adv_rule:
/* Unified error path: free the partial book-keeping entry on failure. */
8136 if (status && adv_fltr) {
8137 ice_free(hw, adv_fltr->lkups);
8138 ice_free(hw, adv_fltr);
/* The AQ buffer is only needed while programming the rule. */
8141 ice_free(hw, s_rule);
8147 * ice_adv_rem_update_vsi_list
8148 * @hw: pointer to the hardware structure
8149 * @vsi_handle: VSI handle of the VSI to remove
8150 * @fm_list: filter management entry for which the VSI list management needs to
/* Removes @vsi_handle from the VSI list of an advanced rule that forwards
 * to a VSI list. When only one VSI remains afterwards, the rule is
 * converted back to a plain FWD_TO_VSI rule and the now-unused VSI list
 * is torn down. NOTE(review): several lines (declarations of vsi_list_id
 * and rem_vsi_handle, status checks, closing braces) are elided from this
 * chunk -- TODO recover full source.
 */
8153 static enum ice_status
8154 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
8155 struct ice_adv_fltr_mgmt_list_entry *fm_list)
8157 struct ice_vsi_list_map_info *vsi_list_info;
8158 enum ice_sw_lkup_type lkup_type;
8159 enum ice_status status;
/* Only rules that currently forward to a non-empty VSI list qualify. */
8162 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
8163 fm_list->vsi_count == 0)
8164 return ICE_ERR_PARAM;
8166 /* A rule with the VSI being removed does not exist */
8167 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
8168 return ICE_ERR_DOES_NOT_EXIST;
8170 lkup_type = ICE_SW_LKUP_LAST;
8171 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
/* Ask HW to drop this VSI from the list (remove flag = true). */
8172 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
8173 ice_aqc_opc_update_sw_rules,
/* Mirror the HW change in the software map and counters. */
8178 fm_list->vsi_count--;
8179 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
8180 vsi_list_info = fm_list->vsi_list_info;
/* With one VSI left, collapse the list rule into a direct VSI rule. */
8181 if (fm_list->vsi_count == 1) {
8182 struct ice_fltr_info tmp_fltr;
/* Locate the single remaining VSI in the bitmap. */
8185 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
8187 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
8188 return ICE_ERR_OUT_OF_RANGE;
8190 /* Make sure VSI list is empty before removing it below */
8191 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
8193 ice_aqc_opc_update_sw_rules,
/* Rewrite both the temporary filter and the stored rule_info so they
 * forward straight to the remaining VSI.
 */
8198 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
8199 tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
8200 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
8201 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
8202 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
8203 tmp_fltr.fwd_id.hw_vsi_id =
8204 ice_get_hw_vsi_num(hw, rem_vsi_handle);
8205 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
8206 ice_get_hw_vsi_num(hw, rem_vsi_handle);
8207 fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
8209 /* Update the previous switch rule of "MAC forward to VSI" to
8210 * "MAC fwd to VSI list"
8212 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
8214 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
8215 tmp_fltr.fwd_id.hw_vsi_id, status);
8218 fm_list->vsi_list_info->ref_cnt--;
8220 /* Remove the VSI list since it is no longer used */
8221 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
8223 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
8224 vsi_list_id, status);
/* Detach and free the software VSI-list map entry. */
8228 LIST_DEL(&vsi_list_info->list_entry);
8229 ice_free(hw, vsi_list_info);
8230 fm_list->vsi_list_info = NULL;
8237 * ice_rem_adv_rule - removes existing advanced switch rule
8238 * @hw: pointer to the hardware structure
8239 * @lkups: information on the words that needs to be looked up. All words
8240 * together makes one recipe
8241 * @lkups_cnt: num of entries in the lkups array
8242 * @rinfo: Its the pointer to the rule information for the rule
8244 * This function can be used to remove 1 rule at a time. The lkups is
8245 * used to describe all the words that forms the "lookup" portion of the
8246 * rule. These words can span multiple protocols. Callers to this function
8247 * need to pass in a list of protocol headers with lookup information along
8248 * and mask that determines which words are valid from the given protocol
8249 * header. rinfo describes other information related to this rule such as
8250 * forwarding IDs, priority of this rule, etc.
8253 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8254 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
8256 struct ice_adv_fltr_mgmt_list_entry *list_elem;
8257 struct ice_prot_lkup_ext lkup_exts;
8258 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
8259 enum ice_status status = ICE_SUCCESS;
8260 bool remove_rule = false;
8261 u16 i, rid, vsi_handle;
/* Rebuild the word-extraction description so the matching recipe can be
 * found again from the caller-supplied lookups.
 */
8263 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
8264 for (i = 0; i < lkups_cnt; i++) {
8267 if (lkups[i].type >= ICE_PROTOCOL_LAST)
8270 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
8275 /* Create any special protocol/offset pairs, such as looking at tunnel
8276 * bits by extracting metadata
8278 status = ice_add_special_words(rinfo, &lkup_exts);
8282 rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
8283 /* If did not find a recipe that match the existing criteria */
8284 if (rid == ICE_MAX_NUM_RECIPES)
8285 return ICE_ERR_PARAM;
8287 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
8288 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
8289 /* the rule is already removed */
/* Decide under the lock whether the HW rule itself must be deleted or
 * only the VSI list membership updated. NOTE(review): the
 * `remove_rule = true` assignments appear elided from the first and
 * third branches -- TODO confirm against full source.
 */
8292 ice_acquire_lock(rule_lock);
8293 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
8295 } else if (list_elem->vsi_count > 1) {
/* Other VSIs still use the rule: just drop this VSI from its list. */
8296 remove_rule = false;
8297 vsi_handle = rinfo->sw_act.vsi_handle;
8298 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
8300 vsi_handle = rinfo->sw_act.vsi_handle;
8301 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
8303 ice_release_lock(rule_lock);
8306 if (list_elem->vsi_count == 0)
8309 ice_release_lock(rule_lock);
/* Rule no longer referenced: delete it from HW, then from book-keeping. */
8311 struct ice_aqc_sw_rules_elem *s_rule;
8314 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
8315 s_rule = (struct ice_aqc_sw_rules_elem *)
8316 ice_malloc(hw, rule_buf_sz);
8318 return ICE_ERR_NO_MEMORY;
8319 s_rule->pdata.lkup_tx_rx.act = 0;
8320 s_rule->pdata.lkup_tx_rx.index =
8321 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
8322 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
8323 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
8325 ice_aqc_opc_remove_sw_rules, NULL);
/* Treat "does not exist" like success: the HW rule is gone either way,
 * so the software entry must still be cleaned up.
 */
8326 if (status == ICE_SUCCESS || status == ICE_ERR_DOES_NOT_EXIST) {
8327 struct ice_switch_info *sw = hw->switch_info;
8329 ice_acquire_lock(rule_lock);
8330 LIST_DEL(&list_elem->list_entry);
8331 ice_free(hw, list_elem->lkups);
8332 ice_free(hw, list_elem);
8333 ice_release_lock(rule_lock);
/* Clear the recipe's adv_rule flag once its last rule is gone. */
8334 if (LIST_EMPTY(&sw->recp_list[rid].filt_rules))
8335 sw->recp_list[rid].adv_rule = false;
8337 ice_free(hw, s_rule);
8343 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
8344 * @hw: pointer to the hardware structure
8345 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
8347 * This function is used to remove 1 rule at a time. The removal is based on
8348 * the remove_entry parameter. This function will remove rule for a given
8349 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
8352 ice_rem_adv_rule_by_id(struct ice_hw *hw,
8353 struct ice_rule_query_data *remove_entry)
8355 struct ice_adv_fltr_mgmt_list_entry *list_itr;
8356 struct LIST_HEAD_TYPE *list_head;
8357 struct ice_adv_rule_info rinfo;
8358 struct ice_switch_info *sw;
8360 sw = hw->switch_info;
/* Nothing to remove if the recipe was never created. */
8361 if (!sw->recp_list[remove_entry->rid].recp_created)
8362 return ICE_ERR_PARAM;
8363 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
/* Scan the recipe's rule list for the matching rule ID and delegate the
 * actual removal to ice_rem_adv_rule() using a copy of the stored
 * rule_info with the caller's VSI handle substituted in.
 */
8364 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
8366 if (list_itr->rule_info.fltr_rule_id ==
8367 remove_entry->rule_id) {
8368 rinfo = list_itr->rule_info;
8369 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
8370 return ice_rem_adv_rule(hw, list_itr->lkups,
8371 list_itr->lkups_cnt, &rinfo);
8374 /* either list is empty or unable to find rule */
8375 return ICE_ERR_DOES_NOT_EXIST;
8379 * ice_rem_adv_for_vsi - removes existing advanced switch rules for a
8381 * @hw: pointer to the hardware structure
8382 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
8384 * This function is used to remove all the rules for a given VSI and as soon
8385 * as removing a rule fails, it will return immediately with the error code,
8386 * else it will return ICE_SUCCESS
8388 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
8390 struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
8391 struct ice_vsi_list_map_info *map_info;
8392 struct LIST_HEAD_TYPE *list_head;
8393 struct ice_adv_rule_info rinfo;
8394 struct ice_switch_info *sw;
8395 enum ice_status status;
8398 sw = hw->switch_info;
/* Walk every recipe that holds advanced rules; skip recipes that were
 * never created or hold only legacy (non-advanced) rules.
 */
8399 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
8400 if (!sw->recp_list[rid].recp_created)
8402 if (!sw->recp_list[rid].adv_rule)
8405 list_head = &sw->recp_list[rid].filt_rules;
/* SAFE iteration: ice_rem_adv_rule() below may unlink list_itr. */
8406 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp_entry, list_head,
8407 ice_adv_fltr_mgmt_list_entry,
8409 rinfo = list_itr->rule_info;
/* A VSI-list rule applies to this VSI only if the VSI is set in its
 * bitmap; a direct rule must carry the VSI handle itself.
 */
8411 if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
8412 map_info = list_itr->vsi_list_info;
8416 if (!ice_is_bit_set(map_info->vsi_map,
8419 } else if (rinfo.sw_act.vsi_handle != vsi_handle) {
8423 rinfo.sw_act.vsi_handle = vsi_handle;
8424 status = ice_rem_adv_rule(hw, list_itr->lkups,
8425 list_itr->lkups_cnt, &rinfo);
8435 * ice_replay_fltr - Replay all the filters stored by a specific list head
8436 * @hw: pointer to the hardware structure
8437 * @list_head: list for which filters needs to be replayed
8438 * @recp_id: Recipe ID for which rules need to be replayed
8440 static enum ice_status
8441 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
8443 struct ice_fltr_mgmt_list_entry *itr;
8444 enum ice_status status = ICE_SUCCESS;
8445 struct ice_sw_recipe *recp_list;
8446 u8 lport = hw->port_info->lport;
8447 struct LIST_HEAD_TYPE l_head;
8449 if (LIST_EMPTY(list_head))
8452 recp_list = &hw->switch_info->recp_list[recp_id];
8453 /* Move entries from the given list_head to a temporary l_head so that
8454 * they can be replayed. Otherwise when trying to re-add the same
8455 * filter, the function will return already exists
8457 LIST_REPLACE_INIT(list_head, &l_head);
8459 /* Mark the given list_head empty by reinitializing it so filters
8460 * could be added again by *handler
8462 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
8464 struct ice_fltr_list_entry f_entry;
8467 f_entry.fltr_info = itr->fltr_info;
/* Single-VSI, non-VLAN filters can be re-added directly. */
8468 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
8469 status = ice_add_rule_internal(hw, recp_list, lport,
8471 if (status != ICE_SUCCESS)
8476 /* Add a filter per VSI separately */
/* Multi-VSI (or VLAN) filters: replay once per VSI set in the map,
 * clearing each bit so the re-add logic can set it again.
 */
8477 ice_for_each_set_bit(vsi_handle, itr->vsi_list_info->vsi_map,
8479 if (!ice_is_vsi_valid(hw, vsi_handle))
8482 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
8483 f_entry.fltr_info.vsi_handle = vsi_handle;
8484 f_entry.fltr_info.fwd_id.hw_vsi_id =
8485 ice_get_hw_vsi_num(hw, vsi_handle);
8486 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
8487 if (recp_id == ICE_SW_LKUP_VLAN)
8488 status = ice_add_vlan_internal(hw, recp_list,
8491 status = ice_add_rule_internal(hw, recp_list,
8494 if (status != ICE_SUCCESS)
8499 /* Clear the filter management list */
/* The temporary list (old entries) is no longer needed after replay. */
8500 ice_rem_sw_rule_info(hw, &l_head);
8505 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
8506 * @hw: pointer to the hardware structure
8508 * NOTE: This function does not clean up partially added filters on error.
8509 * It is up to caller of the function to issue a reset or fail early.
8511 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
8513 struct ice_switch_info *sw = hw->switch_info;
8514 enum ice_status status = ICE_SUCCESS;
/* Replay every recipe's filter list, stopping at the first failure. */
8517 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8518 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
8520 status = ice_replay_fltr(hw, i, head);
8521 if (status != ICE_SUCCESS)
8528 * ice_replay_vsi_fltr - Replay filters for requested VSI
8529 * @hw: pointer to the hardware structure
8530 * @pi: pointer to port information structure
8531 * @sw: pointer to switch info struct for which function replays filters
8532 * @vsi_handle: driver VSI handle
8533 * @recp_id: Recipe ID for which rules need to be replayed
8534 * @list_head: list for which filters need to be replayed
8536 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
8537 * It is required to pass valid VSI handle.
8539 static enum ice_status
8540 ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
8541 struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
8542 struct LIST_HEAD_TYPE *list_head)
8544 struct ice_fltr_mgmt_list_entry *itr;
8545 enum ice_status status = ICE_SUCCESS;
8546 struct ice_sw_recipe *recp_list;
8549 if (LIST_EMPTY(list_head))
8551 recp_list = &sw->recp_list[recp_id];
8552 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
8554 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
8556 struct ice_fltr_list_entry f_entry;
8558 f_entry.fltr_info = itr->fltr_info;
/* Direct (single-VSI, non-VLAN) filter belonging to this VSI:
 * re-add it as-is, after translating a VSI source ID to HW number.
 */
8559 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
8560 itr->fltr_info.vsi_handle == vsi_handle) {
8561 /* update the src in case it is VSI num */
8562 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
8563 f_entry.fltr_info.src = hw_vsi_id;
8564 status = ice_add_rule_internal(hw, recp_list,
8567 if (status != ICE_SUCCESS)
/* VSI-list filters: only replay entries whose map contains this VSI. */
8571 if (!itr->vsi_list_info ||
8572 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
8574 /* Clearing it so that the logic can add it back */
8575 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
8576 f_entry.fltr_info.vsi_handle = vsi_handle;
8577 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
8578 /* update the src in case it is VSI num */
8579 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
8580 f_entry.fltr_info.src = hw_vsi_id;
/* VLAN rules take a dedicated re-add path. */
8581 if (recp_id == ICE_SW_LKUP_VLAN)
8582 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
8584 status = ice_add_rule_internal(hw, recp_list,
8587 if (status != ICE_SUCCESS)
8595 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
8596 * @hw: pointer to the hardware structure
8597 * @vsi_handle: driver VSI handle
8598 * @list_head: list for which filters need to be replayed
8600 * Replay the advanced rule for the given VSI.
8602 static enum ice_status
8603 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
8604 struct LIST_HEAD_TYPE *list_head)
8606 struct ice_rule_query_data added_entry = { 0 };
8607 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
8608 enum ice_status status = ICE_SUCCESS;
8610 if (LIST_EMPTY(list_head))
/* Re-program each stored advanced rule that targets this VSI by calling
 * ice_add_adv_rule() with its saved lookups and rule info.
 */
8612 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
8614 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
8615 u16 lk_cnt = adv_fltr->lkups_cnt;
/* Skip rules bound to other VSIs. */
8617 if (vsi_handle != rinfo->sw_act.vsi_handle)
8619 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
8628 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
8629 * @hw: pointer to the hardware structure
8630 * @pi: pointer to port information structure
8631 * @vsi_handle: driver VSI handle
8633 * Replays filters for requested VSI via vsi_handle.
8636 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
8639 struct ice_switch_info *sw = hw->switch_info;
8640 enum ice_status status;
8643 /* Update the recipes that were created */
/* For each recipe, dispatch to the legacy or advanced replay path based
 * on the recipe's adv_rule flag; stop on the first failure.
 */
8644 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8645 struct LIST_HEAD_TYPE *head;
8647 head = &sw->recp_list[i].filt_replay_rules;
8648 if (!sw->recp_list[i].adv_rule)
8649 status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
8652 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
8653 if (status != ICE_SUCCESS)
8661 * ice_rm_all_sw_replay_rule - helper function to delete filter replay rules
8662 * @hw: pointer to the HW struct
8663 * @sw: pointer to switch info struct for which function removes filters
8665 * Deletes the filter replay rules for given switch
8667 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
/* Free each recipe's replay list, choosing the legacy or advanced
 * cleanup helper based on the recipe's adv_rule flag.
 */
8674 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8675 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
8676 struct LIST_HEAD_TYPE *l_head;
8678 l_head = &sw->recp_list[i].filt_replay_rules;
8679 if (!sw->recp_list[i].adv_rule)
8680 ice_rem_sw_rule_info(hw, l_head);
8682 ice_rem_adv_rule_info(hw, l_head);
8688 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
8689 * @hw: pointer to the HW struct
8691 * Deletes the filter replay rules.
8693 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
/* Thin wrapper: clean up replay rules for the HW's own switch_info. */
8695 ice_rm_sw_replay_rule_info(hw, hw->switch_info);