1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020 Intel Corporation
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
/* Byte offsets into the dummy Ethernet header and protocol/EtherType IDs
 * used when building the dummy packets below.
 */
#define ICE_ETH_DA_OFFSET 0
#define ICE_ETH_ETHTYPE_OFFSET 12
#define ICE_ETH_VLAN_TCI_OFFSET 14
#define ICE_MAX_VLAN_ID 0xFFF
#define ICE_IPV4_NVGRE_PROTO_ID 0x002F /* IP protocol 47 (GRE), used for NVGRE */
#define ICE_PPP_IPV6_PROTO_ID 0x0057 /* PPP protocol number for IPv6 payloads */
#define ICE_IPV6_ETHER_ID 0x86DD /* EtherType for IPv6 */
#define ICE_TCP_PROTO_ID 0x06 /* IP protocol 6 (TCP) */
18 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
19 * struct to configure any switch filter rules.
20 * {DA (6 bytes), SA(6 bytes),
21 * Ether type (2 bytes for header without VLAN tag) OR
22 * VLAN tag (4 bytes for header with VLAN tag) }
24 * Word on Hardcoded values
25 * byte 0 = 0x2: to identify it as locally administered DA MAC
26 * byte 6 = 0x2: to identify it as locally administered SA MAC
27 * byte 12 = 0x81 & byte 13 = 0x00:
28 * In case of VLAN filter first two bytes defines ether type (0x8100)
29 * and remaining two bytes are placeholder for programming a given VLAN ID
30 * In case of Ether type filter it is treated as header without VLAN tag
31 * and byte 12 and 13 is used to program a given Ether type instead
33 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* Pairs a protocol header type with its byte offset inside a dummy packet;
 * each offsets table below is terminated by an ICE_PROTOCOL_LAST entry.
 */
struct ice_dummy_pkt_offsets {
	enum ice_protocol_type type;
	u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
/* offset info for MAC + IPv4 + NVGRE + inner MAC/IPv4/TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet: outer MAC/IPv4 (protocol 0x2F = GRE), NVGRE, inner IPv4+TCP */
static const u8 dummy_gre_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
	0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
/* offset info for MAC + IPv4 + NVGRE + inner MAC/IPv4/UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet: outer MAC/IPv4 (protocol 0x2F = GRE), NVGRE, inner IPv4+UDP */
static const u8 dummy_gre_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
	0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
	0x00, 0x08, 0x00, 0x00,
/* offset info for MAC + IPv4 + UDP + VXLAN + inner MAC/IPv4/TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_VXLAN_GPE, 42 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet: outer UDP dst port 0x12b5 (4789 = VXLAN), inner IPv4+TCP */
static const u8 dummy_udp_tun_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
	0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
	0x00, 0x46, 0x00, 0x00,
	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
/* offset info for MAC + IPv4 + UDP + VXLAN + inner MAC/IPv4/UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_VXLAN_GPE, 42 },
	{ ICE_UDP_ILOS, 84 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet: outer UDP dst port 0x12b5 (4789 = VXLAN), inner IPv4+UDP */
static const u8 dummy_udp_tun_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
	0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
	0x00, 0x3a, 0x00, 0x00,
	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
	0x00, 0x08, 0x00, 0x00,
/* offset info for MAC + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_UDP_ILOS, 34 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv4 + UDP (UDP length 8 = header only) */
static const u8 dummy_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
	0x00, 0x08, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14 },
	{ ICE_IPV4_OFOS, 18 },
	{ ICE_UDP_ILOS, 38 },
	{ ICE_PROTOCOL_LAST, 0 },
/* C-tag (802.1Q), IPv4:UDP dummy packet */
static const u8 dummy_vlan_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x81, 0x00,		/* ICE_ETYPE_OL 12 */
	0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
	0x00, 0x08, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv4 + TCP */
static const u8 dummy_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14 },
	{ ICE_IPV4_OFOS, 18 },
	{ ICE_PROTOCOL_LAST, 0 },
/* C-tag (802.1Q), IPv4:TCP dummy packet */
static const u8 dummy_vlan_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x81, 0x00,		/* ICE_ETYPE_OL 12 */
	0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv6 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV6_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv6 + TCP */
static const u8 dummy_tcp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */
	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 (stale comment said 40) */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* C-tag (802.1Q): IPv6 + TCP */
static const struct ice_dummy_pkt_offsets
dummy_vlan_tcp_ipv6_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14 },
	{ ICE_IPV6_OFOS, 18 },
	{ ICE_PROTOCOL_LAST, 0 },
/* C-tag (802.1Q), IPv6 + TCP dummy packet */
static const u8 dummy_vlan_tcp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x81, 0x00,		/* ICE_ETYPE_OL 12 */
	0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv6 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV6_OFOS, 14 },
	{ ICE_UDP_ILOS, 54 },
	{ ICE_PROTOCOL_LAST, 0 },
/* IPv6 + UDP dummy packet */
static const u8 dummy_udp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */
	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 (stale comment said 40) */
	0x00, 0x10, 0x11, 0x00, /* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
	0x00, 0x10, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* C-tag (802.1Q): IPv6 + UDP */
static const struct ice_dummy_pkt_offsets
dummy_vlan_udp_ipv6_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14 },
	{ ICE_IPV6_OFOS, 18 },
	{ ICE_UDP_ILOS, 58 },
	{ ICE_PROTOCOL_LAST, 0 },
/* C-tag (802.1Q), IPv6 + UDP dummy packet */
static const u8 dummy_vlan_udp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x81, 0x00,		/* ICE_ETYPE_OL 12 */
	0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
	0x00, 0x08, 0x11, 0x00, /* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
	0x00, 0x08, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv4 + UDP + GTP-U dummy packet */
static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet: UDP dst port 0x0868 (2152 = GTP-U) with PDU session ext hdr */
static const u8 dummy_udp_gtp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
	0x00, 0x1c, 0x00, 0x00,
	0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,
	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
	0x00, 0x00, 0x00, 0x00,
/* offset info for a PPPoE session dummy packet (EtherType 0x8864 after VLAN) */
static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14},
	{ ICE_PROTOCOL_LAST, 0 },
/* offset info for PPPoE + IPv4 dummy packet */
static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv4_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14},
	{ ICE_IPV4_OFOS, 26 },
	{ ICE_PROTOCOL_LAST, 0 },
/* PPPoE session (0x8864), PPP protocol 0x0021 (IPv4) dummy packet */
static const u8 dummy_pppoe_ipv4_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x81, 0x00,		/* ICE_ETYPE_OL 12 */
	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
	0x00, 0x21,		/* PPP Link Layer 24 */
	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_OFOS 26 (stale label said ICE_IPV4_IL) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* offset info for PPPoE + IPv4 + TCP dummy packet.
 * NOTE(review): unlike the sibling tables this one is not declared
 * static const in the visible text — verify against the full file.
 */
struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_tcp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14},
	{ ICE_IPV4_OFOS, 26 },
	{ ICE_PROTOCOL_LAST, 0 },
/* PPPoE session (0x8864), PPP protocol 0x0021 (IPv4), TCP dummy packet */
static const u8 dummy_pppoe_ipv4_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x81, 0x00,		/* ICE_ETYPE_OL 12 */
	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
	0x00, 0x21,		/* PPP Link Layer 24 */
	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 26 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 46 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* offset info for PPPoE + IPv4 + UDP dummy packet.
 * NOTE(review): unlike the sibling tables this one is not declared
 * static const in the visible text — verify against the full file.
 */
struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_udp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14},
	{ ICE_IPV4_OFOS, 26 },
	{ ICE_UDP_ILOS, 46 },
	{ ICE_PROTOCOL_LAST, 0 },
/* PPPoE session (0x8864), PPP protocol 0x0021 (IPv4), UDP dummy packet */
static const u8 dummy_pppoe_ipv4_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x81, 0x00,		/* ICE_ETYPE_OL 12 */
	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
	0x00, 0x21,		/* PPP Link Layer 24 */
	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 26 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 46 */
	0x00, 0x08, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* offset info for PPPoE + IPv6 dummy packet */
static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14},
	{ ICE_IPV6_OFOS, 26 },
	{ ICE_PROTOCOL_LAST, 0 },
/* PPPoE session (0x8864), PPP protocol 0x0057 (IPv6) dummy packet;
 * IPv6 next header 0x3b = no next header.
 */
static const u8 dummy_pppoe_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x81, 0x00,		/* ICE_ETYPE_OL 12 */
	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
	0x00, 0x57,		/* PPP Link Layer 24 */
	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
	0x00, 0x00, 0x3b, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* offset info for PPPoE + IPv6 + TCP dummy packet.
 * NOTE(review): unlike the sibling tables this one is not declared
 * static const in the visible text — verify against the full file.
 */
struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_tcp_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14},
	{ ICE_IPV6_OFOS, 26 },
	{ ICE_PROTOCOL_LAST, 0 },
/* PPPoE session (0x8864), PPP protocol 0x0057 (IPv6), TCP dummy packet */
static const u8 dummy_pppoe_ipv6_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x81, 0x00,		/* ICE_ETYPE_OL 12 */
	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
	0x00, 0x57,		/* PPP Link Layer 24 */
	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 66 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* offset info for PPPoE + IPv6 + UDP dummy packet.
 * NOTE(review): unlike the sibling tables this one is not declared
 * static const in the visible text — verify against the full file.
 */
struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_udp_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14},
	{ ICE_IPV6_OFOS, 26 },
	{ ICE_UDP_ILOS, 66 },
	{ ICE_PROTOCOL_LAST, 0 },
/* PPPoE session (0x8864), PPP protocol 0x0057 (IPv6), UDP dummy packet */
static const u8 dummy_pppoe_ipv6_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x81, 0x00,		/* ICE_ETYPE_OL 12 */
	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
	0x00, 0x57,		/* PPP Link Layer 24 */
	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
	0x00, 0x08, 0x11, 0x00, /* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 66 */
	0x00, 0x08, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* offset info for MAC + IPv4 + ESP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet: IPv4 protocol 0x32 (50 = ESP) */
static const u8 dummy_ipv4_esp_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 (stale label said ICE_IPV4_IL) */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x32, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* offset info for MAC + IPv6 + ESP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
	{ ICE_IPV6_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet: IPv6 next header 0x32 (50 = ESP) */
static const u8 dummy_ipv6_esp_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x08, 0x32, 0x00, /* Next header ESP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* offset info for MAC + IPv4 + AH dummy packet */
static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet: IPv4 protocol 0x33 (51 = AH) */
static const u8 dummy_ipv4_ah_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_OFOS 14 (stale label said ICE_IPV4_IL) */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x33, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* offset info for MAC + IPv6 + AH dummy packet */
static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
	{ ICE_IPV6_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet: IPv6 next header 0x33 (51 = AH) */
static const u8 dummy_ipv6_ah_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x0c, 0x33, 0x00, /* Next header AH */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* offset info for MAC + IPv4 + NAT-T (UDP-encapsulated ESP) dummy packet */
static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_UDP_ILOS, 34 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet: UDP dst port 0x1194 (4500 = IPsec NAT traversal) */
static const u8 dummy_ipv4_nat_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_OFOS 14 (stale label said ICE_IPV4_IL) */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* offset info for MAC + IPv6 + NAT-T (UDP-encapsulated ESP) dummy packet */
static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
	{ ICE_IPV6_OFOS, 14 },
	{ ICE_UDP_ILOS, 54 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet: UDP dst port 0x1194 (4500 = IPsec NAT traversal) */
static const u8 dummy_ipv6_nat_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* offset info for MAC + IPv4 + L2TPv3 dummy packet */
static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet: IPv4 protocol 0x73 (115 = L2TPv3) */
static const u8 dummy_ipv4_l2tpv3_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_OFOS 14 (stale label said ICE_IPV4_IL) */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x73, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* offset info for MAC + IPv6 + L2TPv3 dummy packet */
static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
	{ ICE_IPV6_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet: IPv6 next header 0x73 (115 = L2TPv3) */
static const u8 dummy_ipv6_l2tpv3_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 (stale label said ICE_IPV6_IL) */
	0x00, 0x0c, 0x73, 0x40,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* recipe to profile association bitmap: entry [rid] has bit j set when
 * field-vector profile j is associated with recipe rid
 */
static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
			  ICE_MAX_NUM_PROFILES);
/* profile to recipe association bitmap (inverse of the map above) */
static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
			  ICE_MAX_NUM_RECIPES);
/* forward declaration: refreshes the two association bitmaps from FW */
static void ice_get_recp_to_prof_map(struct ice_hw *hw);
/**
 * ice_collect_result_idx - copy result index values
 * @buf: buffer that contains the result index
 * @recp: the recipe struct to copy data into
 *
 * If the recipe entry in @buf has a valid result index
 * (ICE_AQ_RECIPE_RESULT_EN set), record that index (with the enable flag
 * masked off) in @recp's res_idxs bitmap.
 */
static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
struct ice_sw_recipe *recp)
if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
ice_set_bit(buf->content.result_indx &
~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
1028 * ice_get_tun_type_for_recipe - get tunnel type for the recipe
1029 * @rid: recipe ID that we are populating
1031 static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid)
1033 u8 vxlan_profile[12] = {10, 11, 12, 16, 17, 18, 22, 23, 24, 25, 26, 27};
1034 u8 gre_profile[12] = {13, 14, 15, 19, 20, 21, 28, 29, 30, 31, 32, 33};
1035 u8 pppoe_profile[7] = {34, 35, 36, 37, 38, 39, 40};
1036 u8 non_tun_profile[6] = {4, 5, 6, 7, 8, 9};
1037 enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
1038 u16 i, j, profile_num = 0;
1039 bool non_tun_valid = false;
1040 bool pppoe_valid = false;
1041 bool vxlan_valid = false;
1042 bool gre_valid = false;
1043 bool gtp_valid = false;
1044 bool flag_valid = false;
1046 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1047 if (!ice_is_bit_set(recipe_to_profile[rid], j))
1052 for (i = 0; i < 12; i++) {
1053 if (gre_profile[i] == j)
1057 for (i = 0; i < 12; i++) {
1058 if (vxlan_profile[i] == j)
1062 for (i = 0; i < 7; i++) {
1063 if (pppoe_profile[i] == j)
1067 for (i = 0; i < 6; i++) {
1068 if (non_tun_profile[i] == j)
1069 non_tun_valid = true;
1072 if (j >= ICE_PROFID_IPV4_GTPC_TEID &&
1073 j <= ICE_PROFID_IPV6_GTPU_IPV6_OTHER)
1076 if (j >= ICE_PROFID_IPV4_ESP &&
1077 j <= ICE_PROFID_IPV6_PFCP_SESSION)
1081 if (!non_tun_valid && vxlan_valid)
1082 tun_type = ICE_SW_TUN_VXLAN;
1083 else if (!non_tun_valid && gre_valid)
1084 tun_type = ICE_SW_TUN_NVGRE;
1085 else if (!non_tun_valid && pppoe_valid)
1086 tun_type = ICE_SW_TUN_PPPOE;
1087 else if (!non_tun_valid && gtp_valid)
1088 tun_type = ICE_SW_TUN_GTP;
1089 else if ((non_tun_valid && vxlan_valid) ||
1090 (non_tun_valid && gre_valid) ||
1091 (non_tun_valid && gtp_valid) ||
1092 (non_tun_valid && pppoe_valid))
1093 tun_type = ICE_SW_TUN_AND_NON_TUN;
1094 else if ((non_tun_valid && !vxlan_valid) ||
1095 (non_tun_valid && !gre_valid) ||
1096 (non_tun_valid && !gtp_valid) ||
1097 (non_tun_valid && !pppoe_valid))
1098 tun_type = ICE_NON_TUN;
1100 if (profile_num > 1 && tun_type == ICE_SW_TUN_PPPOE) {
1101 i = ice_is_bit_set(recipe_to_profile[rid],
1102 ICE_PROFID_PPPOE_IPV4_OTHER);
1103 j = ice_is_bit_set(recipe_to_profile[rid],
1104 ICE_PROFID_PPPOE_IPV6_OTHER);
1106 tun_type = ICE_SW_TUN_PPPOE_IPV4;
1108 tun_type = ICE_SW_TUN_PPPOE_IPV6;
1111 if (profile_num == 1 && (flag_valid || non_tun_valid)) {
1112 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1113 if (ice_is_bit_set(recipe_to_profile[rid], j)) {
1115 case ICE_PROFID_IPV4_TCP:
1116 tun_type = ICE_SW_IPV4_TCP;
1118 case ICE_PROFID_IPV4_UDP:
1119 tun_type = ICE_SW_IPV4_UDP;
1121 case ICE_PROFID_IPV6_TCP:
1122 tun_type = ICE_SW_IPV6_TCP;
1124 case ICE_PROFID_IPV6_UDP:
1125 tun_type = ICE_SW_IPV6_UDP;
1127 case ICE_PROFID_PPPOE_PAY:
1128 tun_type = ICE_SW_TUN_PPPOE_PAY;
1130 case ICE_PROFID_PPPOE_IPV4_TCP:
1131 tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1133 case ICE_PROFID_PPPOE_IPV4_UDP:
1134 tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1136 case ICE_PROFID_PPPOE_IPV4_OTHER:
1137 tun_type = ICE_SW_TUN_PPPOE_IPV4;
1139 case ICE_PROFID_PPPOE_IPV6_TCP:
1140 tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1142 case ICE_PROFID_PPPOE_IPV6_UDP:
1143 tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1145 case ICE_PROFID_PPPOE_IPV6_OTHER:
1146 tun_type = ICE_SW_TUN_PPPOE_IPV6;
1148 case ICE_PROFID_IPV4_ESP:
1149 tun_type = ICE_SW_TUN_IPV4_ESP;
1151 case ICE_PROFID_IPV6_ESP:
1152 tun_type = ICE_SW_TUN_IPV6_ESP;
1154 case ICE_PROFID_IPV4_AH:
1155 tun_type = ICE_SW_TUN_IPV4_AH;
1157 case ICE_PROFID_IPV6_AH:
1158 tun_type = ICE_SW_TUN_IPV6_AH;
1160 case ICE_PROFID_IPV4_NAT_T:
1161 tun_type = ICE_SW_TUN_IPV4_NAT_T;
1163 case ICE_PROFID_IPV6_NAT_T:
1164 tun_type = ICE_SW_TUN_IPV6_NAT_T;
1166 case ICE_PROFID_IPV4_PFCP_NODE:
1168 ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1170 case ICE_PROFID_IPV6_PFCP_NODE:
1172 ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1174 case ICE_PROFID_IPV4_PFCP_SESSION:
1176 ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1178 case ICE_PROFID_IPV6_PFCP_SESSION:
1180 ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1182 case ICE_PROFID_MAC_IPV4_L2TPV3:
1183 tun_type = ICE_SW_TUN_IPV4_L2TPV3;
1185 case ICE_PROFID_MAC_IPV6_L2TPV3:
1186 tun_type = ICE_SW_TUN_IPV6_L2TPV3;
1201 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
1202 * @hw: pointer to hardware structure
1203 * @recps: struct that we need to populate
1204 * @rid: recipe ID that we are populating
1205 * @refresh_required: true if we should get recipe to profile mapping from FW
1207 * This function is used to populate all the necessary entries into our
1208 * bookkeeping so that we have a current list of all the recipes that are
1209 * programmed in the firmware.
1211 static enum ice_status
1212 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
1213 bool *refresh_required)
1215 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
1216 struct ice_aqc_recipe_data_elem *tmp;
1217 u16 num_recps = ICE_MAX_NUM_RECIPES;
1218 struct ice_prot_lkup_ext *lkup_exts;
1219 enum ice_status status;
1223 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
1225 /* we need a buffer big enough to accommodate all the recipes */
1226 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
1227 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
1229 return ICE_ERR_NO_MEMORY;
1231 tmp[0].recipe_indx = rid;
1232 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
1233 /* non-zero status meaning recipe doesn't exist */
1237 /* Get recipe to profile map so that we can get the fv from lkups that
1238 * we read for a recipe from FW. Since we want to minimize the number of
1239 * times we make this FW call, just make one call and cache the copy
1240 * until a new recipe is added. This operation is only required the
1241 * first time to get the changes from FW. Then to search existing
1242 * entries we don't need to update the cache again until another recipe
1245 if (*refresh_required) {
1246 ice_get_recp_to_prof_map(hw);
1247 *refresh_required = false;
1250 /* Start populating all the entries for recps[rid] based on lkups from
1251 * firmware. Note that we are only creating the root recipe in our
1254 lkup_exts = &recps[rid].lkup_exts;
1256 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
1257 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
1258 struct ice_recp_grp_entry *rg_entry;
1259 u8 i, prof, idx, prot = 0;
1263 rg_entry = (struct ice_recp_grp_entry *)
1264 ice_malloc(hw, sizeof(*rg_entry));
1266 status = ICE_ERR_NO_MEMORY;
1270 idx = root_bufs.recipe_indx;
1271 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
1273 /* Mark all result indices in this chain */
1274 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1275 ice_set_bit(root_bufs.content.result_indx &
1276 ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
1278 /* get the first profile that is associated with rid */
1279 prof = ice_find_first_bit(recipe_to_profile[idx],
1280 ICE_MAX_NUM_PROFILES);
1281 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
1282 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
1284 rg_entry->fv_idx[i] = lkup_indx;
1285 rg_entry->fv_mask[i] =
1286 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
1288 /* If the recipe is a chained recipe then all its
1289 * child recipe's result will have a result index.
1290 * To fill fv_words we should not use those result
1291 * index, we only need the protocol ids and offsets.
1292 * We will skip all the fv_idx which stores result
1293 * index in them. We also need to skip any fv_idx which
1294 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
1295 * valid offset value.
1297 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
1298 rg_entry->fv_idx[i]) ||
1299 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
1300 rg_entry->fv_idx[i] == 0)
1303 ice_find_prot_off(hw, ICE_BLK_SW, prof,
1304 rg_entry->fv_idx[i], &prot, &off);
1305 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
1306 lkup_exts->fv_words[fv_word_idx].off = off;
1307 lkup_exts->field_mask[fv_word_idx] =
1308 rg_entry->fv_mask[i];
1311 /* populate rg_list with the data from the child entry of this
1314 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
1316 /* Propagate some data to the recipe database */
1317 recps[idx].is_root = !!is_root;
1318 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1319 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
1320 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
1321 recps[idx].chain_idx = root_bufs.content.result_indx &
1322 ~ICE_AQ_RECIPE_RESULT_EN;
1323 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
1325 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
1331 /* Only do the following for root recipes entries */
1332 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
1333 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
1334 recps[idx].root_rid = root_bufs.content.rid &
1335 ~ICE_AQ_RECIPE_ID_IS_ROOT;
1336 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1339 /* Complete initialization of the root recipe entry */
1340 lkup_exts->n_val_words = fv_word_idx;
1341 recps[rid].big_recp = (num_recps > 1);
1342 recps[rid].n_grp_count = (u8)num_recps;
1343 recps[rid].tun_type = ice_get_tun_type_for_recipe(rid);
1344 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
1345 ice_memdup(hw, tmp, recps[rid].n_grp_count *
1346 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
1347 if (!recps[rid].root_buf)
1350 /* Copy result indexes */
1351 ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
1352 recps[rid].recp_created = true;
1360 * ice_get_recp_to_prof_map - updates recipe to profile mapping
1361 * @hw: pointer to hardware structure
1363 * This function is used to populate recipe_to_profile matrix where index to
1364 * this array is the recipe ID and the element is the mapping of which profiles
1365 * is this recipe mapped to.
1367 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
1369 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
1372 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
1375 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
1376 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
1377 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
1379 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
1380 ICE_MAX_NUM_RECIPES);
1381 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
1382 if (ice_is_bit_set(r_bitmap, j))
1383 ice_set_bit(i, recipe_to_profile[j]);
1388 * ice_init_def_sw_recp - initialize the recipe book keeping tables
1389 * @hw: pointer to the HW struct
1390 * @recp_list: pointer to sw recipe list
1392 * Allocate memory for the entire recipe table and initialize the structures/
1393 * entries corresponding to basic recipes.
1396 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
1398 struct ice_sw_recipe *recps;
1401 recps = (struct ice_sw_recipe *)
1402 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
1404 return ICE_ERR_NO_MEMORY;
1406 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
1407 recps[i].root_rid = i;
1408 INIT_LIST_HEAD(&recps[i].filt_rules);
1409 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
1410 INIT_LIST_HEAD(&recps[i].rg_list);
1411 ice_init_lock(&recps[i].filt_rule_lock);
1420 * ice_aq_get_sw_cfg - get switch configuration
1421 * @hw: pointer to the hardware structure
1422 * @buf: pointer to the result buffer
1423 * @buf_size: length of the buffer available for response
1424 * @req_desc: pointer to requested descriptor
1425 * @num_elems: pointer to number of elements
1426 * @cd: pointer to command details structure or NULL
1428 * Get switch configuration (0x0200) to be placed in 'buff'.
1429 * This admin command returns information such as initial VSI/port number
1430 * and switch ID it belongs to.
1432 * NOTE: *req_desc is both an input/output parameter.
1433 * The caller of this function first calls this function with *request_desc set
1434 * to 0. If the response from f/w has *req_desc set to 0, all the switch
1435 * configuration information has been returned; if non-zero (meaning not all
1436 * the information was returned), the caller should call this function again
1437 * with *req_desc set to the previous value returned by f/w to get the
1438 * next block of switch configuration information.
1440 * *num_elems is output only parameter. This reflects the number of elements
1441 * in response buffer. The caller of this function to use *num_elems while
1442 * parsing the response buffer.
1444 static enum ice_status
1445 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
1446 u16 buf_size, u16 *req_desc, u16 *num_elems,
1447 struct ice_sq_cd *cd)
1449 struct ice_aqc_get_sw_cfg *cmd;
1450 enum ice_status status;
1451 struct ice_aq_desc desc;
1453 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
1454 cmd = &desc.params.get_sw_conf;
1455 cmd->element = CPU_TO_LE16(*req_desc);
1457 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1459 *req_desc = LE16_TO_CPU(cmd->element);
1460 *num_elems = LE16_TO_CPU(cmd->num_elems);
1467 * ice_alloc_sw - allocate resources specific to switch
1468 * @hw: pointer to the HW struct
1469 * @ena_stats: true to turn on VEB stats
1470 * @shared_res: true for shared resource, false for dedicated resource
1471 * @sw_id: switch ID returned
1472 * @counter_id: VEB counter ID returned
1474 * allocates switch resources (SWID and VEB counter) (0x0208)
1477 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
1480 struct ice_aqc_alloc_free_res_elem *sw_buf;
1481 struct ice_aqc_res_elem *sw_ele;
1482 enum ice_status status;
1485 buf_len = sizeof(*sw_buf);
1486 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1487 ice_malloc(hw, buf_len);
1489 return ICE_ERR_NO_MEMORY;
1491 /* Prepare buffer for switch ID.
1492 * The number of resource entries in buffer is passed as 1 since only a
1493 * single switch/VEB instance is allocated, and hence a single sw_id
1496 sw_buf->num_elems = CPU_TO_LE16(1);
1498 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
1499 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
1500 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
1502 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1503 ice_aqc_opc_alloc_res, NULL);
1506 goto ice_alloc_sw_exit;
1508 sw_ele = &sw_buf->elem[0];
1509 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
1512 /* Prepare buffer for VEB Counter */
1513 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
1514 struct ice_aqc_alloc_free_res_elem *counter_buf;
1515 struct ice_aqc_res_elem *counter_ele;
1517 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1518 ice_malloc(hw, buf_len);
1520 status = ICE_ERR_NO_MEMORY;
1521 goto ice_alloc_sw_exit;
1524 /* The number of resource entries in buffer is passed as 1 since
1525 * only a single switch/VEB instance is allocated, and hence a
1526 * single VEB counter is requested.
1528 counter_buf->num_elems = CPU_TO_LE16(1);
1529 counter_buf->res_type =
1530 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
1531 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
1532 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1536 ice_free(hw, counter_buf);
1537 goto ice_alloc_sw_exit;
1539 counter_ele = &counter_buf->elem[0];
1540 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
1541 ice_free(hw, counter_buf);
1545 ice_free(hw, sw_buf);
1550 * ice_free_sw - free resources specific to switch
1551 * @hw: pointer to the HW struct
1552 * @sw_id: switch ID returned
1553 * @counter_id: VEB counter ID returned
1555 * free switch resources (SWID and VEB counter) (0x0209)
1557 * NOTE: This function frees multiple resources. It continues
1558 * releasing other resources even after it encounters error.
1559 * The error code returned is the last error it encountered.
1561 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
1563 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
1564 enum ice_status status, ret_status;
1567 buf_len = sizeof(*sw_buf);
1568 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1569 ice_malloc(hw, buf_len);
1571 return ICE_ERR_NO_MEMORY;
1573 /* Prepare buffer to free for switch ID res.
1574 * The number of resource entries in buffer is passed as 1 since only a
1575 * single switch/VEB instance is freed, and hence a single sw_id
1578 sw_buf->num_elems = CPU_TO_LE16(1);
1579 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
1580 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
1582 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1583 ice_aqc_opc_free_res, NULL);
1586 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1588 /* Prepare buffer to free for VEB Counter resource */
1589 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1590 ice_malloc(hw, buf_len);
1592 ice_free(hw, sw_buf);
1593 return ICE_ERR_NO_MEMORY;
1596 /* The number of resource entries in buffer is passed as 1 since only a
1597 * single switch/VEB instance is freed, and hence a single VEB counter
1600 counter_buf->num_elems = CPU_TO_LE16(1);
1601 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
1602 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
1604 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1605 ice_aqc_opc_free_res, NULL);
1607 ice_debug(hw, ICE_DBG_SW,
1608 "VEB counter resource could not be freed\n");
1609 ret_status = status;
1612 ice_free(hw, counter_buf);
1613 ice_free(hw, sw_buf);
1619 * @hw: pointer to the HW struct
1620 * @vsi_ctx: pointer to a VSI context struct
1621 * @cd: pointer to command details structure or NULL
1623 * Add a VSI context to the hardware (0x0210)
1626 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1627 struct ice_sq_cd *cd)
1629 struct ice_aqc_add_update_free_vsi_resp *res;
1630 struct ice_aqc_add_get_update_free_vsi *cmd;
1631 struct ice_aq_desc desc;
1632 enum ice_status status;
1634 cmd = &desc.params.vsi_cmd;
1635 res = &desc.params.add_update_free_vsi_res;
1637 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
1639 if (!vsi_ctx->alloc_from_pool)
1640 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
1641 ICE_AQ_VSI_IS_VALID);
1643 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
1645 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1647 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1648 sizeof(vsi_ctx->info), cd);
1651 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1652 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
1653 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
1661 * @hw: pointer to the HW struct
1662 * @vsi_ctx: pointer to a VSI context struct
1663 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1664 * @cd: pointer to command details structure or NULL
1666 * Free VSI context info from hardware (0x0213)
1669 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1670 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1672 struct ice_aqc_add_update_free_vsi_resp *resp;
1673 struct ice_aqc_add_get_update_free_vsi *cmd;
1674 struct ice_aq_desc desc;
1675 enum ice_status status;
1677 cmd = &desc.params.vsi_cmd;
1678 resp = &desc.params.add_update_free_vsi_res;
1680 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1682 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1684 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
1686 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1688 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1689 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1697 * @hw: pointer to the HW struct
1698 * @vsi_ctx: pointer to a VSI context struct
1699 * @cd: pointer to command details structure or NULL
1701 * Update VSI context in the hardware (0x0211)
1704 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1705 struct ice_sq_cd *cd)
1707 struct ice_aqc_add_update_free_vsi_resp *resp;
1708 struct ice_aqc_add_get_update_free_vsi *cmd;
1709 struct ice_aq_desc desc;
1710 enum ice_status status;
1712 cmd = &desc.params.vsi_cmd;
1713 resp = &desc.params.add_update_free_vsi_res;
1715 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1717 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1719 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1721 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1722 sizeof(vsi_ctx->info), cd);
1725 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1726 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1733 * ice_is_vsi_valid - check whether the VSI is valid or not
1734 * @hw: pointer to the HW struct
1735 * @vsi_handle: VSI handle
1737 * check whether the VSI is valid or not
1739 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1741 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1745 * ice_get_hw_vsi_num - return the HW VSI number
1746 * @hw: pointer to the HW struct
1747 * @vsi_handle: VSI handle
1749 * return the HW VSI number
1750 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1752 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1754 return hw->vsi_ctx[vsi_handle]->vsi_num;
1758 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1759 * @hw: pointer to the HW struct
1760 * @vsi_handle: VSI handle
1762 * return the VSI context entry for a given VSI handle
1764 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1766 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1770 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1771 * @hw: pointer to the HW struct
1772 * @vsi_handle: VSI handle
1773 * @vsi: VSI context pointer
1775 * save the VSI context entry for a given VSI handle
1778 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1780 hw->vsi_ctx[vsi_handle] = vsi;
1784 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1785 * @hw: pointer to the HW struct
1786 * @vsi_handle: VSI handle
1788 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1790 struct ice_vsi_ctx *vsi;
1793 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1796 ice_for_each_traffic_class(i) {
1797 if (vsi->lan_q_ctx[i]) {
1798 ice_free(hw, vsi->lan_q_ctx[i]);
1799 vsi->lan_q_ctx[i] = NULL;
1805 * ice_clear_vsi_ctx - clear the VSI context entry
1806 * @hw: pointer to the HW struct
1807 * @vsi_handle: VSI handle
1809 * clear the VSI context entry
1811 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1813 struct ice_vsi_ctx *vsi;
1815 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1817 ice_clear_vsi_q_ctx(hw, vsi_handle);
1819 hw->vsi_ctx[vsi_handle] = NULL;
1824 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1825 * @hw: pointer to the HW struct
1827 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1831 for (i = 0; i < ICE_MAX_VSI; i++)
1832 ice_clear_vsi_ctx(hw, i);
1836 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1837 * @hw: pointer to the HW struct
1838 * @vsi_handle: unique VSI handle provided by drivers
1839 * @vsi_ctx: pointer to a VSI context struct
1840 * @cd: pointer to command details structure or NULL
1842 * Add a VSI context to the hardware also add it into the VSI handle list.
1843 * If this function gets called after reset for existing VSIs then update
1844 * with the new HW VSI number in the corresponding VSI handle list entry.
1847 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1848 struct ice_sq_cd *cd)
1850 struct ice_vsi_ctx *tmp_vsi_ctx;
1851 enum ice_status status;
1853 if (vsi_handle >= ICE_MAX_VSI)
1854 return ICE_ERR_PARAM;
1855 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1858 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1860 /* Create a new VSI context */
1861 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1862 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
1864 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1865 return ICE_ERR_NO_MEMORY;
1867 *tmp_vsi_ctx = *vsi_ctx;
1869 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1871 /* update with new HW VSI num */
1872 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1879 * ice_free_vsi- free VSI context from hardware and VSI handle list
1880 * @hw: pointer to the HW struct
1881 * @vsi_handle: unique VSI handle
1882 * @vsi_ctx: pointer to a VSI context struct
1883 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1884 * @cd: pointer to command details structure or NULL
1886 * Free VSI context info from hardware as well as from VSI handle list
1889 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1890 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1892 enum ice_status status;
1894 if (!ice_is_vsi_valid(hw, vsi_handle))
1895 return ICE_ERR_PARAM;
1896 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1897 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1899 ice_clear_vsi_ctx(hw, vsi_handle);
1905 * @hw: pointer to the HW struct
1906 * @vsi_handle: unique VSI handle
1907 * @vsi_ctx: pointer to a VSI context struct
1908 * @cd: pointer to command details structure or NULL
1910 * Update VSI context in the hardware
1913 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1914 struct ice_sq_cd *cd)
1916 if (!ice_is_vsi_valid(hw, vsi_handle))
1917 return ICE_ERR_PARAM;
1918 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1919 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1923 * ice_aq_get_vsi_params
1924 * @hw: pointer to the HW struct
1925 * @vsi_ctx: pointer to a VSI context struct
1926 * @cd: pointer to command details structure or NULL
1928 * Get VSI context info from hardware (0x0212)
1931 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1932 struct ice_sq_cd *cd)
1934 struct ice_aqc_add_get_update_free_vsi *cmd;
1935 struct ice_aqc_get_vsi_resp *resp;
1936 struct ice_aq_desc desc;
1937 enum ice_status status;
1939 cmd = &desc.params.vsi_cmd;
1940 resp = &desc.params.get_vsi_resp;
1942 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1944 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1946 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1947 sizeof(vsi_ctx->info), cd);
1949 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1951 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1952 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1959 * ice_aq_add_update_mir_rule - add/update a mirror rule
1960 * @hw: pointer to the HW struct
1961 * @rule_type: Rule Type
1962 * @dest_vsi: VSI number to which packets will be mirrored
1963 * @count: length of the list
1964 * @mr_buf: buffer for list of mirrored VSI numbers
1965 * @cd: pointer to command details structure or NULL
1968 * Add/Update Mirror Rule (0x260).
1971 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1972 u16 count, struct ice_mir_rule_buf *mr_buf,
1973 struct ice_sq_cd *cd, u16 *rule_id)
1975 struct ice_aqc_add_update_mir_rule *cmd;
1976 struct ice_aq_desc desc;
1977 enum ice_status status;
1978 __le16 *mr_list = NULL;
1981 switch (rule_type) {
1982 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1983 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1984 /* Make sure count and mr_buf are set for these rule_types */
1985 if (!(count && mr_buf))
1986 return ICE_ERR_PARAM;
1988 buf_size = count * sizeof(__le16);
1989 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1991 return ICE_ERR_NO_MEMORY;
1993 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1994 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1995 /* Make sure count and mr_buf are not set for these
1998 if (count || mr_buf)
1999 return ICE_ERR_PARAM;
2002 ice_debug(hw, ICE_DBG_SW,
2003 "Error due to unsupported rule_type %u\n", rule_type);
2004 return ICE_ERR_OUT_OF_RANGE;
2007 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
2009 /* Pre-process 'mr_buf' items for add/update of virtual port
2010 * ingress/egress mirroring (but not physical port ingress/egress
2016 for (i = 0; i < count; i++) {
2019 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
2021 /* Validate specified VSI number, make sure it is less
2022 * than ICE_MAX_VSI, if not return with error.
2024 if (id >= ICE_MAX_VSI) {
2025 ice_debug(hw, ICE_DBG_SW,
2026 "Error VSI index (%u) out-of-range\n",
2028 ice_free(hw, mr_list);
2029 return ICE_ERR_OUT_OF_RANGE;
2032 /* add VSI to mirror rule */
2035 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
2036 else /* remove VSI from mirror rule */
2037 mr_list[i] = CPU_TO_LE16(id);
2041 cmd = &desc.params.add_update_rule;
2042 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
2043 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
2044 ICE_AQC_RULE_ID_VALID_M);
2045 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
2046 cmd->num_entries = CPU_TO_LE16(count);
2047 cmd->dest = CPU_TO_LE16(dest_vsi);
2049 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
2051 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
2053 ice_free(hw, mr_list);
2059 * ice_aq_delete_mir_rule - delete a mirror rule
2060 * @hw: pointer to the HW struct
2061 * @rule_id: Mirror rule ID (to be deleted)
2062 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
2063 * otherwise it is returned to the shared pool
2064 * @cd: pointer to command details structure or NULL
2066 * Delete Mirror Rule (0x261).
2069 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
2070 struct ice_sq_cd *cd)
2072 struct ice_aqc_delete_mir_rule *cmd;
2073 struct ice_aq_desc desc;
2075 /* rule_id should be in the range 0...63 */
2076 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
2077 return ICE_ERR_OUT_OF_RANGE;
2079 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
2081 cmd = &desc.params.del_rule;
2082 rule_id |= ICE_AQC_RULE_ID_VALID_M;
2083 cmd->rule_id = CPU_TO_LE16(rule_id);
2086 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
2088 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2092 * ice_aq_alloc_free_vsi_list
2093 * @hw: pointer to the HW struct
2094 * @vsi_list_id: VSI list ID returned or used for lookup
2095 * @lkup_type: switch rule filter lookup type
2096 * @opc: switch rules population command type - pass in the command opcode
2098 * allocates or free a VSI list resource
2100 static enum ice_status
2101 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
2102 enum ice_sw_lkup_type lkup_type,
2103 enum ice_adminq_opc opc)
2105 struct ice_aqc_alloc_free_res_elem *sw_buf;
2106 struct ice_aqc_res_elem *vsi_ele;
2107 enum ice_status status;
2110 buf_len = sizeof(*sw_buf);
2111 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
2112 ice_malloc(hw, buf_len);
2114 return ICE_ERR_NO_MEMORY;
2115 sw_buf->num_elems = CPU_TO_LE16(1);
2117 if (lkup_type == ICE_SW_LKUP_MAC ||
2118 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2119 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2120 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2121 lkup_type == ICE_SW_LKUP_PROMISC ||
2122 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2123 lkup_type == ICE_SW_LKUP_LAST) {
2124 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
2125 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
2127 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
2129 status = ICE_ERR_PARAM;
2130 goto ice_aq_alloc_free_vsi_list_exit;
2133 if (opc == ice_aqc_opc_free_res)
2134 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
2136 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
2138 goto ice_aq_alloc_free_vsi_list_exit;
2140 if (opc == ice_aqc_opc_alloc_res) {
2141 vsi_ele = &sw_buf->elem[0];
2142 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
2145 ice_aq_alloc_free_vsi_list_exit:
2146 ice_free(hw, sw_buf);
2151 * ice_aq_set_storm_ctrl - Sets storm control configuration
2152 * @hw: pointer to the HW struct
2153 * @bcast_thresh: represents the upper threshold for broadcast storm control
2154 * @mcast_thresh: represents the upper threshold for multicast storm control
2155 * @ctl_bitmask: storm control control knobs
2157 * Sets the storm control configuration (0x0280)
2160 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
2163 struct ice_aqc_storm_cfg *cmd;
2164 struct ice_aq_desc desc;
2166 cmd = &desc.params.storm_conf;
2168 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
2170 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
2171 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
2172 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
2174 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2178 * ice_aq_get_storm_ctrl - gets storm control configuration
2179 * @hw: pointer to the HW struct
2180 * @bcast_thresh: represents the upper threshold for broadcast storm control
2181 * @mcast_thresh: represents the upper threshold for multicast storm control
2182 * @ctl_bitmask: storm control control knobs
2184 * Gets the storm control configuration (0x0281)
2187 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
2190 enum ice_status status;
2191 struct ice_aq_desc desc;
2193 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
2195 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2197 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
2200 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
2203 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
2206 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
2213 * ice_aq_sw_rules - add/update/remove switch rules
2214 * @hw: pointer to the HW struct
2215 * @rule_list: pointer to switch rule population list
2216 * @rule_list_sz: total size of the rule list in bytes
2217 * @num_rules: number of switch rules in the rule_list
2218 * @opc: switch rules population command type - pass in the command opcode
2219 * @cd: pointer to command details structure or NULL
2221 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
2223 static enum ice_status
2224 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
2225 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2227 struct ice_aq_desc desc;
2228 enum ice_status status;
2230 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2232 if (opc != ice_aqc_opc_add_sw_rules &&
2233 opc != ice_aqc_opc_update_sw_rules &&
2234 opc != ice_aqc_opc_remove_sw_rules)
2235 return ICE_ERR_PARAM;
2237 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2239 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2240 desc.params.sw_rules.num_rules_fltr_entry_index =
2241 CPU_TO_LE16(num_rules);
2242 status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
2243 if (opc != ice_aqc_opc_add_sw_rules &&
2244 hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
2245 status = ICE_ERR_DOES_NOT_EXIST;
2251 * ice_aq_add_recipe - add switch recipe
2252 * @hw: pointer to the HW struct
2253 * @s_recipe_list: pointer to switch rule population list
2254 * @num_recipes: number of switch recipes in the list
2255 * @cd: pointer to command details structure or NULL
2260 ice_aq_add_recipe(struct ice_hw *hw,
2261 struct ice_aqc_recipe_data_elem *s_recipe_list,
2262 u16 num_recipes, struct ice_sq_cd *cd)
2264 struct ice_aqc_add_get_recipe *cmd;
2265 struct ice_aq_desc desc;
2268 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2269 cmd = &desc.params.add_get_recipe;
2270 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
2272 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
2273 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2275 buf_size = num_recipes * sizeof(*s_recipe_list);
2277 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
2281 * ice_aq_get_recipe - get switch recipe
2282 * @hw: pointer to the HW struct
2283 * @s_recipe_list: pointer to switch rule population list
2284 * @num_recipes: pointer to the number of recipes (input and output)
2285 * @recipe_root: root recipe number of recipe(s) to retrieve
2286 * @cd: pointer to command details structure or NULL
2290 * On input, *num_recipes should equal the number of entries in s_recipe_list.
2291 * On output, *num_recipes will equal the number of entries returned in
2294 * The caller must supply enough space in s_recipe_list to hold all possible
2295 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
2298 ice_aq_get_recipe(struct ice_hw *hw,
2299 struct ice_aqc_recipe_data_elem *s_recipe_list,
2300 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
2302 struct ice_aqc_add_get_recipe *cmd;
2303 struct ice_aq_desc desc;
2304 enum ice_status status;
2307 if (*num_recipes != ICE_MAX_NUM_RECIPES)
2308 return ICE_ERR_PARAM;
2310 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2311 cmd = &desc.params.add_get_recipe;
2312 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
2314 cmd->return_index = CPU_TO_LE16(recipe_root);
2315 cmd->num_sub_recipes = 0;
2317 buf_size = *num_recipes * sizeof(*s_recipe_list);
2319 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
2320 /* cppcheck-suppress constArgument */
2321 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
2327 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
2328 * @hw: pointer to the HW struct
2329 * @profile_id: package profile ID to associate the recipe with
2330 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2331 * @cd: pointer to command details structure or NULL
2332 * Recipe to profile association (0x0291)
2335 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2336 struct ice_sq_cd *cd)
2338 struct ice_aqc_recipe_to_profile *cmd;
2339 struct ice_aq_desc desc;
2341 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2342 cmd = &desc.params.recipe_to_profile;
2343 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
2344 cmd->profile_id = CPU_TO_LE16(profile_id);
2345 /* Set the recipe ID bit in the bitmask to let the device know which
2346 * profile we are associating the recipe to
2348 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
2349 ICE_NONDMA_TO_NONDMA);
2351 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2355 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
2356 * @hw: pointer to the HW struct
2357 * @profile_id: package profile ID to associate the recipe with
2358 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2359 * @cd: pointer to command details structure or NULL
2360 * Associate profile ID with given recipe (0x0293)
2363 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2364 struct ice_sq_cd *cd)
2366 struct ice_aqc_recipe_to_profile *cmd;
2367 struct ice_aq_desc desc;
2368 enum ice_status status;
2370 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2371 cmd = &desc.params.recipe_to_profile;
2372 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
2373 cmd->profile_id = CPU_TO_LE16(profile_id);
2375 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2377 ice_memcpy(r_bitmap, cmd->recipe_assoc,
2378 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
2384 * ice_alloc_recipe - add recipe resource
2385 * @hw: pointer to the hardware structure
2386 * @rid: recipe ID returned as response to AQ call
2388 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
2390 struct ice_aqc_alloc_free_res_elem *sw_buf;
2391 enum ice_status status;
2394 buf_len = sizeof(*sw_buf);
2395 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2397 return ICE_ERR_NO_MEMORY;
/* Request a single recipe resource; SHARED flag lets the recipe be
 * referenced by more than one owner.
 */
2399 sw_buf->num_elems = CPU_TO_LE16(1);
2400 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
2401 ICE_AQC_RES_TYPE_S) |
2402 ICE_AQC_RES_TYPE_FLAG_SHARED);
2403 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2404 ice_aqc_opc_alloc_res, NULL);
/* On success FW returns the allocated recipe ID in the first element */
2406 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2407 ice_free(hw, sw_buf);
2412 /* ice_init_port_info - Initialize port_info with switch configuration data
2413 * @pi: pointer to port_info
2414 * @vsi_port_num: VSI number or port number
2415 * @type: Type of switch element (port or VSI)
2416 * @swid: switch ID of the switch the element is attached to
2417 * @pf_vf_num: PF or VF number
2418 * @is_vf: true if the element is a VF, false otherwise
2421 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2422 u16 swid, u16 pf_vf_num, bool is_vf)
2425 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2426 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2428 pi->pf_vf_num = pf_vf_num;
/* No default Tx/Rx VSI is configured yet at switch init time */
2430 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
2431 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
/* Any element type other than a physical port is unexpected here */
2434 ice_debug(pi->hw, ICE_DBG_SW,
2435 "incorrect VSI/port type received\n");
2440 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
2441 * @hw: pointer to the hardware structure
2443 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
2445 struct ice_aqc_get_sw_cfg_resp *rbuf;
2446 enum ice_status status;
/* This base-code path supports exactly one physical port per function */
2453 num_total_ports = 1;
2455 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
2456 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
2459 return ICE_ERR_NO_MEMORY;
2461 /* Multiple calls to ice_aq_get_sw_cfg may be required
2462 * to get all the switch configuration information. The need
2463 * for additional calls is indicated by ice_aq_get_sw_cfg
2464 * writing a non-zero value in req_desc
2467 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
2468 &req_desc, &num_elems, NULL);
2473 for (i = 0; i < num_elems; i++) {
2474 struct ice_aqc_get_sw_cfg_resp_elem *ele;
2475 u16 pf_vf_num, swid, vsi_port_num;
2479 ele = rbuf[i].elements;
/* Decode the packed response element: port/VSI number, owning
 * function, switch ID and element type share two LE16 fields.
 */
2480 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
2481 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
2483 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
2484 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
2486 swid = LE16_TO_CPU(ele->swid);
2488 if (LE16_TO_CPU(ele->pf_vf_num) &
2489 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
2492 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
2493 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
2496 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2497 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
2498 if (j == num_total_ports) {
2499 ice_debug(hw, ICE_DBG_SW,
2500 "more ports than expected\n");
2501 status = ICE_ERR_CFG;
2504 ice_init_port_info(hw->port_info,
2505 vsi_port_num, res_type, swid,
/* Keep fetching descriptors until FW reports no continuation */
2513 } while (req_desc && !status);
2516 ice_free(hw, (void *)rbuf);
2521 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2522 * @hw: pointer to the hardware structure
2523 * @fi: filter info structure to fill/update
2525 * This helper function populates the lb_en and lan_en elements of the provided
2526 * ice_fltr_info struct using the switch's type and characteristics of the
2527 * switch rule being configured.
2529 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
/* Rx filters forwarding to a VSI/VSI-list with the LAST lookup type */
2531 if ((fi->flag & ICE_FLTR_RX) &&
2532 (fi->fltr_act == ICE_FWD_TO_VSI ||
2533 fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
2534 fi->lkup_type == ICE_SW_LKUP_LAST)
2538 if ((fi->flag & ICE_FLTR_TX) &&
2539 (fi->fltr_act == ICE_FWD_TO_VSI ||
2540 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2541 fi->fltr_act == ICE_FWD_TO_Q ||
2542 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2543 /* Setting LB for prune actions will result in replicated
2544 * packets to the internal switch that will be dropped.
2546 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2549 /* Set lan_en to TRUE if
2550 * 1. The switch is a VEB AND
2552 * 2.1 The lookup is a directional lookup like ethertype,
2553 * promiscuous, ethertype-MAC, promiscuous-VLAN
2554 * and default-port OR
2555 * 2.2 The lookup is VLAN, OR
2556 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2557 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
2561 * The switch is a VEPA.
2563 * In all other cases, the LAN enable has to be set to false.
2566 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2567 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
2568 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2569 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2570 fi->lkup_type == ICE_SW_LKUP_DFLT ||
2571 fi->lkup_type == ICE_SW_LKUP_VLAN ||
2572 (fi->lkup_type == ICE_SW_LKUP_MAC &&
2573 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
2574 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
2575 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
2584 * ice_fill_sw_rule - Helper function to fill switch rule structure
2585 * @hw: pointer to the hardware structure
2586 * @f_info: entry containing packet forwarding information
2587 * @s_rule: switch rule structure to be filled in based on mac_entry
2588 * @opc: switch rules population command type - pass in the command opcode
2591 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
2592 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
/* vlan_id > ICE_MAX_VLAN_ID acts as the "no VLAN to program" sentinel;
 * it is overwritten below only for VLAN-bearing lookup types.
 */
2594 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
/* A remove needs only the rule index; act/hdr_len are cleared */
2602 if (opc == ice_aqc_opc_remove_sw_rules) {
2603 s_rule->pdata.lkup_tx_rx.act = 0;
2604 s_rule->pdata.lkup_tx_rx.index =
2605 CPU_TO_LE16(f_info->fltr_rule_id);
2606 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
2610 eth_hdr_sz = sizeof(dummy_eth_header);
2611 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
2613 /* initialize the ether header with a dummy header */
2614 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
2615 ice_fill_sw_info(hw, f_info);
/* Translate the abstract forward action into hardware action bits */
2617 switch (f_info->fltr_act) {
2618 case ICE_FWD_TO_VSI:
2619 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
2620 ICE_SINGLE_ACT_VSI_ID_M;
2621 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2622 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2623 ICE_SINGLE_ACT_VALID_BIT;
2625 case ICE_FWD_TO_VSI_LIST:
2626 act |= ICE_SINGLE_ACT_VSI_LIST;
2627 act |= (f_info->fwd_id.vsi_list_id <<
2628 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
2629 ICE_SINGLE_ACT_VSI_LIST_ID_M;
2630 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2631 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2632 ICE_SINGLE_ACT_VALID_BIT;
2635 act |= ICE_SINGLE_ACT_TO_Q;
2636 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2637 ICE_SINGLE_ACT_Q_INDEX_M;
2639 case ICE_DROP_PACKET:
2640 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
2641 ICE_SINGLE_ACT_VALID_BIT;
2643 case ICE_FWD_TO_QGRP:
/* Queue-region size is encoded as log2 of the group size */
2644 q_rgn = f_info->qgrp_size > 0 ?
2645 (u8)ice_ilog2(f_info->qgrp_size) : 0;
2646 act |= ICE_SINGLE_ACT_TO_Q;
2647 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2648 ICE_SINGLE_ACT_Q_INDEX_M;
2649 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
2650 ICE_SINGLE_ACT_Q_REGION_M;
2657 act |= ICE_SINGLE_ACT_LB_ENABLE;
2659 act |= ICE_SINGLE_ACT_LAN_ENABLE;
/* Pick out which l_data union member feeds DA/VLAN in the dummy hdr */
2661 switch (f_info->lkup_type) {
2662 case ICE_SW_LKUP_MAC:
2663 daddr = f_info->l_data.mac.mac_addr;
2665 case ICE_SW_LKUP_VLAN:
2666 vlan_id = f_info->l_data.vlan.vlan_id;
2667 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
2668 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
2669 act |= ICE_SINGLE_ACT_PRUNE;
2670 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
2673 case ICE_SW_LKUP_ETHERTYPE_MAC:
2674 daddr = f_info->l_data.ethertype_mac.mac_addr;
2676 case ICE_SW_LKUP_ETHERTYPE:
2677 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2678 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
2680 case ICE_SW_LKUP_MAC_VLAN:
2681 daddr = f_info->l_data.mac_vlan.mac_addr;
2682 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2684 case ICE_SW_LKUP_PROMISC_VLAN:
2685 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2687 case ICE_SW_LKUP_PROMISC:
2688 daddr = f_info->l_data.mac_vlan.mac_addr;
2694 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
2695 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
2696 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
2698 /* Recipe set depending on lookup type */
2699 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
2700 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
2701 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* Program the destination MAC into the dummy header (when set above) */
2704 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
2705 ICE_NONDMA_TO_NONDMA);
/* Only program a VLAN TCI when a valid (<= 0xFFF) VLAN ID was chosen */
2707 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
2708 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
2709 *off = CPU_TO_BE16(vlan_id);
2712 /* Create the switch rule with the final dummy Ethernet header */
2713 if (opc != ice_aqc_opc_update_sw_rules)
2714 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
2718 * ice_add_marker_act
2719 * @hw: pointer to the hardware structure
2720 * @m_ent: the management entry for which sw marker needs to be added
2721 * @sw_marker: sw marker to tag the Rx descriptor with
2722 * @l_id: large action resource ID
2724 * Create a large action to hold software marker and update the switch rule
2725 * entry pointed by m_ent with newly created large action
2727 static enum ice_status
2728 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2729 u16 sw_marker, u16 l_id)
2731 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
2732 /* For software marker we need 3 large actions
2733 * 1. FWD action: FWD TO VSI or VSI LIST
2734 * 2. GENERIC VALUE action to hold the profile ID
2735 * 3. GENERIC VALUE action to hold the software marker ID
2737 const u16 num_lg_acts = 3;
2738 enum ice_status status;
/* Markers are only supported on MAC lookup rules */
2744 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2745 return ICE_ERR_PARAM;
2747 /* Create two back-to-back switch rules and submit them to the HW using
2748 * one memory buffer:
2752 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2753 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2754 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2756 return ICE_ERR_NO_MEMORY;
/* Second rule (lookup rule) lives right behind the large action */
2758 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2760 /* Fill in the first switch rule i.e. large action */
2761 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2762 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2763 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2765 /* First action VSI forwarding or VSI list forwarding depending on how
2768 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2769 m_ent->fltr_info.fwd_id.hw_vsi_id;
2771 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2772 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2773 ICE_LG_ACT_VSI_LIST_ID_M;
2774 if (m_ent->vsi_count > 1)
2775 act |= ICE_LG_ACT_VSI_LIST;
2776 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2778 /* Second action descriptor type */
2779 act = ICE_LG_ACT_GENERIC;
2781 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2782 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2784 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2785 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2787 /* Third action Marker value */
2788 act |= ICE_LG_ACT_GENERIC;
2789 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2790 ICE_LG_ACT_GENERIC_VALUE_M;
2792 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
2794 /* call the fill switch rule to fill the lookup Tx Rx structure */
2795 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2796 ice_aqc_opc_update_sw_rules);
2798 /* Update the action to point to the large action ID */
2799 rx_tx->pdata.lkup_tx_rx.act =
2800 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
2801 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2802 ICE_SINGLE_ACT_PTR_VAL_M));
2804 /* Use the filter rule ID of the previously created rule with single
2805 * act. Once the update happens, hardware will treat this as large
2808 rx_tx->pdata.lkup_tx_rx.index =
2809 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
/* Submit both rules (large action + updated lookup) in one AQ call */
2811 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2812 ice_aqc_opc_update_sw_rules, NULL);
2814 m_ent->lg_act_idx = l_id;
2815 m_ent->sw_marker_id = sw_marker;
2818 ice_free(hw, lg_act);
2823 * ice_add_counter_act - add/update filter rule with counter action
2824 * @hw: pointer to the hardware structure
2825 * @m_ent: the management entry for which counter needs to be added
2826 * @counter_id: VLAN counter ID returned as part of allocate resource
2827 * @l_id: large action resource ID
2829 static enum ice_status
2830 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2831 u16 counter_id, u16 l_id)
2833 struct ice_aqc_sw_rules_elem *lg_act;
2834 struct ice_aqc_sw_rules_elem *rx_tx;
2835 enum ice_status status;
2836 /* 2 actions will be added while adding a large action counter */
2837 const int num_acts = 2;
/* Counters are only supported on MAC lookup rules */
2844 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2845 return ICE_ERR_PARAM;
2847 /* Create two back-to-back switch rules and submit them to the HW using
2848 * one memory buffer:
2852 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2853 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2854 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
2857 return ICE_ERR_NO_MEMORY;
2859 rx_tx = (struct ice_aqc_sw_rules_elem *)
2860 ((u8 *)lg_act + lg_act_size);
2862 /* Fill in the first switch rule i.e. large action */
2863 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2864 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2865 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2867 /* First action VSI forwarding or VSI list forwarding depending on how
2870 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2871 m_ent->fltr_info.fwd_id.hw_vsi_id;
2873 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2874 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2875 ICE_LG_ACT_VSI_LIST_ID_M;
2876 if (m_ent->vsi_count > 1)
2877 act |= ICE_LG_ACT_VSI_LIST;
2878 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2880 /* Second action counter ID */
2881 act = ICE_LG_ACT_STAT_COUNT;
2882 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2883 ICE_LG_ACT_STAT_COUNT_M;
2884 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2886 /* call the fill switch rule to fill the lookup Tx Rx structure */
2887 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2888 ice_aqc_opc_update_sw_rules);
/* Repoint the lookup rule's action at the large action just built */
2890 act = ICE_SINGLE_ACT_PTR;
2891 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2892 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2894 /* Use the filter rule ID of the previously created rule with single
2895 * act. Once the update happens, hardware will treat this as large
2898 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2899 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
2901 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2902 ice_aqc_opc_update_sw_rules, NULL);
2904 m_ent->lg_act_idx = l_id;
2905 m_ent->counter_index = counter_id;
2908 ice_free(hw, lg_act);
2913 * ice_create_vsi_list_map
2914 * @hw: pointer to the hardware structure
2915 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2916 * @num_vsi: number of VSI handles in the array
2917 * @vsi_list_id: VSI list ID generated as part of allocate resource
2919 * Helper function to create a new entry of VSI list ID to VSI mapping
2920 * using the given VSI list ID
2922 static struct ice_vsi_list_map_info *
2923 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2926 struct ice_switch_info *sw = hw->switch_info;
2927 struct ice_vsi_list_map_info *v_map;
2930 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2935 v_map->vsi_list_id = vsi_list_id;
/* Record membership as a bitmap keyed by VSI handle */
2937 for (i = 0; i < num_vsi; i++)
2938 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
/* Track this map on the global switch-info list for later lookups */
2940 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2945 * ice_update_vsi_list_rule
2946 * @hw: pointer to the hardware structure
2947 * @vsi_handle_arr: array of VSI handles to form a VSI list
2948 * @num_vsi: number of VSI handles in the array
2949 * @vsi_list_id: VSI list ID generated as part of allocate resource
2950 * @remove: Boolean value to indicate if this is a remove action
2951 * @opc: switch rules population command type - pass in the command opcode
2952 * @lkup_type: lookup type of the filter
2954 * Call AQ command to add a new switch rule or update existing switch rule
2955 * using the given VSI list ID
2957 static enum ice_status
2958 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2959 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2960 enum ice_sw_lkup_type lkup_type)
2962 struct ice_aqc_sw_rules_elem *s_rule;
2963 enum ice_status status;
2969 return ICE_ERR_PARAM;
/* VLAN lookups use a prune list; every other supported lookup type
 * uses a plain VSI list. Anything else is rejected.
 */
2971 if (lkup_type == ICE_SW_LKUP_MAC ||
2972 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2973 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2974 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2975 lkup_type == ICE_SW_LKUP_PROMISC ||
2976 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2977 lkup_type == ICE_SW_LKUP_LAST)
2978 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2979 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2980 else if (lkup_type == ICE_SW_LKUP_VLAN)
2981 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2982 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2984 return ICE_ERR_PARAM;
2986 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2987 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2989 return ICE_ERR_NO_MEMORY;
2990 for (i = 0; i < num_vsi; i++) {
2991 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2992 status = ICE_ERR_PARAM;
2995 /* AQ call requires hw_vsi_id(s) */
2996 s_rule->pdata.vsi_list.vsi[i] =
2997 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
3000 s_rule->type = CPU_TO_LE16(rule_type);
3001 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
3002 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
3004 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
3007 ice_free(hw, s_rule);
3012 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
3013 * @hw: pointer to the HW struct
3014 * @vsi_handle_arr: array of VSI handles to form a VSI list
3015 * @num_vsi: number of VSI handles in the array
3016 * @vsi_list_id: stores the ID of the VSI list to be created
3017 * @lkup_type: switch rule filter's lookup type
3019 static enum ice_status
3020 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3021 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
3023 enum ice_status status;
/* First allocate the VSI list resource from FW ... */
3025 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
3026 ice_aqc_opc_alloc_res);
3030 /* Update the newly created VSI list to include the specified VSIs */
3031 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
3032 *vsi_list_id, false,
3033 ice_aqc_opc_add_sw_rules, lkup_type);
3037 * ice_create_pkt_fwd_rule
3038 * @hw: pointer to the hardware structure
3039 * @recp_list: corresponding filter management list
3040 * @f_entry: entry containing packet forwarding information
3042 * Create switch rule with given filter information and add an entry
3043 * to the corresponding filter management list to track this switch rule
3046 static enum ice_status
3047 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3048 struct ice_fltr_list_entry *f_entry)
3050 struct ice_fltr_mgmt_list_entry *fm_entry;
3051 struct ice_aqc_sw_rules_elem *s_rule;
3052 enum ice_status status;
3054 s_rule = (struct ice_aqc_sw_rules_elem *)
3055 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
3057 return ICE_ERR_NO_MEMORY;
3058 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3059 ice_malloc(hw, sizeof(*fm_entry));
3061 status = ICE_ERR_NO_MEMORY;
3062 goto ice_create_pkt_fwd_rule_exit;
3065 fm_entry->fltr_info = f_entry->fltr_info;
3067 /* Initialize all the fields for the management entry */
3068 fm_entry->vsi_count = 1;
3069 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
3070 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
3071 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
3073 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
3074 ice_aqc_opc_add_sw_rules);
3076 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3077 ice_aqc_opc_add_sw_rules, NULL);
/* On AQ failure the management entry is discarded before exit */
3079 ice_free(hw, fm_entry);
3080 goto ice_create_pkt_fwd_rule_exit;
/* Record the FW-assigned rule ID both in the caller's entry and in
 * the book-keeping entry so later updates/removals can reference it.
 */
3083 f_entry->fltr_info.fltr_rule_id =
3084 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3085 fm_entry->fltr_info.fltr_rule_id =
3086 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3088 /* The book keeping entries will get removed when base driver
3089 * calls remove filter AQ command
3091 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
3093 ice_create_pkt_fwd_rule_exit:
3094 ice_free(hw, s_rule);
3099 * ice_update_pkt_fwd_rule
3100 * @hw: pointer to the hardware structure
3101 * @f_info: filter information for switch rule
3103 * Call AQ command to update a previously created switch rule with a
3106 static enum ice_status
3107 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
3109 struct ice_aqc_sw_rules_elem *s_rule;
3110 enum ice_status status;
3112 s_rule = (struct ice_aqc_sw_rules_elem *)
3113 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
3115 return ICE_ERR_NO_MEMORY;
3117 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
/* Target the existing rule by its FW-assigned rule ID */
3119 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
3121 /* Update switch rule with new rule set to forward VSI list */
3122 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3123 ice_aqc_opc_update_sw_rules, NULL);
3125 ice_free(hw, s_rule);
3130 * ice_update_sw_rule_bridge_mode
3131 * @hw: pointer to the HW struct
3133 * Updates unicast switch filter rules based on VEB/VEPA mode
3135 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
3137 struct ice_switch_info *sw = hw->switch_info;
3138 struct ice_fltr_mgmt_list_entry *fm_entry;
3139 enum ice_status status = ICE_SUCCESS;
3140 struct LIST_HEAD_TYPE *rule_head;
3141 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
/* Only MAC-lookup rules are affected by the bridge mode */
3143 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3144 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3146 ice_acquire_lock(rule_lock);
3147 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
3149 struct ice_fltr_info *fi = &fm_entry->fltr_info;
3150 u8 *addr = fi->l_data.mac.mac_addr;
3152 /* Update unicast Tx rules to reflect the selected
3155 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
3156 (fi->fltr_act == ICE_FWD_TO_VSI ||
3157 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
3158 fi->fltr_act == ICE_FWD_TO_Q ||
3159 fi->fltr_act == ICE_FWD_TO_QGRP)) {
3160 status = ice_update_pkt_fwd_rule(hw, fi);
3166 ice_release_lock(rule_lock);
3172 * ice_add_update_vsi_list
3173 * @hw: pointer to the hardware structure
3174 * @m_entry: pointer to current filter management list entry
3175 * @cur_fltr: filter information from the book keeping entry
3176 * @new_fltr: filter information with the new VSI to be added
3178 * Call AQ command to add or update previously created VSI list with new VSI.
3180 * Helper function to do book keeping associated with adding filter information
3181 * The algorithm to do the book keeping is described below :
3182 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
3183 * if only one VSI has been added till now
3184 * Allocate a new VSI list and add two VSIs
3185 * to this list using switch rule command
3186 * Update the previously created switch rule with the
3187 * newly created VSI list ID
3188 * if a VSI list was previously created
3189 * Add the new VSI to the previously created VSI list set
3190 * using the update switch rule command
3192 static enum ice_status
3193 ice_add_update_vsi_list(struct ice_hw *hw,
3194 struct ice_fltr_mgmt_list_entry *m_entry,
3195 struct ice_fltr_info *cur_fltr,
3196 struct ice_fltr_info *new_fltr)
3198 enum ice_status status = ICE_SUCCESS;
3199 u16 vsi_list_id = 0;
/* Queue/queue-group forwards cannot be merged into a VSI list */
3201 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
3202 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
3203 return ICE_ERR_NOT_IMPL;
3205 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
3206 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
3207 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
3208 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
3209 return ICE_ERR_NOT_IMPL;
3211 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
3212 /* Only one entry existed in the mapping and it was not already
3213 * a part of a VSI list. So, create a VSI list with the old and
3216 struct ice_fltr_info tmp_fltr;
3217 u16 vsi_handle_arr[2];
3219 /* A rule already exists with the new VSI being added */
3220 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
3221 return ICE_ERR_ALREADY_EXISTS;
3223 vsi_handle_arr[0] = cur_fltr->vsi_handle;
3224 vsi_handle_arr[1] = new_fltr->vsi_handle;
3225 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3227 new_fltr->lkup_type);
3231 tmp_fltr = *new_fltr;
3232 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
3233 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3234 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3235 /* Update the previous switch rule of "MAC forward to VSI" to
3236 * "MAC fwd to VSI list"
3238 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Book-keeping now reflects the list-based forward action */
3242 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
3243 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3244 m_entry->vsi_list_info =
3245 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3248 /* If this entry was large action then the large action needs
3249 * to be updated to point to FWD to VSI list
3251 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
3253 ice_add_marker_act(hw, m_entry,
3254 m_entry->sw_marker_id,
3255 m_entry->lg_act_idx);
3257 u16 vsi_handle = new_fltr->vsi_handle;
3258 enum ice_adminq_opc opcode;
3260 if (!m_entry->vsi_list_info)
3263 /* A rule already exists with the new VSI being added */
3264 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
3267 /* Update the previously created VSI list set with
3268 * the new VSI ID passed in
3270 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
3271 opcode = ice_aqc_opc_update_sw_rules;
3273 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
3274 vsi_list_id, false, opcode,
3275 new_fltr->lkup_type);
3276 /* update VSI list mapping info with new VSI ID */
3278 ice_set_bit(vsi_handle,
3279 m_entry->vsi_list_info->vsi_map);
3282 m_entry->vsi_count++;
3287 * ice_find_rule_entry - Search a rule entry
3288 * @list_head: head of rule list
3289 * @f_info: rule information
3291 * Helper function to search for a given rule entry
3292 * Returns pointer to entry storing the rule if found
3294 static struct ice_fltr_mgmt_list_entry *
3295 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
3296 struct ice_fltr_info *f_info)
3298 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
3300 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
/* Match on the whole lookup-data union plus the Rx/Tx flag */
3302 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3303 sizeof(f_info->l_data)) &&
3304 f_info->flag == list_itr->fltr_info.flag) {
3313 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
3314 * @recp_list: VSI lists needs to be searched
3315 * @vsi_handle: VSI handle to be found in VSI list
3316 * @vsi_list_id: VSI list ID found containing vsi_handle
3318 * Helper function to search a VSI list with single entry containing given VSI
3319 * handle element. This can be extended further to search VSI list with more
3320 * than 1 vsi_count. Returns pointer to VSI list entry if found.
3322 static struct ice_vsi_list_map_info *
3323 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
3326 struct ice_vsi_list_map_info *map_info = NULL;
3327 struct LIST_HEAD_TYPE *list_head;
3329 list_head = &recp_list->filt_rules;
/* Advanced recipes keep their rules in a different entry type, so the
 * same search is done over either list flavor.
 */
3330 if (recp_list->adv_rule) {
3331 struct ice_adv_fltr_mgmt_list_entry *list_itr;
3333 LIST_FOR_EACH_ENTRY(list_itr, list_head,
3334 ice_adv_fltr_mgmt_list_entry,
3336 if (list_itr->vsi_list_info) {
3337 map_info = list_itr->vsi_list_info;
3338 if (ice_is_bit_set(map_info->vsi_map,
3340 *vsi_list_id = map_info->vsi_list_id;
3346 struct ice_fltr_mgmt_list_entry *list_itr;
3348 LIST_FOR_EACH_ENTRY(list_itr, list_head,
3349 ice_fltr_mgmt_list_entry,
/* Non-advanced path additionally requires vsi_count == 1 as
 * stated in the function description above.
 */
3351 if (list_itr->vsi_count == 1 &&
3352 list_itr->vsi_list_info) {
3353 map_info = list_itr->vsi_list_info;
3354 if (ice_is_bit_set(map_info->vsi_map,
3356 *vsi_list_id = map_info->vsi_list_id;
3366 * ice_add_rule_internal - add rule for a given lookup type
3367 * @hw: pointer to the hardware structure
3368 * @recp_list: recipe list for which rule has to be added
3369 * @lport: logic port number on which function add rule
3370 * @f_entry: structure containing MAC forwarding information
3372 * Adds or updates the rule lists for a given recipe
3374 static enum ice_status
3375 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3376 u8 lport, struct ice_fltr_list_entry *f_entry)
3378 struct ice_fltr_info *new_fltr, *cur_fltr;
3379 struct ice_fltr_mgmt_list_entry *m_entry;
3380 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3381 enum ice_status status = ICE_SUCCESS;
3383 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3384 return ICE_ERR_PARAM;
3386 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
3387 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
3388 f_entry->fltr_info.fwd_id.hw_vsi_id =
3389 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3391 rule_lock = &recp_list->filt_rule_lock;
3393 ice_acquire_lock(rule_lock);
3394 new_fltr = &f_entry->fltr_info;
/* Rule source: logical port for Rx rules, HW VSI number for Tx rules */
3395 if (new_fltr->flag & ICE_FLTR_RX)
3396 new_fltr->src = lport;
3397 else if (new_fltr->flag & ICE_FLTR_TX)
3399 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
/* New filter: create a fresh rule; existing filter: fold the new VSI
 * into the rule's VSI (list) book-keeping instead.
 */
3401 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
3403 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3404 goto exit_add_rule_internal;
3407 cur_fltr = &m_entry->fltr_info;
3408 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
3410 exit_add_rule_internal:
3411 ice_release_lock(rule_lock);
3416 * ice_remove_vsi_list_rule
3417 * @hw: pointer to the hardware structure
3418 * @vsi_list_id: VSI list ID generated as part of allocate resource
3419 * @lkup_type: switch rule filter lookup type
3421 * The VSI list should be emptied before this function is called to remove the
3424 static enum ice_status
3425 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
3426 enum ice_sw_lkup_type lkup_type)
3428 /* Free the vsi_list resource that we allocated. It is assumed that the
3429 * list is empty at this point.
3431 return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
3432 ice_aqc_opc_free_res);
3436 * ice_rem_update_vsi_list
3437 * @hw: pointer to the hardware structure
3438 * @vsi_handle: VSI handle of the VSI to remove
3439 * @fm_list: filter management entry for which the VSI list management needs to
3442 static enum ice_status
3443 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
3444 struct ice_fltr_mgmt_list_entry *fm_list)
3446 enum ice_sw_lkup_type lkup_type;
3447 enum ice_status status = ICE_SUCCESS;
/* Only applicable to rules currently forwarding to a VSI list */
3450 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
3451 fm_list->vsi_count == 0)
3452 return ICE_ERR_PARAM;
3454 /* A rule with the VSI being removed does not exist */
3455 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
3456 return ICE_ERR_DOES_NOT_EXIST;
3458 lkup_type = fm_list->fltr_info.lkup_type;
3459 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
3460 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
3461 ice_aqc_opc_update_sw_rules,
3466 fm_list->vsi_count--;
3467 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
/* For non-VLAN rules that shrank to one member, collapse the VSI-list
 * forward back into a direct FWD_TO_VSI on the remaining VSI.
 */
3469 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
3470 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
3471 struct ice_vsi_list_map_info *vsi_list_info =
3472 fm_list->vsi_list_info;
3475 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
3477 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
3478 return ICE_ERR_OUT_OF_RANGE;
3480 /* Make sure VSI list is empty before removing it below */
3481 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
3483 ice_aqc_opc_update_sw_rules,
3488 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
3489 tmp_fltr_info.fwd_id.hw_vsi_id =
3490 ice_get_hw_vsi_num(hw, rem_vsi_handle);
3491 tmp_fltr_info.vsi_handle = rem_vsi_handle;
3492 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
3494 ice_debug(hw, ICE_DBG_SW,
3495 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
3496 tmp_fltr_info.fwd_id.hw_vsi_id, status);
3500 fm_list->fltr_info = tmp_fltr_info;
/* VLAN prune lists are freed only when fully empty; other lists are
 * freed once collapsed to a single member above.
 */
3503 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
3504 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
3505 struct ice_vsi_list_map_info *vsi_list_info =
3506 fm_list->vsi_list_info;
3508 /* Remove the VSI list since it is no longer used */
3509 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
3511 ice_debug(hw, ICE_DBG_SW,
3512 "Failed to remove VSI list %d, error %d\n",
3513 vsi_list_id, status);
3517 LIST_DEL(&vsi_list_info->list_entry);
3518 ice_free(hw, vsi_list_info);
3519 fm_list->vsi_list_info = NULL;
/* NOTE(review): listing has elided lines (embedded numbering jumps); e.g.
 * the NULL check after ice_find_rule_entry and several braces/else arms
 * are missing here. Confirm against the full source.
 */
3526 * ice_remove_rule_internal - Remove a filter rule of a given type
3528 * @hw: pointer to the hardware structure
3529 * @recp_list: recipe list for which the rule needs to removed
3530 * @f_entry: rule entry containing filter information
3532 static enum ice_status
3533 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3534 struct ice_fltr_list_entry *f_entry)
3536 struct ice_fltr_mgmt_list_entry *list_elem;
3537 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3538 enum ice_status status = ICE_SUCCESS;
3539 bool remove_rule = false;
/* Translate the caller's VSI handle to the HW VSI number before lookup */
3542 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3543 return ICE_ERR_PARAM;
3544 f_entry->fltr_info.fwd_id.hw_vsi_id =
3545 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3547 rule_lock = &recp_list->filt_rule_lock;
3548 ice_acquire_lock(rule_lock);
3549 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
3550 &f_entry->fltr_info);
3552 status = ICE_ERR_DOES_NOT_EXIST;
/* Decide whether the HW rule itself must be removed, depending on how the
 * matched entry forwards (single VSI vs. shared/unshared VSI list).
 */
3556 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
3558 } else if (!list_elem->vsi_list_info) {
3559 status = ICE_ERR_DOES_NOT_EXIST;
3561 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
3562 /* a ref_cnt > 1 indicates that the vsi_list is being
3563 * shared by multiple rules. Decrement the ref_cnt and
3564 * remove this rule, but do not modify the list, as it
3565 * is in-use by other rules.
3567 list_elem->vsi_list_info->ref_cnt--;
3570 /* a ref_cnt of 1 indicates the vsi_list is only used
3571 * by one rule. However, the original removal request is only
3572 * for a single VSI. Update the vsi_list first, and only
3573 * remove the rule if there are no further VSIs in this list.
3575 vsi_handle = f_entry->fltr_info.vsi_handle;
3576 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
3579 /* if VSI count goes to zero after updating the VSI list */
3580 if (list_elem->vsi_count == 0)
3585 /* Remove the lookup rule */
3586 struct ice_aqc_sw_rules_elem *s_rule;
3588 s_rule = (struct ice_aqc_sw_rules_elem *)
3589 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE)
3591 status = ICE_ERR_NO_MEMORY;
/* Issue the AQ remove for the rule and free the temporary buffer */
3595 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3596 ice_aqc_opc_remove_sw_rules);
3598 status = ice_aq_sw_rules(hw, s_rule,
3599 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
3600 ice_aqc_opc_remove_sw_rules, NULL);
3602 /* Remove a book keeping from the list */
3603 ice_free(hw, s_rule);
3608 LIST_DEL(&list_elem->list_entry);
3609 ice_free(hw, list_elem);
3612 ice_release_lock(rule_lock);
3617 * ice_aq_get_res_alloc - get allocated resources
3618 * @hw: pointer to the HW struct
3619 * @num_entries: pointer to u16 to store the number of resource entries returned
3620 * @buf: pointer to user-supplied buffer
3621 * @buf_size: size of buff
3622 * @cd: pointer to command details structure or NULL
3624 * The user-supplied buffer must be large enough to store the resource
3625 * information for all resource types. Each resource type is an
3626 * ice_aqc_get_res_resp_data_elem structure.
3629 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
3630 u16 buf_size, struct ice_sq_cd *cd)
3632 struct ice_aqc_get_res_alloc *resp;
3633 enum ice_status status;
3634 struct ice_aq_desc desc;
3637 return ICE_ERR_BAD_PTR;
3639 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
3640 return ICE_ERR_INVAL_SIZE;
3642 resp = &desc.params.get_res;
3644 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
3645 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3647 if (!status && num_entries)
3648 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
3654 * ice_aq_get_res_descs - get allocated resource descriptors
3655 * @hw: pointer to the hardware structure
3656 * @num_entries: number of resource entries in buffer
3657 * @buf: Indirect buffer to hold data parameters and response
3658 * @buf_size: size of buffer for indirect commands
3659 * @res_type: resource type
3660 * @res_shared: is resource shared
3661 * @desc_id: input - first desc ID to start; output - next desc ID
3662 * @cd: pointer to command details structure or NULL
3665 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
3666 struct ice_aqc_get_allocd_res_desc_resp *buf,
3667 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
3668 struct ice_sq_cd *cd)
3670 struct ice_aqc_get_allocd_res_desc *cmd;
3671 struct ice_aq_desc desc;
3672 enum ice_status status;
3674 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3676 cmd = &desc.params.get_res_desc;
3679 return ICE_ERR_PARAM;
3681 if (buf_size != (num_entries * sizeof(*buf)))
3682 return ICE_ERR_PARAM;
3684 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
3686 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
3687 ICE_AQC_RES_TYPE_M) | (res_shared ?
3688 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
3689 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
3691 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3693 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
/* NOTE(review): listing has elided lines (embedded numbering jumps) —
 * several declarations (vsi_handle, hw_vsi_id, elem_sent), NULL checks and
 * closing braces are missing. Confirm against the full source.
 */
3699 * ice_add_mac_rule - Add a MAC address based filter rule
3700 * @hw: pointer to the hardware structure
3701 * @m_list: list of MAC addresses and forwarding information
3702 * @sw: pointer to switch info struct for which function add rule
3703 * @lport: logic port number on which function add rule
3705 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
3706 * multiple unicast addresses, the function assumes that all the
3707 * addresses are unique in a given add_mac call. It doesn't
3708 * check for duplicates in this case, removing duplicates from a given
3709 * list should be taken care of in the caller of this function.
3711 static enum ice_status
3712 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
3713 struct ice_switch_info *sw, u8 lport)
3715 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
3716 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3717 struct ice_fltr_list_entry *m_list_itr;
3718 struct LIST_HEAD_TYPE *rule_head;
3719 u16 total_elem_left, s_rule_size;
3720 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3721 enum ice_status status = ICE_SUCCESS;
3722 u16 num_unicast = 0;
3726 rule_lock = &recp_list->filt_rule_lock;
3727 rule_head = &recp_list->filt_rules;
/* Pass 1: validate each entry; multicast (and shared unicast) addresses
 * are added one-by-one here, non-shared unicast addresses are counted for
 * the bulk path below.
 */
3729 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3731 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3735 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3736 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3737 if (!ice_is_vsi_valid(hw, vsi_handle))
3738 return ICE_ERR_PARAM;
3739 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3740 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3741 /* update the src in case it is VSI num */
3742 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3743 return ICE_ERR_PARAM;
3744 m_list_itr->fltr_info.src = hw_vsi_id;
3745 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3746 IS_ZERO_ETHER_ADDR(add))
3747 return ICE_ERR_PARAM;
3748 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3749 /* Don't overwrite the unicast address */
3750 ice_acquire_lock(rule_lock);
3751 if (ice_find_rule_entry(rule_head,
3752 &m_list_itr->fltr_info)) {
3753 ice_release_lock(rule_lock);
3754 return ICE_ERR_ALREADY_EXISTS;
3756 ice_release_lock(rule_lock);
3758 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3759 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3760 m_list_itr->status =
3761 ice_add_rule_internal(hw, recp_list, lport,
3763 if (m_list_itr->status)
3764 return m_list_itr->status;
/* Lock held from here until ice_add_mac_exit to protect rule_head */
3768 ice_acquire_lock(rule_lock);
3769 /* Exit if no suitable entries were found for adding bulk switch rule */
3771 status = ICE_SUCCESS;
3772 goto ice_add_mac_exit;
3775 /* Allocate switch rule buffer for the bulk update for unicast */
3776 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3777 s_rule = (struct ice_aqc_sw_rules_elem *)
3778 ice_calloc(hw, num_unicast, s_rule_size);
3780 status = ICE_ERR_NO_MEMORY;
3781 goto ice_add_mac_exit;
/* Pass 2: serialize one switch rule per unicast address into s_rule */
3785 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3787 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3788 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3790 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3791 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3792 ice_aqc_opc_add_sw_rules);
3793 r_iter = (struct ice_aqc_sw_rules_elem *)
3794 ((u8 *)r_iter + s_rule_size);
3798 /* Call AQ bulk switch rule update for all unicast addresses */
3800 /* Call AQ switch rule in AQ_MAX chunk */
3801 for (total_elem_left = num_unicast; total_elem_left > 0;
3802 total_elem_left -= elem_sent) {
3803 struct ice_aqc_sw_rules_elem *entry = r_iter;
/* Chunk size is bounded by the max AQ buffer length */
3805 elem_sent = MIN_T(u8, total_elem_left,
3806 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
3807 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3808 elem_sent, ice_aqc_opc_add_sw_rules,
3811 goto ice_add_mac_exit;
3812 r_iter = (struct ice_aqc_sw_rules_elem *)
3813 ((u8 *)r_iter + (elem_sent * s_rule_size));
3816 /* Fill up rule ID based on the value returned from FW */
/* Pass 3: record FW-assigned rule IDs and create bookkeeping entries */
3818 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3820 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3821 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3822 struct ice_fltr_mgmt_list_entry *fm_entry;
3824 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3825 f_info->fltr_rule_id =
3826 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3827 f_info->fltr_act = ICE_FWD_TO_VSI;
3828 /* Create an entry to track this MAC address */
3829 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3830 ice_malloc(hw, sizeof(*fm_entry));
3832 status = ICE_ERR_NO_MEMORY;
3833 goto ice_add_mac_exit;
3835 fm_entry->fltr_info = *f_info;
3836 fm_entry->vsi_count = 1;
3837 /* The book keeping entries will get removed when
3838 * base driver calls remove filter AQ command
3841 LIST_ADD(&fm_entry->list_entry, rule_head);
3842 r_iter = (struct ice_aqc_sw_rules_elem *)
3843 ((u8 *)r_iter + s_rule_size);
3848 ice_release_lock(rule_lock);
3850 ice_free(hw, s_rule);
3855 * ice_add_mac - Add a MAC address based filter rule
3856 * @hw: pointer to the hardware structure
3857 * @m_list: list of MAC addresses and forwarding information
3859 * Function add MAC rule for logical port from HW struct
3861 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3864 return ICE_ERR_PARAM;
3866 return ice_add_mac_rule(hw, m_list, hw->switch_info,
3867 hw->port_info->lport);
/* NOTE(review): listing has elided lines (embedded numbering jumps) —
 * e.g. the "if (!v_list_itr)" branch opener, several goto exit statements
 * and closing braces are missing. Confirm against the full source.
 */
3871 * ice_add_vlan_internal - Add one VLAN based filter rule
3872 * @hw: pointer to the hardware structure
3873 * @recp_list: recipe list for which rule has to be added
3874 * @f_entry: filter entry containing one VLAN information
3876 static enum ice_status
3877 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3878 struct ice_fltr_list_entry *f_entry)
3880 struct ice_fltr_mgmt_list_entry *v_list_itr;
3881 struct ice_fltr_info *new_fltr, *cur_fltr;
3882 enum ice_sw_lkup_type lkup_type;
3883 u16 vsi_list_id = 0, vsi_handle;
3884 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3885 enum ice_status status = ICE_SUCCESS;
3887 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3888 return ICE_ERR_PARAM;
3890 f_entry->fltr_info.fwd_id.hw_vsi_id =
3891 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3892 new_fltr = &f_entry->fltr_info;
3894 /* VLAN ID should only be 12 bits */
3895 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3896 return ICE_ERR_PARAM;
3898 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3899 return ICE_ERR_PARAM;
3901 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3902 lkup_type = new_fltr->lkup_type;
3903 vsi_handle = new_fltr->vsi_handle;
3904 rule_lock = &recp_list->filt_rule_lock;
3905 ice_acquire_lock(rule_lock);
3906 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
/* Case 1 (rule not found): create a new rule; VLAN rules always forward
 * to a VSI list, reusing an existing list for this VSI when possible.
 */
3908 struct ice_vsi_list_map_info *map_info = NULL;
3910 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3911 /* All VLAN pruning rules use a VSI list. Check if
3912 * there is already a VSI list containing VSI that we
3913 * want to add. If found, use the same vsi_list_id for
3914 * this new VLAN rule or else create a new list.
3916 map_info = ice_find_vsi_list_entry(recp_list,
3920 status = ice_create_vsi_list_rule(hw,
3928 /* Convert the action to forwarding to a VSI list. */
3929 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3930 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3933 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3935 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
3938 status = ICE_ERR_DOES_NOT_EXIST;
3941 /* reuse VSI list for new rule and increment ref_cnt */
3943 v_list_itr->vsi_list_info = map_info;
3944 map_info->ref_cnt++;
3946 v_list_itr->vsi_list_info =
3947 ice_create_vsi_list_map(hw, &vsi_handle,
/* Case 2: rule exists and its VSI list is not shared — extend it */
3951 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3952 /* Update existing VSI list to add new VSI ID only if it used
3955 cur_fltr = &v_list_itr->fltr_info;
3956 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3959 /* If VLAN rule exists and VSI list being used by this rule is
3960 * referenced by more than 1 VLAN rule. Then create a new VSI
3961 * list appending previous VSI with new VSI and update existing
3962 * VLAN rule to point to new VSI list ID
3964 struct ice_fltr_info tmp_fltr;
3965 u16 vsi_handle_arr[2];
3968 /* Current implementation only supports reusing VSI list with
3969 * one VSI count. We should never hit below condition
3971 if (v_list_itr->vsi_count > 1 &&
3972 v_list_itr->vsi_list_info->ref_cnt > 1) {
3973 ice_debug(hw, ICE_DBG_SW,
3974 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3975 status = ICE_ERR_CFG;
/* The shared list holds exactly one VSI; find its handle */
3980 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3983 /* A rule already exists with the new VSI being added */
3984 if (cur_handle == vsi_handle) {
3985 status = ICE_ERR_ALREADY_EXISTS;
/* Build a two-VSI list combining the existing and new handles */
3989 vsi_handle_arr[0] = cur_handle;
3990 vsi_handle_arr[1] = vsi_handle;
3991 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3992 &vsi_list_id, lkup_type);
3996 tmp_fltr = v_list_itr->fltr_info;
3997 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3998 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3999 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4000 /* Update the previous switch rule to a new VSI list which
4001 * includes current VSI that is requested
4003 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
4007 /* before overriding VSI list map info. decrement ref_cnt of
4010 v_list_itr->vsi_list_info->ref_cnt--;
4012 /* now update to newly created list */
4013 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
4014 v_list_itr->vsi_list_info =
4015 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4017 v_list_itr->vsi_count++;
4021 ice_release_lock(rule_lock);
4026 * ice_add_vlan_rule - Add VLAN based filter rule
4027 * @hw: pointer to the hardware structure
4028 * @v_list: list of VLAN entries and forwarding information
4029 * @sw: pointer to switch info struct for which function add rule
4031 static enum ice_status
4032 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4033 struct ice_switch_info *sw)
4035 struct ice_fltr_list_entry *v_list_itr;
4036 struct ice_sw_recipe *recp_list;
4038 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
4039 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
4041 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
4042 return ICE_ERR_PARAM;
4043 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
4044 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
4046 if (v_list_itr->status)
4047 return v_list_itr->status;
4053 * ice_add_vlan - Add a VLAN based filter rule
4054 * @hw: pointer to the hardware structure
4055 * @v_list: list of VLAN and forwarding information
4057 * Function add VLAN rule for logical port from HW struct
4059 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4062 return ICE_ERR_PARAM;
4064 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
4068 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
4069 * @hw: pointer to the hardware structure
4070 * @mv_list: list of MAC and VLAN filters
4071 * @sw: pointer to switch info struct for which function add rule
4072 * @lport: logic port number on which function add rule
4074 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
4075 * pruning bits enabled, then it is the responsibility of the caller to make
4076 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
4077 * VLAN won't be received on that VSI otherwise.
4079 static enum ice_status
4080 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
4081 struct ice_switch_info *sw, u8 lport)
4083 struct ice_fltr_list_entry *mv_list_itr;
4084 struct ice_sw_recipe *recp_list;
4086 if (!mv_list || !hw)
4087 return ICE_ERR_PARAM;
4089 recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
4090 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
4092 enum ice_sw_lkup_type l_type =
4093 mv_list_itr->fltr_info.lkup_type;
4095 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4096 return ICE_ERR_PARAM;
4097 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
4098 mv_list_itr->status =
4099 ice_add_rule_internal(hw, recp_list, lport,
4101 if (mv_list_itr->status)
4102 return mv_list_itr->status;
4108 * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
4109 * @hw: pointer to the hardware structure
4110 * @mv_list: list of MAC VLAN addresses and forwarding information
4112 * Function add MAC VLAN rule for logical port from HW struct
4115 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4117 if (!mv_list || !hw)
4118 return ICE_ERR_PARAM;
4120 return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
4121 hw->port_info->lport);
4125 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
4126 * @hw: pointer to the hardware structure
4127 * @em_list: list of ether type MAC filter, MAC is optional
4128 * @sw: pointer to switch info struct for which function add rule
4129 * @lport: logic port number on which function add rule
4131 * This function requires the caller to populate the entries in
4132 * the filter list with the necessary fields (including flags to
4133 * indicate Tx or Rx rules).
4135 static enum ice_status
4136 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
4137 struct ice_switch_info *sw, u8 lport)
4139 struct ice_fltr_list_entry *em_list_itr;
4141 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
4143 struct ice_sw_recipe *recp_list;
4144 enum ice_sw_lkup_type l_type;
4146 l_type = em_list_itr->fltr_info.lkup_type;
4147 recp_list = &sw->recp_list[l_type];
4149 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
4150 l_type != ICE_SW_LKUP_ETHERTYPE)
4151 return ICE_ERR_PARAM;
4153 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
4156 if (em_list_itr->status)
4157 return em_list_itr->status;
4163 * ice_add_eth_mac - Add a ethertype based filter rule
4164 * @hw: pointer to the hardware structure
4165 * @em_list: list of ethertype and forwarding information
4167 * Function add ethertype rule for logical port from HW struct
4170 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
4172 if (!em_list || !hw)
4173 return ICE_ERR_PARAM;
4175 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
4176 hw->port_info->lport);
4180 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
4181 * @hw: pointer to the hardware structure
4182 * @em_list: list of ethertype or ethertype MAC entries
4183 * @sw: pointer to switch info struct for which function add rule
4185 static enum ice_status
4186 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
4187 struct ice_switch_info *sw)
4189 struct ice_fltr_list_entry *em_list_itr, *tmp;
4191 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
4193 struct ice_sw_recipe *recp_list;
4194 enum ice_sw_lkup_type l_type;
4196 l_type = em_list_itr->fltr_info.lkup_type;
4198 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
4199 l_type != ICE_SW_LKUP_ETHERTYPE)
4200 return ICE_ERR_PARAM;
4202 recp_list = &sw->recp_list[l_type];
4203 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4205 if (em_list_itr->status)
4206 return em_list_itr->status;
4212 * ice_remove_eth_mac - remove a ethertype based filter rule
4213 * @hw: pointer to the hardware structure
4214 * @em_list: list of ethertype and forwarding information
4218 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
4220 if (!em_list || !hw)
4221 return ICE_ERR_PARAM;
4223 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
4227 * ice_rem_sw_rule_info
4228 * @hw: pointer to the hardware structure
4229 * @rule_head: pointer to the switch list structure that we want to delete
4232 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
4234 if (!LIST_EMPTY(rule_head)) {
4235 struct ice_fltr_mgmt_list_entry *entry;
4236 struct ice_fltr_mgmt_list_entry *tmp;
4238 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
4239 ice_fltr_mgmt_list_entry, list_entry) {
4240 LIST_DEL(&entry->list_entry);
4241 ice_free(hw, entry);
4247 * ice_rem_adv_rule_info
4248 * @hw: pointer to the hardware structure
4249 * @rule_head: pointer to the switch list structure that we want to delete
4252 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
4254 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
4255 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
4257 if (LIST_EMPTY(rule_head))
4260 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
4261 ice_adv_fltr_mgmt_list_entry, list_entry) {
4262 LIST_DEL(&lst_itr->list_entry);
4263 ice_free(hw, lst_itr->lkups);
4264 ice_free(hw, lst_itr);
4269 * ice_rem_all_sw_rules_info
4270 * @hw: pointer to the hardware structure
4272 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
4274 struct ice_switch_info *sw = hw->switch_info;
4277 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4278 struct LIST_HEAD_TYPE *rule_head;
4280 rule_head = &sw->recp_list[i].filt_rules;
4281 if (!sw->recp_list[i].adv_rule)
4282 ice_rem_sw_rule_info(hw, rule_head);
4284 ice_rem_adv_rule_info(hw, rule_head);
4285 if (sw->recp_list[i].adv_rule &&
4286 LIST_EMPTY(&sw->recp_list[i].filt_rules))
4287 sw->recp_list[i].adv_rule = false;
/* NOTE(review): listing has elided lines (embedded numbering jumps) — the
 * return-type line, NULL check for s_rule, "if (set)" arms and closing
 * braces are missing. Confirm against the full source.
 */
4292 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
4293 * @pi: pointer to the port_info structure
4294 * @vsi_handle: VSI handle to set as default
4295 * @set: true to add the above mentioned switch rule, false to remove it
4296 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
4298 * add filter rule to set/unset given VSI as default VSI for the switch
4299 * (represented by swid)
4302 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
4305 struct ice_aqc_sw_rules_elem *s_rule;
4306 struct ice_fltr_info f_info;
4307 struct ice_hw *hw = pi->hw;
4308 enum ice_adminq_opc opcode;
4309 enum ice_status status;
4313 if (!ice_is_vsi_valid(hw, vsi_handle))
4314 return ICE_ERR_PARAM;
4315 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
/* Adding requires the full ETH-header rule size; removal only needs the
 * headerless variant.
 */
4317 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
4318 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
4319 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
4321 return ICE_ERR_NO_MEMORY;
4323 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
/* Build a default-VSI lookup rule forwarding to the chosen HW VSI */
4325 f_info.lkup_type = ICE_SW_LKUP_DFLT;
4326 f_info.flag = direction;
4327 f_info.fltr_act = ICE_FWD_TO_VSI;
4328 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
/* Rx rules are sourced from the logical port; Tx rules from the VSI */
4330 if (f_info.flag & ICE_FLTR_RX) {
4331 f_info.src = pi->lport;
4332 f_info.src_id = ICE_SRC_ID_LPORT;
4334 f_info.fltr_rule_id =
4335 pi->dflt_rx_vsi_rule_id;
4336 } else if (f_info.flag & ICE_FLTR_TX) {
4337 f_info.src_id = ICE_SRC_ID_VSI;
4338 f_info.src = hw_vsi_id;
4340 f_info.fltr_rule_id =
4341 pi->dflt_tx_vsi_rule_id;
4345 opcode = ice_aqc_opc_add_sw_rules;
4347 opcode = ice_aqc_opc_remove_sw_rules;
4349 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
4351 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
4352 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
/* On success, record (or invalidate) the cached default-VSI rule state
 * on the port_info for the affected direction.
 */
4355 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4357 if (f_info.flag & ICE_FLTR_TX) {
4358 pi->dflt_tx_vsi_num = hw_vsi_id;
4359 pi->dflt_tx_vsi_rule_id = index;
4360 } else if (f_info.flag & ICE_FLTR_RX) {
4361 pi->dflt_rx_vsi_num = hw_vsi_id;
4362 pi->dflt_rx_vsi_rule_id = index;
4365 if (f_info.flag & ICE_FLTR_TX) {
4366 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
4367 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
4368 } else if (f_info.flag & ICE_FLTR_RX) {
4369 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
4370 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
4375 ice_free(hw, s_rule);
4380 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
4381 * @list_head: head of rule list
4382 * @f_info: rule information
4384 * Helper function to search for a unicast rule entry - this is to be used
4385 * to remove unicast MAC filter that is not shared with other VSIs on the
4388 * Returns pointer to entry storing the rule if found
4390 static struct ice_fltr_mgmt_list_entry *
4391 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
4392 struct ice_fltr_info *f_info)
4394 struct ice_fltr_mgmt_list_entry *list_itr;
4396 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4398 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4399 sizeof(f_info->l_data)) &&
4400 f_info->fwd_id.hw_vsi_id ==
4401 list_itr->fltr_info.fwd_id.hw_vsi_id &&
4402 f_info->flag == list_itr->fltr_info.flag)
/* NOTE(review): listing has elided lines (embedded numbering jumps) — the
 * "if (!m_list)" condition before the PARAM return, the loop-open brace
 * and the final return are missing. Confirm against the full source.
 */
4409 * ice_remove_mac_rule - remove a MAC based filter rule
4410 * @hw: pointer to the hardware structure
4411 * @m_list: list of MAC addresses and forwarding information
4412 * @recp_list: list from which function remove MAC address
4414 * This function removes either a MAC filter rule or a specific VSI from a
4415 * VSI list for a multicast MAC address.
4417 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
4418 * ice_add_mac. Caller should be aware that this call will only work if all
4419 * the entries passed into m_list were added previously. It will not attempt to
4420 * do a partial remove of entries that were found.
4422 static enum ice_status
4423 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4424 struct ice_sw_recipe *recp_list)
4426 struct ice_fltr_list_entry *list_itr, *tmp;
4427 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4430 return ICE_ERR_PARAM;
4432 rule_lock = &recp_list->filt_rule_lock;
/* SAFE iteration: ice_remove_rule_internal may unlink entries */
4433 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
4435 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
4436 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
4439 if (l_type != ICE_SW_LKUP_MAC)
4440 return ICE_ERR_PARAM;
4442 vsi_handle = list_itr->fltr_info.vsi_handle;
4443 if (!ice_is_vsi_valid(hw, vsi_handle))
4444 return ICE_ERR_PARAM;
4446 list_itr->fltr_info.fwd_id.hw_vsi_id =
4447 ice_get_hw_vsi_num(hw, vsi_handle);
4448 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4449 /* Don't remove the unicast address that belongs to
4450 * another VSI on the switch, since it is not being
4453 ice_acquire_lock(rule_lock);
4454 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
4455 &list_itr->fltr_info)) {
4456 ice_release_lock(rule_lock);
4457 return ICE_ERR_DOES_NOT_EXIST;
4459 ice_release_lock(rule_lock);
4461 list_itr->status = ice_remove_rule_internal(hw, recp_list,
/* Abort on the first entry that fails to be removed (no partial undo) */
4463 if (list_itr->status)
4464 return list_itr->status;
4470 * ice_remove_mac - remove a MAC address based filter rule
4471 * @hw: pointer to the hardware structure
4472 * @m_list: list of MAC addresses and forwarding information
4475 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4477 struct ice_sw_recipe *recp_list;
4479 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4480 return ice_remove_mac_rule(hw, m_list, recp_list);
4484 * ice_remove_vlan_rule - Remove VLAN based filter rule
4485 * @hw: pointer to the hardware structure
4486 * @v_list: list of VLAN entries and forwarding information
4487 * @recp_list: list from which function remove VLAN
4489 static enum ice_status
4490 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4491 struct ice_sw_recipe *recp_list)
4493 struct ice_fltr_list_entry *v_list_itr, *tmp;
4495 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4497 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4499 if (l_type != ICE_SW_LKUP_VLAN)
4500 return ICE_ERR_PARAM;
4501 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4503 if (v_list_itr->status)
4504 return v_list_itr->status;
4510 * ice_remove_vlan - remove a VLAN address based filter rule
4511 * @hw: pointer to the hardware structure
4512 * @v_list: list of VLAN and forwarding information
4516 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4518 struct ice_sw_recipe *recp_list;
4521 return ICE_ERR_PARAM;
4523 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
4524 return ice_remove_vlan_rule(hw, v_list, recp_list);
4528 * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
4529 * @hw: pointer to the hardware structure
4530 * @v_list: list of MAC VLAN entries and forwarding information
4531 * @recp_list: list from which function remove MAC VLAN
4533 static enum ice_status
4534 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4535 struct ice_sw_recipe *recp_list)
4537 struct ice_fltr_list_entry *v_list_itr, *tmp;
4539 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4540 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4542 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4544 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4545 return ICE_ERR_PARAM;
4546 v_list_itr->status =
4547 ice_remove_rule_internal(hw, recp_list,
4549 if (v_list_itr->status)
4550 return v_list_itr->status;
4556 * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
4557 * @hw: pointer to the hardware structure
4558 * @mv_list: list of MAC VLAN and forwarding information
4561 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4563 struct ice_sw_recipe *recp_list;
4565 if (!mv_list || !hw)
4566 return ICE_ERR_PARAM;
4568 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4569 return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
4573 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
4574 * @fm_entry: filter entry to inspect
4575 * @vsi_handle: VSI handle to compare with filter info
/* True when the filter forwards directly to this VSI, or forwards to a
 * VSI list whose bitmap (vsi_list_info->vsi_map) has this handle set.
 */
4578 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
4580 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
4581 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
4582 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
4583 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
4588 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
4589 * @hw: pointer to the hardware structure
4590 * @vsi_handle: VSI handle to remove filters from
4591 * @vsi_list_head: pointer to the list to add entry to
4592 * @fi: pointer to fltr_info of filter entry to copy & add
4594 * Helper function, used when creating a list of filters to remove from
4595 * a specific VSI. The entry added to vsi_list_head is a COPY of the
4596 * original filter entry, with the exception of fltr_info.fltr_act and
4597 * fltr_info.fwd_id fields. These are set such that later logic can
4598 * extract which VSI to remove the fltr from, and pass on that information.
4600 static enum ice_status
4601 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4602 struct LIST_HEAD_TYPE *vsi_list_head,
4603 struct ice_fltr_info *fi)
4605 struct ice_fltr_list_entry *tmp;
4607 /* this memory is freed up in the caller function
4608 * once filters for this VSI are removed
/* NOTE(review): the !tmp check guarding the ICE_ERR_NO_MEMORY return is
 * elided in this excerpt.
 */
4610 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
4612 return ICE_ERR_NO_MEMORY;
/* Struct copy of the caller's filter info */
4614 tmp->fltr_info = *fi;
4616 /* Overwrite these fields to indicate which VSI to remove filter from,
4617 * so find and remove logic can extract the information from the
4618 * list entries. Note that original entries will still have proper
4621 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
4622 tmp->fltr_info.vsi_handle = vsi_handle;
4623 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4625 LIST_ADD(&tmp->list_entry, vsi_list_head);
4631 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4632 * @hw: pointer to the hardware structure
4633 * @vsi_handle: VSI handle to remove filters from
4634 * @lkup_list_head: pointer to the list that has certain lookup type filters
4635 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4637 * Locates all filters in lkup_list_head that are used by the given VSI,
4638 * and adds COPIES of those entries to vsi_list_head (intended to be used
4639 * to remove the listed filters).
4640 * Note that this means all entries in vsi_list_head must be explicitly
4641 * deallocated by the caller when done with list.
4643 static enum ice_status
4644 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4645 struct LIST_HEAD_TYPE *lkup_list_head,
4646 struct LIST_HEAD_TYPE *vsi_list_head)
4648 struct ice_fltr_mgmt_list_entry *fm_entry;
4649 enum ice_status status = ICE_SUCCESS;
4651 /* check to make sure VSI ID is valid and within boundary */
4652 if (!ice_is_vsi_valid(hw, vsi_handle))
4653 return ICE_ERR_PARAM;
4655 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
4656 ice_fltr_mgmt_list_entry, list_entry) {
4657 struct ice_fltr_info *fi;
4659 fi = &fm_entry->fltr_info;
/* Skip entries not used by this VSI (a continue is elided here) */
4660 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
4663 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4672 * ice_determine_promisc_mask
4673 * @fi: filter info to parse
4675 * Helper function to determine which ICE_PROMISC_ mask corresponds
4676 * to given filter into.
/* Classifies the filter's DA as broadcast/multicast/unicast and its
 * direction (TX vs RX) into the corresponding ICE_PROMISC_* bit(s).
 */
4678 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4680 u16 vid = fi->l_data.mac_vlan.vlan_id;
4681 u8 *macaddr = fi->l_data.mac.mac_addr;
4682 bool is_tx_fltr = false;
4683 u8 promisc_mask = 0;
/* The is_tx_fltr = true assignment for the TX branch is elided here */
4685 if (fi->flag == ICE_FLTR_TX)
4688 if (IS_BROADCAST_ETHER_ADDR(macaddr))
4689 promisc_mask |= is_tx_fltr ?
4690 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4691 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
4692 promisc_mask |= is_tx_fltr ?
4693 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4694 else if (IS_UNICAST_ETHER_ADDR(macaddr))
4695 promisc_mask |= is_tx_fltr ?
4696 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
/* NOTE(review): the condition (presumably `if (vid)`) guarding the VLAN
 * promisc bits below is elided — confirm against the full source.
 */
4698 promisc_mask |= is_tx_fltr ?
4699 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4701 return promisc_mask;
4705 * _ice_get_vsi_promisc - get promiscuous mode of given VSI
4706 * @hw: pointer to the hardware structure
4707 * @vsi_handle: VSI handle to retrieve info from
4708 * @promisc_mask: pointer to mask to be filled in
4709 * @vid: VLAN ID of promisc VLAN VSI
4710 * @sw: pointer to switch info struct for which function add rule
/* Accumulates ICE_PROMISC_* bits into *promisc_mask by scanning the
 * ICE_SW_LKUP_PROMISC rule list for entries used by this VSI, under the
 * recipe's rule lock.
 */
4712 static enum ice_status
4713 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4714 u16 *vid, struct ice_switch_info *sw)
4716 struct ice_fltr_mgmt_list_entry *itr;
4717 struct LIST_HEAD_TYPE *rule_head;
4718 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4720 if (!ice_is_vsi_valid(hw, vsi_handle))
4721 return ICE_ERR_PARAM;
/* Output initialization (*vid / *promisc_mask zeroing) is elided here */
4725 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
4726 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
4728 ice_acquire_lock(rule_lock);
4729 LIST_FOR_EACH_ENTRY(itr, rule_head,
4730 ice_fltr_mgmt_list_entry, list_entry) {
4731 /* Continue if this filter doesn't apply to this VSI or the
4732 * VSI ID is not in the VSI map for this filter
4734 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4737 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4739 ice_release_lock(rule_lock);
4745 * ice_get_vsi_promisc - get promiscuous mode of given VSI
4746 * @hw: pointer to the hardware structure
4747 * @vsi_handle: VSI handle to retrieve info from
4748 * @promisc_mask: pointer to mask to be filled in
4749 * @vid: VLAN ID of promisc VLAN VSI
/* Public wrapper: forwards to the internal worker using the default
 * switch info from hw.
 */
4752 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4755 return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
4756 vid, hw->switch_info);
4760 * _ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4761 * @hw: pointer to the hardware structure
4762 * @vsi_handle: VSI handle to retrieve info from
4763 * @promisc_mask: pointer to mask to be filled in
4764 * @vid: VLAN ID of promisc VLAN VSI
4765 * @sw: pointer to switch info struct for which function add rule
/* Same scan as _ice_get_vsi_promisc() but over the PROMISC_VLAN recipe's
 * rule list.  (Header comment above corrected: this is the static
 * underscore-prefixed worker; the public ice_get_vsi_vlan_promisc is
 * documented separately below.)
 */
4767 static enum ice_status
4768 _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4769 u16 *vid, struct ice_switch_info *sw)
4771 struct ice_fltr_mgmt_list_entry *itr;
4772 struct LIST_HEAD_TYPE *rule_head;
4773 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4775 if (!ice_is_vsi_valid(hw, vsi_handle))
4776 return ICE_ERR_PARAM;
4780 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
4781 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
4783 ice_acquire_lock(rule_lock);
4784 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
4786 /* Continue if this filter doesn't apply to this VSI or the
4787 * VSI ID is not in the VSI map for this filter
4789 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4792 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4794 ice_release_lock(rule_lock);
4800 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4801 * @hw: pointer to the hardware structure
4802 * @vsi_handle: VSI handle to retrieve info from
4803 * @promisc_mask: pointer to mask to be filled in
4804 * @vid: VLAN ID of promisc VLAN VSI
/* Public wrapper: forwards to the internal worker using hw's switch info. */
4807 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4810 return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
4811 vid, hw->switch_info);
4815 * ice_remove_promisc - Remove promisc based filter rules
4816 * @hw: pointer to the hardware structure
4817 * @recp_id: recipe ID for which the rule needs to removed
4818 * @v_list: list of promisc entries
/* Removes each entry in v_list against the given recipe; stops at and
 * returns the first per-entry failure status.
 */
4820 static enum ice_status
4821 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
4822 struct LIST_HEAD_TYPE *v_list)
4824 struct ice_fltr_list_entry *v_list_itr, *tmp;
4825 struct ice_sw_recipe *recp_list;
4827 recp_list = &hw->switch_info->recp_list[recp_id];
4828 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4830 v_list_itr->status =
4831 ice_remove_rule_internal(hw, recp_list, v_list_itr);
4832 if (v_list_itr->status)
4833 return v_list_itr->status;
4839 * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
4840 * @hw: pointer to the hardware structure
4841 * @vsi_handle: VSI handle to clear mode
4842 * @promisc_mask: mask of promiscuous config bits to clear
4843 * @vid: VLAN ID to clear VLAN promiscuous
4844 * @sw: pointer to switch info struct for which function add rule
/* Builds a temporary remove-list of all promisc rules on this VSI whose
 * classification is fully covered by promisc_mask, removes them, then
 * frees every copied list entry.
 */
4846 static enum ice_status
4847 _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4848 u16 vid, struct ice_switch_info *sw)
4850 struct ice_fltr_list_entry *fm_entry, *tmp;
4851 struct LIST_HEAD_TYPE remove_list_head;
4852 struct ice_fltr_mgmt_list_entry *itr;
4853 struct LIST_HEAD_TYPE *rule_head;
4854 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4855 enum ice_status status = ICE_SUCCESS;
/* NOTE(review): the recipe_id declaration line is elided here */
4858 if (!ice_is_vsi_valid(hw, vsi_handle))
4859 return ICE_ERR_PARAM;
/* VLAN bits select the PROMISC_VLAN recipe; otherwise plain PROMISC */
4861 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
4862 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4864 recipe_id = ICE_SW_LKUP_PROMISC;
4866 rule_head = &sw->recp_list[recipe_id].filt_rules;
4867 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4869 INIT_LIST_HEAD(&remove_list_head);
4871 ice_acquire_lock(rule_lock);
4872 LIST_FOR_EACH_ENTRY(itr, rule_head,
4873 ice_fltr_mgmt_list_entry, list_entry) {
4874 struct ice_fltr_info *fltr_info;
4875 u8 fltr_promisc_mask = 0;
4877 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4879 fltr_info = &itr->fltr_info;
/* For VLAN promisc, only clear rules matching the requested VID */
4881 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4882 vid != fltr_info->l_data.mac_vlan.vlan_id)
4885 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4887 /* Skip if filter is not completely specified by given mask */
4888 if (fltr_promisc_mask & ~promisc_mask)
4891 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
/* On copy failure, drop the lock and free what was collected so far */
4895 ice_release_lock(rule_lock);
4896 goto free_fltr_list;
4899 ice_release_lock(rule_lock);
4901 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
/* Free the COPIES made by ice_add_entry_to_vsi_fltr_list() */
4904 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4905 ice_fltr_list_entry, list_entry) {
4906 LIST_DEL(&fm_entry->list_entry);
4907 ice_free(hw, fm_entry);
4914 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
4915 * @hw: pointer to the hardware structure
4916 * @vsi_handle: VSI handle to clear mode
4917 * @promisc_mask: mask of promiscuous config bits to clear
4918 * @vid: VLAN ID to clear VLAN promiscuous
/* Public wrapper: forwards to the internal worker using hw's switch info. */
4921 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
4922 u8 promisc_mask, u16 vid)
4924 return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
4925 vid, hw->switch_info);
4929 * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4930 * @hw: pointer to the hardware structure
4931 * @vsi_handle: VSI handle to configure
4932 * @promisc_mask: mask of promiscuous config bits
4933 * @vid: VLAN ID to set VLAN promiscuous
4934 * @lport: logical port number to configure promisc mode
4935 * @sw: pointer to switch info struct for which function add rule
/* Installs one switch rule per requested direction/packet-type bit in
 * promisc_mask, consuming bits from the mask until it is empty.
 */
4937 static enum ice_status
4938 _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4939 u16 vid, u8 lport, struct ice_switch_info *sw)
4941 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4942 struct ice_fltr_list_entry f_list_entry;
4943 struct ice_fltr_info new_fltr;
4944 enum ice_status status = ICE_SUCCESS;
/* NOTE(review): declarations of hw_vsi_id, recipe_id, pkt_type,
 * is_tx_fltr and mac_addr are elided in this excerpt.
 */
4950 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4952 if (!ice_is_vsi_valid(hw, vsi_handle))
4953 return ICE_ERR_PARAM;
4954 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4956 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
/* VLAN-promisc requests use the PROMISC_VLAN recipe and carry the VID */
4958 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4959 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4960 new_fltr.l_data.mac_vlan.vlan_id = vid;
4961 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4963 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4964 recipe_id = ICE_SW_LKUP_PROMISC;
4967 /* Separate filters must be set for each direction/packet type
4968 * combination, so we will loop over the mask value, store the
4969 * individual type, and clear it out in the input mask as it
4972 while (promisc_mask) {
4973 struct ice_sw_recipe *recp_list;
/* Consume exactly one U/M/B-cast RX or TX bit per iteration; the
 * is_tx_fltr = true assignments for TX branches are elided here.
 */
4979 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4980 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4981 pkt_type = UCAST_FLTR;
4982 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4983 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4984 pkt_type = UCAST_FLTR;
4986 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4987 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4988 pkt_type = MCAST_FLTR;
4989 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4990 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4991 pkt_type = MCAST_FLTR;
4993 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4994 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4995 pkt_type = BCAST_FLTR;
4996 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4997 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4998 pkt_type = BCAST_FLTR;
5002 /* Check for VLAN promiscuous flag */
5003 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
5004 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
5005 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
5006 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
5010 /* Set filter DA based on packet type */
5011 mac_addr = new_fltr.l_data.mac.mac_addr;
5012 if (pkt_type == BCAST_FLTR) {
5013 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
5014 } else if (pkt_type == MCAST_FLTR ||
5015 pkt_type == UCAST_FLTR) {
5016 /* Use the dummy ether header DA */
5017 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
5018 ICE_NONDMA_TO_NONDMA);
5019 if (pkt_type == MCAST_FLTR)
5020 mac_addr[0] |= 0x1; /* Set multicast bit */
5023 /* Need to reset this to zero for all iterations */
/* TX rules use the VSI as source; RX rules use the logical port */
5026 new_fltr.flag |= ICE_FLTR_TX;
5027 new_fltr.src = hw_vsi_id;
5029 new_fltr.flag |= ICE_FLTR_RX;
5030 new_fltr.src = lport;
5033 new_fltr.fltr_act = ICE_FWD_TO_VSI;
5034 new_fltr.vsi_handle = vsi_handle;
5035 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
5036 f_list_entry.fltr_info = new_fltr;
5037 recp_list = &sw->recp_list[recipe_id];
5039 status = ice_add_rule_internal(hw, recp_list, lport,
5041 if (status != ICE_SUCCESS)
5042 goto set_promisc_exit;
5050 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5051 * @hw: pointer to the hardware structure
5052 * @vsi_handle: VSI handle to configure
5053 * @promisc_mask: mask of promiscuous config bits
5054 * @vid: VLAN ID to set VLAN promiscuous
/* Public wrapper: forwards to the internal worker with the port's lport
 * and hw's switch info.
 */
5057 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5060 return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
5061 hw->port_info->lport,
5066 * _ice_set_vlan_vsi_promisc
5067 * @hw: pointer to the hardware structure
5068 * @vsi_handle: VSI handle to configure
5069 * @promisc_mask: mask of promiscuous config bits
5070 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5071 * @lport: logical port number to configure promisc mode
5072 * @sw: pointer to switch info struct for which function add rule
5074 * Configure VSI with all associated VLANs to given promiscuous mode(s)
/* Collects the VSI's VLAN filters into a temporary list, then for each
 * VLAN either clears or sets promisc mode per rm_vlan_promisc, and
 * finally frees the copied entries.
 */
5076 static enum ice_status
5077 _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5078 bool rm_vlan_promisc, u8 lport,
5079 struct ice_switch_info *sw)
5081 struct ice_fltr_list_entry *list_itr, *tmp;
5082 struct LIST_HEAD_TYPE vsi_list_head;
5083 struct LIST_HEAD_TYPE *vlan_head;
5084 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
5085 enum ice_status status;
/* NOTE(review): the u16 vlan_id declaration is elided here */
5088 INIT_LIST_HEAD(&vsi_list_head);
5089 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
5090 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
5091 ice_acquire_lock(vlan_lock);
5092 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
5094 ice_release_lock(vlan_lock);
5096 goto free_fltr_list;
5098 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
5100 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
5101 if (rm_vlan_promisc)
5102 status = _ice_clear_vsi_promisc(hw, vsi_handle,
5106 status = _ice_set_vsi_promisc(hw, vsi_handle,
5107 promisc_mask, vlan_id,
/* Free the COPIES made by ice_add_to_vsi_fltr_list() */
5114 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
5115 ice_fltr_list_entry, list_entry) {
5116 LIST_DEL(&list_itr->list_entry);
5117 ice_free(hw, list_itr);
5123 * ice_set_vlan_vsi_promisc
5124 * @hw: pointer to the hardware structure
5125 * @vsi_handle: VSI handle to configure
5126 * @promisc_mask: mask of promiscuous config bits
5127 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5129 * Configure VSI with all associated VLANs to given promiscuous mode(s)
/* Public wrapper: forwards to the internal worker with the port's lport. */
5132 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5133 bool rm_vlan_promisc)
5135 return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
5136 rm_vlan_promisc, hw->port_info->lport,
5141 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
5142 * @hw: pointer to the hardware structure
5143 * @vsi_handle: VSI handle to remove filters from
5144 * @recp_list: recipe list from which function remove fltr
5145 * @lkup: switch rule filter lookup type
/* Copies this VSI's filters of the given lookup type into a temporary
 * list, dispatches to the matching per-type removal routine, then frees
 * the copies.
 */
5148 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
5149 struct ice_sw_recipe *recp_list,
5150 enum ice_sw_lkup_type lkup)
5152 struct ice_fltr_list_entry *fm_entry;
5153 struct LIST_HEAD_TYPE remove_list_head;
5154 struct LIST_HEAD_TYPE *rule_head;
5155 struct ice_fltr_list_entry *tmp;
5156 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5157 enum ice_status status;
5159 INIT_LIST_HEAD(&remove_list_head);
5160 rule_lock = &recp_list[lkup].filt_rule_lock;
5161 rule_head = &recp_list[lkup].filt_rules;
5162 ice_acquire_lock(rule_lock);
5163 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
5165 ice_release_lock(rule_lock);
/* The status check and switch (lkup) statement lines are elided here */
5170 case ICE_SW_LKUP_MAC:
5171 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
5173 case ICE_SW_LKUP_VLAN:
5174 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
5176 case ICE_SW_LKUP_PROMISC:
5177 case ICE_SW_LKUP_PROMISC_VLAN:
5178 ice_remove_promisc(hw, lkup, &remove_list_head);
5180 case ICE_SW_LKUP_MAC_VLAN:
5181 ice_remove_mac_vlan(hw, &remove_list_head);
5183 case ICE_SW_LKUP_ETHERTYPE:
5184 case ICE_SW_LKUP_ETHERTYPE_MAC:
5185 ice_remove_eth_mac(hw, &remove_list_head);
5187 case ICE_SW_LKUP_DFLT:
5188 ice_debug(hw, ICE_DBG_SW,
5189 "Remove filters for this lookup type hasn't been implemented yet\n");
5191 case ICE_SW_LKUP_LAST:
5192 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
/* Free the COPIES made by ice_add_to_vsi_fltr_list() */
5196 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
5197 ice_fltr_list_entry, list_entry) {
5198 LIST_DEL(&fm_entry->list_entry);
5199 ice_free(hw, fm_entry);
5204 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
5205 * @hw: pointer to the hardware structure
5206 * @vsi_handle: VSI handle to remove filters from
5207 * @sw: pointer to switch info struct
/* Removes this VSI's filters for every supported lookup type, one
 * ice_remove_vsi_lkup_fltr() call per type.
 */
5210 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
5211 struct ice_switch_info *sw)
5213 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
5215 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5216 sw->recp_list, ICE_SW_LKUP_MAC);
5217 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5218 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
5219 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5220 sw->recp_list, ICE_SW_LKUP_PROMISC);
5221 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5222 sw->recp_list, ICE_SW_LKUP_VLAN);
5223 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5224 sw->recp_list, ICE_SW_LKUP_DFLT);
5225 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5226 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
5227 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5228 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
5229 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5230 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
5234 * ice_remove_vsi_fltr - Remove all filters for a VSI
5235 * @hw: pointer to the hardware structure
5236 * @vsi_handle: VSI handle to remove filters from
/* Public wrapper: forwards to the worker using hw's default switch info. */
5238 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
5240 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
5244 * ice_alloc_res_cntr - allocating resource counter
5245 * @hw: pointer to the hardware structure
5246 * @type: type of resource
5247 * @alloc_shared: if set it is shared else dedicated
5248 * @num_items: number of entries requested for FD resource type
5249 * @counter_id: counter index returned by AQ call
/* Allocates a counter resource through the alloc_res admin queue
 * command and returns the granted index in *counter_id.
 * NOTE(review): the !buf check and the exit path freeing buf are elided
 * in this excerpt — confirm buf is freed on all paths.
 */
5252 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
5255 struct ice_aqc_alloc_free_res_elem *buf;
5256 enum ice_status status;
5259 /* Allocate resource */
5260 buf_len = sizeof(*buf);
5261 buf = (struct ice_aqc_alloc_free_res_elem *)
5262 ice_malloc(hw, buf_len);
5264 return ICE_ERR_NO_MEMORY;
5266 buf->num_elems = CPU_TO_LE16(num_items);
5267 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
5268 ICE_AQC_RES_TYPE_M) | alloc_shared);
5270 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
5271 ice_aqc_opc_alloc_res, NULL);
/* Firmware returns the allocated resource index in elem[0].e.sw_resp */
5275 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
5283 * ice_free_res_cntr - free resource counter
5284 * @hw: pointer to the hardware structure
5285 * @type: type of resource
5286 * @alloc_shared: if set it is shared else dedicated
5287 * @num_items: number of entries to be freed for FD resource type
5288 * @counter_id: counter ID resource which needs to be freed
/* Releases a previously-allocated counter resource via the free_res
 * admin queue command; logs (but also returns) any failure.
 */
5291 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
5294 struct ice_aqc_alloc_free_res_elem *buf;
5295 enum ice_status status;
5299 buf_len = sizeof(*buf);
5300 buf = (struct ice_aqc_alloc_free_res_elem *)
5301 ice_malloc(hw, buf_len);
5303 return ICE_ERR_NO_MEMORY;
5305 buf->num_elems = CPU_TO_LE16(num_items);
5306 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
5307 ICE_AQC_RES_TYPE_M) | alloc_shared);
5308 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
5310 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
5311 ice_aqc_opc_free_res, NULL);
5313 ice_debug(hw, ICE_DBG_SW,
5314 "counter resource could not be freed\n");
5321 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
5322 * @hw: pointer to the hardware structure
5323 * @counter_id: returns counter index
/* Convenience wrapper: allocates one dedicated VLAN counter. */
5325 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
5327 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
5328 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
5333 * ice_free_vlan_res_counter - Free counter resource for VLAN type
5334 * @hw: pointer to the hardware structure
5335 * @counter_id: counter index to be freed
/* Convenience wrapper: frees one dedicated VLAN counter. */
5337 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
5339 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
5340 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
5345 * ice_alloc_res_lg_act - add large action resource
5346 * @hw: pointer to the hardware structure
5347 * @l_id: large action ID to fill it in
5348 * @num_acts: number of actions to hold with a large action entry
/* Allocates a wide-table entry sized for num_acts (1..ICE_MAX_LG_ACT)
 * actions and returns its index in *l_id.
 */
5350 static enum ice_status
5351 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
5353 struct ice_aqc_alloc_free_res_elem *sw_buf;
5354 enum ice_status status;
5357 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
5358 return ICE_ERR_PARAM;
5360 /* Allocate resource for large action */
5361 buf_len = sizeof(*sw_buf);
5362 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
5363 ice_malloc(hw, buf_len);
5365 return ICE_ERR_NO_MEMORY;
5367 sw_buf->num_elems = CPU_TO_LE16(1);
5369 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
5370 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
5371 * If num_acts is greater than 2, then use
5372 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
5373 * The num_acts cannot exceed 4. This was ensured at the
5374 * beginning of the function.
/* (Comment above corrected: code selects WIDE_TABLE_2 for two actions;
 * no WIDE_TABLE_3 type is used here.)
 */
5377 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
5378 else if (num_acts == 2)
5379 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
5381 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
5383 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
5384 ice_aqc_opc_alloc_res, NULL);
/* On success FW returns the wide-table index in elem[0].e.sw_resp */
5386 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
5388 ice_free(hw, sw_buf);
5393 * ice_add_mac_with_sw_marker - add filter with sw marker
5394 * @hw: pointer to the hardware structure
5395 * @f_info: filter info structure containing the MAC filter information
5396 * @sw_marker: sw marker to tag the Rx descriptor with
/* Ensures the MAC rule exists (adding it if needed), then attaches a
 * 3-action large action carrying the SW marker.  If the rule was newly
 * added here and marker attachment fails, the rule is rolled back.
 */
5399 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
5402 struct ice_fltr_mgmt_list_entry *m_entry;
5403 struct ice_fltr_list_entry fl_info;
5404 struct ice_sw_recipe *recp_list;
5405 struct LIST_HEAD_TYPE l_head;
5406 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5407 enum ice_status ret;
/* NOTE(review): declarations of entry_exists and lg_act_id are elided */
5411 if (f_info->fltr_act != ICE_FWD_TO_VSI)
5412 return ICE_ERR_PARAM;
5414 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5415 return ICE_ERR_PARAM;
5417 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
5418 return ICE_ERR_PARAM;
5420 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5421 return ICE_ERR_PARAM;
5422 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5424 /* Add filter if it doesn't exist so then the adding of large
5425 * action always results in update
5428 INIT_LIST_HEAD(&l_head);
5429 fl_info.fltr_info = *f_info;
5430 LIST_ADD(&fl_info.list_entry, &l_head);
5432 entry_exists = false;
5433 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5434 hw->port_info->lport);
5435 if (ret == ICE_ERR_ALREADY_EXISTS)
5436 entry_exists = true;
5440 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5441 rule_lock = &recp_list->filt_rule_lock;
5442 ice_acquire_lock(rule_lock);
5443 /* Get the book keeping entry for the filter */
5444 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
/* NOTE(review): the !m_entry error branch is elided in this excerpt */
5448 /* If counter action was enabled for this rule then don't enable
5449 * sw marker large action
5451 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5452 ret = ICE_ERR_PARAM;
5456 /* if same marker was added before */
5457 if (m_entry->sw_marker_id == sw_marker) {
5458 ret = ICE_ERR_ALREADY_EXISTS;
5462 /* Allocate a hardware table entry to hold large act. Three actions
5463 * for marker based large action
5465 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
5469 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5472 /* Update the switch rule to add the marker action */
5473 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
5475 ice_release_lock(rule_lock);
/* Error-exit path: undo the add only if we created the rule above */
5480 ice_release_lock(rule_lock);
5481 /* only remove entry if it did not exist previously */
5483 ret = ice_remove_mac(hw, &l_head);
5489 * ice_add_mac_with_counter - add filter with counter enabled
5490 * @hw: pointer to the hardware structure
5491 * @f_info: pointer to filter info structure containing the MAC filter
/* Ensures the MAC rule exists (adding it if needed), allocates a VLAN
 * counter plus a 2-action large action, and attaches the counter to the
 * rule.  Rolls back the add if the rule was newly created here.
 */
5495 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
5497 struct ice_fltr_mgmt_list_entry *m_entry;
5498 struct ice_fltr_list_entry fl_info;
5499 struct ice_sw_recipe *recp_list;
5500 struct LIST_HEAD_TYPE l_head;
5501 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5502 enum ice_status ret;
/* NOTE(review): declarations of entry_exist, counter_id and lg_act_id
 * are elided in this excerpt.
 */
5507 if (f_info->fltr_act != ICE_FWD_TO_VSI)
5508 return ICE_ERR_PARAM;
5510 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5511 return ICE_ERR_PARAM;
5513 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5514 return ICE_ERR_PARAM;
5515 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5516 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5518 entry_exist = false;
5520 rule_lock = &recp_list->filt_rule_lock;
5522 /* Add filter if it doesn't exist so then the adding of large
5523 * action always results in update
5525 INIT_LIST_HEAD(&l_head);
5527 fl_info.fltr_info = *f_info;
5528 LIST_ADD(&fl_info.list_entry, &l_head);
5530 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5531 hw->port_info->lport);
5532 if (ret == ICE_ERR_ALREADY_EXISTS)
/* (entry_exist = true assignment elided) */
5537 ice_acquire_lock(rule_lock);
5538 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5540 ret = ICE_ERR_BAD_PTR;
5544 /* Don't enable counter for a filter for which sw marker was enabled */
5545 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
5546 ret = ICE_ERR_PARAM;
5550 /* If a counter was already enabled then don't need to add again */
5551 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5552 ret = ICE_ERR_ALREADY_EXISTS;
5556 /* Allocate a hardware table entry to VLAN counter */
5557 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
5561 /* Allocate a hardware table entry to hold large act. Two actions for
5562 * counter based large action
5564 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
5568 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5571 /* Update the switch rule to add the counter action */
5572 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
5574 ice_release_lock(rule_lock);
/* Error-exit path: undo the add only if we created the rule above */
5579 ice_release_lock(rule_lock);
5580 /* only remove entry if it did not exist previously */
5582 ret = ice_remove_mac(hw, &l_head);
5587 /* This is mapping table entry that maps every word within a given protocol
5588 * structure to the real byte offset as per the specification of that
5590 * for example dst address is 3 words in ethertype header and corresponding
5591 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
5592 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
5593 * matching entry describing its field. This needs to be updated if new
5594 * structure is added to that union.
/* Per-protocol table of 16-bit-word byte offsets used when extracting
 * field-vector words for recipe programming.
 */
5596 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
5597 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
5598 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
5599 { ICE_ETYPE_OL, { 0 } },
5600 { ICE_VLAN_OFOS, { 0, 2 } },
5601 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
5602 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
5603 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
5604 26, 28, 30, 32, 34, 36, 38 } },
5605 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
5606 26, 28, 30, 32, 34, 36, 38 } },
5607 { ICE_TCP_IL, { 0, 2 } },
5608 { ICE_UDP_OF, { 0, 2 } },
5609 { ICE_UDP_ILOS, { 0, 2 } },
5610 { ICE_SCTP_IL, { 0, 2 } },
5611 { ICE_VXLAN, { 8, 10, 12, 14 } },
5612 { ICE_GENEVE, { 8, 10, 12, 14 } },
5613 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
5614 { ICE_NVGRE, { 0, 2, 4, 6 } },
5615 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
5616 { ICE_PPPOE, { 0, 2, 4, 6 } },
5617 { ICE_PFCP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
5618 { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
5619 { ICE_ESP, { 0, 2, 4, 6 } },
5620 { ICE_AH, { 0, 2, 4, 6, 8, 10 } },
5621 { ICE_NAT_T, { 8, 10, 12, 14 } },
5624 /* The following table describes preferred grouping of recipes.
5625 * If a recipe that needs to be programmed is a superset or matches one of the
5626 * following combinations, then the recipe needs to be chained as per the
/* Maps each software protocol type to the hardware protocol ID used in
 * recipe programming.  Note that tunnel headers carried over UDP
 * (VXLAN/GENEVE/GTP/...) deliberately share ICE_UDP_OF_HW / ICE_UDP_ILOS_HW.
 */
5630 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
5631 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
5632 { ICE_MAC_IL, ICE_MAC_IL_HW },
5633 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
5634 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
5635 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
5636 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
5637 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
5638 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
5639 { ICE_TCP_IL, ICE_TCP_IL_HW },
5640 { ICE_UDP_OF, ICE_UDP_OF_HW },
5641 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
5642 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
5643 { ICE_VXLAN, ICE_UDP_OF_HW },
5644 { ICE_GENEVE, ICE_UDP_OF_HW },
5645 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
5646 { ICE_NVGRE, ICE_GRE_OF_HW },
5647 { ICE_GTP, ICE_UDP_OF_HW },
5648 { ICE_PPPOE, ICE_PPPOE_HW },
5649 { ICE_PFCP, ICE_UDP_ILOS_HW },
5650 { ICE_L2TPV3, ICE_L2TPV3_HW },
5651 { ICE_ESP, ICE_ESP_HW },
5652 { ICE_AH, ICE_AH_HW },
5653 { ICE_NAT_T, ICE_UDP_ILOS_HW },
5657 * ice_find_recp - find a recipe
5658 * @hw: pointer to the hardware structure
5659 * @lkup_exts: extension sequence to match
5661 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
/* Matches lkup_exts against each known recipe: same word count, every
 * lookup word present among the recipe's fv_words (by offset/prot_id),
 * and the same tunnel type.
 */
5663 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
5664 enum ice_sw_tunnel_type tun_type)
5666 bool refresh_required = true;
5667 struct ice_sw_recipe *recp;
5670 /* Walk through existing recipes to find a match */
5671 recp = hw->switch_info->recp_list;
5672 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5673 /* If recipe was not created for this ID, in SW bookkeeping,
5674 * check if FW has an entry for this recipe. If the FW has an
5675 * entry update it in our SW bookkeeping and continue with the
5678 if (!recp[i].recp_created)
5679 if (ice_get_recp_frm_fw(hw,
5680 hw->switch_info->recp_list, i,
5684 /* Skip inverse action recipes */
5685 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
5686 ICE_AQ_RECIPE_ACT_INV_ACT)
5689 /* if number of words we are looking for match */
5690 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
5691 struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
5692 struct ice_fv_word *be = lkup_exts->fv_words;
5693 u16 *cr = recp[i].lkup_exts.field_mask;
5694 u16 *de = lkup_exts->field_mask;
/* NOTE(review): the `found` flag declaration/initialization is
 * elided in this excerpt.
 */
5698 /* ar, cr, and qr are related to the recipe words, while
5699 * be, de, and pe are related to the lookup words
5701 for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
5702 for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
5704 if (ar[qr].off == be[pe].off &&
5705 ar[qr].prot_id == be[pe].prot_id &&
5707 /* Found the "pe"th word in the
5712 /* After walking through all the words in the
5713 * "i"th recipe if "p"th word was not found then
5714 * this recipe is not what we are looking for.
5715 * So break out from this loop and try the next
5718 if (qr >= recp[i].lkup_exts.n_val_words) {
/* (found = false and break are elided here) */
5723 /* If for "i"th recipe the found was never set to false
5724 * then it means we found our match
5726 if (tun_type == recp[i].tun_type && found)
5727 return i; /* Return the recipe ID */
5730 return ICE_MAX_NUM_RECIPES;
/**
 * ice_prot_type_to_id - get protocol ID from protocol type
 * @type: protocol type
 * @id: pointer to variable that will receive the ID
 *
 * Linear scan of ice_prot_id_tbl for the HW protocol ID matching @type.
 *
 * Returns true if found, false otherwise
 */
static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
		if (ice_prot_id_tbl[i].type == type) {
			*id = ice_prot_id_tbl[i].protocol_id;
/**
 * ice_fill_valid_words - count valid words
 * @rule: advanced rule with lookup information
 * @lkup_exts: byte offset extractions of the words that are valid
 *
 * Calculate valid words in a lookup rule using mask value: every 16-bit
 * mask word that is non-zero marks a field the rule wants matched, and
 * its protocol ID, byte offset and mask are appended to @lkup_exts.
 *
 * (NOTE(review): header previously said "ice_find_valid_words" —
 * corrected to the actual function name.)
 */
ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
		     struct ice_prot_lkup_ext *lkup_exts)
	u8 j, word, prot_id, ret_val;

	if (!ice_prot_type_to_id(rule->type, &prot_id))

	/* Continue filling from where previous rules left off */
	word = lkup_exts->n_val_words;

	for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
		if (((u16 *)&rule->m_u)[j] &&
		    rule->type < ARRAY_SIZE(ice_prot_ext)) {
			/* No more space to accommodate */
			if (word >= ICE_MAX_CHAIN_WORDS)
			lkup_exts->fv_words[word].off =
				ice_prot_ext[rule->type].offs[j];
			lkup_exts->fv_words[word].prot_id =
				ice_prot_id_tbl[rule->type].protocol_id;
			lkup_exts->field_mask[word] =
				BE16_TO_CPU(((__be16 *)&rule->m_u)[j]);

	/* Number of words this particular rule contributed */
	ret_val = word - lkup_exts->n_val_words;
	lkup_exts->n_val_words = word;
/**
 * ice_create_first_fit_recp_def - Create a recipe grouping
 * @hw: pointer to the hardware structure
 * @lkup_exts: an array of protocol header extractions
 * @rg_list: pointer to a list that stores new recipe groups
 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
 *
 * Using first fit algorithm, take all the words that are still not done
 * and start grouping them in 4-word groups. Each group makes up one
 * recipe.
 *
 * Returns ICE_ERR_NO_MEMORY if a group entry cannot be allocated.
 */
static enum ice_status
ice_create_first_fit_recp_def(struct ice_hw *hw,
			      struct ice_prot_lkup_ext *lkup_exts,
			      struct LIST_HEAD_TYPE *rg_list,
	struct ice_pref_recipe_group *grp = NULL;

	/* Profile-only rules have no match words; still emit one (empty)
	 * group so a recipe gets created for the profile hit.
	 */
	if (!lkup_exts->n_val_words) {
		struct ice_recp_grp_entry *entry;

		entry = (struct ice_recp_grp_entry *)
			ice_malloc(hw, sizeof(*entry));
			return ICE_ERR_NO_MEMORY;
		LIST_ADD(&entry->l_entry, rg_list);
		grp = &entry->r_group;
		grp->n_val_pairs = 0;

	/* Walk through every word in the rule to check if it is not done. If so
	 * then this word needs to be part of a new recipe.
	 */
	for (j = 0; j < lkup_exts->n_val_words; j++)
		if (!ice_is_bit_set(lkup_exts->done, j)) {
			    grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
				struct ice_recp_grp_entry *entry;

				/* Current group is full - open a new one */
				entry = (struct ice_recp_grp_entry *)
					ice_malloc(hw, sizeof(*entry));
					return ICE_ERR_NO_MEMORY;
				LIST_ADD(&entry->l_entry, rg_list);
				grp = &entry->r_group;

			grp->pairs[grp->n_val_pairs].prot_id =
				lkup_exts->fv_words[j].prot_id;
			grp->pairs[grp->n_val_pairs].off =
				lkup_exts->fv_words[j].off;
			grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
/**
 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
 * @hw: pointer to the hardware structure
 * @fv_list: field vector with the extraction sequence information
 * @rg_list: recipe groupings with protocol-offset pairs
 *
 * Helper function to fill in the field vector indices for protocol-offset
 * pairs. These indexes are then ultimately programmed into a recipe.
 * Only the first field vector in @fv_list is consulted.
 *
 * Returns ICE_ERR_PARAM when a requested protocol/offset pair is not
 * present in the field vector's extraction words.
 */
static enum ice_status
ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
		       struct LIST_HEAD_TYPE *rg_list)
	struct ice_sw_fv_list_entry *fv;
	struct ice_recp_grp_entry *rg;
	struct ice_fv_word *fv_ext;

	if (LIST_EMPTY(fv_list))

	/* Use extraction words of the first (representative) FV only */
	fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
	fv_ext = fv->fv_ptr->ew;

	LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
		for (i = 0; i < rg->r_group.n_val_pairs; i++) {
			struct ice_fv_word *pr;

			pr = &rg->r_group.pairs[i];
			mask = rg->r_group.mask[i];

			/* Locate the FV word extracting this prot/offset */
			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
				if (fv_ext[j].prot_id == pr->prot_id &&
				    fv_ext[j].off == pr->off) {
					/* Store index of field vector */
					rg->fv_mask[i] = mask;

			/* Protocol/offset could not be found, caller gave an
			 */
				return ICE_ERR_PARAM;
/**
 * ice_find_free_recp_res_idx - find free result indexes for recipe
 * @hw: pointer to hardware structure
 * @profiles: bitmap of profiles that will be associated with the new recipe
 * @free_idx: pointer to variable to receive the free index bitmap
 *
 * The algorithm used here is:
 * 1. When creating a new recipe, create a set P which contains all
 *    Profiles that will be associated with our new recipe
 *
 * 2. For each Profile p in set P:
 *    a. Add all recipes associated with Profile p into set R
 *    b. Optional : PossibleIndexes &= profile[p].possibleIndexes
 *	[initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
 *	i. Or just assume they all have the same possible indexes:
 *		i.e., PossibleIndexes = 0x0000F00000000000
 *
 * 3. For each Recipe r in set R:
 *    a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
 *    b. FreeIndexes = UsedIndexes ^ PossibleIndexes
 *
 * FreeIndexes will contain the bits indicating the indexes free for use,
 * then the code needs to update the recipe[r].used_result_idx_bits to
 * indicate which indexes were selected for use by this recipe.
 */
ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
			   ice_bitmap_t *free_idx)
	ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
	ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
	ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);

	ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
	ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
	ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
	ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);

	/* Start from "every index possible", then constrain per profile */
	for (count = 0; count < ICE_MAX_FV_WORDS; count++)
		ice_set_bit(count, possible_idx);

	/* For each profile we are going to associate the recipe with, add the
	 * recipes that are associated with that profile. This will give us
	 * the set of recipes that our recipe may collide with. Also, determine
	 * what possible result indexes are usable given this set of profiles.
	 */
	while (ICE_MAX_NUM_PROFILES >
	       (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
		ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
			      ICE_MAX_NUM_RECIPES);
		ice_and_bitmap(possible_idx, possible_idx,
			       hw->switch_info->prof_res_bm[bit],

	/* For each recipe that our new recipe may collide with, determine
	 * which indexes have been used.
	 */
	for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
		if (ice_is_bit_set(recipes, bit)) {
			ice_or_bitmap(used_idx, used_idx,
				      hw->switch_info->recp_list[bit].res_idxs,

	/* free = possible minus used (used is a subset of possible) */
	ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);

	/* return number of free indexes */
	while (ICE_MAX_FV_WORDS >
	       (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
/**
 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
 * @hw: pointer to hardware structure
 * @rm: recipe management list entry
 * @match_tun_mask: tunnel mask that needs to be programmed
 * @profiles: bitmap of profiles that will be associated.
 *
 * Allocates recipe IDs, builds the AQ recipe buffers for every group in
 * rm->rg_list (plus, for multi-group recipes, one extra root recipe that
 * chains the others via their result indexes), programs them with
 * ice_aq_add_recipe() under the change lock, and finally mirrors the
 * programmed state into the SW recipe bookkeeping (hw->switch_info).
 */
static enum ice_status
ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
		  u16 match_tun_mask, ice_bitmap_t *profiles)
	ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	struct ice_aqc_recipe_data_elem *buf;
	struct ice_recp_grp_entry *entry;
	enum ice_status status;

	/* When more than one recipe are required, another recipe is needed to
	 * chain them together. Matching a tunnel metadata ID takes up one of
	 * the match fields in the chaining recipe reducing the number of
	 * chained recipes by one.
	 */
	/* check number of free result indices */
	ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
	free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);

	ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
		  free_res_idx, rm->n_grp_count);

	if (rm->n_grp_count > 1) {
		/* Chained recipes need one result index per sub-recipe */
		if (rm->n_grp_count > free_res_idx)
			return ICE_ERR_MAX_LIMIT;

	if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
		return ICE_ERR_MAX_LIMIT;

	/* tmp holds the template recipe read back from FW */
	tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
							    ICE_MAX_NUM_RECIPES,
		return ICE_ERR_NO_MEMORY;

	buf = (struct ice_aqc_recipe_data_elem *)
		ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
		status = ICE_ERR_NO_MEMORY;

	ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
	recipe_count = ICE_MAX_NUM_RECIPES;
	status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
	if (status || recipe_count == 0)

	/* Allocate the recipe resources, and configure them according to the
	 * match fields from protocol headers and extracted field vectors.
	 */
	chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
	LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
		status = ice_alloc_recipe(hw, &entry->rid);

		/* Clear the result index of the located recipe, as this will be
		 * updated, if needed, later in the recipe creation process.
		 */
		tmp[0].content.result_indx = 0;

		buf[recps] = tmp[0];
		buf[recps].recipe_indx = (u8)entry->rid;
		/* if the recipe is a non-root recipe RID should be programmed
		 * as 0 for the rules to be applied correctly.
		 */
		buf[recps].content.rid = 0;
		ice_memset(&buf[recps].content.lkup_indx, 0,
			   sizeof(buf[recps].content.lkup_indx),

		/* All recipes use look-up index 0 to match switch ID. */
		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		buf[recps].content.mask[0] =
			CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
		/* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
		 */
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			buf[recps].content.lkup_indx[i] = 0x80;
			buf[recps].content.mask[i] = 0;

		/* Program this group's FV indexes/masks into slots 1..n */
		for (i = 0; i < entry->r_group.n_val_pairs; i++) {
			buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
			buf[recps].content.mask[i + 1] =
				CPU_TO_LE16(entry->fv_mask[i]);

		if (rm->n_grp_count > 1) {
			/* Checks to see if there really is a valid result index
			 */
			if (chain_idx >= ICE_MAX_FV_WORDS) {
				ice_debug(hw, ICE_DBG_SW,
					  "No chain index available\n");
				status = ICE_ERR_MAX_LIMIT;

			/* Publish this sub-recipe's hit via a result word so
			 * the chaining root recipe can match on it.
			 */
			entry->chain_idx = chain_idx;
			buf[recps].content.result_indx =
				ICE_AQ_RECIPE_RESULT_EN |
				((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
				 ICE_AQ_RECIPE_RESULT_DATA_M);
			ice_clear_bit(chain_idx, result_idx_bm);
			chain_idx = ice_find_first_bit(result_idx_bm,

		/* fill recipe dependencies */
		ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
				ICE_MAX_NUM_RECIPES);
		ice_set_bit(buf[recps].recipe_indx,
			    (ice_bitmap_t *)buf[recps].recipe_bitmap);
		buf[recps].content.act_ctrl_fwd_priority = rm->priority;

	if (rm->n_grp_count == 1) {
		/* Single-group case: the one recipe is itself the root */
		rm->root_rid = buf[0].recipe_indx;
		ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
		buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
		if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
			ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
				   sizeof(buf[0].recipe_bitmap),
				   ICE_NONDMA_TO_NONDMA);
			status = ICE_ERR_BAD_PTR;

		/* Applicable only for ROOT_RECIPE, set the fwd_priority for
		 * the recipe which is getting created if specified
		 * by user. Usually any advanced switch filter, which results
		 * into new extraction sequence, ended up creating a new recipe
		 * of type ROOT and usually recipes are associated with profiles
		 * Switch rule referreing newly created recipe, needs to have
		 * either/or 'fwd' or 'join' priority, otherwise switch rule
		 * evaluation will not happen correctly. In other words, if
		 * switch rule to be evaluated on priority basis, then recipe
		 * needs to have priority, otherwise it will be evaluated last.
		 */
		buf[0].content.act_ctrl_fwd_priority = rm->priority;
		struct ice_recp_grp_entry *last_chain_entry;

		/* Allocate the last recipe that will chain the outcomes of the
		 * other recipes together
		 */
		status = ice_alloc_recipe(hw, &rid);

		buf[recps].recipe_indx = (u8)rid;
		buf[recps].content.rid = (u8)rid;
		buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
		/* the new entry created should also be part of rg_list to
		 * make sure we have complete recipe
		 */
		last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
			sizeof(*last_chain_entry));
		if (!last_chain_entry) {
			status = ICE_ERR_NO_MEMORY;
		last_chain_entry->rid = rid;
		ice_memset(&buf[recps].content.lkup_indx, 0,
			   sizeof(buf[recps].content.lkup_indx),
		/* All recipes use look-up index 0 to match switch ID. */
		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		buf[recps].content.mask[0] =
			CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			buf[recps].content.lkup_indx[i] =
				ICE_AQ_RECIPE_LKUP_IGNORE;
			buf[recps].content.mask[i] = 0;

		/* update r_bitmap with the recp that is used for chaining */
		ice_set_bit(rid, rm->r_bitmap);
		/* this is the recipe that chains all the other recipes so it
		 * should not have a chaining ID to indicate the same
		 */
		last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
		LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
			/* Root matches each sub-recipe's result word */
			last_chain_entry->fv_idx[i] = entry->chain_idx;
			buf[recps].content.lkup_indx[i] = entry->chain_idx;
			buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
			ice_set_bit(entry->rid, rm->r_bitmap);
		LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
		if (sizeof(buf[recps].recipe_bitmap) >=
		    sizeof(rm->r_bitmap)) {
			ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
				   sizeof(buf[recps].recipe_bitmap),
				   ICE_NONDMA_TO_NONDMA);
			status = ICE_ERR_BAD_PTR;
		buf[recps].content.act_ctrl_fwd_priority = rm->priority;

		/* To differentiate among different UDP tunnels, a meta data ID
		 */
		if (match_tun_mask) {
			buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
			buf[recps].content.mask[i] =
				CPU_TO_LE16(match_tun_mask);

		rm->root_rid = (u8)rid;

	/* Program all built recipes to FW under the change lock */
	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);

	status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
	ice_release_change_lock(hw);

	/* Every recipe that just got created add it to the recipe
	 */
	LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
		struct ice_switch_info *sw = hw->switch_info;
		bool is_root, idx_found = false;
		struct ice_sw_recipe *recp;
		u16 idx, buf_idx = 0;

		/* find buffer index for copying some data */
		for (idx = 0; idx < rm->n_grp_count; idx++)
			if (buf[idx].recipe_indx == entry->rid) {

			status = ICE_ERR_OUT_OF_RANGE;

		/* Mirror the programmed recipe into SW bookkeeping */
		recp = &sw->recp_list[entry->rid];
		is_root = (rm->root_rid == entry->rid);
		recp->is_root = is_root;

		recp->root_rid = entry->rid;
		recp->big_recp = (is_root && rm->n_grp_count > 1);

		ice_memcpy(&recp->ext_words, entry->r_group.pairs,
			   entry->r_group.n_val_pairs *
			   sizeof(struct ice_fv_word),
			   ICE_NONDMA_TO_NONDMA);

		ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
			   sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);

		/* Copy non-result fv index values and masks to recipe. This
		 * call will also update the result recipe bitmask.
		 */
		ice_collect_result_idx(&buf[buf_idx], recp);

		/* for non-root recipes, also copy to the root, this allows
		 * easier matching of a complete chained recipe
		 */
			ice_collect_result_idx(&buf[buf_idx],
					       &sw->recp_list[rm->root_rid]);

		recp->n_ext_words = entry->r_group.n_val_pairs;
		recp->chain_idx = entry->chain_idx;
		recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
		recp->n_grp_count = rm->n_grp_count;
		recp->tun_type = rm->tun_type;
		recp->recp_created = true;
/**
 * ice_create_recipe_group - creates recipe group
 * @hw: pointer to hardware structure
 * @rm: recipe management list entry
 * @lkup_exts: lookup elements
 *
 * Packs the not-yet-done lookup words of @lkup_exts into 4-word recipe
 * groups on rm->rg_list (via ice_create_first_fit_recp_def) and copies
 * the extraction words and masks into @rm.
 */
static enum ice_status
ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
			struct ice_prot_lkup_ext *lkup_exts)
	enum ice_status status;

	rm->n_grp_count = 0;

	/* Create recipes for words that are marked not done by packing them
	 */
	status = ice_create_first_fit_recp_def(hw, lkup_exts,
					       &rm->rg_list, &recp_count);
		rm->n_grp_count += recp_count;
		rm->n_ext_words = lkup_exts->n_val_words;
		ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
			   sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
		ice_memcpy(rm->word_masks, lkup_exts->field_mask,
			   sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
/**
 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
 * @hw: pointer to hardware structure
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *	   structure per protocol header
 * @lkups_cnt: number of protocols
 * @bm: bitmap of field vectors to consider
 * @fv_list: pointer to a list that holds the returned field vectors
 *
 * Translates each lookup's protocol type to its HW protocol ID, then
 * collects the field vectors (restricted to @bm) containing all of them.
 * Returns ICE_ERR_CFG if a lookup type has no HW protocol ID mapping.
 */
static enum ice_status
ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
	   ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
	enum ice_status status;

	prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
		return ICE_ERR_NO_MEMORY;

	for (i = 0; i < lkups_cnt; i++)
		if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
			status = ICE_ERR_CFG;

	/* Find field vectors that include all specified protocol types */
	status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);

	ice_free(hw, prot_ids);
/**
 * ice_tun_type_match_word - determine if tun type needs a match mask
 * @tun_type: tunnel type
 * @mask: mask to be used for the tunnel
 *
 * For tunnel types distinguished by the packet-metadata tunnel flag,
 * writes the flag mask to @mask and returns true.
 * (NOTE(review): header previously said "ice_tun_type_match_mask" —
 * corrected to the actual function name.)
 */
static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
	case ICE_SW_TUN_VXLAN_GPE:
	case ICE_SW_TUN_GENEVE:
	case ICE_SW_TUN_VXLAN:
	case ICE_SW_TUN_NVGRE:
	case ICE_SW_TUN_UDP:
	case ICE_ALL_TUNNELS:
		*mask = ICE_TUN_FLAG_MASK;

	/* VLAN-tagged tunnels must not match on the VLAN flag bit */
	case ICE_SW_TUN_GENEVE_VLAN:
	case ICE_SW_TUN_VXLAN_VLAN:
		*mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
/**
 * ice_add_special_words - Add words that are not protocols, such as metadata
 * @rinfo: other information regarding the rule e.g. priority and action info
 * @lkup_exts: lookup word structure
 *
 * Returns ICE_ERR_MAX_LIMIT if the metadata word does not fit in
 * @lkup_exts.
 */
static enum ice_status
ice_add_special_words(struct ice_adv_rule_info *rinfo,
		      struct ice_prot_lkup_ext *lkup_exts)
	/* If this is a tunneled packet, then add recipe index to match the
	 * tunnel bit in the packet metadata flags.
	 */
	if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
		if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
			u8 word = lkup_exts->n_val_words++;

			/* Metadata is matched like any other lookup word,
			 * using the metadata HW protocol ID.
			 */
			lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
			lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
			lkup_exts->field_mask[word] = mask;
			return ICE_ERR_MAX_LIMIT;
/* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
 * @hw: pointer to hardware structure
 * @rinfo: other information regarding the rule e.g. priority and action info
 * @bm: pointer to memory for returning the bitmap of field vectors
 *
 * Either selects a profile-type class (and asks ice_get_sw_fv_bitmap to
 * fill @bm) or, for specific tunnel/protocol rule types, sets the known
 * profile ID bits in @bm directly.
 */
ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
	enum ice_prof_type prof_type;

	ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);

	switch (rinfo->tun_type) {
		prof_type = ICE_PROF_NON_TUN;
	case ICE_ALL_TUNNELS:
		prof_type = ICE_PROF_TUN_ALL;
	case ICE_SW_TUN_VXLAN_GPE:
	case ICE_SW_TUN_GENEVE:
	case ICE_SW_TUN_GENEVE_VLAN:
	case ICE_SW_TUN_VXLAN:
	case ICE_SW_TUN_VXLAN_VLAN:
	case ICE_SW_TUN_UDP:
	case ICE_SW_TUN_GTP:
		/* All of these tunnels ride over UDP */
		prof_type = ICE_PROF_TUN_UDP;
	case ICE_SW_TUN_NVGRE:
		prof_type = ICE_PROF_TUN_GRE;
	case ICE_SW_TUN_PPPOE:
		prof_type = ICE_PROF_TUN_PPPOE;
	case ICE_SW_TUN_PPPOE_PAY:
		ice_set_bit(ICE_PROFID_PPPOE_PAY, bm);
	case ICE_SW_TUN_PPPOE_IPV4:
		ice_set_bit(ICE_PROFID_PPPOE_IPV4_OTHER, bm);
		ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
		ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
	case ICE_SW_TUN_PPPOE_IPV4_TCP:
		ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
	case ICE_SW_TUN_PPPOE_IPV4_UDP:
		ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
	case ICE_SW_TUN_PPPOE_IPV6:
		ice_set_bit(ICE_PROFID_PPPOE_IPV6_OTHER, bm);
		ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
		ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
	case ICE_SW_TUN_PPPOE_IPV6_TCP:
		ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
	case ICE_SW_TUN_PPPOE_IPV6_UDP:
		ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
	case ICE_SW_TUN_PROFID_IPV6_ESP:
	case ICE_SW_TUN_IPV6_ESP:
		ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
	case ICE_SW_TUN_PROFID_IPV6_AH:
	case ICE_SW_TUN_IPV6_AH:
		ice_set_bit(ICE_PROFID_IPV6_AH, bm);
	case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
	case ICE_SW_TUN_IPV6_L2TPV3:
		ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
	case ICE_SW_TUN_PROFID_IPV6_NAT_T:
	case ICE_SW_TUN_IPV6_NAT_T:
		ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
	case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
		ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
	case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
		ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
	case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
		ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
	case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
		ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
	case ICE_SW_TUN_IPV4_NAT_T:
		ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
	case ICE_SW_TUN_IPV4_L2TPV3:
		ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
	case ICE_SW_TUN_IPV4_ESP:
		ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
	case ICE_SW_TUN_IPV4_AH:
		ice_set_bit(ICE_PROFID_IPV4_AH, bm);
	case ICE_SW_IPV4_TCP:
		ice_set_bit(ICE_PROFID_IPV4_TCP, bm);
	case ICE_SW_IPV4_UDP:
		ice_set_bit(ICE_PROFID_IPV4_UDP, bm);
	case ICE_SW_IPV6_TCP:
		ice_set_bit(ICE_PROFID_IPV6_TCP, bm);
	case ICE_SW_IPV6_UDP:
		ice_set_bit(ICE_PROFID_IPV6_UDP, bm);
	case ICE_SW_TUN_AND_NON_TUN:
		prof_type = ICE_PROF_ALL;

	/* Resolve the selected profile-type class into profile bits */
	ice_get_sw_fv_bitmap(hw, prof_type, bm);
/**
 * ice_is_prof_rule - determine if rule type is a profile rule
 * @type: the rule type
 *
 * if the rule type is a profile rule, that means that there no field value
 * match required, in this case just a profile hit is required.
 */
bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
	case ICE_SW_TUN_PROFID_IPV6_ESP:
	case ICE_SW_TUN_PROFID_IPV6_AH:
	case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
	case ICE_SW_TUN_PROFID_IPV6_NAT_T:
	case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
	case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
	case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
	case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
/**
 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
 * @hw: pointer to hardware structure
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *	   structure per protocol header
 * @lkups_cnt: number of protocols
 * @rinfo: other information regarding the rule e.g. priority and action info
 * @rid: return the recipe ID of the recipe created
 *
 * Orchestrates recipe creation for an advanced rule: collects valid
 * lookup words, finds compatible field vectors, groups the words into
 * recipes, reuses an existing recipe if one matches, otherwise programs
 * a new one and updates the profile<->recipe association bitmaps.
 */
static enum ice_status
ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		   u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
	ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
	ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
	struct ice_prot_lkup_ext *lkup_exts;
	struct ice_recp_grp_entry *r_entry;
	struct ice_sw_fv_list_entry *fvit;
	struct ice_recp_grp_entry *r_tmp;
	struct ice_sw_fv_list_entry *tmp;
	enum ice_status status = ICE_SUCCESS;
	struct ice_sw_recipe *rm;
	u16 match_tun_mask = 0;

	/* Profile rules need no lookups; all other rules need at least one */
	if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
		return ICE_ERR_PARAM;

	lkup_exts = (struct ice_prot_lkup_ext *)
		ice_malloc(hw, sizeof(*lkup_exts));
		return ICE_ERR_NO_MEMORY;

	/* Determine the number of words to be matched and if it exceeds a
	 * recipe's restrictions
	 */
	for (i = 0; i < lkups_cnt; i++) {
		if (lkups[i].type >= ICE_PROTOCOL_LAST) {
			status = ICE_ERR_CFG;
			goto err_free_lkup_exts;

		count = ice_fill_valid_words(&lkups[i], lkup_exts);
			status = ICE_ERR_CFG;
			goto err_free_lkup_exts;

	rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
		status = ICE_ERR_NO_MEMORY;
		goto err_free_lkup_exts;

	/* Get field vectors that contain fields extracted from all the protocol
	 * headers being programmed.
	 */
	INIT_LIST_HEAD(&rm->fv_list);
	INIT_LIST_HEAD(&rm->rg_list);

	/* Get bitmap of field vectors (profiles) that are compatible with the
	 * rule request; only these will be searched in the subsequent call to
	 */
	ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);

	status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);

	/* Group match words into recipes using preferred recipe grouping
	 */
	status = ice_create_recipe_group(hw, rm, lkup_exts);

	/* For certain tunnel types it is necessary to use a metadata ID flag to
	 * differentiate different tunnel types. A separate recipe needs to be
	 * used for the metadata.
	 */
	if (ice_tun_type_match_word(rinfo->tun_type, &mask) &&
	    rm->n_grp_count > 1)
		match_tun_mask = mask;

	/* set the recipe priority if specified */
	rm->priority = (u8)rinfo->priority;

	/* Find offsets from the field vector. Pick the first one for all the
	 */
	status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);

	/* An empty FV list means to use all the profiles returned in the
	 */
	if (LIST_EMPTY(&rm->fv_list)) {
		for (j = 0; j < ICE_MAX_NUM_PROFILES; j++)
			if (ice_is_bit_set(fv_bitmap, j)) {
				struct ice_sw_fv_list_entry *fvl;

				fvl = (struct ice_sw_fv_list_entry *)
					ice_malloc(hw, sizeof(*fvl));
				fvl->profile_id = j;
				LIST_ADD(&fvl->list_entry, &rm->fv_list);

	/* get bitmap of all profiles the recipe will be associated with */
	ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
	LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
		ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
		ice_set_bit((u16)fvit->profile_id, profiles);

	/* Create any special protocol/offset pairs, such as looking at tunnel
	 * bits by extracting metadata
	 */
	status = ice_add_special_words(rinfo, lkup_exts);
		goto err_free_lkup_exts;

	/* Look for a recipe which matches our requested fv / mask list */
	*rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
	if (*rid < ICE_MAX_NUM_RECIPES)
		/* Success if found a recipe that match the existing criteria */

	rm->tun_type = rinfo->tun_type;
	/* Recipe we need does not exist, add a recipe */
	status = ice_add_sw_recipe(hw, rm, match_tun_mask, profiles);

	/* Associate all the recipes created with all the profiles in the
	 * common field vector.
	 */
	LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
		ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);

		status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
						      (u8 *)r_bitmap, NULL);

		ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
			      ICE_MAX_NUM_RECIPES);
		status = ice_acquire_change_lock(hw, ICE_RES_WRITE);

		status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
		ice_release_change_lock(hw);

		/* Update profile to recipe bitmap array */
		ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
			      ICE_MAX_NUM_RECIPES);

		/* Update recipe to profile bitmap array */
		for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
			if (ice_is_bit_set(r_bitmap, j))
				ice_set_bit((u16)fvit->profile_id,
					    recipe_to_profile[j]);

	*rid = rm->root_rid;
	/* Cache the lookup words on the root recipe for future matching */
	ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
		   lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);

	/* Cleanup: release temporary group and FV list entries */
	LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
				 ice_recp_grp_entry, l_entry) {
		LIST_DEL(&r_entry->l_entry);
		ice_free(hw, r_entry);

	LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
		LIST_DEL(&fvit->list_entry);

	ice_free(hw, rm->root_buf);

	ice_free(hw, lkup_exts);
/* NOTE(review): this chunk is a lossy extraction — original file line numbers
 * are embedded as prefixes and some source lines (braces, declarations,
 * `return` statements after each tun_type match) are missing. Code kept
 * byte-identical; only review comments added.
 *
 * Selects a pre-built dummy packet template (and its protocol-offset table)
 * matching the tunnel type and the protocols present in the lookup array.
 * Results are returned through *pkt, *pkt_len and *offsets.
 */
6793  * ice_find_dummy_packet - find dummy packet by tunnel type
6795  * @lkups: lookup elements or match criteria for the advanced recipe, one
6796  * structure per protocol header
6797  * @lkups_cnt: number of protocols
6798  * @tun_type: tunnel type from the match criteria
6799  * @pkt: dummy packet to fill according to filter match criteria
6800  * @pkt_len: packet length of dummy packet
6801  * @offsets: pointer to receive the pointer to the offsets for the packet
6804 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6805 		      enum ice_sw_tunnel_type tun_type, const u8 **pkt,
6807 		      const struct ice_dummy_pkt_offsets **offsets)
6809 	bool tcp = false, udp = false, ipv6 = false, vlan = false;
/* Scan lookups to classify the flow (TCP/UDP/IPv6/VLAN, and — on lines not
 * visible here — presumably NVGRE and PPPoE-IPv6 flags; confirm in full file). */
6813 	for (i = 0; i < lkups_cnt; i++) {
6814 		if (lkups[i].type == ICE_UDP_ILOS)
6816 		else if (lkups[i].type == ICE_TCP_IL)
6818 		else if (lkups[i].type == ICE_IPV6_OFOS)
6820 		else if (lkups[i].type == ICE_VLAN_OFOS)
6822 		else if (lkups[i].type == ICE_IPV4_OFOS &&
6823 			 lkups[i].h_u.ipv4_hdr.protocol ==
6824 				ICE_IPV4_NVGRE_PROTO_ID &&
6825 			 lkups[i].m_u.ipv4_hdr.protocol ==
6828 		else if (lkups[i].type == ICE_PPPOE &&
6829 			 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
6830 				CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
6831 			 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
6834 		else if (lkups[i].type == ICE_ETYPE_OL &&
6835 			 lkups[i].h_u.ethertype.ethtype_id ==
6836 				CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
6837 			 lkups[i].m_u.ethertype.ethtype_id ==
6840 		else if (lkups[i].type == ICE_IPV4_IL &&
6841 			 lkups[i].h_u.ipv4_hdr.protocol ==
6843 			 lkups[i].m_u.ipv4_hdr.protocol ==
/* Each branch below sets the three output pointers for one tunnel type;
 * the `return` that ends each branch is on a line elided by the extraction. */
6848 	if (tun_type == ICE_SW_TUN_IPV4_ESP) {
6849 		*pkt = dummy_ipv4_esp_pkt;
6850 		*pkt_len = sizeof(dummy_ipv4_esp_pkt);
6851 		*offsets = dummy_ipv4_esp_packet_offsets;
6855 	if (tun_type == ICE_SW_TUN_IPV6_ESP) {
6856 		*pkt = dummy_ipv6_esp_pkt;
6857 		*pkt_len = sizeof(dummy_ipv6_esp_pkt);
6858 		*offsets = dummy_ipv6_esp_packet_offsets;
6862 	if (tun_type == ICE_SW_TUN_IPV4_AH) {
6863 		*pkt = dummy_ipv4_ah_pkt;
6864 		*pkt_len = sizeof(dummy_ipv4_ah_pkt);
6865 		*offsets = dummy_ipv4_ah_packet_offsets;
6869 	if (tun_type == ICE_SW_TUN_IPV6_AH) {
6870 		*pkt = dummy_ipv6_ah_pkt;
6871 		*pkt_len = sizeof(dummy_ipv6_ah_pkt);
6872 		*offsets = dummy_ipv6_ah_packet_offsets;
6876 	if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
6877 		*pkt = dummy_ipv4_nat_pkt;
6878 		*pkt_len = sizeof(dummy_ipv4_nat_pkt);
6879 		*offsets = dummy_ipv4_nat_packet_offsets;
6883 	if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
6884 		*pkt = dummy_ipv6_nat_pkt;
6885 		*pkt_len = sizeof(dummy_ipv6_nat_pkt);
6886 		*offsets = dummy_ipv6_nat_packet_offsets;
6890 	if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
6891 		*pkt = dummy_ipv4_l2tpv3_pkt;
6892 		*pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
6893 		*offsets = dummy_ipv4_l2tpv3_packet_offsets;
6897 	if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
6898 		*pkt = dummy_ipv6_l2tpv3_pkt;
6899 		*pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
6900 		*offsets = dummy_ipv6_l2tpv3_packet_offsets;
6904 	if (tun_type == ICE_SW_TUN_GTP) {
6905 		*pkt = dummy_udp_gtp_packet;
6906 		*pkt_len = sizeof(dummy_udp_gtp_packet);
6907 		*offsets = dummy_udp_gtp_packet_offsets;
6911 	if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
6912 		*pkt = dummy_pppoe_ipv6_packet;
6913 		*pkt_len = sizeof(dummy_pppoe_ipv6_packet);
6914 		*offsets = dummy_pppoe_packet_offsets;
6916 	} else if (tun_type == ICE_SW_TUN_PPPOE ||
6917 		   tun_type == ICE_SW_TUN_PPPOE_PAY) {
/* PPPoE without an IPv6 match defaults to the IPv4 template. */
6918 		*pkt = dummy_pppoe_ipv4_packet;
6919 		*pkt_len = sizeof(dummy_pppoe_ipv4_packet);
6920 		*offsets = dummy_pppoe_packet_offsets;
6924 	if (tun_type == ICE_SW_TUN_PPPOE_IPV4) {
6925 		*pkt = dummy_pppoe_ipv4_packet;
6926 		*pkt_len = sizeof(dummy_pppoe_ipv4_packet);
6927 		*offsets = dummy_pppoe_packet_ipv4_offsets;
6931 	if (tun_type == ICE_SW_TUN_PPPOE_IPV4_TCP) {
6932 		*pkt = dummy_pppoe_ipv4_tcp_packet;
6933 		*pkt_len = sizeof(dummy_pppoe_ipv4_tcp_packet);
6934 		*offsets = dummy_pppoe_ipv4_tcp_packet_offsets;
6938 	if (tun_type == ICE_SW_TUN_PPPOE_IPV4_UDP) {
6939 		*pkt = dummy_pppoe_ipv4_udp_packet;
6940 		*pkt_len = sizeof(dummy_pppoe_ipv4_udp_packet);
6941 		*offsets = dummy_pppoe_ipv4_udp_packet_offsets;
6945 	if (tun_type == ICE_SW_TUN_PPPOE_IPV6) {
6946 		*pkt = dummy_pppoe_ipv6_packet;
6947 		*pkt_len = sizeof(dummy_pppoe_ipv6_packet);
6948 		*offsets = dummy_pppoe_packet_ipv6_offsets;
6952 	if (tun_type == ICE_SW_TUN_PPPOE_IPV6_TCP) {
6953 		*pkt = dummy_pppoe_ipv6_tcp_packet;
6954 		*pkt_len = sizeof(dummy_pppoe_ipv6_tcp_packet);
6955 		*offsets = dummy_pppoe_packet_ipv6_tcp_offsets;
6959 	if (tun_type == ICE_SW_TUN_PPPOE_IPV6_UDP) {
6960 		*pkt = dummy_pppoe_ipv6_udp_packet;
6961 		*pkt_len = sizeof(dummy_pppoe_ipv6_udp_packet);
6962 		*offsets = dummy_pppoe_packet_ipv6_udp_offsets;
6966 	if (tun_type == ICE_SW_IPV4_TCP) {
6967 		*pkt = dummy_tcp_packet;
6968 		*pkt_len = sizeof(dummy_tcp_packet);
6969 		*offsets = dummy_tcp_packet_offsets;
6973 	if (tun_type == ICE_SW_IPV4_UDP) {
6974 		*pkt = dummy_udp_packet;
6975 		*pkt_len = sizeof(dummy_udp_packet);
6976 		*offsets = dummy_udp_packet_offsets;
6980 	if (tun_type == ICE_SW_IPV6_TCP) {
6981 		*pkt = dummy_tcp_ipv6_packet;
6982 		*pkt_len = sizeof(dummy_tcp_ipv6_packet);
6983 		*offsets = dummy_tcp_ipv6_packet_offsets;
6987 	if (tun_type == ICE_SW_IPV6_UDP) {
6988 		*pkt = dummy_udp_ipv6_packet;
6989 		*pkt_len = sizeof(dummy_udp_ipv6_packet);
6990 		*offsets = dummy_udp_ipv6_packet_offsets;
6994 	if (tun_type == ICE_ALL_TUNNELS) {
6995 		*pkt = dummy_gre_udp_packet;
6996 		*pkt_len = sizeof(dummy_gre_udp_packet);
6997 		*offsets = dummy_gre_udp_packet_offsets;
/* NVGRE: `gre` flag is set on an elided line above (IPv4 proto 0x2F match);
 * TCP inner flows get the GRE+TCP template, otherwise GRE+UDP. */
7001 	if (tun_type == ICE_SW_TUN_NVGRE || gre) {
7003 			*pkt = dummy_gre_tcp_packet;
7004 			*pkt_len = sizeof(dummy_gre_tcp_packet);
7005 			*offsets = dummy_gre_tcp_packet_offsets;
7009 		*pkt = dummy_gre_udp_packet;
7010 		*pkt_len = sizeof(dummy_gre_udp_packet);
7011 		*offsets = dummy_gre_udp_packet_offsets;
7015 	if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
7016 	    tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
7017 	    tun_type == ICE_SW_TUN_GENEVE_VLAN ||
7018 	    tun_type == ICE_SW_TUN_VXLAN_VLAN) {
7020 			*pkt = dummy_udp_tun_tcp_packet;
7021 			*pkt_len = sizeof(dummy_udp_tun_tcp_packet);
7022 			*offsets = dummy_udp_tun_tcp_packet_offsets;
7026 		*pkt = dummy_udp_tun_udp_packet;
7027 		*pkt_len = sizeof(dummy_udp_tun_udp_packet);
7028 		*offsets = dummy_udp_tun_udp_packet_offsets;
/* Fallback (non-tunneled): pick a template by udp/ipv6/vlan flags;
 * TCP/IPv4 is the final default. */
7034 			*pkt = dummy_vlan_udp_packet;
7035 			*pkt_len = sizeof(dummy_vlan_udp_packet);
7036 			*offsets = dummy_vlan_udp_packet_offsets;
7039 		*pkt = dummy_udp_packet;
7040 		*pkt_len = sizeof(dummy_udp_packet);
7041 		*offsets = dummy_udp_packet_offsets;
7043 	} else if (udp && ipv6) {
7045 			*pkt = dummy_vlan_udp_ipv6_packet;
7046 			*pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
7047 			*offsets = dummy_vlan_udp_ipv6_packet_offsets;
7050 		*pkt = dummy_udp_ipv6_packet;
7051 		*pkt_len = sizeof(dummy_udp_ipv6_packet);
7052 		*offsets = dummy_udp_ipv6_packet_offsets;
7054 	} else if ((tcp && ipv6) || ipv6) {
7056 			*pkt = dummy_vlan_tcp_ipv6_packet;
7057 			*pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
7058 			*offsets = dummy_vlan_tcp_ipv6_packet_offsets;
7061 		*pkt = dummy_tcp_ipv6_packet;
7062 		*pkt_len = sizeof(dummy_tcp_ipv6_packet);
7063 		*offsets = dummy_tcp_ipv6_packet_offsets;
7068 		*pkt = dummy_vlan_tcp_packet;
7069 		*pkt_len = sizeof(dummy_vlan_tcp_packet);
7070 		*offsets = dummy_vlan_tcp_packet_offsets;
7072 	*pkt = dummy_tcp_packet;
7073 	*pkt_len = sizeof(dummy_tcp_packet);
7074 	*offsets = dummy_tcp_packet_offsets;
/* NOTE(review): lossy extraction — embedded line numbers, some lines (case
 * labels, braces) missing. Code kept byte-identical; comments only.
 *
 * Copies the dummy packet template into the switch rule buffer, then
 * overlays the caller's header values word-by-word under the caller's mask.
 */
7079  * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
7081  * @lkups: lookup elements or match criteria for the advanced recipe, one
7082  * structure per protocol header
7083  * @lkups_cnt: number of protocols
7084  * @s_rule: stores rule information from the match criteria
7085  * @dummy_pkt: dummy packet to fill according to filter match criteria
7086  * @pkt_len: packet length of dummy packet
7087  * @offsets: offset info for the dummy packet
7089 static enum ice_status
7090 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7091 			  struct ice_aqc_sw_rules_elem *s_rule,
7092 			  const u8 *dummy_pkt, u16 pkt_len,
7093 			  const struct ice_dummy_pkt_offsets *offsets)
7098 	/* Start with a packet with a pre-defined/dummy content. Then, fill
7099 	 * in the header values to be looked up or matched.
7101 	pkt = s_rule->pdata.lkup_tx_rx.hdr;
7103 	ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
7105 	for (i = 0; i < lkups_cnt; i++) {
7106 		enum ice_protocol_type type;
7107 		u16 offset = 0, len = 0, j;
7110 		/* find the start of this layer; it should be found since this
7111 		 * was already checked when search for the dummy packet
7113 		type = lkups[i].type;
7114 		for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
7115 			if (type == offsets[j].type) {
7116 				offset = offsets[j].offset;
7121 		/* this should never happen in a correct calling sequence */
7123 			return ICE_ERR_PARAM;
/* Header length per protocol type; the `case` labels sit on elided lines. */
7125 		switch (lkups[i].type) {
7128 			len = sizeof(struct ice_ether_hdr);
7131 			len = sizeof(struct ice_ethtype_hdr);
7134 			len = sizeof(struct ice_vlan_hdr);
7138 			len = sizeof(struct ice_ipv4_hdr);
7142 			len = sizeof(struct ice_ipv6_hdr);
7147 			len = sizeof(struct ice_l4_hdr);
7150 			len = sizeof(struct ice_sctp_hdr);
7153 			len = sizeof(struct ice_nvgre);
7158 			len = sizeof(struct ice_udp_tnl_hdr);
7162 			len = sizeof(struct ice_udp_gtp_hdr);
7165 			len = sizeof(struct ice_pppoe_hdr);
7168 			len = sizeof(struct ice_esp_hdr);
7171 			len = sizeof(struct ice_nat_t_hdr);
7174 			len = sizeof(struct ice_ah_hdr);
7177 			len = sizeof(struct ice_l2tpv3_sess_hdr);
7180 			return ICE_ERR_PARAM;
7183 		/* the length should be a word multiple */
7184 		if (len % ICE_BYTES_PER_WORD)
7187 		/* We have the offset to the header start, the length, the
7188 		 * caller's header values and mask. Use this information to
7189 		 * copy the data into the dummy packet appropriately based on
7190 		 * the mask. Note that we need to only write the bits as
7191 		 * indicated by the mask to make sure we don't improperly write
7192 		 * over any significant packet data.
/* NOTE(review): u16-granular read-modify-write through casts of the byte
 * buffer; relies on `offset` being 2-byte aligned in every dummy template —
 * confirm against the offset tables in the full file. */
7194 		for (j = 0; j < len / sizeof(u16); j++)
7195 			if (((u16 *)&lkups[i].m_u)[j])
7196 				((u16 *)(pkt + offset))[j] =
7197 					(((u16 *)(pkt + offset))[j] &
7198 					 ~((u16 *)&lkups[i].m_u)[j]) |
7199 					(((u16 *)&lkups[i].h_u)[j] &
7200 					 ((u16 *)&lkups[i].m_u)[j]);
7203 	s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
/* NOTE(review): lossy extraction — code kept byte-identical; comments only.
 *
 * For VXLAN/GENEVE-family tunnel types, looks up the currently open UDP
 * tunnel port and patches it into the dummy packet's outer UDP dest port.
 */
7209  * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
7210  * @hw: pointer to the hardware structure
7211  * @tun_type: tunnel type
7212  * @pkt: dummy packet to fill in
7213  * @offsets: offset info for the dummy packet
7215 static enum ice_status
7216 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
7217 			u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
/* switch (tun_type) — the switch head and per-case error returns when no
 * tunnel port is open are on elided lines. */
7222 	case ICE_SW_TUN_AND_NON_TUN:
7223 	case ICE_SW_TUN_VXLAN_GPE:
7224 	case ICE_SW_TUN_VXLAN:
7225 	case ICE_SW_TUN_VXLAN_VLAN:
7226 	case ICE_SW_TUN_UDP:
7227 		if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
7231 	case ICE_SW_TUN_GENEVE:
7232 	case ICE_SW_TUN_GENEVE_VLAN:
7233 		if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
7238 		/* Nothing needs to be done for this tunnel type */
7242 	/* Find the outer UDP protocol header and insert the port number */
7243 	for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
7244 		if (offsets[i].type == ICE_UDP_OF) {
7245 			struct ice_l4_hdr *hdr;
7248 			offset = offsets[i].offset;
7249 			hdr = (struct ice_l4_hdr *)&pkt[offset];
7250 			hdr->dst_port = CPU_TO_BE16(open_port);
/* NOTE(review): lossy extraction — code kept byte-identical; comments only.
 *
 * Linear scan of the recipe's filter-rule list for an entry whose lookup
 * array and rule info (flag/tun_type, plus fields on elided lines) match.
 */
7260  * ice_find_adv_rule_entry - Search a rule entry
7261  * @hw: pointer to the hardware structure
7262  * @lkups: lookup elements or match criteria for the advanced recipe, one
7263  * structure per protocol header
7264  * @lkups_cnt: number of protocols
7265  * @recp_id: recipe ID for which we are finding the rule
7266  * @rinfo: other information regarding the rule e.g. priority and action info
7268  * Helper function to search for a given advance rule entry
7269  * Returns pointer to entry storing the rule if found
7271 static struct ice_adv_fltr_mgmt_list_entry *
7272 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7273 			u16 lkups_cnt, u16 recp_id,
7274 			struct ice_adv_rule_info *rinfo)
7276 	struct ice_adv_fltr_mgmt_list_entry *list_itr;
7277 	struct ice_switch_info *sw = hw->switch_info;
7280 	LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
7281 			    ice_adv_fltr_mgmt_list_entry, list_entry) {
7282 		bool lkups_matched = true;
7284 		if (lkups_cnt != list_itr->lkups_cnt)
7286 		for (i = 0; i < list_itr->lkups_cnt; i++)
/* memcmp size argument is on an elided line — presumably sizeof(*lkups). */
7287 			if (memcmp(&list_itr->lkups[i], &lkups[i],
7289 				lkups_matched = false;
7292 		if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
7293 		    rinfo->tun_type == list_itr->rule_info.tun_type &&
/* NOTE(review): lossy extraction — code kept byte-identical; comments only. */
7301  * ice_adv_add_update_vsi_list
7302  * @hw: pointer to the hardware structure
7303  * @m_entry: pointer to current adv filter management list entry
7304  * @cur_fltr: filter information from the book keeping entry
7305  * @new_fltr: filter information with the new VSI to be added
7307  * Call AQ command to add or update previously created VSI list with new VSI.
7309  * Helper function to do book keeping associated with adding filter information
7310  * The algorithm to do the booking keeping is described below :
7311  * When a VSI needs to subscribe to a given advanced filter
7312  *	if only one VSI has been added till now
7313  *		Allocate a new VSI list and add two VSIs
7314  *		to this list using switch rule command
7315  *		Update the previously created switch rule with the
7316  *		newly created VSI list ID
7317  *	if a VSI list was previously created
7318  *		Add the new VSI to the previously created VSI list set
7319  *		using the update switch rule command
7321 static enum ice_status
7322 ice_adv_add_update_vsi_list(struct ice_hw *hw,
7323 			    struct ice_adv_fltr_mgmt_list_entry *m_entry,
7324 			    struct ice_adv_rule_info *cur_fltr,
7325 			    struct ice_adv_rule_info *new_fltr)
7327 	enum ice_status status;
7328 	u16 vsi_list_id = 0;
/* Queue/queue-group/drop actions cannot be merged into a VSI list. */
7330 	if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
7331 	    cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
7332 	    cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
7333 		return ICE_ERR_NOT_IMPL;
7335 	if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
7336 	     new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
7337 	    (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
7338 	     cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
7339 		return ICE_ERR_NOT_IMPL;
7341 	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
7342 		/* Only one entry existed in the mapping and it was not already
7343 		 * a part of a VSI list. So, create a VSI list with the old and
7346 		struct ice_fltr_info tmp_fltr;
7347 		u16 vsi_handle_arr[2];
7349 		/* A rule already exists with the new VSI being added */
7350 		if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
7351 		    new_fltr->sw_act.fwd_id.hw_vsi_id)
7352 			return ICE_ERR_ALREADY_EXISTS;
7354 		vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
7355 		vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
7356 		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
7362 		ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
7363 		tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
7364 		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
7365 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
7366 		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
7367 		tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
7369 		/* Update the previous switch rule of "forward to VSI" to
7372 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
7376 		cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
7377 		cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
7378 		m_entry->vsi_list_info =
7379 			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
7382 		u16 vsi_handle = new_fltr->sw_act.vsi_handle;
7384 		if (!m_entry->vsi_list_info)
7387 		/* A rule already exists with the new VSI being added */
7388 		if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
7391 		/* Update the previously created VSI list set with
7392 		 * the new VSI ID passed in
7394 		vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
7396 		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
7398 						  ice_aqc_opc_update_sw_rules,
7400 		/* update VSI list mapping info with new VSI ID */
7402 			ice_set_bit(vsi_handle,
7403 				    m_entry->vsi_list_info->vsi_map);
7406 		m_entry->vsi_count++;
/* NOTE(review): lossy extraction — embedded line numbers, some lines (error
 * checks, braces, declarations) missing. Code kept byte-identical.
 *
 * High-level flow: validate inputs → find dummy packet → resolve/create
 * recipe → reuse existing rule via VSI list, or build an AQ lookup rule
 * (action bits, Tx/Rx source, dummy packet fill, tunnel port patch),
 * program it, and add a bookkeeping entry.
 */
7411  * ice_add_adv_rule - helper function to create an advanced switch rule
7412  * @hw: pointer to the hardware structure
7413  * @lkups: information on the words that needs to be looked up. All words
7414  * together makes one recipe
7415  * @lkups_cnt: num of entries in the lkups array
7416  * @rinfo: other information related to the rule that needs to be programmed
7417  * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
7418  * ignored is case of error.
7420  * This function can program only 1 rule at a time. The lkups is used to
7421  * describe the all the words that forms the "lookup" portion of the recipe.
7422  * These words can span multiple protocols. Callers to this function need to
7423  * pass in a list of protocol headers with lookup information along and mask
7424  * that determines which words are valid from the given protocol header.
7425  * rinfo describes other information related to this rule such as forwarding
7426  * IDs, priority of this rule, etc.
7429 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7430 		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
7431 		 struct ice_rule_query_data *added_entry)
7433 	struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
7434 	u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
7435 	const struct ice_dummy_pkt_offsets *pkt_offsets;
7436 	struct ice_aqc_sw_rules_elem *s_rule = NULL;
7437 	struct LIST_HEAD_TYPE *rule_head;
7438 	struct ice_switch_info *sw;
7439 	enum ice_status status;
7440 	const u8 *pkt = NULL;
7446 	/* Initialize profile to result index bitmap */
7447 	if (!hw->switch_info->prof_res_bm_init) {
7448 		hw->switch_info->prof_res_bm_init = 1;
7449 		ice_init_prof_result_bm(hw);
7452 	prof_rule = ice_is_prof_rule(rinfo->tun_type);
7453 	if (!prof_rule && !lkups_cnt)
7454 		return ICE_ERR_PARAM;
7456 	/* get # of words we need to match */
7458 	for (i = 0; i < lkups_cnt; i++) {
7461 		ptr = (u16 *)&lkups[i].m_u;
7462 		for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
/* Word-count bounds: profile rules may have zero words; plain rules
 * require at least one (the branch split is on elided lines). */
7468 		if (word_cnt > ICE_MAX_CHAIN_WORDS)
7469 			return ICE_ERR_PARAM;
7471 		if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
7472 			return ICE_ERR_PARAM;
7475 	/* make sure that we can locate a dummy packet */
7476 	ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
7479 		status = ICE_ERR_PARAM;
7480 		goto err_ice_add_adv_rule;
7483 	if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
7484 	      rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
7485 	      rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
7486 	      rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
7489 	vsi_handle = rinfo->sw_act.vsi_handle;
7490 	if (!ice_is_vsi_valid(hw, vsi_handle))
7491 		return ICE_ERR_PARAM;
7493 	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
7494 		rinfo->sw_act.fwd_id.hw_vsi_id =
7495 			ice_get_hw_vsi_num(hw, vsi_handle);
7496 	if (rinfo->sw_act.flag & ICE_FLTR_TX)
7497 		rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
7499 	status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
7502 	m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
7504 		/* we have to add VSI to VSI_LIST and increment vsi_count.
7505 		 * Also Update VSI list so that we can change forwarding rule
7506 		 * if the rule already exists, we will check if it exists with
7507 		 * same vsi_id, if not then add it to the VSI list if it already
7508 		 * exists if not then create a VSI list and add the existing VSI
7509 		 * ID and the new VSI ID to the list
7510 		 * We will add that VSI to the list
7512 		status = ice_adv_add_update_vsi_list(hw, m_entry,
7513 						     &m_entry->rule_info,
7516 			added_entry->rid = rid;
7517 			added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
7518 			added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
7522 	rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
7523 	s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
7525 		return ICE_ERR_NO_MEMORY;
7526 	act |= ICE_SINGLE_ACT_LAN_ENABLE;
7527 	switch (rinfo->sw_act.fltr_act) {
7528 	case ICE_FWD_TO_VSI:
7529 		act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
7530 			ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
7531 		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
7534 		act |= ICE_SINGLE_ACT_TO_Q;
7535 		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
7536 		       ICE_SINGLE_ACT_Q_INDEX_M;
7538 	case ICE_FWD_TO_QGRP:
7539 		q_rgn = rinfo->sw_act.qgrp_size > 0 ?
7540 			(u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
7541 		act |= ICE_SINGLE_ACT_TO_Q;
7542 		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
7543 		       ICE_SINGLE_ACT_Q_INDEX_M;
7544 		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
7545 		       ICE_SINGLE_ACT_Q_REGION_M;
7547 	case ICE_DROP_PACKET:
7548 		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
7549 		       ICE_SINGLE_ACT_VALID_BIT;
7552 		status = ICE_ERR_CFG;
7553 		goto err_ice_add_adv_rule;
7556 	/* set the rule LOOKUP type based on caller specified 'RX'
7557 	 * instead of hardcoding it to be either LOOKUP_TX/RX
7559 	 * for 'RX' set the source to be the port number
7560 	 * for 'TX' set the source to be the source HW VSI number (determined
7564 		s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
7565 		s_rule->pdata.lkup_tx_rx.src =
7566 			CPU_TO_LE16(hw->port_info->lport);
7568 		s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
7569 		s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
7572 	s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
7573 	s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
7575 	status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
7576 					   pkt_len, pkt_offsets);
7578 		goto err_ice_add_adv_rule;
7580 	if (rinfo->tun_type != ICE_NON_TUN &&
7581 	    rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
7582 		status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
7583 						 s_rule->pdata.lkup_tx_rx.hdr,
7586 			goto err_ice_add_adv_rule;
7589 	status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
7590 				 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
7593 		goto err_ice_add_adv_rule;
7594 	adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
7595 		ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
7597 		status = ICE_ERR_NO_MEMORY;
7598 		goto err_ice_add_adv_rule;
7601 	adv_fltr->lkups = (struct ice_adv_lkup_elem *)
7602 		ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
7603 			   ICE_NONDMA_TO_NONDMA);
7604 	if (!adv_fltr->lkups && !prof_rule) {
7605 		status = ICE_ERR_NO_MEMORY;
7606 		goto err_ice_add_adv_rule;
7609 	adv_fltr->lkups_cnt = lkups_cnt;
7610 	adv_fltr->rule_info = *rinfo;
7611 	adv_fltr->rule_info.fltr_rule_id =
7612 		LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
7613 	sw = hw->switch_info;
7614 	sw->recp_list[rid].adv_rule = true;
7615 	rule_head = &sw->recp_list[rid].filt_rules;
7617 	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
7618 		adv_fltr->vsi_count = 1;
7620 	/* Add rule entry to book keeping list */
7621 	LIST_ADD(&adv_fltr->list_entry, rule_head);
7623 	added_entry->rid = rid;
7624 	added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
7625 	added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* goto-based cleanup: bookkeeping entry freed only on error; s_rule
 * (the AQ buffer) is always freed. */
7627 err_ice_add_adv_rule:
7628 	if (status && adv_fltr) {
7629 		ice_free(hw, adv_fltr->lkups);
7630 		ice_free(hw, adv_fltr);
7633 	ice_free(hw, s_rule);
/* NOTE(review): lossy extraction — code kept byte-identical; comments only.
 *
 * Removes one VSI from a rule's VSI list; when only one subscriber remains,
 * converts the rule back to a direct FWD_TO_VSI and deletes the list.
 */
7639  * ice_adv_rem_update_vsi_list
7640  * @hw: pointer to the hardware structure
7641  * @vsi_handle: VSI handle of the VSI to remove
7642  * @fm_list: filter management entry for which the VSI list management needs to
7645 static enum ice_status
7646 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
7647 			    struct ice_adv_fltr_mgmt_list_entry *fm_list)
7649 	struct ice_vsi_list_map_info *vsi_list_info;
7650 	enum ice_sw_lkup_type lkup_type;
7651 	enum ice_status status;
7654 	if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
7655 	    fm_list->vsi_count == 0)
7656 		return ICE_ERR_PARAM;
7658 	/* A rule with the VSI being removed does not exist */
7659 	if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
7660 		return ICE_ERR_DOES_NOT_EXIST;
7662 	lkup_type = ICE_SW_LKUP_LAST;
7663 	vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
7664 	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
7665 					  ice_aqc_opc_update_sw_rules,
7670 	fm_list->vsi_count--;
7671 	ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
7672 	vsi_list_info = fm_list->vsi_list_info;
7673 	if (fm_list->vsi_count == 1) {
7674 		struct ice_fltr_info tmp_fltr;
/* Identify the single remaining subscriber VSI. */
7677 		rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
7679 		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
7680 			return ICE_ERR_OUT_OF_RANGE;
7682 		/* Make sure VSI list is empty before removing it below */
7683 		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
7685 						  ice_aqc_opc_update_sw_rules,
7690 		ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
7691 		tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
7692 		tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
7693 		fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
7694 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
7695 		tmp_fltr.fwd_id.hw_vsi_id =
7696 			ice_get_hw_vsi_num(hw, rem_vsi_handle);
7697 		fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
7698 			ice_get_hw_vsi_num(hw, rem_vsi_handle);
7699 		fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
7701 		/* Update the previous switch rule of "MAC forward to VSI" to
7702 		 * "MAC fwd to VSI list"
7704 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
7706 			ice_debug(hw, ICE_DBG_SW,
7707 				  "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
7708 				  tmp_fltr.fwd_id.hw_vsi_id, status);
7711 		fm_list->vsi_list_info->ref_cnt--;
7713 		/* Remove the VSI list since it is no longer used */
7714 		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
7716 			ice_debug(hw, ICE_DBG_SW,
7717 				  "Failed to remove VSI list %d, error %d\n",
7718 				  vsi_list_id, status);
7722 		LIST_DEL(&vsi_list_info->list_entry);
7723 		ice_free(hw, vsi_list_info);
7724 		fm_list->vsi_list_info = NULL;
/* NOTE(review): lossy extraction — code kept byte-identical; comments only. */
7731  * ice_rem_adv_rule - removes existing advanced switch rule
7732  * @hw: pointer to the hardware structure
7733  * @lkups: information on the words that needs to be looked up. All words
7734  * together makes one recipe
7735  * @lkups_cnt: num of entries in the lkups array
7736  * @rinfo: Its the pointer to the rule information for the rule
7738  * This function can be used to remove 1 rule at a time. The lkups is
7739  * used to describe all the words that forms the "lookup" portion of the
7740  * rule. These words can span multiple protocols. Callers to this function
7741  * need to pass in a list of protocol headers with lookup information along
7742  * and mask that determines which words are valid from the given protocol
7743  * header. rinfo describes other information related to this rule such as
7744  * forwarding IDs, priority of this rule, etc.
7747 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7748 		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
7750 	struct ice_adv_fltr_mgmt_list_entry *list_elem;
7751 	struct ice_prot_lkup_ext lkup_exts;
7752 	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
7753 	enum ice_status status = ICE_SUCCESS;
7754 	bool remove_rule = false;
7755 	u16 i, rid, vsi_handle;
/* Rebuild the protocol/offset extraction words so the matching recipe
 * can be located by ice_find_recp() below. */
7757 	ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
7758 	for (i = 0; i < lkups_cnt; i++) {
7761 		if (lkups[i].type >= ICE_PROTOCOL_LAST)
7764 		count = ice_fill_valid_words(&lkups[i], &lkup_exts);
7769 	/* Create any special protocol/offset pairs, such as looking at tunnel
7770 	 * bits by extracting metadata
7772 	status = ice_add_special_words(rinfo, &lkup_exts);
7776 	rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
7777 	/* If did not find a recipe that match the existing criteria */
7778 	if (rid == ICE_MAX_NUM_RECIPES)
7779 		return ICE_ERR_PARAM;
7781 	rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
7782 	list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
7783 	/* the rule is already removed */
7786 	ice_acquire_lock(rule_lock);
7787 	if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
7789 	} else if (list_elem->vsi_count > 1) {
/* More than one subscriber: only detach this VSI, keep the rule. */
7790 		remove_rule = false;
7791 		vsi_handle = rinfo->sw_act.vsi_handle;
7792 		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
7794 		vsi_handle = rinfo->sw_act.vsi_handle;
7795 		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
7797 			ice_release_lock(rule_lock);
7800 		if (list_elem->vsi_count == 0)
7803 	ice_release_lock(rule_lock);
7805 		struct ice_aqc_sw_rules_elem *s_rule;
7808 		rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
7810 			(struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
7813 			return ICE_ERR_NO_MEMORY;
7814 		s_rule->pdata.lkup_tx_rx.act = 0;
7815 		s_rule->pdata.lkup_tx_rx.index =
7816 			CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
7817 		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
7818 		status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
7820 					 ice_aqc_opc_remove_sw_rules, NULL);
/* DOES_NOT_EXIST from firmware is treated as success so bookkeeping
 * stays consistent with hardware. */
7821 		if (status == ICE_SUCCESS || status == ICE_ERR_DOES_NOT_EXIST) {
7822 			struct ice_switch_info *sw = hw->switch_info;
7824 			ice_acquire_lock(rule_lock);
7825 			LIST_DEL(&list_elem->list_entry);
7826 			ice_free(hw, list_elem->lkups);
7827 			ice_free(hw, list_elem);
7828 			ice_release_lock(rule_lock);
7829 			if (LIST_EMPTY(&sw->recp_list[rid].filt_rules))
7830 				sw->recp_list[rid].adv_rule = false;
7832 		ice_free(hw, s_rule);
/* NOTE(review): lossy extraction — code kept byte-identical; comments only.
 *
 * Resolves a rule_id within a recipe's filter list and delegates the actual
 * removal to ice_rem_adv_rule() with the stored lookups/rule info.
 */
7838  * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
7839  * @hw: pointer to the hardware structure
7840  * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
7842  * This function is used to remove 1 rule at a time. The removal is based on
7843  * the remove_entry parameter. This function will remove rule for a given
7844  * vsi_handle with a given rule_id which is passed as parameter in remove_entry
7847 ice_rem_adv_rule_by_id(struct ice_hw *hw,
7848 		       struct ice_rule_query_data *remove_entry)
7850 	struct ice_adv_fltr_mgmt_list_entry *list_itr;
7851 	struct LIST_HEAD_TYPE *list_head;
7852 	struct ice_adv_rule_info rinfo;
7853 	struct ice_switch_info *sw;
7855 	sw = hw->switch_info;
7856 	if (!sw->recp_list[remove_entry->rid].recp_created)
7857 		return ICE_ERR_PARAM;
7858 	list_head = &sw->recp_list[remove_entry->rid].filt_rules;
7859 	LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
7861 		if (list_itr->rule_info.fltr_rule_id ==
7862 		    remove_entry->rule_id) {
7863 			rinfo = list_itr->rule_info;
7864 			rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
7865 			return ice_rem_adv_rule(hw, list_itr->lkups,
7866 						list_itr->lkups_cnt, &rinfo);
7869 	/* either list is empty or unable to find rule */
7870 	return ICE_ERR_DOES_NOT_EXIST;
/* NOTE(review): lossy extraction — code kept byte-identical; comments only.
 * (The kernel-doc name "ice_rem_adv_for_vsi" differs from the actual symbol
 * "ice_rem_adv_rule_for_vsi"; kept as-is since bytes must not change.)
 */
7874  * ice_rem_adv_for_vsi - removes existing advanced switch rules for a
7876  * @hw: pointer to the hardware structure
7877  * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
7879  * This function is used to remove all the rules for a given VSI and as soon
7880  * as removing a rule fails, it will return immediately with the error code,
7881  * else it will return ICE_SUCCESS
7883 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
7885 	struct ice_adv_fltr_mgmt_list_entry *list_itr;
7886 	struct ice_vsi_list_map_info *map_info;
7887 	struct LIST_HEAD_TYPE *list_head;
7888 	struct ice_adv_rule_info rinfo;
7889 	struct ice_switch_info *sw;
7890 	enum ice_status status;
7891 	u16 vsi_list_id = 0;
/* Walk every created recipe that carries advanced rules and remove the
 * entries subscribed by vsi_handle. */
7894 	sw = hw->switch_info;
7895 	for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
7896 		if (!sw->recp_list[rid].recp_created)
7898 		if (!sw->recp_list[rid].adv_rule)
7900 		list_head = &sw->recp_list[rid].filt_rules;
7902 		LIST_FOR_EACH_ENTRY(list_itr, list_head,
7903 				    ice_adv_fltr_mgmt_list_entry, list_entry) {
7904 			map_info = ice_find_vsi_list_entry(&sw->recp_list[rid],
7909 			rinfo = list_itr->rule_info;
7910 			rinfo.sw_act.vsi_handle = vsi_handle;
7911 			status = ice_rem_adv_rule(hw, list_itr->lkups,
7912 						  list_itr->lkups_cnt, &rinfo);
/* NOTE(review): lossy extraction — code kept byte-identical; comments only.
 *
 * Re-adds every filter from list_head after moving them to a temporary list
 * (so the add path does not report ALREADY_EXISTS), handling per-VSI
 * expansion of VSI-list entries.
 */
7922  * ice_replay_fltr - Replay all the filters stored by a specific list head
7923  * @hw: pointer to the hardware structure
7924  * @list_head: list for which filters needs to be replayed
7925  * @recp_id: Recipe ID for which rules need to be replayed
7927 static enum ice_status
7928 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
7930 	struct ice_fltr_mgmt_list_entry *itr;
7931 	enum ice_status status = ICE_SUCCESS;
7932 	struct ice_sw_recipe *recp_list;
7933 	u8 lport = hw->port_info->lport;
7934 	struct LIST_HEAD_TYPE l_head;
7936 	if (LIST_EMPTY(list_head))
7939 	recp_list = &hw->switch_info->recp_list[recp_id];
7940 	/* Move entries from the given list_head to a temporary l_head so that
7941 	 * they can be replayed. Otherwise when trying to re-add the same
7942 	 * filter, the function will return already exists
7944 	LIST_REPLACE_INIT(list_head, &l_head);
7946 	/* Mark the given list_head empty by reinitializing it so filters
7947 	 * could be added again by *handler
7949 	LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
7951 		struct ice_fltr_list_entry f_entry;
7953 		f_entry.fltr_info = itr->fltr_info;
7954 		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
7955 			status = ice_add_rule_internal(hw, recp_list, lport,
7957 			if (status != ICE_SUCCESS)
7962 		/* Add a filter per VSI separately */
/* Per-VSI loop: pulls VSI handles off the entry's vsi_map one at a time
 * (loop head is on an elided line). */
7967 			ice_find_first_bit(itr->vsi_list_info->vsi_map,
7969 			if (!ice_is_vsi_valid(hw, vsi_handle))
7972 			ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
7973 			f_entry.fltr_info.vsi_handle = vsi_handle;
7974 			f_entry.fltr_info.fwd_id.hw_vsi_id =
7975 				ice_get_hw_vsi_num(hw, vsi_handle);
7976 			f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
7977 			if (recp_id == ICE_SW_LKUP_VLAN)
7978 				status = ice_add_vlan_internal(hw, recp_list,
7981 				status = ice_add_rule_internal(hw, recp_list,
7984 			if (status != ICE_SUCCESS)
7989 	/* Clear the filter management list */
7990 	ice_rem_sw_rule_info(hw, &l_head);
7995 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
7996 * @hw: pointer to the hardware structure
7998 * NOTE: This function does not clean up partially added filters on error.
7999 * It is up to caller of the function to issue a reset or fail early.
8001 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
8003 struct ice_switch_info *sw = hw->switch_info;
8004 enum ice_status status = ICE_SUCCESS;
8007 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8008 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
8010 status = ice_replay_fltr(hw, i, head);
8011 if (status != ICE_SUCCESS)
8018 * ice_replay_vsi_fltr - Replay filters for requested VSI
8019 * @hw: pointer to the hardware structure
8020 * @pi: pointer to port information structure
8021 * @sw: pointer to switch info struct for which function replays filters
8022 * @vsi_handle: driver VSI handle
8023 * @recp_id: Recipe ID for which rules need to be replayed
8024 * @list_head: list for which filters need to be replayed
8026 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
8027 * It is required to pass valid VSI handle.
8029 static enum ice_status
8030 ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
8031 struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
8032 struct LIST_HEAD_TYPE *list_head)
8034 struct ice_fltr_mgmt_list_entry *itr;
8035 enum ice_status status = ICE_SUCCESS;
8036 struct ice_sw_recipe *recp_list;
8039 if (LIST_EMPTY(list_head))
8041 recp_list = &sw->recp_list[recp_id];
8042 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
8044 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
8046 struct ice_fltr_list_entry f_entry;
8048 f_entry.fltr_info = itr->fltr_info;
8049 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
8050 itr->fltr_info.vsi_handle == vsi_handle) {
8051 /* update the src in case it is VSI num */
8052 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
8053 f_entry.fltr_info.src = hw_vsi_id;
8054 status = ice_add_rule_internal(hw, recp_list,
8057 if (status != ICE_SUCCESS)
8061 if (!itr->vsi_list_info ||
8062 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
8064 /* Clearing it so that the logic can add it back */
8065 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
8066 f_entry.fltr_info.vsi_handle = vsi_handle;
8067 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
8068 /* update the src in case it is VSI num */
8069 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
8070 f_entry.fltr_info.src = hw_vsi_id;
8071 if (recp_id == ICE_SW_LKUP_VLAN)
8072 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
8074 status = ice_add_rule_internal(hw, recp_list,
8077 if (status != ICE_SUCCESS)
8085 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
8086 * @hw: pointer to the hardware structure
8087 * @vsi_handle: driver VSI handle
8088 * @list_head: list for which filters need to be replayed
8090 * Replay the advanced rule for the given VSI.
8092 static enum ice_status
8093 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
8094 struct LIST_HEAD_TYPE *list_head)
8096 struct ice_rule_query_data added_entry = { 0 };
8097 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
8098 enum ice_status status = ICE_SUCCESS;
8100 if (LIST_EMPTY(list_head))
8102 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
8104 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
8105 u16 lk_cnt = adv_fltr->lkups_cnt;
8107 if (vsi_handle != rinfo->sw_act.vsi_handle)
8109 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
8118 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
8119 * @hw: pointer to the hardware structure
8120 * @pi: pointer to port information structure
8121 * @vsi_handle: driver VSI handle
8123 * Replays filters for requested VSI via vsi_handle.
8126 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
8129 struct ice_switch_info *sw = hw->switch_info;
8130 enum ice_status status;
8133 /* Update the recipes that were created */
8134 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8135 struct LIST_HEAD_TYPE *head;
8137 head = &sw->recp_list[i].filt_replay_rules;
8138 if (!sw->recp_list[i].adv_rule)
8139 status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
8142 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
8143 if (status != ICE_SUCCESS)
8151 * ice_rm_all_sw_replay_rule - helper function to delete filter replay rules
8152 * @hw: pointer to the HW struct
8153 * @sw: pointer to switch info struct for which function removes filters
8155 * Deletes the filter replay rules for given switch
8157 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
8164 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8165 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
8166 struct LIST_HEAD_TYPE *l_head;
8168 l_head = &sw->recp_list[i].filt_replay_rules;
8169 if (!sw->recp_list[i].adv_rule)
8170 ice_rem_sw_rule_info(hw, l_head);
8172 ice_rem_adv_rule_info(hw, l_head);
8178 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
8179 * @hw: pointer to the HW struct
8181 * Deletes the filter replay rules.
8183 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
8185 ice_rm_sw_replay_rule_info(hw, hw->switch_info);