1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
12 #define ICE_MAX_VLAN_ID 0xFFF
13 #define ICE_IPV4_NVGRE_PROTO_ID 0x002F
14 #define ICE_PPP_IPV6_PROTO_ID 0x0057
15 #define ICE_IPV6_ETHER_ID 0x86DD
17 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
18 * struct to configure any switch filter rules.
19 * {DA (6 bytes), SA(6 bytes),
20 * Ether type (2 bytes for header without VLAN tag) OR
21 * VLAN tag (4 bytes for header with VLAN tag) }
23 * Word on Hardcoded values
24 * byte 0 = 0x2: to identify it as locally administered DA MAC
25 * byte 6 = 0x2: to identify it as locally administered SA MAC
26 * byte 12 = 0x81 & byte 13 = 0x00:
27 * In case of VLAN filter first two bytes defines ether type (0x8100)
28 * and remaining two bytes are placeholder for programming a given VLAN ID
29 * In case of Ether type filter it is treated as header without VLAN tag
30 * and byte 12 and 13 is used to program a given Ether type instead
32 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
36 struct ice_dummy_pkt_offsets {
37 enum ice_protocol_type type;
38 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
41 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
44 { ICE_IPV4_OFOS, 14 },
49 { ICE_PROTOCOL_LAST, 0 },
52 static const u8 dummy_gre_tcp_packet[] = {
53 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
54 0x00, 0x00, 0x00, 0x00,
55 0x00, 0x00, 0x00, 0x00,
57 0x08, 0x00, /* ICE_ETYPE_OL 12 */
59 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
60 0x00, 0x00, 0x00, 0x00,
61 0x00, 0x2F, 0x00, 0x00,
62 0x00, 0x00, 0x00, 0x00,
63 0x00, 0x00, 0x00, 0x00,
65 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
66 0x00, 0x00, 0x00, 0x00,
68 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
69 0x00, 0x00, 0x00, 0x00,
70 0x00, 0x00, 0x00, 0x00,
73 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
74 0x00, 0x00, 0x00, 0x00,
75 0x00, 0x06, 0x00, 0x00,
76 0x00, 0x00, 0x00, 0x00,
77 0x00, 0x00, 0x00, 0x00,
79 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
80 0x00, 0x00, 0x00, 0x00,
81 0x00, 0x00, 0x00, 0x00,
82 0x50, 0x02, 0x20, 0x00,
83 0x00, 0x00, 0x00, 0x00
86 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
89 { ICE_IPV4_OFOS, 14 },
94 { ICE_PROTOCOL_LAST, 0 },
97 static const u8 dummy_gre_udp_packet[] = {
98 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
99 0x00, 0x00, 0x00, 0x00,
100 0x00, 0x00, 0x00, 0x00,
102 0x08, 0x00, /* ICE_ETYPE_OL 12 */
104 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
105 0x00, 0x00, 0x00, 0x00,
106 0x00, 0x2F, 0x00, 0x00,
107 0x00, 0x00, 0x00, 0x00,
108 0x00, 0x00, 0x00, 0x00,
110 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
111 0x00, 0x00, 0x00, 0x00,
113 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
114 0x00, 0x00, 0x00, 0x00,
115 0x00, 0x00, 0x00, 0x00,
118 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
119 0x00, 0x00, 0x00, 0x00,
120 0x00, 0x11, 0x00, 0x00,
121 0x00, 0x00, 0x00, 0x00,
122 0x00, 0x00, 0x00, 0x00,
124 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
125 0x00, 0x08, 0x00, 0x00,
128 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
130 { ICE_ETYPE_OL, 12 },
131 { ICE_IPV4_OFOS, 14 },
135 { ICE_VXLAN_GPE, 42 },
139 { ICE_PROTOCOL_LAST, 0 },
142 static const u8 dummy_udp_tun_tcp_packet[] = {
143 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
144 0x00, 0x00, 0x00, 0x00,
145 0x00, 0x00, 0x00, 0x00,
147 0x08, 0x00, /* ICE_ETYPE_OL 12 */
149 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
150 0x00, 0x01, 0x00, 0x00,
151 0x40, 0x11, 0x00, 0x00,
152 0x00, 0x00, 0x00, 0x00,
153 0x00, 0x00, 0x00, 0x00,
155 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
156 0x00, 0x46, 0x00, 0x00,
158 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
159 0x00, 0x00, 0x00, 0x00,
161 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
162 0x00, 0x00, 0x00, 0x00,
163 0x00, 0x00, 0x00, 0x00,
166 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
167 0x00, 0x01, 0x00, 0x00,
168 0x40, 0x06, 0x00, 0x00,
169 0x00, 0x00, 0x00, 0x00,
170 0x00, 0x00, 0x00, 0x00,
172 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
173 0x00, 0x00, 0x00, 0x00,
174 0x00, 0x00, 0x00, 0x00,
175 0x50, 0x02, 0x20, 0x00,
176 0x00, 0x00, 0x00, 0x00
179 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
181 { ICE_ETYPE_OL, 12 },
182 { ICE_IPV4_OFOS, 14 },
186 { ICE_VXLAN_GPE, 42 },
189 { ICE_UDP_ILOS, 84 },
190 { ICE_PROTOCOL_LAST, 0 },
193 static const u8 dummy_udp_tun_udp_packet[] = {
194 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
195 0x00, 0x00, 0x00, 0x00,
196 0x00, 0x00, 0x00, 0x00,
198 0x08, 0x00, /* ICE_ETYPE_OL 12 */
200 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
201 0x00, 0x01, 0x00, 0x00,
202 0x00, 0x11, 0x00, 0x00,
203 0x00, 0x00, 0x00, 0x00,
204 0x00, 0x00, 0x00, 0x00,
206 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
207 0x00, 0x3a, 0x00, 0x00,
209 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
210 0x00, 0x00, 0x00, 0x00,
212 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
213 0x00, 0x00, 0x00, 0x00,
214 0x00, 0x00, 0x00, 0x00,
217 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
218 0x00, 0x01, 0x00, 0x00,
219 0x00, 0x11, 0x00, 0x00,
220 0x00, 0x00, 0x00, 0x00,
221 0x00, 0x00, 0x00, 0x00,
223 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
224 0x00, 0x08, 0x00, 0x00,
227 /* offset info for MAC + IPv4 + UDP dummy packet */
228 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
230 { ICE_ETYPE_OL, 12 },
231 { ICE_IPV4_OFOS, 14 },
232 { ICE_UDP_ILOS, 34 },
233 { ICE_PROTOCOL_LAST, 0 },
236 /* Dummy packet for MAC + IPv4 + UDP */
237 static const u8 dummy_udp_packet[] = {
238 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
239 0x00, 0x00, 0x00, 0x00,
240 0x00, 0x00, 0x00, 0x00,
242 0x08, 0x00, /* ICE_ETYPE_OL 12 */
244 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
245 0x00, 0x01, 0x00, 0x00,
246 0x00, 0x11, 0x00, 0x00,
247 0x00, 0x00, 0x00, 0x00,
248 0x00, 0x00, 0x00, 0x00,
250 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
251 0x00, 0x08, 0x00, 0x00,
253 0x00, 0x00, /* 2 bytes for 4 byte alignment */
256 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
257 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
259 { ICE_ETYPE_OL, 12 },
260 { ICE_VLAN_OFOS, 14 },
261 { ICE_IPV4_OFOS, 18 },
262 { ICE_UDP_ILOS, 38 },
263 { ICE_PROTOCOL_LAST, 0 },
266 /* C-tag (802.1Q), IPv4:UDP dummy packet */
267 static const u8 dummy_vlan_udp_packet[] = {
268 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
269 0x00, 0x00, 0x00, 0x00,
270 0x00, 0x00, 0x00, 0x00,
272 0x81, 0x00, /* ICE_ETYPE_OL 12 */
274 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
276 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
277 0x00, 0x01, 0x00, 0x00,
278 0x00, 0x11, 0x00, 0x00,
279 0x00, 0x00, 0x00, 0x00,
280 0x00, 0x00, 0x00, 0x00,
282 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
283 0x00, 0x08, 0x00, 0x00,
285 0x00, 0x00, /* 2 bytes for 4 byte alignment */
288 /* offset info for MAC + IPv4 + TCP dummy packet */
289 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
291 { ICE_ETYPE_OL, 12 },
292 { ICE_IPV4_OFOS, 14 },
294 { ICE_PROTOCOL_LAST, 0 },
297 /* Dummy packet for MAC + IPv4 + TCP */
298 static const u8 dummy_tcp_packet[] = {
299 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
300 0x00, 0x00, 0x00, 0x00,
301 0x00, 0x00, 0x00, 0x00,
303 0x08, 0x00, /* ICE_ETYPE_OL 12 */
305 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
306 0x00, 0x01, 0x00, 0x00,
307 0x00, 0x06, 0x00, 0x00,
308 0x00, 0x00, 0x00, 0x00,
309 0x00, 0x00, 0x00, 0x00,
311 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
312 0x00, 0x00, 0x00, 0x00,
313 0x00, 0x00, 0x00, 0x00,
314 0x50, 0x00, 0x00, 0x00,
315 0x00, 0x00, 0x00, 0x00,
317 0x00, 0x00, /* 2 bytes for 4 byte alignment */
320 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
321 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
323 { ICE_ETYPE_OL, 12 },
324 { ICE_VLAN_OFOS, 14 },
325 { ICE_IPV4_OFOS, 18 },
327 { ICE_PROTOCOL_LAST, 0 },
330 /* C-tag (802.1Q), IPv4:TCP dummy packet */
331 static const u8 dummy_vlan_tcp_packet[] = {
332 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
333 0x00, 0x00, 0x00, 0x00,
334 0x00, 0x00, 0x00, 0x00,
336 0x81, 0x00, /* ICE_ETYPE_OL 12 */
338 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
340 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
341 0x00, 0x01, 0x00, 0x00,
342 0x00, 0x06, 0x00, 0x00,
343 0x00, 0x00, 0x00, 0x00,
344 0x00, 0x00, 0x00, 0x00,
346 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
347 0x00, 0x00, 0x00, 0x00,
348 0x00, 0x00, 0x00, 0x00,
349 0x50, 0x00, 0x00, 0x00,
350 0x00, 0x00, 0x00, 0x00,
352 0x00, 0x00, /* 2 bytes for 4 byte alignment */
355 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
357 { ICE_ETYPE_OL, 12 },
358 { ICE_IPV6_OFOS, 14 },
360 { ICE_PROTOCOL_LAST, 0 },
363 static const u8 dummy_tcp_ipv6_packet[] = {
364 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
365 0x00, 0x00, 0x00, 0x00,
366 0x00, 0x00, 0x00, 0x00,
368 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
370 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
371 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
372 0x00, 0x00, 0x00, 0x00,
373 0x00, 0x00, 0x00, 0x00,
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00,
379 0x00, 0x00, 0x00, 0x00,
381 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
382 0x00, 0x00, 0x00, 0x00,
383 0x00, 0x00, 0x00, 0x00,
384 0x50, 0x00, 0x00, 0x00,
385 0x00, 0x00, 0x00, 0x00,
387 0x00, 0x00, /* 2 bytes for 4 byte alignment */
390 /* C-tag (802.1Q): IPv6 + TCP */
391 static const struct ice_dummy_pkt_offsets
392 dummy_vlan_tcp_ipv6_packet_offsets[] = {
394 { ICE_ETYPE_OL, 12 },
395 { ICE_VLAN_OFOS, 14 },
396 { ICE_IPV6_OFOS, 18 },
398 { ICE_PROTOCOL_LAST, 0 },
401 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
402 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
403 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
404 0x00, 0x00, 0x00, 0x00,
405 0x00, 0x00, 0x00, 0x00,
407 0x81, 0x00, /* ICE_ETYPE_OL 12 */
409 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
411 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
412 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
413 0x00, 0x00, 0x00, 0x00,
414 0x00, 0x00, 0x00, 0x00,
415 0x00, 0x00, 0x00, 0x00,
416 0x00, 0x00, 0x00, 0x00,
417 0x00, 0x00, 0x00, 0x00,
418 0x00, 0x00, 0x00, 0x00,
419 0x00, 0x00, 0x00, 0x00,
420 0x00, 0x00, 0x00, 0x00,
422 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
423 0x00, 0x00, 0x00, 0x00,
424 0x00, 0x00, 0x00, 0x00,
425 0x50, 0x00, 0x00, 0x00,
426 0x00, 0x00, 0x00, 0x00,
428 0x00, 0x00, /* 2 bytes for 4 byte alignment */
432 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
434 { ICE_ETYPE_OL, 12 },
435 { ICE_IPV6_OFOS, 14 },
436 { ICE_UDP_ILOS, 54 },
437 { ICE_PROTOCOL_LAST, 0 },
440 /* IPv6 + UDP dummy packet */
441 static const u8 dummy_udp_ipv6_packet[] = {
442 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
443 0x00, 0x00, 0x00, 0x00,
444 0x00, 0x00, 0x00, 0x00,
446 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
448 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
449 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
450 0x00, 0x00, 0x00, 0x00,
451 0x00, 0x00, 0x00, 0x00,
452 0x00, 0x00, 0x00, 0x00,
453 0x00, 0x00, 0x00, 0x00,
454 0x00, 0x00, 0x00, 0x00,
455 0x00, 0x00, 0x00, 0x00,
456 0x00, 0x00, 0x00, 0x00,
457 0x00, 0x00, 0x00, 0x00,
459 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
460 0x00, 0x08, 0x00, 0x00,
462 0x00, 0x00, /* 2 bytes for 4 byte alignment */
465 /* C-tag (802.1Q): IPv6 + UDP */
466 static const struct ice_dummy_pkt_offsets
467 dummy_vlan_udp_ipv6_packet_offsets[] = {
469 { ICE_ETYPE_OL, 12 },
470 { ICE_VLAN_OFOS, 14 },
471 { ICE_IPV6_OFOS, 18 },
472 { ICE_UDP_ILOS, 58 },
473 { ICE_PROTOCOL_LAST, 0 },
476 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
477 static const u8 dummy_vlan_udp_ipv6_packet[] = {
478 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
479 0x00, 0x00, 0x00, 0x00,
480 0x00, 0x00, 0x00, 0x00,
482 0x81, 0x00, /* ICE_ETYPE_OL 12 */
484 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
486 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
487 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
488 0x00, 0x00, 0x00, 0x00,
489 0x00, 0x00, 0x00, 0x00,
490 0x00, 0x00, 0x00, 0x00,
491 0x00, 0x00, 0x00, 0x00,
492 0x00, 0x00, 0x00, 0x00,
493 0x00, 0x00, 0x00, 0x00,
494 0x00, 0x00, 0x00, 0x00,
495 0x00, 0x00, 0x00, 0x00,
497 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
498 0x00, 0x08, 0x00, 0x00,
500 0x00, 0x00, /* 2 bytes for 4 byte alignment */
503 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
505 { ICE_IPV4_OFOS, 14 },
508 { ICE_PROTOCOL_LAST, 0 },
511 static const u8 dummy_udp_gtp_packet[] = {
512 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
513 0x00, 0x00, 0x00, 0x00,
514 0x00, 0x00, 0x00, 0x00,
517 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
518 0x00, 0x00, 0x00, 0x00,
519 0x00, 0x11, 0x00, 0x00,
520 0x00, 0x00, 0x00, 0x00,
521 0x00, 0x00, 0x00, 0x00,
523 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
524 0x00, 0x1c, 0x00, 0x00,
526 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
527 0x00, 0x00, 0x00, 0x00,
528 0x00, 0x00, 0x00, 0x85,
530 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
531 0x00, 0x00, 0x00, 0x00,
534 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
536 { ICE_ETYPE_OL, 12 },
537 { ICE_VLAN_OFOS, 14},
539 { ICE_PROTOCOL_LAST, 0 },
542 static const u8 dummy_pppoe_ipv4_packet[] = {
543 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
544 0x00, 0x00, 0x00, 0x00,
545 0x00, 0x00, 0x00, 0x00,
547 0x81, 0x00, /* ICE_ETYPE_OL 12 */
549 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
551 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
554 0x00, 0x21, /* PPP Link Layer 24 */
556 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
557 0x00, 0x00, 0x00, 0x00,
558 0x00, 0x00, 0x00, 0x00,
559 0x00, 0x00, 0x00, 0x00,
560 0x00, 0x00, 0x00, 0x00,
562 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
565 static const u8 dummy_pppoe_ipv6_packet[] = {
566 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
567 0x00, 0x00, 0x00, 0x00,
568 0x00, 0x00, 0x00, 0x00,
570 0x81, 0x00, /* ICE_ETYPE_OL 12 */
572 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
574 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
577 0x00, 0x57, /* PPP Link Layer 24 */
579 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
580 0x00, 0x00, 0x00, 0x00,
581 0x00, 0x00, 0x00, 0x00,
582 0x00, 0x00, 0x00, 0x00,
583 0x00, 0x00, 0x00, 0x00,
584 0x00, 0x00, 0x00, 0x00,
585 0x00, 0x00, 0x00, 0x00,
586 0x00, 0x00, 0x00, 0x00,
587 0x00, 0x00, 0x00, 0x00,
588 0x00, 0x00, 0x00, 0x00,
590 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
593 static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
595 { ICE_IPV4_OFOS, 14 },
597 { ICE_PROTOCOL_LAST, 0 },
600 static const u8 dummy_ipv4_esp_pkt[] = {
601 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
602 0x00, 0x00, 0x00, 0x00,
603 0x00, 0x00, 0x00, 0x00,
606 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 14 */
607 0x00, 0x00, 0x40, 0x00,
608 0x40, 0x32, 0x00, 0x00,
609 0x00, 0x00, 0x00, 0x00,
610 0x00, 0x00, 0x00, 0x00,
612 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
613 0x00, 0x00, 0x00, 0x00,
614 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
617 static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
619 { ICE_IPV6_OFOS, 14 },
621 { ICE_PROTOCOL_LAST, 0 },
624 static const u8 dummy_ipv6_esp_pkt[] = {
625 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
626 0x00, 0x00, 0x00, 0x00,
627 0x00, 0x00, 0x00, 0x00,
630 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
631 0x00, 0x08, 0x32, 0x00, /* Next header ESP */
632 0x00, 0x00, 0x00, 0x00,
633 0x00, 0x00, 0x00, 0x00,
634 0x00, 0x00, 0x00, 0x00,
635 0x00, 0x00, 0x00, 0x00,
636 0x00, 0x00, 0x00, 0x00,
637 0x00, 0x00, 0x00, 0x00,
638 0x00, 0x00, 0x00, 0x00,
639 0x00, 0x00, 0x00, 0x00,
641 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
642 0x00, 0x00, 0x00, 0x00,
643 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
646 static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
648 { ICE_IPV4_OFOS, 14 },
650 { ICE_PROTOCOL_LAST, 0 },
653 static const u8 dummy_ipv4_ah_pkt[] = {
654 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
655 0x00, 0x00, 0x00, 0x00,
656 0x00, 0x00, 0x00, 0x00,
659 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
660 0x00, 0x00, 0x40, 0x00,
661 0x40, 0x33, 0x00, 0x00,
662 0x00, 0x00, 0x00, 0x00,
663 0x00, 0x00, 0x00, 0x00,
665 0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
666 0x00, 0x00, 0x00, 0x00,
667 0x00, 0x00, 0x00, 0x00,
668 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
671 static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
673 { ICE_IPV6_OFOS, 14 },
675 { ICE_PROTOCOL_LAST, 0 },
678 static const u8 dummy_ipv6_ah_pkt[] = {
679 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
680 0x00, 0x00, 0x00, 0x00,
681 0x00, 0x00, 0x00, 0x00,
684 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
685 0x00, 0x0c, 0x33, 0x00, /* Next header AH */
686 0x00, 0x00, 0x00, 0x00,
687 0x00, 0x00, 0x00, 0x00,
688 0x00, 0x00, 0x00, 0x00,
689 0x00, 0x00, 0x00, 0x00,
690 0x00, 0x00, 0x00, 0x00,
691 0x00, 0x00, 0x00, 0x00,
692 0x00, 0x00, 0x00, 0x00,
693 0x00, 0x00, 0x00, 0x00,
695 0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
696 0x00, 0x00, 0x00, 0x00,
697 0x00, 0x00, 0x00, 0x00,
698 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
701 static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
703 { ICE_IPV4_OFOS, 14 },
704 { ICE_UDP_ILOS, 34 },
706 { ICE_PROTOCOL_LAST, 0 },
709 static const u8 dummy_ipv4_nat_pkt[] = {
710 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
711 0x00, 0x00, 0x00, 0x00,
712 0x00, 0x00, 0x00, 0x00,
715 0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_IL 14 */
716 0x00, 0x00, 0x40, 0x00,
717 0x40, 0x11, 0x00, 0x00,
718 0x00, 0x00, 0x00, 0x00,
719 0x00, 0x00, 0x00, 0x00,
721 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34 */
722 0x00, 0x00, 0x00, 0x00,
724 0x00, 0x00, 0x00, 0x00,
725 0x00, 0x00, 0x00, 0x00,
726 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
729 static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
731 { ICE_IPV6_OFOS, 14 },
732 { ICE_UDP_ILOS, 54 },
734 { ICE_PROTOCOL_LAST, 0 },
737 static const u8 dummy_ipv6_nat_pkt[] = {
738 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
739 0x00, 0x00, 0x00, 0x00,
740 0x00, 0x00, 0x00, 0x00,
743 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
744 0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */
745 0x00, 0x00, 0x00, 0x00,
746 0x00, 0x00, 0x00, 0x00,
747 0x00, 0x00, 0x00, 0x00,
748 0x00, 0x00, 0x00, 0x00,
749 0x00, 0x00, 0x00, 0x00,
750 0x00, 0x00, 0x00, 0x00,
751 0x00, 0x00, 0x00, 0x00,
752 0x00, 0x00, 0x00, 0x00,
754 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54 */
755 0x00, 0x00, 0x00, 0x00,
757 0x00, 0x00, 0x00, 0x00,
758 0x00, 0x00, 0x00, 0x00,
759 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
763 static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
765 { ICE_IPV4_OFOS, 14 },
767 { ICE_PROTOCOL_LAST, 0 },
770 static const u8 dummy_ipv4_l2tpv3_pkt[] = {
771 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
772 0x00, 0x00, 0x00, 0x00,
773 0x00, 0x00, 0x00, 0x00,
776 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
777 0x00, 0x00, 0x40, 0x00,
778 0x40, 0x73, 0x00, 0x00,
779 0x00, 0x00, 0x00, 0x00,
780 0x00, 0x00, 0x00, 0x00,
782 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
783 0x00, 0x00, 0x00, 0x00,
784 0x00, 0x00, 0x00, 0x00,
785 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
788 static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
790 { ICE_IPV6_OFOS, 14 },
792 { ICE_PROTOCOL_LAST, 0 },
795 static const u8 dummy_ipv6_l2tpv3_pkt[] = {
796 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
797 0x00, 0x00, 0x00, 0x00,
798 0x00, 0x00, 0x00, 0x00,
801 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
802 0x00, 0x0c, 0x73, 0x40,
803 0x00, 0x00, 0x00, 0x00,
804 0x00, 0x00, 0x00, 0x00,
805 0x00, 0x00, 0x00, 0x00,
806 0x00, 0x00, 0x00, 0x00,
807 0x00, 0x00, 0x00, 0x00,
808 0x00, 0x00, 0x00, 0x00,
809 0x00, 0x00, 0x00, 0x00,
810 0x00, 0x00, 0x00, 0x00,
812 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
813 0x00, 0x00, 0x00, 0x00,
814 0x00, 0x00, 0x00, 0x00,
815 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
818 /* this is a recipe to profile association bitmap */
819 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
820 ICE_MAX_NUM_PROFILES);
822 /* this is a profile to recipe association bitmap */
823 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
824 ICE_MAX_NUM_RECIPES);
826 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
829 * ice_collect_result_idx - copy result index values
830 * @buf: buffer that contains the result index
831 * @recp: the recipe struct to copy data into
833 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
834 struct ice_sw_recipe *recp)
836 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
837 ice_set_bit(buf->content.result_indx &
838 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
842 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
843 * @hw: pointer to hardware structure
844 * @recps: struct that we need to populate
845 * @rid: recipe ID that we are populating
846 * @refresh_required: true if we should get recipe to profile mapping from FW
848 * This function is used to populate all the necessary entries into our
849 * bookkeeping so that we have a current list of all the recipes that are
850 * programmed in the firmware.
852 static enum ice_status
853 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
854 bool *refresh_required)
856 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
857 struct ice_aqc_recipe_data_elem *tmp;
858 u16 num_recps = ICE_MAX_NUM_RECIPES;
859 struct ice_prot_lkup_ext *lkup_exts;
860 enum ice_status status;
864 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
866 /* we need a buffer big enough to accommodate all the recipes */
867 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
868 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
870 return ICE_ERR_NO_MEMORY;
872 tmp[0].recipe_indx = rid;
873 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
874 /* non-zero status meaning recipe doesn't exist */
878 /* Get recipe to profile map so that we can get the fv from lkups that
879 * we read for a recipe from FW. Since we want to minimize the number of
880 * times we make this FW call, just make one call and cache the copy
881 * until a new recipe is added. This operation is only required the
882 * first time to get the changes from FW. Then to search existing
883 * entries we don't need to update the cache again until another recipe
886 if (*refresh_required) {
887 ice_get_recp_to_prof_map(hw);
888 *refresh_required = false;
891 /* Start populating all the entries for recps[rid] based on lkups from
892 * firmware. Note that we are only creating the root recipe in our
895 lkup_exts = &recps[rid].lkup_exts;
897 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
898 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
899 struct ice_recp_grp_entry *rg_entry;
900 u8 i, prof, idx, prot = 0;
904 rg_entry = (struct ice_recp_grp_entry *)
905 ice_malloc(hw, sizeof(*rg_entry));
907 status = ICE_ERR_NO_MEMORY;
911 idx = root_bufs.recipe_indx;
912 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
914 /* Mark all result indices in this chain */
915 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
916 ice_set_bit(root_bufs.content.result_indx &
917 ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
919 /* get the first profile that is associated with rid */
920 prof = ice_find_first_bit(recipe_to_profile[idx],
921 ICE_MAX_NUM_PROFILES);
922 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
923 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
925 rg_entry->fv_idx[i] = lkup_indx;
926 rg_entry->fv_mask[i] =
927 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
929 /* If the recipe is a chained recipe then all its
930 * child recipe's result will have a result index.
931 * To fill fv_words we should not use those result
932 * index, we only need the protocol ids and offsets.
933 * We will skip all the fv_idx which stores result
934 * index in them. We also need to skip any fv_idx which
935 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
936 * valid offset value.
938 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
939 rg_entry->fv_idx[i]) ||
940 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
941 rg_entry->fv_idx[i] == 0)
944 ice_find_prot_off(hw, ICE_BLK_SW, prof,
945 rg_entry->fv_idx[i], &prot, &off);
946 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
947 lkup_exts->fv_words[fv_word_idx].off = off;
950 /* populate rg_list with the data from the child entry of this
953 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
955 /* Propagate some data to the recipe database */
956 recps[idx].is_root = !!is_root;
957 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
958 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
959 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
960 recps[idx].chain_idx = root_bufs.content.result_indx &
961 ~ICE_AQ_RECIPE_RESULT_EN;
962 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
964 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
970 /* Only do the following for root recipes entries */
971 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
972 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
973 recps[idx].root_rid = root_bufs.content.rid &
974 ~ICE_AQ_RECIPE_ID_IS_ROOT;
975 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
978 /* Complete initialization of the root recipe entry */
979 lkup_exts->n_val_words = fv_word_idx;
980 recps[rid].big_recp = (num_recps > 1);
981 recps[rid].n_grp_count = (u8)num_recps;
982 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
983 ice_memdup(hw, tmp, recps[rid].n_grp_count *
984 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
985 if (!recps[rid].root_buf)
988 /* Copy result indexes */
989 ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
990 recps[rid].recp_created = true;
998 * ice_get_recp_to_prof_map - updates recipe to profile mapping
999 * @hw: pointer to hardware structure
1001 * This function is used to populate recipe_to_profile matrix where index to
1002 * this array is the recipe ID and the element is the mapping of which profiles
1003 * is this recipe mapped to.
1006 ice_get_recp_to_prof_map(struct ice_hw *hw)
1008 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
1011 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
1014 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
1015 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
1016 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
1018 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
1019 ICE_MAX_NUM_RECIPES);
1020 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
1021 if (ice_is_bit_set(r_bitmap, j))
1022 ice_set_bit(i, recipe_to_profile[j]);
1027 * ice_init_def_sw_recp - initialize the recipe book keeping tables
1028 * @hw: pointer to the HW struct
1029 * @recp_list: pointer to sw recipe list
1031 * Allocate memory for the entire recipe table and initialize the structures/
1032 * entries corresponding to basic recipes.
1035 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
1037 struct ice_sw_recipe *recps;
1040 recps = (struct ice_sw_recipe *)
1041 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
1043 return ICE_ERR_NO_MEMORY;
1045 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
1046 recps[i].root_rid = i;
1047 INIT_LIST_HEAD(&recps[i].filt_rules);
1048 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
1049 INIT_LIST_HEAD(&recps[i].rg_list);
1050 ice_init_lock(&recps[i].filt_rule_lock);
1059 * ice_aq_get_sw_cfg - get switch configuration
1060 * @hw: pointer to the hardware structure
1061 * @buf: pointer to the result buffer
1062 * @buf_size: length of the buffer available for response
1063 * @req_desc: pointer to requested descriptor
1064 * @num_elems: pointer to number of elements
1065 * @cd: pointer to command details structure or NULL
1067 * Get switch configuration (0x0200) to be placed in 'buff'.
1068 * This admin command returns information such as initial VSI/port number
1069 * and switch ID it belongs to.
1071 * NOTE: *req_desc is both an input/output parameter.
1072 * The caller of this function first calls this function with *request_desc set
1073 * to 0. If the response from f/w has *req_desc set to 0, all the switch
1074 * configuration information has been returned; if non-zero (meaning not all
1075 * the information was returned), the caller should call this function again
1076 * with *req_desc set to the previous value returned by f/w to get the
1077 * next block of switch configuration information.
1079 * *num_elems is output only parameter. This reflects the number of elements
1080 * in response buffer. The caller of this function to use *num_elems while
1081 * parsing the response buffer.
1083 static enum ice_status
1084 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
1085 u16 buf_size, u16 *req_desc, u16 *num_elems,
1086 struct ice_sq_cd *cd)
1088 struct ice_aqc_get_sw_cfg *cmd;
1089 enum ice_status status;
1090 struct ice_aq_desc desc;
1092 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
1093 cmd = &desc.params.get_sw_conf;
1094 cmd->element = CPU_TO_LE16(*req_desc);
1096 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1098 *req_desc = LE16_TO_CPU(cmd->element);
1099 *num_elems = LE16_TO_CPU(cmd->num_elems);
1106 * ice_alloc_sw - allocate resources specific to switch
1107 * @hw: pointer to the HW struct
1108 * @ena_stats: true to turn on VEB stats
1109 * @shared_res: true for shared resource, false for dedicated resource
1110 * @sw_id: switch ID returned
1111 * @counter_id: VEB counter ID returned
1113 * allocates switch resources (SWID and VEB counter) (0x0208)
1116 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
1119 struct ice_aqc_alloc_free_res_elem *sw_buf;
1120 struct ice_aqc_res_elem *sw_ele;
1121 enum ice_status status;
1124 buf_len = sizeof(*sw_buf);
1125 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1126 ice_malloc(hw, buf_len);
1128 return ICE_ERR_NO_MEMORY;
1130 /* Prepare buffer for switch ID.
1131 * The number of resource entries in buffer is passed as 1 since only a
1132 * single switch/VEB instance is allocated, and hence a single sw_id
1135 sw_buf->num_elems = CPU_TO_LE16(1);
1137 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
1138 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
1139 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
1141 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1142 ice_aqc_opc_alloc_res, NULL);
1145 goto ice_alloc_sw_exit;
1147 sw_ele = &sw_buf->elem[0];
1148 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
1151 /* Prepare buffer for VEB Counter */
1152 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
1153 struct ice_aqc_alloc_free_res_elem *counter_buf;
1154 struct ice_aqc_res_elem *counter_ele;
1156 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1157 ice_malloc(hw, buf_len);
1159 status = ICE_ERR_NO_MEMORY;
1160 goto ice_alloc_sw_exit;
1163 /* The number of resource entries in buffer is passed as 1 since
1164 * only a single switch/VEB instance is allocated, and hence a
1165 * single VEB counter is requested.
1167 counter_buf->num_elems = CPU_TO_LE16(1);
1168 counter_buf->res_type =
1169 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
1170 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
1171 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1175 ice_free(hw, counter_buf);
1176 goto ice_alloc_sw_exit;
1178 counter_ele = &counter_buf->elem[0];
1179 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
1180 ice_free(hw, counter_buf);
1184 ice_free(hw, sw_buf);
1189 * ice_free_sw - free resources specific to switch
1190 * @hw: pointer to the HW struct
1191 * @sw_id: switch ID returned
1192 * @counter_id: VEB counter ID returned
1194 * free switch resources (SWID and VEB counter) (0x0209)
1196 * NOTE: This function frees multiple resources. It continues
1197 * releasing other resources even after it encounters error.
1198 * The error code returned is the last error it encountered.
1200 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
1202 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
1203 enum ice_status status, ret_status;
/* NOTE(review): this listing appears truncated — the allocation NULL check,
 * several closing braces and the final return of ret_status are not visible.
 * Comments below describe only the statements that are shown.
 */
1206 buf_len = sizeof(*sw_buf);
1207 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1208 ice_malloc(hw, buf_len);
1210 return ICE_ERR_NO_MEMORY;
1212 /* Prepare buffer to free for switch ID res.
1213 * The number of resource entries in buffer is passed as 1 since only a
1214 * single switch/VEB instance is freed, and hence a single sw_id
1217 sw_buf->num_elems = CPU_TO_LE16(1);
1218 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
1219 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
/* First free the SWID resource; per the function's contract, we keep going
 * and free the VEB counter even if this call fails (last error wins).
 */
1221 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1222 ice_aqc_opc_free_res, NULL);
1225 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1227 /* Prepare buffer to free for VEB Counter resource */
1228 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1229 ice_malloc(hw, buf_len);
/* On allocation failure, release the already-allocated SWID buffer. */
1231 ice_free(hw, sw_buf);
1232 return ICE_ERR_NO_MEMORY;
1235 /* The number of resource entries in buffer is passed as 1 since only a
1236 * single switch/VEB instance is freed, and hence a single VEB counter
1239 counter_buf->num_elems = CPU_TO_LE16(1);
1240 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
1241 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
1243 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1244 ice_aqc_opc_free_res, NULL);
/* Record (but do not return early on) a counter-free failure. */
1246 ice_debug(hw, ICE_DBG_SW,
1247 "VEB counter resource could not be freed\n");
1248 ret_status = status;
1251 ice_free(hw, counter_buf);
1252 ice_free(hw, sw_buf);
1258 * @hw: pointer to the HW struct
1259 * @vsi_ctx: pointer to a VSI context struct
1260 * @cd: pointer to command details structure or NULL
1262 * Add a VSI context to the hardware (0x0210)
1265 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1266 struct ice_sq_cd *cd)
1268 struct ice_aqc_add_update_free_vsi_resp *res;
1269 struct ice_aqc_add_get_update_free_vsi *cmd;
1270 struct ice_aq_desc desc;
1271 enum ice_status status;
/* NOTE(review): listing appears truncated (braces and the status check
 * before reading the response are not visible).
 */
1273 cmd = &desc.params.vsi_cmd;
1274 res = &desc.params.add_update_free_vsi_res;
1276 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
/* Only pass an explicit VSI number to FW when the caller is not asking
 * FW to allocate one from its pool.
 */
1278 if (!vsi_ctx->alloc_from_pool)
1279 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
1280 ICE_AQ_VSI_IS_VALID);
1282 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
/* RD flag: the command carries a write buffer (the VSI info section). */
1284 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1286 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1287 sizeof(vsi_ctx->info), cd);
/* Presumably executed only on success — guard not visible here. FW returns
 * the HW VSI number plus used/free VSI counters.
 */
1290 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1291 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
1292 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
1300 * @hw: pointer to the HW struct
1301 * @vsi_ctx: pointer to a VSI context struct
1302 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1303 * @cd: pointer to command details structure or NULL
1305 * Free VSI context info from hardware (0x0213)
1308 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1309 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1311 struct ice_aqc_add_update_free_vsi_resp *resp;
1312 struct ice_aqc_add_get_update_free_vsi *cmd;
1313 struct ice_aq_desc desc;
1314 enum ice_status status;
1316 cmd = &desc.params.vsi_cmd;
1317 resp = &desc.params.add_update_free_vsi_res;
1319 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1321 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* NOTE(review): this assignment is presumably guarded by
 * `if (keep_vsi_alloc)` — the guard is not visible in this listing.
 */
1323 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
1325 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/* Refresh the caller's used/free VSI counters from the FW response;
 * likely guarded by a status check not shown here.
 */
1327 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1328 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1336 * @hw: pointer to the HW struct
1337 * @vsi_ctx: pointer to a VSI context struct
1338 * @cd: pointer to command details structure or NULL
1340 * Update VSI context in the hardware (0x0211)
1343 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1344 struct ice_sq_cd *cd)
1346 struct ice_aqc_add_update_free_vsi_resp *resp;
1347 struct ice_aqc_add_get_update_free_vsi *cmd;
1348 struct ice_aq_desc desc;
1349 enum ice_status status;
1351 cmd = &desc.params.vsi_cmd;
1352 resp = &desc.params.add_update_free_vsi_res;
1354 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1356 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* RD flag: the updated VSI info section is sent as a write buffer. */
1358 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1360 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1361 sizeof(vsi_ctx->info), cd);
/* Presumably only on success (guard not visible): refresh counters. */
1364 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1365 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1372 * ice_is_vsi_valid - check whether the VSI is valid or not
1373 * @hw: pointer to the HW struct
1374 * @vsi_handle: VSI handle
1376 * check whether the VSI is valid or not
1378 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1380 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1384 * ice_get_hw_vsi_num - return the HW VSI number
1385 * @hw: pointer to the HW struct
1386 * @vsi_handle: VSI handle
1388 * return the HW VSI number
1389 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1391 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1393 return hw->vsi_ctx[vsi_handle]->vsi_num;
1397 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1398 * @hw: pointer to the HW struct
1399 * @vsi_handle: VSI handle
1401 * return the VSI context entry for a given VSI handle
1403 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1405 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1409 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1410 * @hw: pointer to the HW struct
1411 * @vsi_handle: VSI handle
1412 * @vsi: VSI context pointer
1414 * save the VSI context entry for a given VSI handle
1417 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1419 hw->vsi_ctx[vsi_handle] = vsi;
1423 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1424 * @hw: pointer to the HW struct
1425 * @vsi_handle: VSI handle
1427 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1429 struct ice_vsi_ctx *vsi;
/* NOTE(review): a NULL check on the looked-up context is presumably
 * between these lines — it is not visible in this listing.
 */
1432 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* Free and clear the per-TC LAN queue context arrays. */
1435 ice_for_each_traffic_class(i) {
1436 if (vsi->lan_q_ctx[i]) {
1437 ice_free(hw, vsi->lan_q_ctx[i]);
1438 vsi->lan_q_ctx[i] = NULL;
1444 * ice_clear_vsi_ctx - clear the VSI context entry
1445 * @hw: pointer to the HW struct
1446 * @vsi_handle: VSI handle
1448 * clear the VSI context entry
1450 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1452 struct ice_vsi_ctx *vsi;
1454 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* NOTE(review): the statements below are presumably guarded by
 * `if (vsi)` and followed by freeing the context itself — those lines
 * are not visible in this listing.
 */
1456 ice_clear_vsi_q_ctx(hw, vsi_handle);
1458 hw->vsi_ctx[vsi_handle] = NULL;
1463 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1464 * @hw: pointer to the HW struct
1466 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1470 for (i = 0; i < ICE_MAX_VSI; i++)
1471 ice_clear_vsi_ctx(hw, i);
1475 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1476 * @hw: pointer to the HW struct
1477 * @vsi_handle: unique VSI handle provided by drivers
1478 * @vsi_ctx: pointer to a VSI context struct
1479 * @cd: pointer to command details structure or NULL
1481 * Add a VSI context to the hardware also add it into the VSI handle list.
1482 * If this function gets called after reset for existing VSIs then update
1483 * with the new HW VSI number in the corresponding VSI handle list entry.
1486 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1487 struct ice_sq_cd *cd)
1489 struct ice_vsi_ctx *tmp_vsi_ctx;
1490 enum ice_status status;
/* Reject handles outside the supported range up front. */
1492 if (vsi_handle >= ICE_MAX_VSI)
1493 return ICE_ERR_PARAM;
1494 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
/* NOTE(review): the status check and the `if (!tmp_vsi_ctx)` branch
 * structure are not visible in this listing.
 */
1497 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1499 /* Create a new VSI context */
1500 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1501 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
/* On allocation failure, undo the HW-side add before bailing out. */
1503 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1504 return ICE_ERR_NO_MEMORY;
/* Save a private copy of the context, keyed by the driver handle. */
1506 *tmp_vsi_ctx = *vsi_ctx;
1508 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1510 /* update with new HW VSI num */
1511 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1518 * ice_free_vsi- free VSI context from hardware and VSI handle list
1519 * @hw: pointer to the HW struct
1520 * @vsi_handle: unique VSI handle
1521 * @vsi_ctx: pointer to a VSI context struct
1522 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1523 * @cd: pointer to command details structure or NULL
1525 * Free VSI context info from hardware as well as from VSI handle list
1528 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1529 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1531 enum ice_status status;
1533 if (!ice_is_vsi_valid(hw, vsi_handle))
1534 return ICE_ERR_PARAM;
/* Translate the driver handle into the HW VSI number before the AQ call. */
1535 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1536 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
/* NOTE(review): presumably executed only when the AQ free succeeded, and
 * followed by `return status;` — those lines are not visible here.
 */
1538 ice_clear_vsi_ctx(hw, vsi_handle);
1544 * @hw: pointer to the HW struct
1545 * @vsi_handle: unique VSI handle
1546 * @vsi_ctx: pointer to a VSI context struct
1547 * @cd: pointer to command details structure or NULL
1549 * Update VSI context in the hardware
1552 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1553 struct ice_sq_cd *cd)
1555 if (!ice_is_vsi_valid(hw, vsi_handle))
1556 return ICE_ERR_PARAM;
1557 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1558 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1562 * ice_aq_get_vsi_params
1563 * @hw: pointer to the HW struct
1564 * @vsi_ctx: pointer to a VSI context struct
1565 * @cd: pointer to command details structure or NULL
1567 * Get VSI context info from hardware (0x0212)
1570 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1571 struct ice_sq_cd *cd)
1573 struct ice_aqc_add_get_update_free_vsi *cmd;
1574 struct ice_aqc_get_vsi_resp *resp;
1575 struct ice_aq_desc desc;
1576 enum ice_status status;
1578 cmd = &desc.params.vsi_cmd;
1579 resp = &desc.params.get_vsi_resp;
1581 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1583 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* The VSI info section is filled in by FW into the caller's buffer. */
1585 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1586 sizeof(vsi_ctx->info), cd);
/* NOTE(review): line continuation truncated in this listing — the mask
 * applied to resp->vsi_num (and the surrounding status guard) are not
 * visible.
 */
1588 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1590 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1591 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1598 * ice_aq_add_update_mir_rule - add/update a mirror rule
1599 * @hw: pointer to the HW struct
1600 * @rule_type: Rule Type
1601 * @dest_vsi: VSI number to which packets will be mirrored
1602 * @count: length of the list
1603 * @mr_buf: buffer for list of mirrored VSI numbers
1604 * @cd: pointer to command details structure or NULL
1607 * Add/Update Mirror Rule (0x260).
1610 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1611 u16 count, struct ice_mir_rule_buf *mr_buf,
1612 struct ice_sq_cd *cd, u16 *rule_id)
1614 struct ice_aqc_add_update_mir_rule *cmd;
1615 struct ice_aq_desc desc;
1616 enum ice_status status;
1617 __le16 *mr_list = NULL;
/* NOTE(review): this listing appears truncated — declarations of
 * buf_size/i, several break statements and closing braces are missing.
 * Comments describe only the visible statements.
 */
1620 switch (rule_type) {
1621 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1622 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1623 /* Make sure count and mr_buf are set for these rule_types */
1624 if (!(count && mr_buf))
1625 return ICE_ERR_PARAM;
/* VPORT rules carry a VSI list: allocate one __le16 per entry. */
1627 buf_size = count * sizeof(__le16);
1628 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1630 return ICE_ERR_NO_MEMORY;
1632 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1633 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1634 /* Make sure count and mr_buf are not set for these
1637 if (count || mr_buf)
1638 return ICE_ERR_PARAM;
1641 ice_debug(hw, ICE_DBG_SW,
1642 "Error due to unsupported rule_type %u\n", rule_type);
1643 return ICE_ERR_OUT_OF_RANGE;
1646 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1648 /* Pre-process 'mr_buf' items for add/update of virtual port
1649 * ingress/egress mirroring (but not physical port ingress/egress
1655 for (i = 0; i < count; i++) {
1658 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1660 /* Validate specified VSI number, make sure it is less
1661 * than ICE_MAX_VSI, if not return with error.
1663 if (id >= ICE_MAX_VSI) {
1664 ice_debug(hw, ICE_DBG_SW,
1665 "Error VSI index (%u) out-of-range\n",
/* Free the partially-built list before bailing out. */
1667 ice_free(hw, mr_list);
1668 return ICE_ERR_OUT_OF_RANGE;
1671 /* add VSI to mirror rule */
1674 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1675 else /* remove VSI from mirror rule */
1676 mr_list[i] = CPU_TO_LE16(id);
1680 cmd = &desc.params.add_update_rule;
/* Updating an existing rule: pass the rule ID back with the VALID bit. */
1681 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1682 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1683 ICE_AQC_RULE_ID_VALID_M);
1684 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1685 cmd->num_entries = CPU_TO_LE16(count);
1686 cmd->dest = CPU_TO_LE16(dest_vsi);
1688 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
/* On success FW returns the (possibly newly assigned) rule ID. */
1690 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1692 ice_free(hw, mr_list);
1698 * ice_aq_delete_mir_rule - delete a mirror rule
1699 * @hw: pointer to the HW struct
1700 * @rule_id: Mirror rule ID (to be deleted)
1701 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1702 * otherwise it is returned to the shared pool
1703 * @cd: pointer to command details structure or NULL
1705 * Delete Mirror Rule (0x261).
1708 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1709 struct ice_sq_cd *cd)
1711 struct ice_aqc_delete_mir_rule *cmd;
1712 struct ice_aq_desc desc;
1714 /* rule_id should be in the range 0...63 */
1715 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1716 return ICE_ERR_OUT_OF_RANGE;
1718 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1720 cmd = &desc.params.del_rule;
1721 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1722 cmd->rule_id = CPU_TO_LE16(rule_id);
/* NOTE(review): this flag assignment is presumably guarded by
 * `if (keep_allocd)` — the guard is not visible in this listing.
 */
1725 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1727 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1731 * ice_aq_alloc_free_vsi_list
1732 * @hw: pointer to the HW struct
1733 * @vsi_list_id: VSI list ID returned or used for lookup
1734 * @lkup_type: switch rule filter lookup type
1735 * @opc: switch rules population command type - pass in the command opcode
1737 * allocates or free a VSI list resource
1739 static enum ice_status
1740 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1741 enum ice_sw_lkup_type lkup_type,
1742 enum ice_adminq_opc opc)
1744 struct ice_aqc_alloc_free_res_elem *sw_buf;
1745 struct ice_aqc_res_elem *vsi_ele;
1746 enum ice_status status;
/* NOTE(review): allocation NULL check and some closing braces are not
 * visible in this listing.
 */
1749 buf_len = sizeof(*sw_buf);
1750 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1751 ice_malloc(hw, buf_len);
1753 return ICE_ERR_NO_MEMORY;
1754 sw_buf->num_elems = CPU_TO_LE16(1);
/* Choose the resource type from the lookup type: replay-type VSI lists
 * for MAC/ethertype/promisc lookups, prune-type for VLAN lookups.
 */
1756 if (lkup_type == ICE_SW_LKUP_MAC ||
1757 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1758 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1759 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1760 lkup_type == ICE_SW_LKUP_PROMISC ||
1761 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1762 lkup_type == ICE_SW_LKUP_LAST) {
1763 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1764 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1766 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
/* Any other lookup type is not backed by a VSI list resource. */
1768 status = ICE_ERR_PARAM;
1769 goto ice_aq_alloc_free_vsi_list_exit;
/* When freeing, tell FW which list to release. */
1772 if (opc == ice_aqc_opc_free_res)
1773 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1775 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1777 goto ice_aq_alloc_free_vsi_list_exit;
/* When allocating, report the FW-assigned list ID back to the caller. */
1779 if (opc == ice_aqc_opc_alloc_res) {
1780 vsi_ele = &sw_buf->elem[0];
1781 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1784 ice_aq_alloc_free_vsi_list_exit:
1785 ice_free(hw, sw_buf);
1790 * ice_aq_set_storm_ctrl - Sets storm control configuration
1791 * @hw: pointer to the HW struct
1792 * @bcast_thresh: represents the upper threshold for broadcast storm control
1793 * @mcast_thresh: represents the upper threshold for multicast storm control
1794 * @ctl_bitmask: storm control control knobs
1796 * Sets the storm control configuration (0x0280)
1799 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
/* NOTE(review): signature continuation (the ctl_bitmask parameter) is not
 * visible in this listing; ctl_bitmask is used below.
 */
1802 struct ice_aqc_storm_cfg *cmd;
1803 struct ice_aq_desc desc;
1805 cmd = &desc.params.storm_conf;
1807 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
/* Thresholds are masked to the field width before being programmed. */
1809 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1810 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1811 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1813 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1817 * ice_aq_get_storm_ctrl - gets storm control configuration
1818 * @hw: pointer to the HW struct
1819 * @bcast_thresh: represents the upper threshold for broadcast storm control
1820 * @mcast_thresh: represents the upper threshold for multicast storm control
1821 * @ctl_bitmask: storm control control knobs
1823 * Gets the storm control configuration (0x0281)
1826 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
1829 enum ice_status status;
1830 struct ice_aq_desc desc;
1832 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1834 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
/* On success, decode the response in place from the descriptor. */
1836 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
/* NOTE(review): the mask continuations (presumably ICE_AQ_THRESHOLD_M)
 * and the NULL checks on the output pointers are not visible in this
 * listing.
 */
1839 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1842 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1845 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1852 * ice_aq_sw_rules - add/update/remove switch rules
1853 * @hw: pointer to the HW struct
1854 * @rule_list: pointer to switch rule population list
1855 * @rule_list_sz: total size of the rule list in bytes
1856 * @num_rules: number of switch rules in the rule_list
1857 * @opc: switch rules population command type - pass in the command opcode
1858 * @cd: pointer to command details structure or NULL
1860 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1862 static enum ice_status
1863 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1864 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1866 struct ice_aq_desc desc;
1868 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1870 if (opc != ice_aqc_opc_add_sw_rules &&
1871 opc != ice_aqc_opc_update_sw_rules &&
1872 opc != ice_aqc_opc_remove_sw_rules)
1873 return ICE_ERR_PARAM;
1875 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1877 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1878 desc.params.sw_rules.num_rules_fltr_entry_index =
1879 CPU_TO_LE16(num_rules);
1880 return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1884 * ice_aq_add_recipe - add switch recipe
1885 * @hw: pointer to the HW struct
1886 * @s_recipe_list: pointer to switch rule population list
1887 * @num_recipes: number of switch recipes in the list
1888 * @cd: pointer to command details structure or NULL
1893 ice_aq_add_recipe(struct ice_hw *hw,
1894 struct ice_aqc_recipe_data_elem *s_recipe_list,
1895 u16 num_recipes, struct ice_sq_cd *cd)
1897 struct ice_aqc_add_get_recipe *cmd;
1898 struct ice_aq_desc desc;
1901 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1902 cmd = &desc.params.add_get_recipe;
1903 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1905 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
1906 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1908 buf_size = num_recipes * sizeof(*s_recipe_list);
1910 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1914 * ice_aq_get_recipe - get switch recipe
1915 * @hw: pointer to the HW struct
1916 * @s_recipe_list: pointer to switch rule population list
1917 * @num_recipes: pointer to the number of recipes (input and output)
1918 * @recipe_root: root recipe number of recipe(s) to retrieve
1919 * @cd: pointer to command details structure or NULL
1923 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1924 * On output, *num_recipes will equal the number of entries returned in
1927 * The caller must supply enough space in s_recipe_list to hold all possible
1928 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1931 ice_aq_get_recipe(struct ice_hw *hw,
1932 struct ice_aqc_recipe_data_elem *s_recipe_list,
1933 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1935 struct ice_aqc_add_get_recipe *cmd;
1936 struct ice_aq_desc desc;
1937 enum ice_status status;
/* Caller must size the buffer for the full recipe table (see kdoc). */
1940 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1941 return ICE_ERR_PARAM;
1943 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1944 cmd = &desc.params.add_get_recipe;
1945 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1947 cmd->return_index = CPU_TO_LE16(recipe_root);
1948 cmd->num_sub_recipes = 0;
1950 buf_size = *num_recipes * sizeof(*s_recipe_list);
1952 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1953 /* cppcheck-suppress constArgument */
/* FW writes back how many recipe entries it actually returned. */
1954 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1960 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1961 * @hw: pointer to the HW struct
1962 * @profile_id: package profile ID to associate the recipe with
1963 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1964 * @cd: pointer to command details structure or NULL
1965 * Recipe to profile association (0x0291)
1968 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1969 struct ice_sq_cd *cd)
1971 struct ice_aqc_recipe_to_profile *cmd;
1972 struct ice_aq_desc desc;
1974 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1975 cmd = &desc.params.recipe_to_profile;
1976 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1977 cmd->profile_id = CPU_TO_LE16(profile_id);
1978 /* Set the recipe ID bit in the bitmask to let the device know which
1979 * profile we are associating the recipe to
1981 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1982 ICE_NONDMA_TO_NONDMA);
1984 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1988 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1989 * @hw: pointer to the HW struct
1990 * @profile_id: package profile ID to associate the recipe with
1991 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1992 * @cd: pointer to command details structure or NULL
1993 * Associate profile ID with given recipe (0x0293)
1996 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1997 struct ice_sq_cd *cd)
1999 struct ice_aqc_recipe_to_profile *cmd;
2000 struct ice_aq_desc desc;
2001 enum ice_status status;
2003 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2004 cmd = &desc.params.recipe_to_profile;
2005 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
2006 cmd->profile_id = CPU_TO_LE16(profile_id);
2008 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/* Copy the recipe bitmap FW returned in the descriptor back to the
 * caller. NOTE(review): presumably guarded by `if (!status)` and followed
 * by `return status;` — those lines are not visible in this listing.
 */
2010 ice_memcpy(r_bitmap, cmd->recipe_assoc,
2011 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
2017 * ice_alloc_recipe - add recipe resource
2018 * @hw: pointer to the hardware structure
2019 * @rid: recipe ID returned as response to AQ call
2021 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
2023 struct ice_aqc_alloc_free_res_elem *sw_buf;
2024 enum ice_status status;
/* NOTE(review): the allocation NULL check, the status guard before
 * reading sw_resp, and the final return are not visible in this listing.
 */
2027 buf_len = sizeof(*sw_buf);
2028 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2030 return ICE_ERR_NO_MEMORY;
/* Request one shared recipe resource from FW. */
2032 sw_buf->num_elems = CPU_TO_LE16(1);
2033 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
2034 ICE_AQC_RES_TYPE_S) |
2035 ICE_AQC_RES_TYPE_FLAG_SHARED);
2036 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2037 ice_aqc_opc_alloc_res, NULL);
/* FW returns the allocated recipe ID in the first element. */
2039 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2040 ice_free(hw, sw_buf);
2045 /* ice_init_port_info - Initialize port_info with switch configuration data
2046 * @pi: pointer to port_info
2047 * @vsi_port_num: VSI number or port number
2048 * @type: Type of switch element (port or VSI)
2049 * @swid: switch ID of the switch the element is attached to
2050 * @pf_vf_num: PF or VF number
2051 * @is_vf: true if the element is a VF, false otherwise
2054 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2055 u16 swid, u16 pf_vf_num, bool is_vf)
/* NOTE(review): the enclosing `switch (type)` statement, additional case
 * labels, break statements and several field assignments (e.g. swid,
 * is_vf) are not visible in this listing.
 */
2058 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
/* Physical port: record the logical port number and defaults. */
2059 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2061 pi->pf_vf_num = pf_vf_num;
2063 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
2064 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
2067 ice_debug(pi->hw, ICE_DBG_SW,
2068 "incorrect VSI/port type received\n");
2073 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
2074 * @hw: pointer to the hardware structure
2076 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
2078 struct ice_aqc_get_sw_cfg_resp *rbuf;
2079 enum ice_status status;
/* NOTE(review): several local declarations (req_desc, num_elems, i, j,
 * res_type, is_vf), the do-loop opener, break/continue statements and
 * closing braces are not visible in this listing.
 */
2086 num_total_ports = 1;
2088 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
2089 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
2092 return ICE_ERR_NO_MEMORY;
2094 /* Multiple calls to ice_aq_get_sw_cfg may be required
2095 * to get all the switch configuration information. The need
2096 * for additional calls is indicated by ice_aq_get_sw_cfg
2097 * writing a non-zero value in req_desc
2100 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
2101 &req_desc, &num_elems, NULL);
/* Walk every returned switch-configuration element. */
2106 for (i = 0; i < num_elems; i++) {
2107 struct ice_aqc_get_sw_cfg_resp_elem *ele;
2108 u16 pf_vf_num, swid, vsi_port_num;
2112 ele = rbuf[i].elements;
/* Decode VSI/port number, owning function, and SWID fields. */
2113 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
2114 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
2116 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
2117 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
2119 swid = LE16_TO_CPU(ele->swid);
2121 if (LE16_TO_CPU(ele->pf_vf_num) &
2122 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
2125 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
2126 ICE_AQC_GET_SW_CONF_RESP_TYPE_S)
2129 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2130 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
/* Guard against FW reporting more ports than we expect. */
2131 if (j == num_total_ports) {
2132 ice_debug(hw, ICE_DBG_SW,
2133 "more ports than expected\n");
2134 status = ICE_ERR_CFG;
2137 ice_init_port_info(hw->port_info,
2138 vsi_port_num, res_type, swid,
/* Keep querying while FW indicates more data and no error occurred. */
2146 } while (req_desc && !status);
2149 ice_free(hw, (void *)rbuf);
2154 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2155 * @hw: pointer to the hardware structure
2156 * @fi: filter info structure to fill/update
2158 * This helper function populates the lb_en and lan_en elements of the provided
2159 * ice_fltr_info struct using the switch's type and characteristics of the
2160 * switch rule being configured.
2162 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
/* NOTE(review): the actual lb_en/lan_en assignments that these conditions
 * guard, plus several closing braces, are not visible in this listing.
 */
2167 if ((fi->flag & ICE_FLTR_RX) &&
2168 (fi->fltr_act == ICE_FWD_TO_VSI ||
2169 fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
2170 fi->lkup_type == ICE_SW_LKUP_LAST)
/* TX-direction forwarding filters: decide loopback/LAN enables. */
2173 if ((fi->flag & ICE_FLTR_TX) &&
2174 (fi->fltr_act == ICE_FWD_TO_VSI ||
2175 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2176 fi->fltr_act == ICE_FWD_TO_Q ||
2177 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2178 /* Setting LB for prune actions will result in replicated
2179 * packets to the internal switch that will be dropped.
2181 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2184 /* Set lan_en to TRUE if
2185 * 1. The switch is a VEB AND
2187 * 2.1 The lookup is a directional lookup like ethertype,
2188 * promiscuous, ethertype-MAC, promiscuous-VLAN
2189 * and default-port OR
2190 * 2.2 The lookup is VLAN, OR
2191 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2192 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
2196 * The switch is a VEPA.
2198 * In all other cases, the LAN enable has to be set to false.
2201 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2202 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
2203 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2204 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2205 fi->lkup_type == ICE_SW_LKUP_DFLT ||
2206 fi->lkup_type == ICE_SW_LKUP_VLAN ||
2207 (fi->lkup_type == ICE_SW_LKUP_MAC &&
2208 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
2209 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
2210 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
2219 * ice_fill_sw_rule - Helper function to fill switch rule structure
2220 * @hw: pointer to the hardware structure
2221 * @f_info: entry containing packet forwarding information
2222 * @s_rule: switch rule structure to be filled in based on mac_entry
2223 * @opc: switch rules population command type - pass in the command opcode
2226 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
2227 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
/* vlan_id starts out-of-range; it is only programmed into the dummy
 * header later if a lookup type below assigns a real (<= 0xFFF) value.
 */
2229 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
/* NOTE(review): declarations of act/daddr/off/eth_hdr/eth_hdr_sz/q_rgn,
 * break statements, a default case and several closing braces are not
 * visible in this listing.
 */
2237 if (opc == ice_aqc_opc_remove_sw_rules) {
/* Removal only needs the rule index; no action or header payload. */
2238 s_rule->pdata.lkup_tx_rx.act = 0;
2239 s_rule->pdata.lkup_tx_rx.index =
2240 CPU_TO_LE16(f_info->fltr_rule_id);
2241 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
2245 eth_hdr_sz = sizeof(dummy_eth_header);
2246 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
2248 /* initialize the ether header with a dummy header */
2249 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
2250 ice_fill_sw_info(hw, f_info);
/* Build the action word from the forwarding action. */
2252 switch (f_info->fltr_act) {
2253 case ICE_FWD_TO_VSI:
2254 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
2255 ICE_SINGLE_ACT_VSI_ID_M;
2256 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2257 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2258 ICE_SINGLE_ACT_VALID_BIT;
2260 case ICE_FWD_TO_VSI_LIST:
2261 act |= ICE_SINGLE_ACT_VSI_LIST;
2262 act |= (f_info->fwd_id.vsi_list_id <<
2263 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
2264 ICE_SINGLE_ACT_VSI_LIST_ID_M;
2265 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2266 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2267 ICE_SINGLE_ACT_VALID_BIT;
2270 act |= ICE_SINGLE_ACT_TO_Q;
2271 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2272 ICE_SINGLE_ACT_Q_INDEX_M;
2274 case ICE_DROP_PACKET:
2275 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
2276 ICE_SINGLE_ACT_VALID_BIT;
2278 case ICE_FWD_TO_QGRP:
/* Queue-group size is encoded as log2 in the Q region field. */
2279 q_rgn = f_info->qgrp_size > 0 ?
2280 (u8)ice_ilog2(f_info->qgrp_size) : 0;
2281 act |= ICE_SINGLE_ACT_TO_Q;
2282 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2283 ICE_SINGLE_ACT_Q_INDEX_M;
2284 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
2285 ICE_SINGLE_ACT_Q_REGION_M;
2292 act |= ICE_SINGLE_ACT_LB_ENABLE;
2294 act |= ICE_SINGLE_ACT_LAN_ENABLE;
/* Pick the match fields (DA and/or VLAN) from the lookup type. */
2296 switch (f_info->lkup_type) {
2297 case ICE_SW_LKUP_MAC:
2298 daddr = f_info->l_data.mac.mac_addr;
2300 case ICE_SW_LKUP_VLAN:
2301 vlan_id = f_info->l_data.vlan.vlan_id;
2302 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
2303 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
2304 act |= ICE_SINGLE_ACT_PRUNE;
2305 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
2308 case ICE_SW_LKUP_ETHERTYPE_MAC:
2309 daddr = f_info->l_data.ethertype_mac.mac_addr;
2311 case ICE_SW_LKUP_ETHERTYPE:
/* Write the ethertype in network byte order into the dummy header. */
2312 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2313 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
2315 case ICE_SW_LKUP_MAC_VLAN:
2316 daddr = f_info->l_data.mac_vlan.mac_addr;
2317 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2319 case ICE_SW_LKUP_PROMISC_VLAN:
2320 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2322 case ICE_SW_LKUP_PROMISC:
2323 daddr = f_info->l_data.mac_vlan.mac_addr;
/* Rule direction selects the RX or TX lookup rule type. */
2329 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
2330 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
2331 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
2333 /* Recipe set depending on lookup type */
2334 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
2335 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
2336 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* Patch the destination MAC into the dummy header (presumably guarded
 * by `if (daddr)` — guard not visible in this listing).
 */
2339 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
2340 ICE_NONDMA_TO_NONDMA);
/* Only program the VLAN TCI when a valid (in-range) VLAN was set. */
2342 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
2343 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
2344 *off = CPU_TO_BE16(vlan_id);
2347 /* Create the switch rule with the final dummy Ethernet header */
2348 if (opc != ice_aqc_opc_update_sw_rules)
2349 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
2353 * ice_add_marker_act
2354 * @hw: pointer to the hardware structure
2355 * @m_ent: the management entry for which sw marker needs to be added
2356 * @sw_marker: sw marker to tag the Rx descriptor with
2357 * @l_id: large action resource ID
2359 * Create a large action to hold software marker and update the switch rule
2360 * entry pointed by m_ent with newly created large action
2362 static enum ice_status
2363 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2364 u16 sw_marker, u16 l_id)
2366 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
2367 /* For software marker we need 3 large actions
2368 * 1. FWD action: FWD TO VSI or VSI LIST
2369 * 2. GENERIC VALUE action to hold the profile ID
2370 * 3. GENERIC VALUE action to hold the software marker ID
2372 const u16 num_lg_acts = 3;
2373 enum ice_status status;
/* Markers are only supported on MAC lookup rules */
2379 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2380 return ICE_ERR_PARAM;
2382 /* Create two back-to-back switch rules and submit them to the HW using
2383 * one memory buffer:
2387 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2388 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2389 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2391 return ICE_ERR_NO_MEMORY;
/* Second rule (lookup Tx/Rx) immediately follows the large action in the
 * same buffer.
 */
2393 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2395 /* Fill in the first switch rule i.e. large action */
2396 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2397 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2398 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2400 /* First action VSI forwarding or VSI list forwarding depending on how
/* With more than one subscribed VSI the fwd_id holds a VSI list ID,
 * otherwise a single HW VSI number.
 */
2403 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2404 m_ent->fltr_info.fwd_id.hw_vsi_id;
2406 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2407 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2408 ICE_LG_ACT_VSI_LIST_ID_M;
2409 if (m_ent->vsi_count > 1)
2410 act |= ICE_LG_ACT_VSI_LIST;
2411 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2413 /* Second action descriptor type */
2414 act = ICE_LG_ACT_GENERIC;
2416 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2417 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
/* Marker value lands at the Rx descriptor profile index offset */
2419 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2420 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2422 /* Third action Marker value */
2423 act |= ICE_LG_ACT_GENERIC;
2424 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2425 ICE_LG_ACT_GENERIC_VALUE_M;
2427 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
2429 /* call the fill switch rule to fill the lookup Tx Rx structure */
2430 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2431 ice_aqc_opc_update_sw_rules);
2433 /* Update the action to point to the large action ID */
2434 rx_tx->pdata.lkup_tx_rx.act =
2435 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
2436 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2437 ICE_SINGLE_ACT_PTR_VAL_M));
2439 /* Use the filter rule ID of the previously created rule with single
2440 * act. Once the update happens, hardware will treat this as large
2443 rx_tx->pdata.lkup_tx_rx.index =
2444 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
/* Submit both rules in one AQ call (num_rules = 2) */
2446 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2447 ice_aqc_opc_update_sw_rules, NULL);
2449 m_ent->lg_act_idx = l_id;
2450 m_ent->sw_marker_id = sw_marker;
2453 ice_free(hw, lg_act);
2458 * ice_add_counter_act - add/update filter rule with counter action
2459 * @hw: pointer to the hardware structure
2460 * @m_ent: the management entry for which counter needs to be added
2461 * @counter_id: VLAN counter ID returned as part of allocate resource
2462 * @l_id: large action resource ID
2464 static enum ice_status
2465 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2466 u16 counter_id, u16 l_id)
2468 struct ice_aqc_sw_rules_elem *lg_act;
2469 struct ice_aqc_sw_rules_elem *rx_tx;
2470 enum ice_status status;
2471 /* 2 actions will be added while adding a large action counter */
2472 const int num_acts = 2;
/* Counter actions are only supported on MAC lookup rules */
2479 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2480 return ICE_ERR_PARAM;
2482 /* Create two back-to-back switch rules and submit them to the HW using
2483 * one memory buffer:
2487 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2488 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2489 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
2492 return ICE_ERR_NO_MEMORY;
/* Second rule (lookup Tx/Rx) immediately follows the large action */
2494 rx_tx = (struct ice_aqc_sw_rules_elem *)
2495 ((u8 *)lg_act + lg_act_size);
2497 /* Fill in the first switch rule i.e. large action */
2498 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2499 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2500 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2502 /* First action VSI forwarding or VSI list forwarding depending on how
/* More than one VSI subscribed => forward to a VSI list */
2505 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2506 m_ent->fltr_info.fwd_id.hw_vsi_id;
2508 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2509 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2510 ICE_LG_ACT_VSI_LIST_ID_M;
2511 if (m_ent->vsi_count > 1)
2512 act |= ICE_LG_ACT_VSI_LIST;
2513 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2515 /* Second action counter ID */
2516 act = ICE_LG_ACT_STAT_COUNT;
2517 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2518 ICE_LG_ACT_STAT_COUNT_M;
2519 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2521 /* call the fill switch rule to fill the lookup Tx Rx structure */
2522 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2523 ice_aqc_opc_update_sw_rules);
/* Point the lookup rule at the large action by resource ID */
2525 act = ICE_SINGLE_ACT_PTR;
2526 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2527 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2529 /* Use the filter rule ID of the previously created rule with single
2530 * act. Once the update happens, hardware will treat this as large
2533 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2534 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
/* Submit both rules in one AQ call (num_rules = 2) */
2536 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2537 ice_aqc_opc_update_sw_rules, NULL);
2539 m_ent->lg_act_idx = l_id;
2540 m_ent->counter_index = counter_id;
2543 ice_free(hw, lg_act);
2548 * ice_create_vsi_list_map
2549 * @hw: pointer to the hardware structure
2550 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2551 * @num_vsi: number of VSI handles in the array
2552 * @vsi_list_id: VSI list ID generated as part of allocate resource
2554 * Helper function to create a new entry of VSI list ID to VSI mapping
2555 * using the given VSI list ID
2557 static struct ice_vsi_list_map_info *
2558 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2561 struct ice_switch_info *sw = hw->switch_info;
2562 struct ice_vsi_list_map_info *v_map;
2565 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2570 v_map->vsi_list_id = vsi_list_id;
/* Record each VSI handle in the map's bitmap */
2572 for (i = 0; i < num_vsi; i++)
2573 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
/* Track the new map on the switch-wide list; caller owns the returned
 * entry's lifetime via that list.
 */
2575 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2580 * ice_update_vsi_list_rule
2581 * @hw: pointer to the hardware structure
2582 * @vsi_handle_arr: array of VSI handles to form a VSI list
2583 * @num_vsi: number of VSI handles in the array
2584 * @vsi_list_id: VSI list ID generated as part of allocate resource
2585 * @remove: Boolean value to indicate if this is a remove action
2586 * @opc: switch rules population command type - pass in the command opcode
2587 * @lkup_type: lookup type of the filter
2589 * Call AQ command to add a new switch rule or update existing switch rule
2590 * using the given VSI list ID
2592 static enum ice_status
2593 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2594 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2595 enum ice_sw_lkup_type lkup_type)
2597 struct ice_aqc_sw_rules_elem *s_rule;
2598 enum ice_status status;
2604 return ICE_ERR_PARAM;
/* VLAN filters use prune lists; all other supported lookup types use
 * plain VSI list set/clear rules.
 */
2606 if (lkup_type == ICE_SW_LKUP_MAC ||
2607 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2608 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2609 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2610 lkup_type == ICE_SW_LKUP_PROMISC ||
2611 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2612 lkup_type == ICE_SW_LKUP_LAST)
2613 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2614 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2615 else if (lkup_type == ICE_SW_LKUP_VLAN)
2616 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2617 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2619 return ICE_ERR_PARAM;
2621 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2622 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2624 return ICE_ERR_NO_MEMORY;
/* Translate each software VSI handle to its HW VSI number, validating
 * as we go.
 */
2625 for (i = 0; i < num_vsi; i++) {
2626 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2627 status = ICE_ERR_PARAM;
2630 /* AQ call requires hw_vsi_id(s) */
2631 s_rule->pdata.vsi_list.vsi[i] =
2632 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2635 s_rule->type = CPU_TO_LE16(rule_type);
2636 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2637 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2639 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2642 ice_free(hw, s_rule);
2647 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2648 * @hw: pointer to the HW struct
2649 * @vsi_handle_arr: array of VSI handles to form a VSI list
2650 * @num_vsi: number of VSI handles in the array
2651 * @vsi_list_id: stores the ID of the VSI list to be created
2652 * @lkup_type: switch rule filter's lookup type
2654 static enum ice_status
2655 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2656 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2658 enum ice_status status;
/* Allocate a VSI list resource; on success *vsi_list_id holds the ID */
2660 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2661 ice_aqc_opc_alloc_res);
2665 /* Update the newly created VSI list to include the specified VSIs */
2666 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2667 *vsi_list_id, false,
2668 ice_aqc_opc_add_sw_rules, lkup_type);
2672 * ice_create_pkt_fwd_rule
2673 * @hw: pointer to the hardware structure
2674 * @recp_list: corresponding filter management list
2675 * @f_entry: entry containing packet forwarding information
2677 * Create switch rule with given filter information and add an entry
2678 * to the corresponding filter management list to track this switch rule
2681 static enum ice_status
2682 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2683 struct ice_fltr_list_entry *f_entry)
2685 struct ice_fltr_mgmt_list_entry *fm_entry;
2686 struct ice_aqc_sw_rules_elem *s_rule;
2687 enum ice_status status;
2689 s_rule = (struct ice_aqc_sw_rules_elem *)
2690 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2692 return ICE_ERR_NO_MEMORY;
2693 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2694 ice_malloc(hw, sizeof(*fm_entry));
2696 status = ICE_ERR_NO_MEMORY;
2697 goto ice_create_pkt_fwd_rule_exit;
/* Book-keeping copy of the filter info for the management list */
2700 fm_entry->fltr_info = f_entry->fltr_info;
2702 /* Initialize all the fields for the management entry */
2703 fm_entry->vsi_count = 1;
2704 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2705 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2706 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2708 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2709 ice_aqc_opc_add_sw_rules);
2711 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2712 ice_aqc_opc_add_sw_rules, NULL);
/* On AQ failure free the management entry; s_rule is freed at exit */
2714 ice_free(hw, fm_entry);
2715 goto ice_create_pkt_fwd_rule_exit;
/* Propagate the FW-assigned rule ID to both the caller's entry and the
 * management entry.
 */
2718 f_entry->fltr_info.fltr_rule_id =
2719 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2720 fm_entry->fltr_info.fltr_rule_id =
2721 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2723 /* The book keeping entries will get removed when base driver
2724 * calls remove filter AQ command
2726 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
2728 ice_create_pkt_fwd_rule_exit:
2729 ice_free(hw, s_rule);
2734 * ice_update_pkt_fwd_rule
2735 * @hw: pointer to the hardware structure
2736 * @f_info: filter information for switch rule
2738 * Call AQ command to update a previously created switch rule with a
2741 static enum ice_status
2742 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2744 struct ice_aqc_sw_rules_elem *s_rule;
2745 enum ice_status status;
2747 s_rule = (struct ice_aqc_sw_rules_elem *)
2748 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2750 return ICE_ERR_NO_MEMORY;
2752 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
/* Update targets the existing rule by its FW-assigned rule ID */
2754 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2756 /* Update switch rule with new rule set to forward VSI list */
2757 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2758 ice_aqc_opc_update_sw_rules, NULL);
2760 ice_free(hw, s_rule);
2765 * ice_update_sw_rule_bridge_mode
2766 * @hw: pointer to the HW struct
2768 * Updates unicast switch filter rules based on VEB/VEPA mode
2770 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2772 struct ice_switch_info *sw = hw->switch_info;
2773 struct ice_fltr_mgmt_list_entry *fm_entry;
2774 enum ice_status status = ICE_SUCCESS;
2775 struct LIST_HEAD_TYPE *rule_head;
2776 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
/* Only MAC lookup rules are walked here */
2778 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2779 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2781 ice_acquire_lock(rule_lock);
2782 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2784 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2785 u8 *addr = fi->l_data.mac.mac_addr;
2787 /* Update unicast Tx rules to reflect the selected
2790 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2791 (fi->fltr_act == ICE_FWD_TO_VSI ||
2792 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2793 fi->fltr_act == ICE_FWD_TO_Q ||
2794 fi->fltr_act == ICE_FWD_TO_QGRP)) {
/* Re-issue the rule so HW re-encodes LB/LAN enable bits for the
 * current bridge mode (see ice_fill_sw_rule).
 */
2795 status = ice_update_pkt_fwd_rule(hw, fi);
2801 ice_release_lock(rule_lock);
2807 * ice_add_update_vsi_list
2808 * @hw: pointer to the hardware structure
2809 * @m_entry: pointer to current filter management list entry
2810 * @cur_fltr: filter information from the book keeping entry
2811 * @new_fltr: filter information with the new VSI to be added
2813 * Call AQ command to add or update previously created VSI list with new VSI.
2815 * Helper function to do book keeping associated with adding filter information
2816 * The algorithm to do the book keeping is described below :
2817 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2818 * if only one VSI has been added till now
2819 * Allocate a new VSI list and add two VSIs
2820 * to this list using switch rule command
2821 * Update the previously created switch rule with the
2822 * newly created VSI list ID
2823 * if a VSI list was previously created
2824 * Add the new VSI to the previously created VSI list set
2825 * using the update switch rule command
2827 static enum ice_status
2828 ice_add_update_vsi_list(struct ice_hw *hw,
2829 struct ice_fltr_mgmt_list_entry *m_entry,
2830 struct ice_fltr_info *cur_fltr,
2831 struct ice_fltr_info *new_fltr)
2833 enum ice_status status = ICE_SUCCESS;
2834 u16 vsi_list_id = 0;
/* Queue/queue-group forwarding rules cannot be converted to VSI lists */
2836 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2837 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2838 return ICE_ERR_NOT_IMPL;
/* Mixing queue-based and VSI-based actions on one filter is unsupported */
2840 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2841 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2842 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2843 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2844 return ICE_ERR_NOT_IMPL;
2846 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2847 /* Only one entry existed in the mapping and it was not already
2848 * a part of a VSI list. So, create a VSI list with the old and
2851 struct ice_fltr_info tmp_fltr;
2852 u16 vsi_handle_arr[2];
2854 /* A rule already exists with the new VSI being added */
2855 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2856 return ICE_ERR_ALREADY_EXISTS;
2858 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2859 vsi_handle_arr[1] = new_fltr->vsi_handle;
2860 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2862 new_fltr->lkup_type);
2866 tmp_fltr = *new_fltr;
2867 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2868 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2869 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2870 /* Update the previous switch rule of "MAC forward to VSI" to
2871 * "MAC fwd to VSI list"
2873 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Switch the book-keeping entry over to VSI-list forwarding */
2877 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2878 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2879 m_entry->vsi_list_info =
2880 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2883 /* If this entry was large action then the large action needs
2884 * to be updated to point to FWD to VSI list
2886 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2888 ice_add_marker_act(hw, m_entry,
2889 m_entry->sw_marker_id,
2890 m_entry->lg_act_idx);
2892 u16 vsi_handle = new_fltr->vsi_handle;
2893 enum ice_adminq_opc opcode;
2895 if (!m_entry->vsi_list_info)
2898 /* A rule already exists with the new VSI being added */
2899 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
2902 /* Update the previously created VSI list set with
2903 * the new VSI ID passed in
2905 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2906 opcode = ice_aqc_opc_update_sw_rules;
2908 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2909 vsi_list_id, false, opcode,
2910 new_fltr->lkup_type);
2911 /* update VSI list mapping info with new VSI ID */
2913 ice_set_bit(vsi_handle,
2914 m_entry->vsi_list_info->vsi_map);
/* One more VSI now subscribes to this filter */
2917 m_entry->vsi_count++;
2922 * ice_find_rule_entry - Search a rule entry
2923 * @list_head: head of rule list
2924 * @f_info: rule information
2926 * Helper function to search for a given rule entry
2927 * Returns pointer to entry storing the rule if found
2929 static struct ice_fltr_mgmt_list_entry *
2930 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
2931 struct ice_fltr_info *f_info)
2933 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
/* Match on the lookup data (whole l_data union) and the Rx/Tx flag */
2935 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
2937 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2938 sizeof(f_info->l_data)) &&
2939 f_info->flag == list_itr->fltr_info.flag) {
2948 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2949 * @recp_list: VSI lists needs to be searched
2950 * @vsi_handle: VSI handle to be found in VSI list
2951 * @vsi_list_id: VSI list ID found containing vsi_handle
2953 * Helper function to search a VSI list with single entry containing given VSI
2954 * handle element. This can be extended further to search VSI list with more
2955 * than 1 vsi_count. Returns pointer to VSI list entry if found.
2957 static struct ice_vsi_list_map_info *
2958 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
2961 struct ice_vsi_list_map_info *map_info = NULL;
2962 struct LIST_HEAD_TYPE *list_head;
2964 list_head = &recp_list->filt_rules;
/* Advanced-rule recipes keep a different entry type on the same list */
2965 if (recp_list->adv_rule) {
2966 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2968 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2969 ice_adv_fltr_mgmt_list_entry,
2971 if (list_itr->vsi_list_info) {
2972 map_info = list_itr->vsi_list_info;
2973 if (ice_is_bit_set(map_info->vsi_map,
2975 *vsi_list_id = map_info->vsi_list_id;
2981 struct ice_fltr_mgmt_list_entry *list_itr;
/* Legacy entries: only consider single-VSI entries that own a list */
2983 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2984 ice_fltr_mgmt_list_entry,
2986 if (list_itr->vsi_count == 1 &&
2987 list_itr->vsi_list_info) {
2988 map_info = list_itr->vsi_list_info;
2989 if (ice_is_bit_set(map_info->vsi_map,
2991 *vsi_list_id = map_info->vsi_list_id;
3001 * ice_add_rule_internal - add rule for a given lookup type
3002 * @hw: pointer to the hardware structure
3003 * @recp_list: recipe list for which rule has to be added
3004 * @lport: logic port number on which function add rule
3005 * @f_entry: structure containing MAC forwarding information
3007 * Adds or updates the rule lists for a given recipe
3009 static enum ice_status
3010 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3011 u8 lport, struct ice_fltr_list_entry *f_entry)
3013 struct ice_fltr_info *new_fltr, *cur_fltr;
3014 struct ice_fltr_mgmt_list_entry *m_entry;
3015 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3016 enum ice_status status = ICE_SUCCESS;
3018 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3019 return ICE_ERR_PARAM;
3021 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
3022 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
3023 f_entry->fltr_info.fwd_id.hw_vsi_id =
3024 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3026 rule_lock = &recp_list->filt_rule_lock;
3028 ice_acquire_lock(rule_lock);
3029 new_fltr = &f_entry->fltr_info;
/* Rx rules are sourced from the logical port; Tx rules from the VSI */
3030 if (new_fltr->flag & ICE_FLTR_RX)
3031 new_fltr->src = lport;
3032 else if (new_fltr->flag & ICE_FLTR_TX)
3034 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
/* No matching rule yet: create a fresh forwarding rule; otherwise add
 * this VSI to the existing rule's subscription (VSI list).
 */
3036 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
3038 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3039 goto exit_add_rule_internal;
3042 cur_fltr = &m_entry->fltr_info;
3043 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
3045 exit_add_rule_internal:
3046 ice_release_lock(rule_lock);
3051 * ice_remove_vsi_list_rule
3052 * @hw: pointer to the hardware structure
3053 * @vsi_list_id: VSI list ID generated as part of allocate resource
3054 * @lkup_type: switch rule filter lookup type
3056 * The VSI list should be emptied before this function is called to remove the
3059 static enum ice_status
3060 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
3061 enum ice_sw_lkup_type lkup_type)
3063 struct ice_aqc_sw_rules_elem *s_rule;
3064 enum ice_status status;
/* Zero-VSI rule element: clears the list entry itself */
3067 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
3068 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3070 return ICE_ERR_NO_MEMORY;
3072 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
3073 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
3075 /* Free the vsi_list resource that we allocated. It is assumed that the
3076 * list is empty at this point.
3078 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
3079 ice_aqc_opc_free_res);
3081 ice_free(hw, s_rule);
3086 * ice_rem_update_vsi_list
3087 * @hw: pointer to the hardware structure
3088 * @vsi_handle: VSI handle of the VSI to remove
3089 * @fm_list: filter management entry for which the VSI list management needs to
3092 static enum ice_status
3093 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
3094 struct ice_fltr_mgmt_list_entry *fm_list)
3096 enum ice_sw_lkup_type lkup_type;
3097 enum ice_status status = ICE_SUCCESS;
/* Only applicable to rules that currently forward to a VSI list */
3100 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
3101 fm_list->vsi_count == 0)
3102 return ICE_ERR_PARAM;
3104 /* A rule with the VSI being removed does not exist */
3105 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
3106 return ICE_ERR_DOES_NOT_EXIST;
3108 lkup_type = fm_list->fltr_info.lkup_type;
3109 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
/* remove == true: pull this VSI out of the HW VSI list */
3110 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
3111 ice_aqc_opc_update_sw_rules,
3116 fm_list->vsi_count--;
3117 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
/* Non-VLAN rule down to one VSI: convert the rule back to a plain
 * "forward to VSI" and drain the now-redundant VSI list.
 */
3119 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
3120 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
3121 struct ice_vsi_list_map_info *vsi_list_info =
3122 fm_list->vsi_list_info;
3125 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
3127 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
3128 return ICE_ERR_OUT_OF_RANGE;
3130 /* Make sure VSI list is empty before removing it below */
3131 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
3133 ice_aqc_opc_update_sw_rules,
3138 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
3139 tmp_fltr_info.fwd_id.hw_vsi_id =
3140 ice_get_hw_vsi_num(hw, rem_vsi_handle);
3141 tmp_fltr_info.vsi_handle = rem_vsi_handle;
3142 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
3144 ice_debug(hw, ICE_DBG_SW,
3145 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
3146 tmp_fltr_info.fwd_id.hw_vsi_id, status);
3150 fm_list->fltr_info = tmp_fltr_info;
/* Tear down the VSI list resource once it is no longer referenced:
 * non-VLAN rules at one VSI, VLAN (prune) rules at zero VSIs.
 */
3153 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
3154 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
3155 struct ice_vsi_list_map_info *vsi_list_info =
3156 fm_list->vsi_list_info;
3158 /* Remove the VSI list since it is no longer used */
3159 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
3161 ice_debug(hw, ICE_DBG_SW,
3162 "Failed to remove VSI list %d, error %d\n",
3163 vsi_list_id, status);
3167 LIST_DEL(&vsi_list_info->list_entry);
3168 ice_free(hw, vsi_list_info);
3169 fm_list->vsi_list_info = NULL;
3176 * ice_remove_rule_internal - Remove a filter rule of a given type
3178 * @hw: pointer to the hardware structure
3179 * @recp_list: recipe list for which the rule needs to removed
3180 * @f_entry: rule entry containing filter information
3182 static enum ice_status
3183 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3184 struct ice_fltr_list_entry *f_entry)
3186 struct ice_fltr_mgmt_list_entry *list_elem;
3187 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3188 enum ice_status status = ICE_SUCCESS;
3189 bool remove_rule = false;
3192 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3193 return ICE_ERR_PARAM;
3194 f_entry->fltr_info.fwd_id.hw_vsi_id =
3195 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3197 rule_lock = &recp_list->filt_rule_lock;
3198 ice_acquire_lock(rule_lock);
3199 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
3200 &f_entry->fltr_info);
3202 status = ICE_ERR_DOES_NOT_EXIST;
/* Non-VSI-list rules can be removed outright; VSI-list rules must
 * first update the list membership.
 */
3206 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
3208 } else if (!list_elem->vsi_list_info) {
3209 status = ICE_ERR_DOES_NOT_EXIST;
3211 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
3212 /* a ref_cnt > 1 indicates that the vsi_list is being
3213 * shared by multiple rules. Decrement the ref_cnt and
3214 * remove this rule, but do not modify the list, as it
3215 * is in-use by other rules.
3217 list_elem->vsi_list_info->ref_cnt--;
3220 /* a ref_cnt of 1 indicates the vsi_list is only used
3221 * by one rule. However, the original removal request is only
3222 * for a single VSI. Update the vsi_list first, and only
3223 * remove the rule if there are no further VSIs in this list.
3225 vsi_handle = f_entry->fltr_info.vsi_handle;
3226 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
3229 /* if VSI count goes to zero after updating the VSI list */
3230 if (list_elem->vsi_count == 0)
3235 /* Remove the lookup rule */
3236 struct ice_aqc_sw_rules_elem *s_rule;
/* Remove needs no dummy packet header, hence the NO_HDR size */
3238 s_rule = (struct ice_aqc_sw_rules_elem *)
3239 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
3241 status = ICE_ERR_NO_MEMORY;
3245 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3246 ice_aqc_opc_remove_sw_rules);
3248 status = ice_aq_sw_rules(hw, s_rule,
3249 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
3250 ice_aqc_opc_remove_sw_rules, NULL);
3252 /* Remove a book keeping from the list */
3253 ice_free(hw, s_rule);
3258 LIST_DEL(&list_elem->list_entry);
3259 ice_free(hw, list_elem);
3262 ice_release_lock(rule_lock);
3267 * ice_aq_get_res_alloc - get allocated resources
3268 * @hw: pointer to the HW struct
3269 * @num_entries: pointer to u16 to store the number of resource entries returned
3270 * @buf: pointer to user-supplied buffer
3271 * @buf_size: size of buff
3272 * @cd: pointer to command details structure or NULL
3274 * The user-supplied buffer must be large enough to store the resource
3275 * information for all resource types. Each resource type is an
3276 * ice_aqc_get_res_resp_data_elem structure.
3279 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
3280 u16 buf_size, struct ice_sq_cd *cd)
3282 struct ice_aqc_get_res_alloc *resp;
3283 enum ice_status status;
3284 struct ice_aq_desc desc;
3287 return ICE_ERR_BAD_PTR;
3289 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
3290 return ICE_ERR_INVAL_SIZE;
3292 resp = &desc.params.get_res;
3294 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
3295 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* num_entries is optional; only filled on success */
3297 if (!status && num_entries)
3298 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
3304 * ice_aq_get_res_descs - get allocated resource descriptors
3305 * @hw: pointer to the hardware structure
3306 * @num_entries: number of resource entries in buffer
3307 * @buf: Indirect buffer to hold data parameters and response
3308 * @buf_size: size of buffer for indirect commands
3309 * @res_type: resource type
3310 * @res_shared: is resource shared
3311 * @desc_id: input - first desc ID to start; output - next desc ID
3312 * @cd: pointer to command details structure or NULL
3315 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
3316 struct ice_aqc_get_allocd_res_desc_resp *buf,
3317 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
3318 struct ice_sq_cd *cd)
3320 struct ice_aqc_get_allocd_res_desc *cmd;
3321 struct ice_aq_desc desc;
3322 enum ice_status status;
3324 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3326 cmd = &desc.params.get_res_desc;
3329 return ICE_ERR_PARAM;
/* Buffer must hold exactly num_entries response elements */
3331 if (buf_size != (num_entries * sizeof(*buf)))
3332 return ICE_ERR_PARAM;
3334 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
/* Encode the resource type and the shared flag into one field */
3336 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
3337 ICE_AQC_RES_TYPE_M) | (res_shared ?
3338 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
3339 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
3341 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* Return the next descriptor ID so callers can continue iterating */
3343 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
3349 * ice_add_mac_rule - Add a MAC address based filter rule
3350 * @hw: pointer to the hardware structure
3351 * @m_list: list of MAC addresses and forwarding information
3352 * @sw: pointer to switch info struct for which function add rule
3353 * @lport: logic port number on which function add rule
3355 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
3356 * multiple unicast addresses, the function assumes that all the
3357 * addresses are unique in a given add_mac call. It doesn't
3358 * check for duplicates in this case, removing duplicates from a given
3359 * list should be taken care of in the caller of this function.
3361 static enum ice_status
3362 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
3363 struct ice_switch_info *sw, u8 lport)
3365 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
3366 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3367 struct ice_fltr_list_entry *m_list_itr;
3368 struct LIST_HEAD_TYPE *rule_head;
3369 u16 total_elem_left, s_rule_size;
3370 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3371 enum ice_status status = ICE_SUCCESS;
3372 u16 num_unicast = 0;
3376 rule_lock = &recp_list->filt_rule_lock;
3377 rule_head = &recp_list->filt_rules;
/* First pass: validate every entry and classify unicast vs multicast */
3379 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3381 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3385 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3386 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3387 if (!ice_is_vsi_valid(hw, vsi_handle))
3388 return ICE_ERR_PARAM;
3389 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3390 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3391 /* update the src in case it is VSI num */
3392 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3393 return ICE_ERR_PARAM;
3394 m_list_itr->fltr_info.src = hw_vsi_id;
3395 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3396 IS_ZERO_ETHER_ADDR(add))
3397 return ICE_ERR_PARAM;
3398 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3399 /* Don't overwrite the unicast address */
3400 ice_acquire_lock(rule_lock);
3401 if (ice_find_rule_entry(rule_head,
3402 &m_list_itr->fltr_info)) {
3403 ice_release_lock(rule_lock);
3404 return ICE_ERR_ALREADY_EXISTS;
3406 ice_release_lock(rule_lock);
/* Multicast (or shared unicast) entries go through the common
 * per-rule add path immediately; exclusive unicast entries are
 * batched into one bulk AQ call below.
 */
3408 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3409 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3410 m_list_itr->status =
3411 ice_add_rule_internal(hw, recp_list, lport,
3413 if (m_list_itr->status)
3414 return m_list_itr->status;
3418 ice_acquire_lock(rule_lock);
3419 /* Exit if no suitable entries were found for adding bulk switch rule */
3421 status = ICE_SUCCESS;
3422 goto ice_add_mac_exit;
3425 /* Allocate switch rule buffer for the bulk update for unicast */
3426 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3427 s_rule = (struct ice_aqc_sw_rules_elem *)
3428 ice_calloc(hw, num_unicast, s_rule_size);
3430 status = ICE_ERR_NO_MEMORY;
3431 goto ice_add_mac_exit;
/* Second pass: serialize one rule element per unicast address into the
 * contiguous buffer.
 */
3435 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3437 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3438 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3440 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3441 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3442 ice_aqc_opc_add_sw_rules);
3443 r_iter = (struct ice_aqc_sw_rules_elem *)
3444 ((u8 *)r_iter + s_rule_size);
3448 /* Call AQ bulk switch rule update for all unicast addresses */
3450 /* Call AQ switch rule in AQ_MAX chunk */
3451 for (total_elem_left = num_unicast; total_elem_left > 0;
3452 total_elem_left -= elem_sent) {
3453 struct ice_aqc_sw_rules_elem *entry = r_iter;
/* Cap each AQ call at what fits in ICE_AQ_MAX_BUF_LEN */
3455 elem_sent = MIN_T(u8, total_elem_left,
3456 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
3457 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3458 elem_sent, ice_aqc_opc_add_sw_rules,
3461 goto ice_add_mac_exit;
3462 r_iter = (struct ice_aqc_sw_rules_elem *)
3463 ((u8 *)r_iter + (elem_sent * s_rule_size));
3466 /* Fill up rule ID based on the value returned from FW */
3468 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3470 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3471 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3472 struct ice_fltr_mgmt_list_entry *fm_entry;
3474 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3475 f_info->fltr_rule_id =
3476 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3477 f_info->fltr_act = ICE_FWD_TO_VSI;
3478 /* Create an entry to track this MAC address */
3479 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3480 ice_malloc(hw, sizeof(*fm_entry));
3482 status = ICE_ERR_NO_MEMORY;
3483 goto ice_add_mac_exit;
3485 fm_entry->fltr_info = *f_info;
3486 fm_entry->vsi_count = 1;
3487 /* The book keeping entries will get removed when
3488 * base driver calls remove filter AQ command
3491 LIST_ADD(&fm_entry->list_entry, rule_head);
3492 r_iter = (struct ice_aqc_sw_rules_elem *)
3493 ((u8 *)r_iter + s_rule_size);
3498 ice_release_lock(rule_lock);
3500 ice_free(hw, s_rule);
3505 * ice_add_mac - Add a MAC address based filter rule
3506 * @hw: pointer to the hardware structure
3507 * @m_list: list of MAC addresses and forwarding information
3509 * Function add MAC rule for logical port from HW struct
/* Thin public wrapper: adds the MAC rules against hw->switch_info on the
 * logical port taken from hw->port_info.
 * NOTE(review): the guard condition for the ICE_ERR_PARAM return below is
 * elided in this listing (presumably a !m_list || !hw check, matching the
 * sibling wrappers) — confirm against the full source.
 */
3512 ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3515 return ICE_ERR_PARAM;
3517 return ice_add_mac_rule(hw, m_list, hw->switch_info,
3518 hw->port_info->lport);
3522 * ice_add_vlan_internal - Add one VLAN based filter rule
3523 * @hw: pointer to the hardware structure
3524 * @recp_list: recipe list for which rule has to be added
3525 * @f_entry: filter entry containing one VLAN information
/* NOTE(review): this listing elides blank lines, braces and several guard
 * conditions (e.g. the "rule not found" and error-check lines); comments
 * below describe only the visible code.
 */
3527 static enum ice_status
3528 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3529 struct ice_fltr_list_entry *f_entry)
3531 struct ice_fltr_mgmt_list_entry *v_list_itr;
3532 struct ice_fltr_info *new_fltr, *cur_fltr;
3533 enum ice_sw_lkup_type lkup_type;
3534 u16 vsi_list_id = 0, vsi_handle;
3535 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3536 enum ice_status status = ICE_SUCCESS;
3538 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3539 return ICE_ERR_PARAM;
3541 f_entry->fltr_info.fwd_id.hw_vsi_id =
3542 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3543 new_fltr = &f_entry->fltr_info;
3545 /* VLAN ID should only be 12 bits */
3546 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3547 return ICE_ERR_PARAM;
3549 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3550 return ICE_ERR_PARAM;
3552 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3553 lkup_type = new_fltr->lkup_type;
3554 vsi_handle = new_fltr->vsi_handle;
3555 rule_lock = &recp_list->filt_rule_lock;
3556 ice_acquire_lock(rule_lock);
/* Three cases follow: (1) no existing rule for this VLAN — create one
 * (reusing an existing VSI list when one matches); (2) rule exists and its
 * VSI list has ref_cnt == 1 — update that list in place; (3) rule exists
 * but its VSI list is shared — build a new two-VSI list and repoint the
 * rule at it.
 */
3557 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
3559 struct ice_vsi_list_map_info *map_info = NULL;
3561 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3562 /* All VLAN pruning rules use a VSI list. Check if
3563 * there is already a VSI list containing VSI that we
3564 * want to add. If found, use the same vsi_list_id for
3565 * this new VLAN rule or else create a new list.
3567 map_info = ice_find_vsi_list_entry(recp_list,
3571 status = ice_create_vsi_list_rule(hw,
3579 /* Convert the action to forwarding to a VSI list. */
3580 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3581 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3584 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3586 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
3589 status = ICE_ERR_DOES_NOT_EXIST;
3592 /* reuse VSI list for new rule and increment ref_cnt */
3594 v_list_itr->vsi_list_info = map_info;
3595 map_info->ref_cnt++;
3597 v_list_itr->vsi_list_info =
3598 ice_create_vsi_list_map(hw, &vsi_handle,
3602 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3603 /* Update existing VSI list to add new VSI ID only if it used
3606 cur_fltr = &v_list_itr->fltr_info;
3607 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3610 /* If VLAN rule exists and VSI list being used by this rule is
3611 * referenced by more than 1 VLAN rule. Then create a new VSI
3612 * list appending previous VSI with new VSI and update existing
3613 * VLAN rule to point to new VSI list ID
3615 struct ice_fltr_info tmp_fltr;
3616 u16 vsi_handle_arr[2];
3619 /* Current implementation only supports reusing VSI list with
3620 * one VSI count. We should never hit below condition
3622 if (v_list_itr->vsi_count > 1 &&
3623 v_list_itr->vsi_list_info->ref_cnt > 1) {
3624 ice_debug(hw, ICE_DBG_SW,
3625 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3626 status = ICE_ERR_CFG;
/* The single VSI already on the shared list is recovered from its bitmap. */
3631 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3634 /* A rule already exists with the new VSI being added */
3635 if (cur_handle == vsi_handle) {
3636 status = ICE_ERR_ALREADY_EXISTS;
3640 vsi_handle_arr[0] = cur_handle;
3641 vsi_handle_arr[1] = vsi_handle;
3642 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3643 &vsi_list_id, lkup_type);
3647 tmp_fltr = v_list_itr->fltr_info;
3648 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3649 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3650 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3651 /* Update the previous switch rule to a new VSI list which
3652 * includes current VSI that is requested
3654 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3658 /* before overriding VSI list map info. decrement ref_cnt of
3661 v_list_itr->vsi_list_info->ref_cnt--;
3663 /* now update to newly created list */
3664 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3665 v_list_itr->vsi_list_info =
3666 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3668 v_list_itr->vsi_count++;
3672 ice_release_lock(rule_lock);
3677 * ice_add_vlan_rule - Add VLAN based filter rule
3678 * @hw: pointer to the hardware structure
3679 * @v_list: list of VLAN entries and forwarding information
3680 * @sw: pointer to switch info struct for which function add rule
/* Walks v_list, validating the lookup type and forcing the Tx flag, and
 * adds each entry via ice_add_vlan_internal(); stops at the first failure.
 */
3682 static enum ice_status
3683 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
3684 struct ice_switch_info *sw)
3686 struct ice_fltr_list_entry *v_list_itr;
3687 struct ice_sw_recipe *recp_list;
3689 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
3690 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
3692 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3693 return ICE_ERR_PARAM;
3694 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3695 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
3697 if (v_list_itr->status)
3698 return v_list_itr->status;
3704 * ice_add_vlan - Add a VLAN based filter rule
3705 * @hw: pointer to the hardware structure
3706 * @v_list: list of VLAN and forwarding information
3708 * Function add VLAN rule for logical port from HW struct
/* Thin public wrapper over ice_add_vlan_rule() using hw->switch_info.
 * NOTE(review): the guard condition for the ICE_ERR_PARAM return below is
 * elided in this listing — confirm against the full source.
 */
3711 ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3714 return ICE_ERR_PARAM;
3716 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
3720 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3721 * @hw: pointer to the hardware structure
3722 * @mv_list: list of MAC and VLAN filters
3723 * @sw: pointer to switch info struct for which function add rule
3724 * @lport: logic port number on which function add rule
3726 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3727 * pruning bits enabled, then it is the responsibility of the caller to make
3728 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3729 * VLAN won't be received on that VSI otherwise.
/* Walks mv_list, validating the lookup type and forcing the Tx flag, and
 * adds each MAC+VLAN entry via ice_add_rule_internal(); stops at the first
 * failure, leaving per-entry status in each list node.
 */
3731 static enum ice_status
3732 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
3733 struct ice_switch_info *sw, u8 lport)
3735 struct ice_fltr_list_entry *mv_list_itr;
3736 struct ice_sw_recipe *recp_list;
3738 if (!mv_list || !hw)
3739 return ICE_ERR_PARAM;
3741 recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
3742 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3744 enum ice_sw_lkup_type l_type =
3745 mv_list_itr->fltr_info.lkup_type;
3747 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3748 return ICE_ERR_PARAM;
3749 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3750 mv_list_itr->status =
3751 ice_add_rule_internal(hw, recp_list, lport,
3753 if (mv_list_itr->status)
3754 return mv_list_itr->status;
3760 * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
3761 * @hw: pointer to the hardware structure
3762 * @mv_list: list of MAC VLAN addresses and forwarding information
3764 * Function add MAC VLAN rule for logical port from HW struct
/* Thin public wrapper: validates the arguments, then delegates to
 * ice_add_mac_vlan_rule() with hw->switch_info and the port's lport.
 */
3767 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3769 if (!mv_list || !hw)
3770 return ICE_ERR_PARAM;
3772 return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
3773 hw->port_info->lport);
3777 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
3778 * @hw: pointer to the hardware structure
3779 * @em_list: list of ether type MAC filter, MAC is optional
3780 * @sw: pointer to switch info struct for which function add rule
3781 * @lport: logic port number on which function add rule
3783 * This function requires the caller to populate the entries in
3784 * the filter list with the necessary fields (including flags to
3785 * indicate Tx or Rx rules).
/* Unlike the MAC/VLAN adders, the recipe list is chosen per entry from the
 * entry's own lookup type (ETHERTYPE or ETHERTYPE_MAC), and the caller's
 * Tx/Rx flag is preserved.
 */
3787 static enum ice_status
3788 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
3789 struct ice_switch_info *sw, u8 lport)
3791 struct ice_fltr_list_entry *em_list_itr;
3793 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3795 struct ice_sw_recipe *recp_list;
3796 enum ice_sw_lkup_type l_type;
3798 l_type = em_list_itr->fltr_info.lkup_type;
3799 recp_list = &sw->recp_list[l_type];
3801 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3802 l_type != ICE_SW_LKUP_ETHERTYPE)
3803 return ICE_ERR_PARAM;
3805 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
3808 if (em_list_itr->status)
3809 return em_list_itr->status;
3816 * ice_add_eth_mac - Add a ethertype based filter rule
3817 * @hw: pointer to the hardware structure
3818 * @em_list: list of ethertype and forwarding information
3820 * Function add ethertype rule for logical port from HW struct
/* Thin public wrapper: validates the arguments, then delegates to
 * ice_add_eth_mac_rule() with hw->switch_info and the port's lport.
 */
3822 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3824 if (!em_list || !hw)
3825 return ICE_ERR_PARAM;
3827 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
3828 hw->port_info->lport);
3832 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
3833 * @hw: pointer to the hardware structure
3834 * @em_list: list of ethertype or ethertype MAC entries
3835 * @sw: pointer to switch info struct for which function add rule
/* Mirror of ice_add_eth_mac_rule(): removes each entry from the recipe
 * list selected by its lookup type. Uses the _SAFE iterator because
 * removal may unlink entries while walking.
 */
3837 static enum ice_status
3838 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
3839 struct ice_switch_info *sw)
3841 struct ice_fltr_list_entry *em_list_itr, *tmp;
3843 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3845 struct ice_sw_recipe *recp_list;
3846 enum ice_sw_lkup_type l_type;
3848 l_type = em_list_itr->fltr_info.lkup_type;
3850 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3851 l_type != ICE_SW_LKUP_ETHERTYPE)
3852 return ICE_ERR_PARAM;
3854 recp_list = &sw->recp_list[l_type];
3855 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
3857 if (em_list_itr->status)
3858 return em_list_itr->status;
3864 * ice_remove_eth_mac - remove a ethertype based filter rule
3865 * @hw: pointer to the hardware structure
3866 * @em_list: list of ethertype and forwarding information
/* Thin public wrapper over ice_remove_eth_mac_rule() using hw->switch_info. */
3870 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3872 if (!em_list || !hw)
3873 return ICE_ERR_PARAM;
3875 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
3879 * ice_rem_sw_rule_info
3880 * @hw: pointer to the hardware structure
3881 * @rule_head: pointer to the switch list structure that we want to delete
/* Frees every bookkeeping entry on a (non-advanced) filter rule list.
 * This only releases driver-side tracking memory; it does not touch FW
 * rules. Caller is responsible for any locking of rule_head.
 */
3884 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3886 if (!LIST_EMPTY(rule_head)) {
3887 struct ice_fltr_mgmt_list_entry *entry;
3888 struct ice_fltr_mgmt_list_entry *tmp;
3890 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3891 ice_fltr_mgmt_list_entry, list_entry) {
3892 LIST_DEL(&entry->list_entry);
3893 ice_free(hw, entry);
3899 * ice_rem_adv_rule_info
3900 * @hw: pointer to the hardware structure
3901 * @rule_head: pointer to the switch list structure that we want to delete
/* Advanced-rule variant of ice_rem_sw_rule_info(): each entry owns a
 * separately-allocated lkups array, which is freed before the entry itself.
 */
3904 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3906 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3907 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3909 if (LIST_EMPTY(rule_head))
3912 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3913 ice_adv_fltr_mgmt_list_entry, list_entry) {
3914 LIST_DEL(&lst_itr->list_entry);
3915 ice_free(hw, lst_itr->lkups);
3916 ice_free(hw, lst_itr);
3921 * ice_rem_all_sw_rules_info
3922 * @hw: pointer to the hardware structure
/* Frees the driver-side bookkeeping for every recipe's filter rule list,
 * dispatching to the basic or advanced cleanup helper based on the
 * recipe's adv_rule flag.
 */
3924 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3926 struct ice_switch_info *sw = hw->switch_info;
3929 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3930 struct LIST_HEAD_TYPE *rule_head;
3932 rule_head = &sw->recp_list[i].filt_rules;
3933 if (!sw->recp_list[i].adv_rule)
3934 ice_rem_sw_rule_info(hw, rule_head);
3936 ice_rem_adv_rule_info(hw, rule_head);
3941 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3942 * @pi: pointer to the port_info structure
3943 * @vsi_handle: VSI handle to set as default
3944 * @set: true to add the above mentioned switch rule, false to remove it
3945 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3947 * add filter rule to set/unset given VSI as default VSI for the switch
3948 * (represented by swid)
/* NOTE(review): the "u8 direction" parameter line and several local
 * declarations (s_rule_size, hw_vsi_id) are elided in this listing;
 * comments below describe only the visible code.
 */
3951 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3954 struct ice_aqc_sw_rules_elem *s_rule;
3955 struct ice_fltr_info f_info;
3956 struct ice_hw *hw = pi->hw;
3957 enum ice_adminq_opc opcode;
3958 enum ice_status status;
3962 if (!ice_is_vsi_valid(hw, vsi_handle))
3963 return ICE_ERR_PARAM;
3964 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
/* A remove only needs the rule header, so it uses the smaller no-header
 * element size; an add must carry the dummy Ethernet header.
 */
3966 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3967 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3968 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3970 return ICE_ERR_NO_MEMORY;
3972 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3974 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3975 f_info.flag = direction;
3976 f_info.fltr_act = ICE_FWD_TO_VSI;
3977 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
/* Rx default rules are sourced from the logical port; Tx default rules
 * are sourced from the VSI itself. On remove, the previously cached rule
 * ID from port_info identifies which FW rule to delete.
 */
3979 if (f_info.flag & ICE_FLTR_RX) {
3980 f_info.src = pi->lport;
3981 f_info.src_id = ICE_SRC_ID_LPORT;
3983 f_info.fltr_rule_id =
3984 pi->dflt_rx_vsi_rule_id;
3985 } else if (f_info.flag & ICE_FLTR_TX) {
3986 f_info.src_id = ICE_SRC_ID_VSI;
3987 f_info.src = hw_vsi_id;
3989 f_info.fltr_rule_id =
3990 pi->dflt_tx_vsi_rule_id;
3994 opcode = ice_aqc_opc_add_sw_rules;
3996 opcode = ice_aqc_opc_remove_sw_rules;
3998 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
4000 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
4001 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
/* On success, cache (set) or invalidate (clear) the default-VSI number and
 * FW rule index in the port_info for the affected direction.
 */
4004 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4006 if (f_info.flag & ICE_FLTR_TX) {
4007 pi->dflt_tx_vsi_num = hw_vsi_id;
4008 pi->dflt_tx_vsi_rule_id = index;
4009 } else if (f_info.flag & ICE_FLTR_RX) {
4010 pi->dflt_rx_vsi_num = hw_vsi_id;
4011 pi->dflt_rx_vsi_rule_id = index;
4014 if (f_info.flag & ICE_FLTR_TX) {
4015 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
4016 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
4017 } else if (f_info.flag & ICE_FLTR_RX) {
4018 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
4019 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
4024 ice_free(hw, s_rule);
4029 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
4030 * @list_head: head of rule list
4031 * @f_info: rule information
4033 * Helper function to search for a unicast rule entry - this is to be used
4034 * to remove unicast MAC filter that is not shared with other VSIs on the
4037 * Returns pointer to entry storing the rule if found
/* Match criteria: identical lookup data (full l_data union compare),
 * same destination HW VSI and same Tx/Rx flag. Caller holds the rule lock.
 */
4039 static struct ice_fltr_mgmt_list_entry *
4040 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
4041 struct ice_fltr_info *f_info)
4043 struct ice_fltr_mgmt_list_entry *list_itr;
4045 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4047 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4048 sizeof(f_info->l_data)) &&
4049 f_info->fwd_id.hw_vsi_id ==
4050 list_itr->fltr_info.fwd_id.hw_vsi_id &&
4051 f_info->flag == list_itr->fltr_info.flag)
4058 * ice_remove_mac_rule - remove a MAC based filter rule
4059 * @hw: pointer to the hardware structure
4060 * @m_list: list of MAC addresses and forwarding information
4061 * @recp_list: list from which function remove MAC address
4063 * This function removes either a MAC filter rule or a specific VSI from a
4064 * VSI list for a multicast MAC address.
4066 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
4067 * ice_add_mac. Caller should be aware that this call will only work if all
4068 * the entries passed into m_list were added previously. It will not attempt to
4069 * do a partial remove of entries that were found.
4071 static enum ice_status
4072 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4073 struct ice_sw_recipe *recp_list)
4075 struct ice_fltr_list_entry *list_itr, *tmp;
4076 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4079 return ICE_ERR_PARAM;
4081 rule_lock = &recp_list->filt_rule_lock;
4082 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
4084 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
4085 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
4088 if (l_type != ICE_SW_LKUP_MAC)
4089 return ICE_ERR_PARAM;
4091 vsi_handle = list_itr->fltr_info.vsi_handle;
4092 if (!ice_is_vsi_valid(hw, vsi_handle))
4093 return ICE_ERR_PARAM;
4095 list_itr->fltr_info.fwd_id.hw_vsi_id =
4096 ice_get_hw_vsi_num(hw, vsi_handle);
/* Exclusive (non-shared) unicast: only remove if this exact VSI owns the
 * rule; ice_find_ucast_rule_entry() also matches the destination VSI, so
 * an address owned by another VSI is reported as DOES_NOT_EXIST rather
 * than removed out from under it.
 */
4097 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4098 /* Don't remove the unicast address that belongs to
4099 * another VSI on the switch, since it is not being
4102 ice_acquire_lock(rule_lock);
4103 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
4104 &list_itr->fltr_info)) {
4105 ice_release_lock(rule_lock);
4106 return ICE_ERR_DOES_NOT_EXIST;
4108 ice_release_lock(rule_lock);
4110 list_itr->status = ice_remove_rule_internal(hw, recp_list,
4112 if (list_itr->status)
4113 return list_itr->status;
4119 * ice_remove_mac - remove a MAC address based filter rule
4120 * @hw: pointer to the hardware structure
4121 * @m_list: list of MAC addresses and forwarding information
/* Thin public wrapper: resolves the MAC recipe list from hw->switch_info
 * and delegates to ice_remove_mac_rule().
 */
4125 ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4127 struct ice_sw_recipe *recp_list;
4129 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4130 return ice_remove_mac_rule(hw, m_list, recp_list);
4134 * ice_remove_vlan_rule - Remove VLAN based filter rule
4135 * @hw: pointer to the hardware structure
4136 * @v_list: list of VLAN entries and forwarding information
4137 * @recp_list: list from which function remove VLAN
/* Validates each entry's lookup type and removes it from the caller's
 * recipe list via ice_remove_rule_internal(); stops at first failure.
 */
4139 static enum ice_status
4140 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4141 struct ice_sw_recipe *recp_list)
4143 struct ice_fltr_list_entry *v_list_itr, *tmp;
4145 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4147 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4149 if (l_type != ICE_SW_LKUP_VLAN)
4150 return ICE_ERR_PARAM;
4151 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4153 if (v_list_itr->status)
4154 return v_list_itr->status;
4160 * ice_remove_vlan - remove a VLAN address based filter rule
4161 * @hw: pointer to the hardware structure
4162 * @v_list: list of VLAN and forwarding information
/* Thin public wrapper over ice_remove_vlan_rule() using the VLAN recipe
 * list from hw->switch_info.
 * NOTE(review): the guard condition for the ICE_ERR_PARAM return below is
 * elided in this listing — confirm against the full source.
 */
4166 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4168 struct ice_sw_recipe *recp_list;
4171 return ICE_ERR_PARAM;
4173 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
4174 return ice_remove_vlan_rule(hw, v_list, recp_list);
4178 * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
4179 * @hw: pointer to the hardware structure
4180 * @v_list: list of MAC VLAN entries and forwarding information
4181 * @recp_list: list from which function remove MAC VLAN
/* Validates each entry's lookup type and removes it from the caller's
 * recipe list via ice_remove_rule_internal(); stops at first failure.
 *
 * Fix: the original unconditionally reassigned recp_list to
 * &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN], silently discarding
 * the caller-supplied recipe list and making the parameter dead. Every
 * sibling *_rule helper honors its recp_list argument; the reassignment
 * is removed so this one does too (the only visible caller,
 * ice_remove_mac_vlan, already passes that exact list, so behavior for
 * existing callers is unchanged).
 */
4183 static enum ice_status
4184 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4185 struct ice_sw_recipe *recp_list)
4187 struct ice_fltr_list_entry *v_list_itr, *tmp;
4190 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4192 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4194 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4195 return ICE_ERR_PARAM;
4196 v_list_itr->status =
4197 ice_remove_rule_internal(hw, recp_list,
4199 if (v_list_itr->status)
4200 return v_list_itr->status;
4206 * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
4207 * @hw: pointer to the hardware structure
4208 * @mv_list: list of MAC VLAN and forwarding information
/* Thin public wrapper: validates arguments, resolves the MAC+VLAN recipe
 * list from hw->switch_info, and delegates to ice_remove_mac_vlan_rule().
 */
4211 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4213 struct ice_sw_recipe *recp_list;
4215 if (!mv_list || !hw)
4216 return ICE_ERR_PARAM;
4218 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4219 return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
4223 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
4224 * @fm_entry: filter entry to inspect
4225 * @vsi_handle: VSI handle to compare with filter info
/* True when the filter forwards directly to this VSI, or forwards to a
 * VSI list whose bitmap contains this VSI handle.
 */
4228 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
4230 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
4231 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
4232 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
4233 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
4238 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
4239 * @hw: pointer to the hardware structure
4240 * @vsi_handle: VSI handle to remove filters from
4241 * @vsi_list_head: pointer to the list to add entry to
4242 * @fi: pointer to fltr_info of filter entry to copy & add
4244 * Helper function, used when creating a list of filters to remove from
4245 * a specific VSI. The entry added to vsi_list_head is a COPY of the
4246 * original filter entry, with the exception of fltr_info.fltr_act and
4247 * fltr_info.fwd_id fields. These are set such that later logic can
4248 * extract which VSI to remove the fltr from, and pass on that information.
4250 static enum ice_status
4251 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4252 struct LIST_HEAD_TYPE *vsi_list_head,
4253 struct ice_fltr_info *fi)
4255 struct ice_fltr_list_entry *tmp;
4257 /* this memory is freed up in the caller function
4258 * once filters for this VSI are removed
4260 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
4262 return ICE_ERR_NO_MEMORY;
4264 tmp->fltr_info = *fi;
4266 /* Overwrite these fields to indicate which VSI to remove filter from,
4267 * so find and remove logic can extract the information from the
4268 * list entries. Note that original entries will still have proper
4271 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
4272 tmp->fltr_info.vsi_handle = vsi_handle;
4273 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4275 LIST_ADD(&tmp->list_entry, vsi_list_head);
4281 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4282 * @hw: pointer to the hardware structure
4283 * @vsi_handle: VSI handle to remove filters from
4284 * @lkup_list_head: pointer to the list that has certain lookup type filters
4285 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4287 * Locates all filters in lkup_list_head that are used by the given VSI,
4288 * and adds COPIES of those entries to vsi_list_head (intended to be used
4289 * to remove the listed filters).
4290 * Note that this means all entries in vsi_list_head must be explicitly
4291 * deallocated by the caller when done with list.
4293 static enum ice_status
4294 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4295 struct LIST_HEAD_TYPE *lkup_list_head,
4296 struct LIST_HEAD_TYPE *vsi_list_head)
4298 struct ice_fltr_mgmt_list_entry *fm_entry;
4299 enum ice_status status = ICE_SUCCESS;
4301 /* check to make sure VSI ID is valid and within boundary */
4302 if (!ice_is_vsi_valid(hw, vsi_handle))
4303 return ICE_ERR_PARAM;
4305 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
4306 ice_fltr_mgmt_list_entry, list_entry) {
4307 struct ice_fltr_info *fi;
4309 fi = &fm_entry->fltr_info;
/* Skip filters this VSI does not use; copy the rest onto vsi_list_head. */
4310 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
4313 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4322 * ice_determine_promisc_mask
4323 * @fi: filter info to parse
4325 * Helper function to determine which ICE_PROMISC_ mask corresponds
4326 * to given filter into.
/* Maps a filter's address class (broadcast/multicast/unicast, else VLAN)
 * and direction (Tx when fi->flag == ICE_FLTR_TX, otherwise Rx) to the
 * matching ICE_PROMISC_* bit.
 * NOTE(review): the statement setting is_tx_fltr = true after the flag
 * check is elided in this listing — confirm against the full source.
 */
4328 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4330 u16 vid = fi->l_data.mac_vlan.vlan_id;
4331 u8 *macaddr = fi->l_data.mac.mac_addr;
4332 bool is_tx_fltr = false;
4333 u8 promisc_mask = 0;
4335 if (fi->flag == ICE_FLTR_TX)
4338 if (IS_BROADCAST_ETHER_ADDR(macaddr))
4339 promisc_mask |= is_tx_fltr ?
4340 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4341 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
4342 promisc_mask |= is_tx_fltr ?
4343 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4344 else if (IS_UNICAST_ETHER_ADDR(macaddr))
4345 promisc_mask |= is_tx_fltr ?
4346 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
4348 promisc_mask |= is_tx_fltr ?
4349 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4351 return promisc_mask;
4355 * ice_get_vsi_promisc - get promiscuous mode of given VSI
4356 * @hw: pointer to the hardware structure
4357 * @vsi_handle: VSI handle to retrieve info from
4358 * @promisc_mask: pointer to mask to be filled in
4359 * @vid: VLAN ID of promisc VLAN VSI
/* ORs into *promisc_mask the ICE_PROMISC_* bit of every PROMISC-recipe
 * filter this VSI uses, under the recipe's rule lock.
 */
4362 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4365 struct ice_switch_info *sw = hw->switch_info;
4366 struct ice_fltr_mgmt_list_entry *itr;
4367 struct LIST_HEAD_TYPE *rule_head;
4368 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4370 if (!ice_is_vsi_valid(hw, vsi_handle))
4371 return ICE_ERR_PARAM;
4375 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
4376 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
4378 ice_acquire_lock(rule_lock);
4379 LIST_FOR_EACH_ENTRY(itr, rule_head,
4380 ice_fltr_mgmt_list_entry, list_entry) {
4381 /* Continue if this filter doesn't apply to this VSI or the
4382 * VSI ID is not in the VSI map for this filter
4384 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4387 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4389 ice_release_lock(rule_lock);
4395 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4396 * @hw: pointer to the hardware structure
4397 * @vsi_handle: VSI handle to retrieve info from
4398 * @promisc_mask: pointer to mask to be filled in
4399 * @vid: VLAN ID of promisc VLAN VSI
/* Same as ice_get_vsi_promisc(), but scans the PROMISC_VLAN recipe list. */
4402 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4405 struct ice_switch_info *sw = hw->switch_info;
4406 struct ice_fltr_mgmt_list_entry *itr;
4407 struct LIST_HEAD_TYPE *rule_head;
4408 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4410 if (!ice_is_vsi_valid(hw, vsi_handle))
4411 return ICE_ERR_PARAM;
4415 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
4416 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
4418 ice_acquire_lock(rule_lock);
4419 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
4421 /* Continue if this filter doesn't apply to this VSI or the
4422 * VSI ID is not in the VSI map for this filter
4424 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4427 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4429 ice_release_lock(rule_lock);
4435 * ice_remove_promisc - Remove promisc based filter rules
4436 * @hw: pointer to the hardware structure
4437 * @recp_id: recipe ID for which the rule needs to removed
4438 * @v_list: list of promisc entries
/* Removes each entry from the recp_id recipe list via
 * ice_remove_rule_internal(); stops at the first failure.
 */
4440 static enum ice_status
4441 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
4442 struct LIST_HEAD_TYPE *v_list)
4444 struct ice_fltr_list_entry *v_list_itr, *tmp;
4445 struct ice_sw_recipe *recp_list;
4447 recp_list = &hw->switch_info->recp_list[recp_id];
4448 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4450 v_list_itr->status =
4451 ice_remove_rule_internal(hw, recp_list, v_list_itr);
4452 if (v_list_itr->status)
4453 return v_list_itr->status;
4459 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
4460 * @hw: pointer to the hardware structure
4461 * @vsi_handle: VSI handle to clear mode
4462 * @promisc_mask: mask of promiscuous config bits to clear
4463 * @vid: VLAN ID to clear VLAN promiscuous
4466 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4469 struct ice_switch_info *sw = hw->switch_info;
4470 struct ice_fltr_list_entry *fm_entry, *tmp;
4471 struct LIST_HEAD_TYPE remove_list_head;
4472 struct ice_fltr_mgmt_list_entry *itr;
4473 struct LIST_HEAD_TYPE *rule_head;
4474 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4475 enum ice_status status = ICE_SUCCESS;
4478 if (!ice_is_vsi_valid(hw, vsi_handle))
4479 return ICE_ERR_PARAM;
/* VLAN promisc bits live in the PROMISC_VLAN recipe; everything else in
 * the plain PROMISC recipe.
 */
4481 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
4482 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4484 recipe_id = ICE_SW_LKUP_PROMISC;
4486 rule_head = &sw->recp_list[recipe_id].filt_rules;
4487 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4489 INIT_LIST_HEAD(&remove_list_head);
/* Pass 1 (under lock): collect copies of every matching filter used by
 * this VSI into remove_list_head. Pass 2 (lock dropped): remove them and
 * free the copies.
 */
4491 ice_acquire_lock(rule_lock);
4492 LIST_FOR_EACH_ENTRY(itr, rule_head,
4493 ice_fltr_mgmt_list_entry, list_entry) {
4494 struct ice_fltr_info *fltr_info;
4495 u8 fltr_promisc_mask = 0;
4497 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4499 fltr_info = &itr->fltr_info;
4501 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4502 vid != fltr_info->l_data.mac_vlan.vlan_id)
4505 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4507 /* Skip if filter is not completely specified by given mask */
4508 if (fltr_promisc_mask & ~promisc_mask)
4511 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4515 ice_release_lock(rule_lock);
4516 goto free_fltr_list;
4519 ice_release_lock(rule_lock);
4521 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
4524 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4525 ice_fltr_list_entry, list_entry) {
4526 LIST_DEL(&fm_entry->list_entry);
4527 ice_free(hw, fm_entry);
4534 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4535 * @hw: pointer to the hardware structure
4536 * @vsi_handle: VSI handle to configure
4537 * @promisc_mask: mask of promiscuous config bits
4538 * @vid: VLAN ID to set VLAN promiscuous
4541 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
4543 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4544 struct ice_fltr_list_entry f_list_entry;
4545 struct ice_fltr_info new_fltr;
4546 enum ice_status status = ICE_SUCCESS;
4552 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4554 if (!ice_is_vsi_valid(hw, vsi_handle))
4555 return ICE_ERR_PARAM;
4556 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4558 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
4560 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4561 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4562 new_fltr.l_data.mac_vlan.vlan_id = vid;
4563 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4565 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4566 recipe_id = ICE_SW_LKUP_PROMISC;
4569 /* Separate filters must be set for each direction/packet type
4570 * combination, so we will loop over the mask value, store the
4571 * individual type, and clear it out in the input mask as it
4574 while (promisc_mask) {
4575 struct ice_sw_recipe *recp_list;
4581 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4582 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4583 pkt_type = UCAST_FLTR;
4584 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4585 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4586 pkt_type = UCAST_FLTR;
4588 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4589 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4590 pkt_type = MCAST_FLTR;
4591 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4592 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4593 pkt_type = MCAST_FLTR;
4595 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4596 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4597 pkt_type = BCAST_FLTR;
4598 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4599 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4600 pkt_type = BCAST_FLTR;
4604 /* Check for VLAN promiscuous flag */
4605 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4606 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4607 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4608 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4612 /* Set filter DA based on packet type */
4613 mac_addr = new_fltr.l_data.mac.mac_addr;
4614 if (pkt_type == BCAST_FLTR) {
4615 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
4616 } else if (pkt_type == MCAST_FLTR ||
4617 pkt_type == UCAST_FLTR) {
4618 /* Use the dummy ether header DA */
4619 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
4620 ICE_NONDMA_TO_NONDMA);
4621 if (pkt_type == MCAST_FLTR)
4622 mac_addr[0] |= 0x1; /* Set multicast bit */
4625 /* Need to reset this to zero for all iterations */
4628 new_fltr.flag |= ICE_FLTR_TX;
4629 new_fltr.src = hw_vsi_id;
4631 new_fltr.flag |= ICE_FLTR_RX;
4632 new_fltr.src = hw->port_info->lport;
4635 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4636 new_fltr.vsi_handle = vsi_handle;
4637 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4638 f_list_entry.fltr_info = new_fltr;
4639 recp_list = &hw->switch_info->recp_list[recipe_id];
4641 status = ice_add_rule_internal(hw, recp_list,
4642 hw->port_info->lport,
4644 if (status != ICE_SUCCESS)
4645 goto set_promisc_exit;
4653 * ice_set_vlan_vsi_promisc
4654 * @hw: pointer to the hardware structure
4655 * @vsi_handle: VSI handle to configure
4656 * @promisc_mask: mask of promiscuous config bits
4657 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4659 * Configure VSI with all associated VLANs to given promiscuous mode(s)
4662 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4663 bool rm_vlan_promisc)
4665 struct ice_switch_info *sw = hw->switch_info;
4666 struct ice_fltr_list_entry *list_itr, *tmp;
4667 struct LIST_HEAD_TYPE vsi_list_head;
4668 struct LIST_HEAD_TYPE *vlan_head;
4669 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
4670 enum ice_status status;
4673 INIT_LIST_HEAD(&vsi_list_head);
4674 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4675 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4676 ice_acquire_lock(vlan_lock);
4677 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4679 ice_release_lock(vlan_lock);
4681 goto free_fltr_list;
4683 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
4685 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4686 if (rm_vlan_promisc)
4687 status = ice_clear_vsi_promisc(hw, vsi_handle,
4688 promisc_mask, vlan_id);
4690 status = ice_set_vsi_promisc(hw, vsi_handle,
4691 promisc_mask, vlan_id);
4697 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
4698 ice_fltr_list_entry, list_entry) {
4699 LIST_DEL(&list_itr->list_entry);
4700 ice_free(hw, list_itr);
4706 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4707 * @hw: pointer to the hardware structure
4708 * @vsi_handle: VSI handle to remove filters from
4709 * @recp_list: recipe list from which function remove fltr
4710 * @lkup: switch rule filter lookup type
4713 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4714 struct ice_sw_recipe *recp_list,
4715 enum ice_sw_lkup_type lkup)
4717 struct ice_fltr_list_entry *fm_entry;
4718 struct LIST_HEAD_TYPE remove_list_head;
4719 struct LIST_HEAD_TYPE *rule_head;
4720 struct ice_fltr_list_entry *tmp;
4721 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4722 enum ice_status status;
4724 INIT_LIST_HEAD(&remove_list_head);
4725 rule_lock = &recp_list[lkup].filt_rule_lock;
4726 rule_head = &recp_list[lkup].filt_rules;
4727 ice_acquire_lock(rule_lock);
4728 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4730 ice_release_lock(rule_lock);
4735 case ICE_SW_LKUP_MAC:
4736 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
4738 case ICE_SW_LKUP_VLAN:
4739 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
4741 case ICE_SW_LKUP_PROMISC:
4742 case ICE_SW_LKUP_PROMISC_VLAN:
4743 ice_remove_promisc(hw, lkup, &remove_list_head);
4745 case ICE_SW_LKUP_MAC_VLAN:
4746 ice_remove_mac_vlan(hw, &remove_list_head);
4748 case ICE_SW_LKUP_ETHERTYPE:
4749 case ICE_SW_LKUP_ETHERTYPE_MAC:
4750 ice_remove_eth_mac(hw, &remove_list_head);
4752 case ICE_SW_LKUP_DFLT:
4753 ice_debug(hw, ICE_DBG_SW,
4754 "Remove filters for this lookup type hasn't been implemented yet\n");
4756 case ICE_SW_LKUP_LAST:
4757 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
4761 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4762 ice_fltr_list_entry, list_entry) {
4763 LIST_DEL(&fm_entry->list_entry);
4764 ice_free(hw, fm_entry);
4769 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
4770 * @hw: pointer to the hardware structure
4771 * @vsi_handle: VSI handle to remove filters from
4772 * @sw: pointer to switch info struct
4775 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
4776 struct ice_switch_info *sw)
4778 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4780 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4781 sw->recp_list, ICE_SW_LKUP_MAC);
4782 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4783 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
4784 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4785 sw->recp_list, ICE_SW_LKUP_PROMISC);
4786 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4787 sw->recp_list, ICE_SW_LKUP_VLAN);
4788 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4789 sw->recp_list, ICE_SW_LKUP_DFLT);
4790 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4791 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
4792 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4793 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
4794 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4795 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
4799 * ice_remove_vsi_fltr - Remove all filters for a VSI
4800 * @hw: pointer to the hardware structure
4801 * @vsi_handle: VSI handle to remove filters from
4803 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4805 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
4809 * ice_alloc_res_cntr - allocating resource counter
4810 * @hw: pointer to the hardware structure
4811 * @type: type of resource
4812 * @alloc_shared: if set it is shared else dedicated
4813 * @num_items: number of entries requested for FD resource type
4814 * @counter_id: counter index returned by AQ call
4817 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4820 struct ice_aqc_alloc_free_res_elem *buf;
4821 enum ice_status status;
4824 /* Allocate resource */
4825 buf_len = sizeof(*buf);
4826 buf = (struct ice_aqc_alloc_free_res_elem *)
4827 ice_malloc(hw, buf_len);
4829 return ICE_ERR_NO_MEMORY;
4831 buf->num_elems = CPU_TO_LE16(num_items);
4832 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4833 ICE_AQC_RES_TYPE_M) | alloc_shared);
4835 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4836 ice_aqc_opc_alloc_res, NULL);
4840 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
4848 * ice_free_res_cntr - free resource counter
4849 * @hw: pointer to the hardware structure
4850 * @type: type of resource
4851 * @alloc_shared: if set it is shared else dedicated
4852 * @num_items: number of entries to be freed for FD resource type
4853 * @counter_id: counter ID resource which needs to be freed
4856 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4859 struct ice_aqc_alloc_free_res_elem *buf;
4860 enum ice_status status;
4864 buf_len = sizeof(*buf);
4865 buf = (struct ice_aqc_alloc_free_res_elem *)
4866 ice_malloc(hw, buf_len);
4868 return ICE_ERR_NO_MEMORY;
4870 buf->num_elems = CPU_TO_LE16(num_items);
4871 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4872 ICE_AQC_RES_TYPE_M) | alloc_shared);
4873 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
4875 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4876 ice_aqc_opc_free_res, NULL);
4878 ice_debug(hw, ICE_DBG_SW,
4879 "counter resource could not be freed\n");
4886 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
4887 * @hw: pointer to the hardware structure
4888 * @counter_id: returns counter index
4890 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
4892 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4893 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4898 * ice_free_vlan_res_counter - Free counter resource for VLAN type
4899 * @hw: pointer to the hardware structure
4900 * @counter_id: counter index to be freed
4902 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
4904 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4905 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4910 * ice_alloc_res_lg_act - add large action resource
4911 * @hw: pointer to the hardware structure
4912 * @l_id: large action ID to fill it in
4913 * @num_acts: number of actions to hold with a large action entry
4915 static enum ice_status
4916 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
4918 struct ice_aqc_alloc_free_res_elem *sw_buf;
4919 enum ice_status status;
4922 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
4923 return ICE_ERR_PARAM;
4925 /* Allocate resource for large action */
4926 buf_len = sizeof(*sw_buf);
4927 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
4928 ice_malloc(hw, buf_len);
4930 return ICE_ERR_NO_MEMORY;
4932 sw_buf->num_elems = CPU_TO_LE16(1);
4934 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
4935 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
4936 * If num_acts is greater than 2, then use
4937 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
4938 * The num_acts cannot exceed 4. This was ensured at the
4939 * beginning of the function.
4942 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
4943 else if (num_acts == 2)
4944 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
4946 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
4948 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
4949 ice_aqc_opc_alloc_res, NULL);
4951 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
4953 ice_free(hw, sw_buf);
4958 * ice_add_mac_with_sw_marker - add filter with sw marker
4959 * @hw: pointer to the hardware structure
4960 * @f_info: filter info structure containing the MAC filter information
4961 * @sw_marker: sw marker to tag the Rx descriptor with
4964 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
4967 struct ice_fltr_mgmt_list_entry *m_entry;
4968 struct ice_fltr_list_entry fl_info;
4969 struct ice_sw_recipe *recp_list;
4970 struct LIST_HEAD_TYPE l_head;
4971 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4972 enum ice_status ret;
4976 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4977 return ICE_ERR_PARAM;
4979 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4980 return ICE_ERR_PARAM;
4982 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
4983 return ICE_ERR_PARAM;
4985 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4986 return ICE_ERR_PARAM;
4987 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4989 /* Add filter if it doesn't exist so then the adding of large
4990 * action always results in update
4993 INIT_LIST_HEAD(&l_head);
4994 fl_info.fltr_info = *f_info;
4995 LIST_ADD(&fl_info.list_entry, &l_head);
4997 entry_exists = false;
4998 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
4999 hw->port_info->lport);
5000 if (ret == ICE_ERR_ALREADY_EXISTS)
5001 entry_exists = true;
5005 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5006 rule_lock = &recp_list->filt_rule_lock;
5007 ice_acquire_lock(rule_lock);
5008 /* Get the book keeping entry for the filter */
5009 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5013 /* If counter action was enabled for this rule then don't enable
5014 * sw marker large action
5016 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5017 ret = ICE_ERR_PARAM;
5021 /* if same marker was added before */
5022 if (m_entry->sw_marker_id == sw_marker) {
5023 ret = ICE_ERR_ALREADY_EXISTS;
5027 /* Allocate a hardware table entry to hold large act. Three actions
5028 * for marker based large action
5030 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
5034 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5037 /* Update the switch rule to add the marker action */
5038 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
5040 ice_release_lock(rule_lock);
5045 ice_release_lock(rule_lock);
5046 /* only remove entry if it did not exist previously */
5048 ret = ice_remove_mac(hw, &l_head);
5054 * ice_add_mac_with_counter - add filter with counter enabled
5055 * @hw: pointer to the hardware structure
5056 * @f_info: pointer to filter info structure containing the MAC filter
5060 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
5062 struct ice_fltr_mgmt_list_entry *m_entry;
5063 struct ice_fltr_list_entry fl_info;
5064 struct ice_sw_recipe *recp_list;
5065 struct LIST_HEAD_TYPE l_head;
5066 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5067 enum ice_status ret;
5072 if (f_info->fltr_act != ICE_FWD_TO_VSI)
5073 return ICE_ERR_PARAM;
5075 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5076 return ICE_ERR_PARAM;
5078 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5079 return ICE_ERR_PARAM;
5080 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5081 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5083 entry_exist = false;
5085 rule_lock = &recp_list->filt_rule_lock;
5087 /* Add filter if it doesn't exist so then the adding of large
5088 * action always results in update
5090 INIT_LIST_HEAD(&l_head);
5092 fl_info.fltr_info = *f_info;
5093 LIST_ADD(&fl_info.list_entry, &l_head);
5095 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5096 hw->port_info->lport);
5097 if (ret == ICE_ERR_ALREADY_EXISTS)
5102 ice_acquire_lock(rule_lock);
5103 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5105 ret = ICE_ERR_BAD_PTR;
5109 /* Don't enable counter for a filter for which sw marker was enabled */
5110 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
5111 ret = ICE_ERR_PARAM;
5115 /* If a counter was already enabled then don't need to add again */
5116 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5117 ret = ICE_ERR_ALREADY_EXISTS;
5121 /* Allocate a hardware table entry to VLAN counter */
5122 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
5126 /* Allocate a hardware table entry to hold large act. Two actions for
5127 * counter based large action
5129 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
5133 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5136 /* Update the switch rule to add the counter action */
5137 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
5139 ice_release_lock(rule_lock);
5144 ice_release_lock(rule_lock);
5145 /* only remove entry if it did not exist previously */
5147 ret = ice_remove_mac(hw, &l_head);
5152 /* This is mapping table entry that maps every word within a given protocol
5153 * structure to the real byte offset as per the specification of that
5155 * for example dst address is 3 words in ethertype header and corresponding
5156 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
5157 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
5158 * matching entry describing its field. This needs to be updated if new
5159 * structure is added to that union.
5161 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
5162 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
5163 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
5164 { ICE_ETYPE_OL, { 0 } },
5165 { ICE_VLAN_OFOS, { 0, 2 } },
5166 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
5167 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
5168 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
5169 26, 28, 30, 32, 34, 36, 38 } },
5170 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
5171 26, 28, 30, 32, 34, 36, 38 } },
5172 { ICE_TCP_IL, { 0, 2 } },
5173 { ICE_UDP_OF, { 0, 2 } },
5174 { ICE_UDP_ILOS, { 0, 2 } },
5175 { ICE_SCTP_IL, { 0, 2 } },
5176 { ICE_VXLAN, { 8, 10, 12, 14 } },
5177 { ICE_GENEVE, { 8, 10, 12, 14 } },
5178 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
5179 { ICE_NVGRE, { 0, 2, 4, 6 } },
5180 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
5181 { ICE_PPPOE, { 0, 2, 4, 6 } },
5182 { ICE_PFCP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
5183 { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
5184 { ICE_ESP, { 0, 2, 4, 6 } },
5185 { ICE_AH, { 0, 2, 4, 6, 8, 10 } },
5186 { ICE_NAT_T, { 8, 10, 12, 14 } },
5189 /* The following table describes preferred grouping of recipes.
5190 * If a recipe that needs to be programmed is a superset or matches one of the
5191 * following combinations, then the recipe needs to be chained as per the
5195 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
5196 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
5197 { ICE_MAC_IL, ICE_MAC_IL_HW },
5198 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
5199 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
5200 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
5201 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
5202 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
5203 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
5204 { ICE_TCP_IL, ICE_TCP_IL_HW },
5205 { ICE_UDP_OF, ICE_UDP_OF_HW },
5206 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
5207 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
5208 { ICE_VXLAN, ICE_UDP_OF_HW },
5209 { ICE_GENEVE, ICE_UDP_OF_HW },
5210 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
5211 { ICE_NVGRE, ICE_GRE_OF_HW },
5212 { ICE_GTP, ICE_UDP_OF_HW },
5213 { ICE_PPPOE, ICE_PPPOE_HW },
5214 { ICE_PFCP, ICE_UDP_ILOS_HW },
5215 { ICE_L2TPV3, ICE_L2TPV3_HW },
5216 { ICE_ESP, ICE_ESP_HW },
5217 { ICE_AH, ICE_AH_HW },
5218 { ICE_NAT_T, ICE_UDP_ILOS_HW },
5222 * ice_find_recp - find a recipe
5223 * @hw: pointer to the hardware structure
5224 * @lkup_exts: extension sequence to match
5226 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
5228 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
5229 enum ice_sw_tunnel_type tun_type)
5231 bool refresh_required = true;
5232 struct ice_sw_recipe *recp;
5235 /* Walk through existing recipes to find a match */
5236 recp = hw->switch_info->recp_list;
5237 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5238 /* If recipe was not created for this ID, in SW bookkeeping,
5239 * check if FW has an entry for this recipe. If the FW has an
5240 * entry update it in our SW bookkeeping and continue with the
5243 if (!recp[i].recp_created)
5244 if (ice_get_recp_frm_fw(hw,
5245 hw->switch_info->recp_list, i,
5249 /* Skip inverse action recipes */
5250 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
5251 ICE_AQ_RECIPE_ACT_INV_ACT)
5254 /* if number of words we are looking for match */
5255 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
5256 struct ice_fv_word *a = lkup_exts->fv_words;
5257 struct ice_fv_word *b = recp[i].lkup_exts.fv_words;
5258 u16 *c = recp[i].lkup_exts.field_mask;
5259 u16 *d = lkup_exts->field_mask;
5263 for (p = 0; p < lkup_exts->n_val_words; p++) {
5264 for (q = 0; q < recp[i].lkup_exts.n_val_words;
5266 if (a[p].off == b[q].off &&
5267 a[p].prot_id == b[q].prot_id &&
5269 /* Found the "p"th word in the
5274 /* After walking through all the words in the
5275 * "i"th recipe if "p"th word was not found then
5276 * this recipe is not what we are looking for.
5277 * So break out from this loop and try the next
5280 if (q >= recp[i].lkup_exts.n_val_words) {
5285 /* If for "i"th recipe the found was never set to false
5286 * then it means we found our match
5288 if (ice_is_prof_rule(tun_type) &&
5289 tun_type == recp[i].tun_type && found)
5290 return i; /* Return the recipe ID */
5291 else if (!ice_is_prof_rule(tun_type) && found)
5292 return i; /* Return the recipe ID */
5295 return ICE_MAX_NUM_RECIPES;
5299 * ice_prot_type_to_id - get protocol ID from protocol type
5300 * @type: protocol type
5301 * @id: pointer to variable that will receive the ID
5303 * Returns true if found, false otherwise
5305 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
5309 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
5310 if (ice_prot_id_tbl[i].type == type) {
5311 *id = ice_prot_id_tbl[i].protocol_id;
5318 * ice_find_valid_words - count valid words
5319 * @rule: advanced rule with lookup information
5320 * @lkup_exts: byte offset extractions of the words that are valid
5322 * calculate valid words in a lookup rule using mask value
5325 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
5326 struct ice_prot_lkup_ext *lkup_exts)
5328 u8 j, word, prot_id, ret_val;
5330 if (!ice_prot_type_to_id(rule->type, &prot_id))
5333 word = lkup_exts->n_val_words;
5335 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
5336 if (((u16 *)&rule->m_u)[j] &&
5337 rule->type < ARRAY_SIZE(ice_prot_ext)) {
5338 /* No more space to accommodate */
5339 if (word >= ICE_MAX_CHAIN_WORDS)
5341 lkup_exts->fv_words[word].off =
5342 ice_prot_ext[rule->type].offs[j];
5343 lkup_exts->fv_words[word].prot_id =
5344 ice_prot_id_tbl[rule->type].protocol_id;
5345 lkup_exts->field_mask[word] = ((u16 *)&rule->m_u)[j];
5349 ret_val = word - lkup_exts->n_val_words;
5350 lkup_exts->n_val_words = word;
5356 * ice_create_first_fit_recp_def - Create a recipe grouping
5357 * @hw: pointer to the hardware structure
5358 * @lkup_exts: an array of protocol header extractions
5359 * @rg_list: pointer to a list that stores new recipe groups
5360 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
5362 * Using first fit algorithm, take all the words that are still not done
5363 * and start grouping them in 4-word groups. Each group makes up one
5366 static enum ice_status
5367 ice_create_first_fit_recp_def(struct ice_hw *hw,
5368 struct ice_prot_lkup_ext *lkup_exts,
5369 struct LIST_HEAD_TYPE *rg_list,
5372 struct ice_pref_recipe_group *grp = NULL;
5377 if (!lkup_exts->n_val_words) {
5378 struct ice_recp_grp_entry *entry;
5380 entry = (struct ice_recp_grp_entry *)
5381 ice_malloc(hw, sizeof(*entry));
5383 return ICE_ERR_NO_MEMORY;
5384 LIST_ADD(&entry->l_entry, rg_list);
5385 grp = &entry->r_group;
5387 grp->n_val_pairs = 0;
5390 /* Walk through every word in the rule to check if it is not done. If so
5391 * then this word needs to be part of a new recipe.
5393 for (j = 0; j < lkup_exts->n_val_words; j++)
5394 if (!ice_is_bit_set(lkup_exts->done, j)) {
5396 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
5397 struct ice_recp_grp_entry *entry;
5399 entry = (struct ice_recp_grp_entry *)
5400 ice_malloc(hw, sizeof(*entry));
5402 return ICE_ERR_NO_MEMORY;
5403 LIST_ADD(&entry->l_entry, rg_list);
5404 grp = &entry->r_group;
5408 grp->pairs[grp->n_val_pairs].prot_id =
5409 lkup_exts->fv_words[j].prot_id;
5410 grp->pairs[grp->n_val_pairs].off =
5411 lkup_exts->fv_words[j].off;
5412 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
5420 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
5421 * @hw: pointer to the hardware structure
5422 * @fv_list: field vector with the extraction sequence information
5423 * @rg_list: recipe groupings with protocol-offset pairs
5425 * Helper function to fill in the field vector indices for protocol-offset
5426 * pairs. These indexes are then ultimately programmed into a recipe.
5428 static enum ice_status
5429 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
5430 struct LIST_HEAD_TYPE *rg_list)
5432 struct ice_sw_fv_list_entry *fv;
5433 struct ice_recp_grp_entry *rg;
5434 struct ice_fv_word *fv_ext;
5436 if (LIST_EMPTY(fv_list))
5439 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
5440 fv_ext = fv->fv_ptr->ew;
5442 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
5445 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
5446 struct ice_fv_word *pr;
5451 pr = &rg->r_group.pairs[i];
5452 mask = rg->r_group.mask[i];
5454 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
5455 if (fv_ext[j].prot_id == pr->prot_id &&
5456 fv_ext[j].off == pr->off) {
5459 /* Store index of field vector */
5461 /* Mask is given by caller as big
5462 * endian, but sent to FW as little
5465 rg->fv_mask[i] = mask << 8 | mask >> 8;
5469 /* Protocol/offset could not be found, caller gave an
5473 return ICE_ERR_PARAM;
5481 * ice_find_free_recp_res_idx - find free result indexes for recipe
5482 * @hw: pointer to hardware structure
5483 * @profiles: bitmap of profiles that will be associated with the new recipe
5484 * @free_idx: pointer to variable to receive the free index bitmap
5486 * The algorithm used here is:
5487 * 1. When creating a new recipe, create a set P which contains all
5488 * Profiles that will be associated with our new recipe
5490 * 2. For each Profile p in set P:
5491 * a. Add all recipes associated with Profile p into set R
5492 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
5493 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
5494 * i. Or just assume they all have the same possible indexes:
5496 * i.e., PossibleIndexes = 0x0000F00000000000
5498 * 3. For each Recipe r in set R:
5499 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
5500 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
5502 * FreeIndexes will contain the bits indicating the indexes free for use,
5503 * then the code needs to update the recipe[r].used_result_idx_bits to
5504 * indicate which indexes were selected for use by this recipe.
5507 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
5508 ice_bitmap_t *free_idx)
5510 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5511 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5512 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
5516 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5517 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5518 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
5519 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
5521 for (count = 0; count < ICE_MAX_FV_WORDS; count++)
5522 ice_set_bit(count, possible_idx);
5524 /* For each profile we are going to associate the recipe with, add the
5525 * recipes that are associated with that profile. This will give us
5526 * the set of recipes that our recipe may collide with. Also, determine
5527 * what possible result indexes are usable given this set of profiles.
5530 while (ICE_MAX_NUM_PROFILES >
5531 (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
5532 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
5533 ICE_MAX_NUM_RECIPES);
5534 ice_and_bitmap(possible_idx, possible_idx,
5535 hw->switch_info->prof_res_bm[bit],
5540 /* For each recipe that our new recipe may collide with, determine
5541 * which indexes have been used.
5543 for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
5544 if (ice_is_bit_set(recipes, bit)) {
5545 ice_or_bitmap(used_idx, used_idx,
5546 hw->switch_info->recp_list[bit].res_idxs,
5550 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
5552 /* return number of free indexes */
5555 while (ICE_MAX_FV_WORDS >
5556 (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
5565 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
5566 * @hw: pointer to hardware structure
5567 * @rm: recipe management list entry
5568 * @match_tun: if field vector index for tunnel needs to be programmed
5569 * @profiles: bitmap of profiles that will be assocated.
5571 static enum ice_status
5572 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
5573 bool match_tun, ice_bitmap_t *profiles)
5575 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5576 struct ice_aqc_recipe_data_elem *tmp;
5577 struct ice_aqc_recipe_data_elem *buf;
5578 struct ice_recp_grp_entry *entry;
5579 enum ice_status status;
5585 /* When more than one recipe are required, another recipe is needed to
5586 * chain them together. Matching a tunnel metadata ID takes up one of
5587 * the match fields in the chaining recipe reducing the number of
5588 * chained recipes by one.
5590 /* check number of free result indices */
5591 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5592 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
5594 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
5595 free_res_idx, rm->n_grp_count);
5597 if (rm->n_grp_count > 1) {
5598 if (rm->n_grp_count > free_res_idx)
5599 return ICE_ERR_MAX_LIMIT;
5604 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
5605 return ICE_ERR_MAX_LIMIT;
5607 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
5608 ICE_MAX_NUM_RECIPES,
5611 return ICE_ERR_NO_MEMORY;
5613 buf = (struct ice_aqc_recipe_data_elem *)
5614 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
5616 status = ICE_ERR_NO_MEMORY;
5620 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
5621 recipe_count = ICE_MAX_NUM_RECIPES;
5622 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5624 if (status || recipe_count == 0)
5627 /* Allocate the recipe resources, and configure them according to the
5628 * match fields from protocol headers and extracted field vectors.
5630 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
5631 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5634 status = ice_alloc_recipe(hw, &entry->rid);
5638 /* Clear the result index of the located recipe, as this will be
5639 * updated, if needed, later in the recipe creation process.
5641 tmp[0].content.result_indx = 0;
5643 buf[recps] = tmp[0];
5644 buf[recps].recipe_indx = (u8)entry->rid;
5645 /* if the recipe is a non-root recipe RID should be programmed
5646 * as 0 for the rules to be applied correctly.
5648 buf[recps].content.rid = 0;
5649 ice_memset(&buf[recps].content.lkup_indx, 0,
5650 sizeof(buf[recps].content.lkup_indx),
5653 /* All recipes use look-up index 0 to match switch ID. */
5654 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5655 buf[recps].content.mask[0] =
5656 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5657 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5660 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5661 buf[recps].content.lkup_indx[i] = 0x80;
5662 buf[recps].content.mask[i] = 0;
5665 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5666 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5667 buf[recps].content.mask[i + 1] =
5668 CPU_TO_LE16(entry->fv_mask[i]);
5671 if (rm->n_grp_count > 1) {
5672 /* Checks to see if there really is a valid result index
5675 if (chain_idx >= ICE_MAX_FV_WORDS) {
5676 ice_debug(hw, ICE_DBG_SW,
5677 "No chain index available\n");
5678 status = ICE_ERR_MAX_LIMIT;
5682 entry->chain_idx = chain_idx;
5683 buf[recps].content.result_indx =
5684 ICE_AQ_RECIPE_RESULT_EN |
5685 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5686 ICE_AQ_RECIPE_RESULT_DATA_M);
5687 ice_clear_bit(chain_idx, result_idx_bm);
5688 chain_idx = ice_find_first_bit(result_idx_bm,
5692 /* fill recipe dependencies */
5693 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
5694 ICE_MAX_NUM_RECIPES);
5695 ice_set_bit(buf[recps].recipe_indx,
5696 (ice_bitmap_t *)buf[recps].recipe_bitmap);
5697 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5701 if (rm->n_grp_count == 1) {
5702 rm->root_rid = buf[0].recipe_indx;
5703 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
5704 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5705 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5706 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5707 sizeof(buf[0].recipe_bitmap),
5708 ICE_NONDMA_TO_NONDMA);
5710 status = ICE_ERR_BAD_PTR;
5713 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
5714 * the recipe which is getting created if specified
5715 * by user. Usually any advanced switch filter, which results
5716 * into new extraction sequence, ended up creating a new recipe
5717 * of type ROOT and usually recipes are associated with profiles
5718 * Switch rule referring to the newly created recipe needs to have
5719 * either/or 'fwd' or 'join' priority, otherwise switch rule
5720 * evaluation will not happen correctly. In other words, if
5721 * switch rule to be evaluated on priority basis, then recipe
5722 * needs to have priority, otherwise it will be evaluated last.
5724 buf[0].content.act_ctrl_fwd_priority = rm->priority;
5726 struct ice_recp_grp_entry *last_chain_entry;
5729 /* Allocate the last recipe that will chain the outcomes of the
5730 * other recipes together
5732 status = ice_alloc_recipe(hw, &rid);
5736 buf[recps].recipe_indx = (u8)rid;
5737 buf[recps].content.rid = (u8)rid;
5738 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5739 /* the new entry created should also be part of rg_list to
5740 * make sure we have complete recipe
5742 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
5743 sizeof(*last_chain_entry));
5744 if (!last_chain_entry) {
5745 status = ICE_ERR_NO_MEMORY;
5748 last_chain_entry->rid = rid;
5749 ice_memset(&buf[recps].content.lkup_indx, 0,
5750 sizeof(buf[recps].content.lkup_indx),
5752 /* All recipes use look-up index 0 to match switch ID. */
5753 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5754 buf[recps].content.mask[0] =
5755 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5756 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5757 buf[recps].content.lkup_indx[i] =
5758 ICE_AQ_RECIPE_LKUP_IGNORE;
5759 buf[recps].content.mask[i] = 0;
5763 /* update r_bitmap with the recp that is used for chaining */
5764 ice_set_bit(rid, rm->r_bitmap);
5765 /* this is the recipe that chains all the other recipes so it
5766 * should not have a chaining ID to indicate the same
5768 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
5769 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
5771 last_chain_entry->fv_idx[i] = entry->chain_idx;
5772 buf[recps].content.lkup_indx[i] = entry->chain_idx;
5773 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
5774 ice_set_bit(entry->rid, rm->r_bitmap);
5776 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
5777 if (sizeof(buf[recps].recipe_bitmap) >=
5778 sizeof(rm->r_bitmap)) {
5779 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5780 sizeof(buf[recps].recipe_bitmap),
5781 ICE_NONDMA_TO_NONDMA);
5783 status = ICE_ERR_BAD_PTR;
5786 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5788 /* To differentiate among different UDP tunnels, a meta data ID
5792 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
5793 buf[recps].content.mask[i] =
5794 CPU_TO_LE16(ICE_TUN_FLAG_MASK);
5798 rm->root_rid = (u8)rid;
5800 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5804 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5805 ice_release_change_lock(hw);
5809 /* Every recipe that just got created add it to the recipe
5812 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5813 struct ice_switch_info *sw = hw->switch_info;
5814 bool is_root, idx_found = false;
5815 struct ice_sw_recipe *recp;
5816 u16 idx, buf_idx = 0;
5818 /* find buffer index for copying some data */
5819 for (idx = 0; idx < rm->n_grp_count; idx++)
5820 if (buf[idx].recipe_indx == entry->rid) {
5826 status = ICE_ERR_OUT_OF_RANGE;
5830 recp = &sw->recp_list[entry->rid];
5831 is_root = (rm->root_rid == entry->rid);
5832 recp->is_root = is_root;
5834 recp->root_rid = entry->rid;
5835 recp->big_recp = (is_root && rm->n_grp_count > 1);
5837 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
5838 entry->r_group.n_val_pairs *
5839 sizeof(struct ice_fv_word),
5840 ICE_NONDMA_TO_NONDMA);
5842 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5843 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
5845 /* Copy non-result fv index values and masks to recipe. This
5846 * call will also update the result recipe bitmask.
5848 ice_collect_result_idx(&buf[buf_idx], recp);
5850 /* for non-root recipes, also copy to the root, this allows
5851 * easier matching of a complete chained recipe
5854 ice_collect_result_idx(&buf[buf_idx],
5855 &sw->recp_list[rm->root_rid]);
5857 recp->n_ext_words = entry->r_group.n_val_pairs;
5858 recp->chain_idx = entry->chain_idx;
5859 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5860 recp->n_grp_count = rm->n_grp_count;
5861 recp->tun_type = rm->tun_type;
5862 recp->recp_created = true;
5877 * ice_create_recipe_group - creates recipe group
5878 * @hw: pointer to hardware structure
5879 * @rm: recipe management list entry
5880 * @lkup_exts: lookup elements
/* Packs the valid lookup words from @lkup_exts into one or more recipe
 * groups on @rm->rg_list and caches the extraction words/masks on @rm.
 */
5882 static enum ice_status
5883 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5884 struct ice_prot_lkup_ext *lkup_exts)
5886 enum ice_status status;
/* Start with zero groups; the count created below is accumulated */
5889 rm->n_grp_count = 0;
5891 /* Create recipes for words that are marked not done by packing them
5894 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5895 &rm->rg_list, &recp_count);
/* NOTE(review): recp_count is declared/zeroed in an elided line above;
 * it holds the number of groups ice_create_first_fit_recp_def() made.
 */
5897 rm->n_grp_count += recp_count;
/* Cache the extraction sequence (protocol/offset pairs) and the
 * per-word match masks on the recipe management entry for later use
 * when programming/looking up the recipe.
 */
5898 rm->n_ext_words = lkup_exts->n_val_words;
5899 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
5900 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
5901 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
5902 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
5909 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
5910 * @hw: pointer to hardware structure
5911 * @lkups: lookup elements or match criteria for the advanced recipe, one
5912 * structure per protocol header
5913 * @lkups_cnt: number of protocols
5914 * @bm: bitmap of field vectors to consider
5915 * @fv_list: pointer to a list that holds the returned field vectors
5917 static enum ice_status
5918 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5919 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
5921 enum ice_status status;
/* Scratch array holding one hardware protocol ID per lookup element */
5928 prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
5930 return ICE_ERR_NO_MEMORY;
/* Translate each software protocol type to its HW protocol ID; an
 * unknown type is a configuration error.
 */
5932 for (i = 0; i < lkups_cnt; i++)
5933 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
5934 status = ICE_ERR_CFG;
5938 /* Find field vectors that include all specified protocol types */
5939 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
/* prot_ids is temporary; freed on both success and error paths */
5942 ice_free(hw, prot_ids);
5947 * ice_add_special_words - Add words that are not protocols, such as metadata
5948 * @rinfo: other information regarding the rule e.g. priority and action info
5949 * @lkup_exts: lookup word structure
/* For tunneled rules, appends one extra lookup word matching the tunnel
 * flag in the packet metadata. Returns ICE_ERR_MAX_LIMIT when all
 * ICE_MAX_CHAIN_WORDS lookup words are already in use.
 */
5951 static enum ice_status
5952 ice_add_special_words(struct ice_adv_rule_info *rinfo,
5953 struct ice_prot_lkup_ext *lkup_exts)
5955 /* If this is a tunneled packet, then add recipe index to match the
5956 * tunnel bit in the packet metadata flags.
5958 if (rinfo->tun_type != ICE_NON_TUN) {
5959 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
/* Claim the next free lookup word slot */
5960 u8 word = lkup_exts->n_val_words++;
5962 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5963 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID *
5965 lkup_exts->field_mask[word] = ICE_TUN_FLAG_MASK;
5967 return ICE_ERR_MAX_LIMIT;
5974 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5975 * @hw: pointer to hardware structure
5976 * @rinfo: other information regarding the rule e.g. priority and action info
5977 * @bm: pointer to memory for returning the bitmap of field vectors
/* Two classes of tunnel types are handled here:
 * - "profile ID" rule types set the exact profile bit in @bm directly
 *   (presumably returning early in elided lines — confirm against the
 *   full source);
 * - all other types select a profile class (prof_type) that is resolved
 *   to a bitmap by ice_get_sw_fv_bitmap() at the end.
 */
5980 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5983 enum ice_prof_type prof_type;
5985 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
5987 switch (rinfo->tun_type) {
5989 prof_type = ICE_PROF_NON_TUN;
5991 case ICE_ALL_TUNNELS:
5992 prof_type = ICE_PROF_TUN_ALL;
5994 case ICE_SW_TUN_VXLAN_GPE:
5995 case ICE_SW_TUN_GENEVE:
5996 case ICE_SW_TUN_VXLAN:
5997 case ICE_SW_TUN_UDP:
5998 case ICE_SW_TUN_GTP:
/* All UDP-encapsulated tunnels share the same UDP-tunnel profiles */
5999 prof_type = ICE_PROF_TUN_UDP;
6001 case ICE_SW_TUN_NVGRE:
6002 prof_type = ICE_PROF_TUN_GRE;
6004 case ICE_SW_TUN_PPPOE:
6005 prof_type = ICE_PROF_TUN_PPPOE;
6007 case ICE_SW_TUN_PROFID_IPV6_ESP:
6008 ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
6010 case ICE_SW_TUN_PROFID_IPV6_AH:
6011 ice_set_bit(ICE_PROFID_IPV6_AH, bm);
6013 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6014 case ICE_SW_TUN_IPV6_L2TPV3:
6015 ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
6017 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6018 case ICE_SW_TUN_IPV6_NAT_T:
6019 ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
6021 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6022 ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
6024 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6025 ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
6027 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6028 ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
6030 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6031 ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
6033 case ICE_SW_TUN_IPV4_NAT_T:
6034 ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
6036 case ICE_SW_TUN_IPV4_L2TPV3:
6037 ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
6039 case ICE_SW_TUN_AND_NON_TUN:
/* default/fallback: consider every profile */
6041 prof_type = ICE_PROF_ALL;
/* Resolve the selected profile class to the output bitmap */
6045 ice_get_sw_fv_bitmap(hw, prof_type, bm);
6049 * ice_is_prof_rule - determine if rule type is a profile rule
6050 * @type: the rule type
6052 * if the rule type is a profile rule, that means that there is no field value
6053 * match required, in this case just a profile hit is required.
/* Returns true only for the ICE_SW_TUN_PROFID_* rule types listed below
 * (the "return true"/default paths are in elided lines).
 */
6055 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
6058 case ICE_SW_TUN_PROFID_IPV6_ESP:
6059 case ICE_SW_TUN_PROFID_IPV6_AH:
6060 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6061 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6062 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6063 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6064 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6065 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6075 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
6076 * @hw: pointer to hardware structure
6077 * @lkups: lookup elements or match criteria for the advanced recipe, one
6078 * structure per protocol header
6079 * @lkups_cnt: number of protocols
6080 * @rinfo: other information regarding the rule e.g. priority and action info
6081 * @rid: return the recipe ID of the recipe created
/* High-level flow: validate/collect match words, find compatible field
 * vectors (profiles), group words into recipes, reuse an existing
 * matching recipe if one exists, otherwise create the recipe(s) and
 * associate them with all matched profiles.
 */
6083 static enum ice_status
6084 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6085 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
6087 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
6088 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
6089 struct ice_prot_lkup_ext *lkup_exts;
6090 struct ice_recp_grp_entry *r_entry;
6091 struct ice_sw_fv_list_entry *fvit;
6092 struct ice_recp_grp_entry *r_tmp;
6093 struct ice_sw_fv_list_entry *tmp;
6094 enum ice_status status = ICE_SUCCESS;
6095 struct ice_sw_recipe *rm;
6096 bool match_tun = false;
/* Non-profile rules must supply at least one lookup element */
6099 if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
6100 return ICE_ERR_PARAM;
6102 lkup_exts = (struct ice_prot_lkup_ext *)
6103 ice_malloc(hw, sizeof(*lkup_exts))
6105 return ICE_ERR_NO_MEMORY;
6107 /* Determine the number of words to be matched and if it exceeds a
6108 * recipe's restrictions
6110 for (i = 0; i < lkups_cnt; i++) {
6113 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
6114 status = ICE_ERR_CFG;
6115 goto err_free_lkup_exts;
6118 count = ice_fill_valid_words(&lkups[i], lkup_exts);
6120 status = ICE_ERR_CFG;
6121 goto err_free_lkup_exts;
6125 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
6127 status = ICE_ERR_NO_MEMORY;
6128 goto err_free_lkup_exts;
6131 /* Get field vectors that contain fields extracted from all the protocol
6132 * headers being programmed.
6134 INIT_LIST_HEAD(&rm->fv_list);
6135 INIT_LIST_HEAD(&rm->rg_list);
6137 /* Get bitmap of field vectors (profiles) that are compatible with the
6138 * rule request; only these will be searched in the subsequent call to
6141 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
6143 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
6147 /* Group match words into recipes using preferred recipe grouping
6150 status = ice_create_recipe_group(hw, rm, lkup_exts);
6154 /* There is only profile for UDP tunnels. So, it is necessary to use a
6155 * metadata ID flag to differentiate different tunnel types. A separate
6156 * recipe needs to be used for the metadata.
6158 if ((rinfo->tun_type == ICE_SW_TUN_VXLAN_GPE ||
6159 rinfo->tun_type == ICE_SW_TUN_GENEVE ||
6160 rinfo->tun_type == ICE_SW_TUN_VXLAN) && rm->n_grp_count > 1)
6163 /* set the recipe priority if specified */
6164 rm->priority = (u8)rinfo->priority;
6166 /* Find offsets from the field vector. Pick the first one for all the
6169 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
6173 /* An empty FV list means to use all the profiles returned in the
6176 if (LIST_EMPTY(&rm->fv_list)) {
6179 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++)
6180 if (ice_is_bit_set(fv_bitmap, j)) {
6181 struct ice_sw_fv_list_entry *fvl;
6183 fvl = (struct ice_sw_fv_list_entry *)
6184 ice_malloc(hw, sizeof(*fvl));
6188 fvl->profile_id = j;
6189 LIST_ADD(&fvl->list_entry, &rm->fv_list);
6193 /* get bitmap of all profiles the recipe will be associated with */
6194 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
6195 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
6197 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
6198 ice_set_bit((u16)fvit->profile_id, profiles);
6201 /* Create any special protocol/offset pairs, such as looking at tunnel
6202 * bits by extracting metadata
6204 status = ice_add_special_words(rinfo, lkup_exts);
6206 goto err_free_lkup_exts;
6208 /* Look for a recipe which matches our requested fv / mask list */
6209 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
6210 if (*rid < ICE_MAX_NUM_RECIPES)
6211 /* Success if found a recipe that match the existing criteria */
6214 rm->tun_type = rinfo->tun_type;
6215 /* Recipe we need does not exist, add a recipe */
6216 status = ice_add_sw_recipe(hw, rm, match_tun, profiles);
6220 /* Associate all the recipes created with all the profiles in the
6221 * common field vector.
6223 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
6225 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Read-modify-write: fetch the profile's current recipe bitmap,
 * OR in the new recipes, then write it back under the change lock.
 */
6228 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
6229 (u8 *)r_bitmap, NULL);
6233 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
6234 ICE_MAX_NUM_RECIPES);
6235 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
6239 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
6242 ice_release_change_lock(hw);
6247 /* Update profile to recipe bitmap array */
6248 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
6249 ICE_MAX_NUM_RECIPES);
6251 /* Update recipe to profile bitmap array */
6252 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
6253 if (ice_is_bit_set(r_bitmap, j))
6254 ice_set_bit((u16)fvit->profile_id,
6255 recipe_to_profile[j]);
/* Report the root recipe ID and cache the lookup extraction info on
 * the recipe list entry so future rules can match against it.
 */
6258 *rid = rm->root_rid;
6259 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
6260 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
/* Cleanup: tear down the temporary recipe-group and FV lists */
6262 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
6263 ice_recp_grp_entry, l_entry) {
6264 LIST_DEL(&r_entry->l_entry);
6265 ice_free(hw, r_entry);
6268 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
6270 LIST_DEL(&fvit->list_entry);
6275 ice_free(hw, rm->root_buf);
6280 ice_free(hw, lkup_exts);
6286 * ice_find_dummy_packet - find dummy packet by tunnel type
6288 * @lkups: lookup elements or match criteria for the advanced recipe, one
6289 * structure per protocol header
6290 * @lkups_cnt: number of protocols
6291 * @tun_type: tunnel type from the match criteria
6292 * @pkt: dummy packet to fill according to filter match criteria
6293 * @pkt_len: packet length of dummy packet
6294 * @offsets: pointer to receive the pointer to the offsets for the packet
/* Selection order: explicit tunnel types are checked first; otherwise
 * the L3/L4/VLAN flags collected from @lkups pick the generic template.
 */
6297 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6298 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
6300 const struct ice_dummy_pkt_offsets **offsets)
6302 bool tcp = false, udp = false, ipv6 = false, vlan = false;
/* Scan the lookups to learn which headers the caller wants to match
 * (the flag-setting statements are in elided lines between branches).
 */
6306 for (i = 0; i < lkups_cnt; i++) {
6307 if (lkups[i].type == ICE_UDP_ILOS)
6309 else if (lkups[i].type == ICE_TCP_IL)
6311 else if (lkups[i].type == ICE_IPV6_OFOS)
6313 else if (lkups[i].type == ICE_VLAN_OFOS)
6315 else if (lkups[i].type == ICE_IPV4_OFOS &&
6316 lkups[i].h_u.ipv4_hdr.protocol ==
6317 ICE_IPV4_NVGRE_PROTO_ID &&
6318 lkups[i].m_u.ipv4_hdr.protocol ==
6321 else if (lkups[i].type == ICE_PPPOE &&
6322 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
6323 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
6324 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
6327 else if (lkups[i].type == ICE_ETYPE_OL &&
6328 lkups[i].h_u.ethertype.ethtype_id ==
6329 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
6330 lkups[i].m_u.ethertype.ethtype_id ==
/* Dedicated templates for protocol-specific rule types */
6335 if (tun_type == ICE_SW_TUN_IPV4_ESP) {
6336 *pkt = dummy_ipv4_esp_pkt;
6337 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
6338 *offsets = dummy_ipv4_esp_packet_offsets;
6342 if (tun_type == ICE_SW_TUN_IPV6_ESP) {
6343 *pkt = dummy_ipv6_esp_pkt;
6344 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
6345 *offsets = dummy_ipv6_esp_packet_offsets;
6349 if (tun_type == ICE_SW_TUN_IPV4_AH) {
6350 *pkt = dummy_ipv4_ah_pkt;
6351 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
6352 *offsets = dummy_ipv4_ah_packet_offsets;
6356 if (tun_type == ICE_SW_TUN_IPV6_AH) {
6357 *pkt = dummy_ipv6_ah_pkt;
6358 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
6359 *offsets = dummy_ipv6_ah_packet_offsets;
6363 if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
6364 *pkt = dummy_ipv4_nat_pkt;
6365 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
6366 *offsets = dummy_ipv4_nat_packet_offsets;
6370 if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
6371 *pkt = dummy_ipv6_nat_pkt;
6372 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
6373 *offsets = dummy_ipv6_nat_packet_offsets;
6377 if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
6378 *pkt = dummy_ipv4_l2tpv3_pkt;
6379 *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
6380 *offsets = dummy_ipv4_l2tpv3_packet_offsets;
6384 if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
6385 *pkt = dummy_ipv6_l2tpv3_pkt;
6386 *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
6387 *offsets = dummy_ipv6_l2tpv3_packet_offsets;
6391 if (tun_type == ICE_SW_TUN_GTP) {
6392 *pkt = dummy_udp_gtp_packet;
6393 *pkt_len = sizeof(dummy_udp_gtp_packet);
6394 *offsets = dummy_udp_gtp_packet_offsets;
/* PPPoE chooses the inner IP version from the detected ipv6 flag;
 * both variants share one offsets table.
 */
6397 if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
6398 *pkt = dummy_pppoe_ipv6_packet;
6399 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
6400 *offsets = dummy_pppoe_packet_offsets;
6402 } else if (tun_type == ICE_SW_TUN_PPPOE) {
6403 *pkt = dummy_pppoe_ipv4_packet;
6404 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
6405 *offsets = dummy_pppoe_packet_offsets;
6409 if (tun_type == ICE_ALL_TUNNELS) {
6410 *pkt = dummy_gre_udp_packet;
6411 *pkt_len = sizeof(dummy_gre_udp_packet);
6412 *offsets = dummy_gre_udp_packet_offsets;
/* GRE/NVGRE: inner TCP vs. inner UDP template, chosen by the tcp flag
 * (condition partially elided).
 */
6416 if (tun_type == ICE_SW_TUN_NVGRE || gre) {
6418 *pkt = dummy_gre_tcp_packet;
6419 *pkt_len = sizeof(dummy_gre_tcp_packet);
6420 *offsets = dummy_gre_tcp_packet_offsets;
6424 *pkt = dummy_gre_udp_packet;
6425 *pkt_len = sizeof(dummy_gre_udp_packet);
6426 *offsets = dummy_gre_udp_packet_offsets;
6430 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
6431 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP) {
6433 *pkt = dummy_udp_tun_tcp_packet;
6434 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
6435 *offsets = dummy_udp_tun_tcp_packet_offsets;
6439 *pkt = dummy_udp_tun_udp_packet;
6440 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
6441 *offsets = dummy_udp_tun_udp_packet_offsets;
/* Non-tunneled fallbacks: pick UDP/TCP x IPv4/IPv6 x VLAN variant */
6447 *pkt = dummy_vlan_udp_packet;
6448 *pkt_len = sizeof(dummy_vlan_udp_packet);
6449 *offsets = dummy_vlan_udp_packet_offsets;
6452 *pkt = dummy_udp_packet;
6453 *pkt_len = sizeof(dummy_udp_packet);
6454 *offsets = dummy_udp_packet_offsets;
6456 } else if (udp && ipv6) {
6458 *pkt = dummy_vlan_udp_ipv6_packet;
6459 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
6460 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
6463 *pkt = dummy_udp_ipv6_packet;
6464 *pkt_len = sizeof(dummy_udp_ipv6_packet);
6465 *offsets = dummy_udp_ipv6_packet_offsets;
6467 } else if ((tcp && ipv6) || ipv6) {
6469 *pkt = dummy_vlan_tcp_ipv6_packet;
6470 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
6471 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
6474 *pkt = dummy_tcp_ipv6_packet;
6475 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
6476 *offsets = dummy_tcp_ipv6_packet_offsets;
/* default: plain (optionally VLAN-tagged) IPv4 TCP template */
6481 *pkt = dummy_vlan_tcp_packet;
6482 *pkt_len = sizeof(dummy_vlan_tcp_packet);
6483 *offsets = dummy_vlan_tcp_packet_offsets;
6485 *pkt = dummy_tcp_packet;
6486 *pkt_len = sizeof(dummy_tcp_packet);
6487 *offsets = dummy_tcp_packet_offsets;
6492 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
6494 * @lkups: lookup elements or match criteria for the advanced recipe, one
6495 * structure per protocol header
6496 * @lkups_cnt: number of protocols
6497 * @s_rule: stores rule information from the match criteria
6498 * @dummy_pkt: dummy packet to fill according to filter match criteria
6499 * @pkt_len: packet length of dummy packet
6500 * @offsets: offset info for the dummy packet
/* Copies the dummy template into the switch rule buffer, then overlays
 * the caller's header values masked per 16-bit word.
 */
6502 static enum ice_status
6503 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6504 struct ice_aqc_sw_rules_elem *s_rule,
6505 const u8 *dummy_pkt, u16 pkt_len,
6506 const struct ice_dummy_pkt_offsets *offsets)
6511 /* Start with a packet with a pre-defined/dummy content. Then, fill
6512 * in the header values to be looked up or matched.
6514 pkt = s_rule->pdata.lkup_tx_rx.hdr;
6516 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
6518 for (i = 0; i < lkups_cnt; i++) {
6519 enum ice_protocol_type type;
6520 u16 offset = 0, len = 0, j;
6523 /* find the start of this layer; it should be found since this
6524 * was already checked when search for the dummy packet
6526 type = lkups[i].type;
6527 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
6528 if (type == offsets[j].type) {
6529 offset = offsets[j].offset;
6534 /* this should never happen in a correct calling sequence */
6536 return ICE_ERR_PARAM;
/* Map the protocol type to the header length to overlay */
6538 switch (lkups[i].type) {
6541 len = sizeof(struct ice_ether_hdr);
6544 len = sizeof(struct ice_ethtype_hdr);
6547 len = sizeof(struct ice_vlan_hdr);
6551 len = sizeof(struct ice_ipv4_hdr);
6555 len = sizeof(struct ice_ipv6_hdr);
6560 len = sizeof(struct ice_l4_hdr);
6563 len = sizeof(struct ice_sctp_hdr);
6566 len = sizeof(struct ice_nvgre);
6571 len = sizeof(struct ice_udp_tnl_hdr);
6575 len = sizeof(struct ice_udp_gtp_hdr);
6578 len = sizeof(struct ice_pppoe_hdr);
6581 len = sizeof(struct ice_esp_hdr);
6584 len = sizeof(struct ice_nat_t_hdr);
6587 len = sizeof(struct ice_ah_hdr);
6590 len = sizeof(struct ice_l2tpv3_sess_hdr);
/* unrecognized protocol type */
6593 return ICE_ERR_PARAM;
6596 /* the length should be a word multiple */
6597 if (len % ICE_BYTES_PER_WORD)
6600 /* We have the offset to the header start, the length, the
6601 * caller's header values and mask. Use this information to
6602 * copy the data into the dummy packet appropriately based on
6603 * the mask. Note that we need to only write the bits as
6604 * indicated by the mask to make sure we don't improperly write
6605 * over any significant packet data.
6607 for (j = 0; j < len / sizeof(u16); j++)
6608 if (((u16 *)&lkups[i].m_u)[j])
6609 ((u16 *)(pkt + offset))[j] =
6610 (((u16 *)(pkt + offset))[j] &
6611 ~((u16 *)&lkups[i].m_u)[j]) |
6612 (((u16 *)&lkups[i].h_u)[j] &
6613 ((u16 *)&lkups[i].m_u)[j]);
6616 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
6622 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
6623 * @hw: pointer to the hardware structure
6624 * @tun_type: tunnel type
6625 * @pkt: dummy packet to fill in
6626 * @offsets: offset info for the dummy packet
/* Looks up the currently-open VXLAN/GENEVE tunnel UDP port and patches
 * it into the outer UDP destination port of the dummy packet.
 */
6628 static enum ice_status
6629 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
6630 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
6635 case ICE_SW_TUN_AND_NON_TUN:
6636 case ICE_SW_TUN_VXLAN_GPE:
6637 case ICE_SW_TUN_VXLAN:
6638 case ICE_SW_TUN_UDP:
6639 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
6643 case ICE_SW_TUN_GENEVE:
6644 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
6649 /* Nothing needs to be done for this tunnel type */
6653 /* Find the outer UDP protocol header and insert the port number */
6654 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
6655 if (offsets[i].type == ICE_UDP_OF) {
6656 struct ice_l4_hdr *hdr;
6659 offset = offsets[i].offset;
6660 hdr = (struct ice_l4_hdr *)&pkt[offset];
/* dst_port is stored big-endian on the wire */
6661 hdr->dst_port = CPU_TO_BE16(open_port);
6671 * ice_find_adv_rule_entry - Search a rule entry
6672 * @hw: pointer to the hardware structure
6673 * @lkups: lookup elements or match criteria for the advanced recipe, one
6674 * structure per protocol header
6675 * @lkups_cnt: number of protocols
6676 * @recp_id: recipe ID for which we are finding the rule
6677 * @rinfo: other information regarding the rule e.g. priority and action info
6679 * Helper function to search for a given advance rule entry
6680 * Returns pointer to entry storing the rule if found
6682 static struct ice_adv_fltr_mgmt_list_entry *
6683 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6684 u16 lkups_cnt, u16 recp_id,
6685 struct ice_adv_rule_info *rinfo)
6687 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6688 struct ice_switch_info *sw = hw->switch_info;
/* Walk every filter rule already attached to this recipe */
6691 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
6692 ice_adv_fltr_mgmt_list_entry, list_entry) {
6693 bool lkups_matched = true;
/* Rule counts must match before comparing element-by-element */
6695 if (lkups_cnt != list_itr->lkups_cnt)
/* Byte-compare each lookup element against the stored rule */
6697 for (i = 0; i < list_itr->lkups_cnt; i++)
6698 if (memcmp(&list_itr->lkups[i], &lkups[i],
6700 lkups_matched = false;
/* A match also requires identical action flag and tunnel type */
6703 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
6704 rinfo->tun_type == list_itr->rule_info.tun_type &&
6712 * ice_adv_add_update_vsi_list
6713 * @hw: pointer to the hardware structure
6714 * @m_entry: pointer to current adv filter management list entry
6715 * @cur_fltr: filter information from the bookkeeping entry
6716 * @new_fltr: filter information with the new VSI to be added
6718 * Call AQ command to add or update previously created VSI list with new VSI.
6720 * Helper function to do bookkeeping associated with adding filter information
6721 * The algorithm to do the bookkeeping is described below :
6722 * When a VSI needs to subscribe to a given advanced filter
6723 * if only one VSI has been added till now
6724 * Allocate a new VSI list and add two VSIs
6725 * to this list using switch rule command
6726 * Update the previously created switch rule with the
6727 * newly created VSI list ID
6728 * if a VSI list was previously created
6729 * Add the new VSI to the previously created VSI list set
6730 * using the update switch rule command
6732 static enum ice_status
6733 ice_adv_add_update_vsi_list(struct ice_hw *hw,
6734 struct ice_adv_fltr_mgmt_list_entry *m_entry,
6735 struct ice_adv_rule_info *cur_fltr,
6736 struct ice_adv_rule_info *new_fltr)
6738 enum ice_status status;
6739 u16 vsi_list_id = 0;
/* VSI lists only apply to forward-to-VSI actions; queue, queue-group
 * and drop actions cannot be aggregated this way.
 */
6741 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6742 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6743 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
6744 return ICE_ERR_NOT_IMPL;
/* Mixing a queue action with an existing VSI action is unsupported */
6746 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6747 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
6748 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6749 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
6750 return ICE_ERR_NOT_IMPL;
6752 /* Workaround fix for unexpected rule deletion by kernel PF
6755 if (new_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI &&
6756 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI)
6757 return ICE_ERR_NOT_IMPL;
6759 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
6760 /* Only one entry existed in the mapping and it was not already
6761 * a part of a VSI list. So, create a VSI list with the old and
6764 struct ice_fltr_info tmp_fltr;
6765 u16 vsi_handle_arr[2];
6767 /* A rule already exists with the new VSI being added */
6768 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
6769 new_fltr->sw_act.fwd_id.hw_vsi_id)
6770 return ICE_ERR_ALREADY_EXISTS;
/* Build a two-entry VSI list from the existing and new VSIs */
6772 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
6773 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
6774 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
6780 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
6781 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
6782 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
6783 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
6784 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
6785 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
6787 /* Update the previous switch rule of "forward to VSI" to
6790 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Record the new list action in the bookkeeping entry */
6794 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
6795 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
6796 m_entry->vsi_list_info =
6797 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
6800 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
6802 if (!m_entry->vsi_list_info)
6805 /* A rule already exists with the new VSI being added */
6806 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
6809 /* Update the previously created VSI list set with
6810 * the new VSI ID passed in
6812 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
6814 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
6816 ice_aqc_opc_update_sw_rules,
6818 /* update VSI list mapping info with new VSI ID */
6820 ice_set_bit(vsi_handle,
6821 m_entry->vsi_list_info->vsi_map);
/* One more VSI now subscribes to this filter */
6824 m_entry->vsi_count++;
6829 * ice_add_adv_rule - helper function to create an advanced switch rule
6830 * @hw: pointer to the hardware structure
6831 * @lkups: information on the words that needs to be looked up. All words
6832 * together makes one recipe
6833 * @lkups_cnt: num of entries in the lkups array
6834 * @rinfo: other information related to the rule that needs to be programmed
6835 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
6836 * ignored in case of error.
6838 * This function can program only 1 rule at a time. The lkups is used to
6839 * describe all the words that form the "lookup" portion of the recipe.
6840 * These words can span multiple protocols. Callers to this function need to
6841 * pass in a list of protocol headers with lookup information along and mask
6842 * that determines which words are valid from the given protocol header.
6843 * rinfo describes other information related to this rule such as forwarding
6844 * IDs, priority of this rule, etc.
6847 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6848 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
6849 struct ice_rule_query_data *added_entry)
6851 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
6852 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
6853 const struct ice_dummy_pkt_offsets *pkt_offsets;
6854 struct ice_aqc_sw_rules_elem *s_rule = NULL;
6855 struct LIST_HEAD_TYPE *rule_head;
6856 struct ice_switch_info *sw;
6857 enum ice_status status;
6858 const u8 *pkt = NULL;
/* One-time lazy init of the profile-to-result-index bitmap; guarded by
 * prof_res_bm_init so it runs only on the first advanced rule added.
 */
6864 /* Initialize profile to result index bitmap */
6865 if (!hw->switch_info->prof_res_bm_init) {
6866 hw->switch_info->prof_res_bm_init = 1;
6867 ice_init_prof_result_bm(hw);
/* A profile rule (determined from the tunnel type) is allowed to have no
 * lookup words; any other rule must supply at least one.
 */
6870 prof_rule = ice_is_prof_rule(rinfo->tun_type);
6871 if (!prof_rule && !lkups_cnt)
6872 return ICE_ERR_PARAM;
6874 /* get # of words we need to match */
6876 for (i = 0; i < lkups_cnt; i++) {
6879 ptr = (u16 *)&lkups[i].m_u;
6880 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
/* Reject rules whose total match-word count exceeds what one recipe
 * chain can hold; non-profile rules additionally need >= 1 word.
 */
6886 if (word_cnt > ICE_MAX_CHAIN_WORDS)
6887 return ICE_ERR_PARAM;
6889 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
6890 return ICE_ERR_PARAM;
6893 /* make sure that we can locate a dummy packet */
6894 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
6897 status = ICE_ERR_PARAM;
6898 goto err_ice_add_adv_rule;
/* Only these four forwarding actions are valid for an advanced rule. */
6901 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6902 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
6903 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6904 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
6907 vsi_handle = rinfo->sw_act.vsi_handle;
6908 if (!ice_is_vsi_valid(hw, vsi_handle))
6909 return ICE_ERR_PARAM;
/* Resolve the software VSI handle to the HW VSI number for the
 * forward-to-VSI action and for the TX source field.
 */
6911 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6912 rinfo->sw_act.fwd_id.hw_vsi_id =
6913 ice_get_hw_vsi_num(hw, vsi_handle);
6914 if (rinfo->sw_act.flag & ICE_FLTR_TX)
6915 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
6917 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
/* If an identical rule already exists, update its VSI list instead of
 * programming a duplicate rule.
 */
6920 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6922 /* we have to add VSI to VSI_LIST and increment vsi_count.
6923 * Also Update VSI list so that we can change forwarding rule
6924 * if the rule already exists, we will check if it exists with
6925 * same vsi_id, if not then add it to the VSI list if it already
6926 * exists if not then create a VSI list and add the existing VSI
6927 * ID and the new VSI ID to the list
6928 * We will add that VSI to the list
6930 status = ice_adv_add_update_vsi_list(hw, m_entry,
6931 &m_entry->rule_info,
6934 added_entry->rid = rid;
6935 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
6936 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* New rule: the AQ buffer holds the fixed rule header plus the dummy
 * packet that carries the match pattern.
 */
6940 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
6941 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
6943 return ICE_ERR_NO_MEMORY;
/* Build the action bits: LAN enable is always set, the remaining bits
 * depend on the requested filter action.
 */
6944 act |= ICE_SINGLE_ACT_LAN_ENABLE;
6945 switch (rinfo->sw_act.fltr_act) {
6946 case ICE_FWD_TO_VSI:
6947 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
6948 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
6949 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
6952 act |= ICE_SINGLE_ACT_TO_Q;
6953 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6954 ICE_SINGLE_ACT_Q_INDEX_M;
6956 case ICE_FWD_TO_QGRP:
/* Queue-group size is encoded as log2 in the Q_REGION field. */
6957 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
6958 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
6959 act |= ICE_SINGLE_ACT_TO_Q;
6960 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6961 ICE_SINGLE_ACT_Q_INDEX_M;
6962 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
6963 ICE_SINGLE_ACT_Q_REGION_M;
6965 case ICE_DROP_PACKET:
6966 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
6967 ICE_SINGLE_ACT_VALID_BIT;
6970 status = ICE_ERR_CFG;
6971 goto err_ice_add_adv_rule;
6974 /* set the rule LOOKUP type based on caller specified 'RX'
6975 * instead of hardcoding it to be either LOOKUP_TX/RX
6977 * for 'RX' set the source to be the port number
6978 * for 'TX' set the source to be the source HW VSI number (determined
6982 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
6983 s_rule->pdata.lkup_tx_rx.src =
6984 CPU_TO_LE16(hw->port_info->lport);
6986 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
6987 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
6990 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
6991 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* Copy the dummy packet into the rule and patch the caller's lookup
 * values/masks into it at the recorded protocol offsets.
 */
6993 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
6994 pkt_len, pkt_offsets);
6996 goto err_ice_add_adv_rule;
/* Tunnel rules additionally need tunnel-specific header fixups. */
6998 if (rinfo->tun_type != ICE_NON_TUN &&
6999 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
7000 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
7001 s_rule->pdata.lkup_tx_rx.hdr,
7004 goto err_ice_add_adv_rule;
7007 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
7008 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
7011 goto err_ice_add_adv_rule;
/* Rule programmed in HW: build the book-keeping entry, including a
 * private copy of the lookup array.
 */
7012 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
7013 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
7015 status = ICE_ERR_NO_MEMORY;
7016 goto err_ice_add_adv_rule;
7019 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
7020 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
7021 ICE_NONDMA_TO_NONDMA);
7022 if (!adv_fltr->lkups && !prof_rule) {
7023 status = ICE_ERR_NO_MEMORY;
7024 goto err_ice_add_adv_rule;
7027 adv_fltr->lkups_cnt = lkups_cnt;
7028 adv_fltr->rule_info = *rinfo;
/* The HW-assigned rule index becomes the rule ID reported to callers. */
7029 adv_fltr->rule_info.fltr_rule_id =
7030 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
7031 sw = hw->switch_info;
7032 sw->recp_list[rid].adv_rule = true;
7033 rule_head = &sw->recp_list[rid].filt_rules;
7035 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
7036 adv_fltr->vsi_count = 1;
7038 /* Add rule entry to book keeping list */
7039 LIST_ADD(&adv_fltr->list_entry, rule_head);
7041 added_entry->rid = rid;
7042 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
7043 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* Common error unwind: on failure free the partially built filter
 * entry (and its lookup copy); the AQ rule buffer is always freed.
 */
7045 err_ice_add_adv_rule:
7046 if (status && adv_fltr) {
7047 ice_free(hw, adv_fltr->lkups);
7048 ice_free(hw, adv_fltr);
7051 ice_free(hw, s_rule);
7057 * ice_adv_rem_update_vsi_list
7058 * @hw: pointer to the hardware structure
7059 * @vsi_handle: VSI handle of the VSI to remove
7060 * @fm_list: filter management entry for which the VSI list management needs to
7063 static enum ice_status
7064 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
7065 struct ice_adv_fltr_mgmt_list_entry *fm_list)
7067 struct ice_vsi_list_map_info *vsi_list_info;
7068 enum ice_sw_lkup_type lkup_type;
7069 enum ice_status status;
/* Only meaningful for rules that currently forward to a VSI list with
 * at least one member.
 */
7072 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
7073 fm_list->vsi_count == 0)
7074 return ICE_ERR_PARAM;
7076 /* A rule with the VSI being removed does not exist */
7077 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
7078 return ICE_ERR_DOES_NOT_EXIST;
/* NOTE(review): ICE_SW_LKUP_LAST appears to be the lookup type used for
 * advanced-rule VSI lists (the add path uses it too) — confirm.
 */
7080 lkup_type = ICE_SW_LKUP_LAST;
7081 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
/* Remove this VSI from the HW VSI list, then mirror the change in the
 * SW book-keeping (count and bitmap).
 */
7082 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
7083 ice_aqc_opc_update_sw_rules,
7088 fm_list->vsi_count--;
7089 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
7090 vsi_list_info = fm_list->vsi_list_info;
/* Exactly one VSI left: collapse the VSI list back into a plain
 * "forward to VSI" rule and delete the now-unneeded list.
 */
7091 if (fm_list->vsi_count == 1) {
7092 struct ice_fltr_info tmp_fltr;
7095 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
7097 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
7098 return ICE_ERR_OUT_OF_RANGE;
7100 /* Make sure VSI list is empty before removing it below */
7101 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
7103 ice_aqc_opc_update_sw_rules,
7108 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
7109 tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
7110 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
7111 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
7112 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
7113 tmp_fltr.fwd_id.hw_vsi_id =
7114 ice_get_hw_vsi_num(hw, rem_vsi_handle);
7115 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
7116 ice_get_hw_vsi_num(hw, rem_vsi_handle);
7118 /* Update the previous switch rule of "forward to VSI list" to
7119 * "forward to VSI" for the remaining member
7121 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
7123 ice_debug(hw, ICE_DBG_SW,
7124 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
7125 tmp_fltr.fwd_id.hw_vsi_id, status);
7129 /* Remove the VSI list since it is no longer used */
7130 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
7132 ice_debug(hw, ICE_DBG_SW,
7133 "Failed to remove VSI list %d, error %d\n",
7134 vsi_list_id, status);
/* Drop the SW map entry for the deleted list and clear the back
 * pointer so it cannot be used after free.
 */
7138 LIST_DEL(&vsi_list_info->list_entry);
7139 ice_free(hw, vsi_list_info);
7140 fm_list->vsi_list_info = NULL;
7147 * ice_rem_adv_rule - removes existing advanced switch rule
7148 * @hw: pointer to the hardware structure
7149 * @lkups: information on the words that needs to be looked up. All words
7150 * together makes one recipe
7151 * @lkups_cnt: num of entries in the lkups array
7152 * @rinfo: pointer to the rule information for the rule
7154 * This function can be used to remove 1 rule at a time. The lkups is
7155 * used to describe all the words that form the "lookup" portion of the
7156 * rule. These words can span multiple protocols. Callers to this function
7157 * need to pass in a list of protocol headers with lookup information along
7158 * and mask that determines which words are valid from the given protocol
7159 * header. rinfo describes other information related to this rule such as
7160 * forwarding IDs, priority of this rule, etc.
7163 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7164 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
7166 struct ice_adv_fltr_mgmt_list_entry *list_elem;
7167 struct ice_prot_lkup_ext lkup_exts;
7168 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
7169 enum ice_status status = ICE_SUCCESS;
7170 bool remove_rule = false;
7171 u16 i, rid, vsi_handle;
/* Rebuild the protocol/offset extraction words from the caller's
 * lookups so the matching recipe can be located.
 */
7173 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
7174 for (i = 0; i < lkups_cnt; i++) {
7177 if (lkups[i].type >= ICE_PROTOCOL_LAST)
7180 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
7185 /* Create any special protocol/offset pairs, such as looking at tunnel
7186 * bits by extracting metadata
7188 status = ice_add_special_words(rinfo, &lkup_exts);
7192 rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
7193 /* If we did not find a recipe that matches the existing criteria */
7194 if (rid == ICE_MAX_NUM_RECIPES)
7195 return ICE_ERR_PARAM;
7197 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
7198 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
7199 /* the rule is already removed */
/* Under the per-recipe lock, decide whether the whole rule goes away
 * or only this VSI is removed from the rule's VSI list.
 */
7202 ice_acquire_lock(rule_lock);
7203 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
7205 } else if (list_elem->vsi_count > 1) {
/* More than one VSI still uses the list: just drop this VSI. */
7206 list_elem->vsi_list_info->ref_cnt--;
7207 remove_rule = false;
7208 vsi_handle = rinfo->sw_act.vsi_handle;
7209 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
7211 vsi_handle = rinfo->sw_act.vsi_handle;
7212 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
7214 ice_release_lock(rule_lock);
7217 if (list_elem->vsi_count == 0)
7220 ice_release_lock(rule_lock);
/* Remove the rule from HW via a remove_sw_rules AQ command addressed
 * by the stored rule ID; no packet header is needed (hdr_len = 0).
 */
7222 struct ice_aqc_sw_rules_elem *s_rule;
7225 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
7227 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
7230 return ICE_ERR_NO_MEMORY;
7231 s_rule->pdata.lkup_tx_rx.act = 0;
7232 s_rule->pdata.lkup_tx_rx.index =
7233 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
7234 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
7235 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
7237 ice_aqc_opc_remove_sw_rules, NULL);
/* Only unlink and free the SW book-keeping entry once HW removal
 * succeeded; otherwise the entry still describes a live rule.
 */
7238 if (status == ICE_SUCCESS) {
7239 ice_acquire_lock(rule_lock);
7240 LIST_DEL(&list_elem->list_entry);
7241 ice_free(hw, list_elem->lkups);
7242 ice_free(hw, list_elem);
7243 ice_release_lock(rule_lock);
7245 ice_free(hw, s_rule);
7251 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
7252 * @hw: pointer to the hardware structure
7253 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
7255 * This function is used to remove 1 rule at a time. The removal is based on
7256 * the remove_entry parameter. This function will remove rule for a given
7257 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
7260 ice_rem_adv_rule_by_id(struct ice_hw *hw,
7261 struct ice_rule_query_data *remove_entry)
7263 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7264 struct LIST_HEAD_TYPE *list_head;
7265 struct ice_adv_rule_info rinfo;
7266 struct ice_switch_info *sw;
7268 sw = hw->switch_info;
/* The recipe ID in remove_entry must refer to a recipe that was
 * actually created; otherwise there is nothing to search.
 */
7269 if (!sw->recp_list[remove_entry->rid].recp_created)
7270 return ICE_ERR_PARAM;
7271 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
/* Linear search of the recipe's filter list for the matching rule ID;
 * on a hit, remove via ice_rem_adv_rule using a copy of the stored
 * rule info with the caller's VSI handle substituted in.
 */
7272 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
7274 if (list_itr->rule_info.fltr_rule_id ==
7275 remove_entry->rule_id) {
7276 rinfo = list_itr->rule_info;
7277 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
7278 return ice_rem_adv_rule(hw, list_itr->lkups,
7279 list_itr->lkups_cnt, &rinfo);
/* Rule ID not found under this recipe. */
7282 return ICE_ERR_PARAM;
7286 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
7288 * @hw: pointer to the hardware structure
7289 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
7291 * This function is used to remove all the rules for a given VSI and as soon
7292 * as removing a rule fails, it will return immediately with the error code,
7293 * else it will return ICE_SUCCESS
7296 ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
7298 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7299 struct ice_vsi_list_map_info *map_info;
7300 struct LIST_HEAD_TYPE *list_head;
7301 struct ice_adv_rule_info rinfo;
7302 struct ice_switch_info *sw;
7303 enum ice_status status;
7304 u16 vsi_list_id = 0;
7307 sw = hw->switch_info;
/* Walk every created recipe that carries advanced rules and remove the
 * rules referencing this VSI.
 */
7308 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
7309 if (!sw->recp_list[rid].recp_created)
7311 if (!sw->recp_list[rid].adv_rule)
7313 list_head = &sw->recp_list[rid].filt_rules;
7315 LIST_FOR_EACH_ENTRY(list_itr, list_head,
7316 ice_adv_fltr_mgmt_list_entry, list_entry) {
/* Check whether this VSI is a member of the rule's VSI list. */
7317 map_info = ice_find_vsi_list_entry(&sw->recp_list[rid],
/* Remove with the stored rule info, retargeted at this VSI. */
7322 rinfo = list_itr->rule_info;
7323 rinfo.sw_act.vsi_handle = vsi_handle;
7324 status = ice_rem_adv_rule(hw, list_itr->lkups,
7325 list_itr->lkups_cnt, &rinfo);
7335 * ice_replay_fltr - Replay all the filters stored by a specific list head
7336 * @hw: pointer to the hardware structure
7337 * @list_head: list for which filters needs to be replayed
7338 * @recp_id: Recipe ID for which rules need to be replayed
7340 static enum ice_status
7341 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
7343 struct ice_fltr_mgmt_list_entry *itr;
7344 enum ice_status status = ICE_SUCCESS;
7345 struct ice_sw_recipe *recp_list;
7346 u8 lport = hw->port_info->lport;
7347 struct LIST_HEAD_TYPE l_head;
/* Nothing stored for this recipe — nothing to replay. */
7349 if (LIST_EMPTY(list_head))
7352 recp_list = &hw->switch_info->recp_list[recp_id];
7353 /* Move entries from the given list_head to a temporary l_head so that
7354 * they can be replayed. Otherwise when trying to re-add the same
7355 * filter, the function will return already exists
7357 LIST_REPLACE_INIT(list_head, &l_head);
7359 /* Mark the given list_head empty by reinitializing it so filters
7360 * could be added again by *handler
7362 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
7364 struct ice_fltr_list_entry f_entry;
7366 f_entry.fltr_info = itr->fltr_info;
/* Single-VSI, non-VLAN filters can be re-added directly. */
7367 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
7368 status = ice_add_rule_internal(hw, recp_list, lport,
7370 if (status != ICE_SUCCESS)
7375 /* Add a filter per VSI separately */
/* Walk the stored VSI bitmap and re-add one FWD_TO_VSI filter
 * per member VSI, clearing each bit as it is consumed.
 */
7380 ice_find_first_bit(itr->vsi_list_info->vsi_map,
7382 if (!ice_is_vsi_valid(hw, vsi_handle))
7385 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
7386 f_entry.fltr_info.vsi_handle = vsi_handle;
7387 f_entry.fltr_info.fwd_id.hw_vsi_id =
7388 ice_get_hw_vsi_num(hw, vsi_handle);
7389 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
7390 if (recp_id == ICE_SW_LKUP_VLAN)
7391 status = ice_add_vlan_internal(hw, recp_list,
7394 status = ice_add_rule_internal(hw, recp_list,
7397 if (status != ICE_SUCCESS)
7402 /* Clear the filter management list */
7403 ice_rem_sw_rule_info(hw, &l_head);
7408 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
7409 * @hw: pointer to the hardware structure
7411 * NOTE: This function does not clean up partially added filters on error.
7412 * It is up to caller of the function to issue a reset or fail early.
7414 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
7416 struct ice_switch_info *sw = hw->switch_info;
7417 enum ice_status status = ICE_SUCCESS;
/* Replay the stored filters of every recipe; stop at the first
 * failure (see the header note: partial replays are not rolled back).
 */
7420 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7421 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
7423 status = ice_replay_fltr(hw, i, head);
7424 if (status != ICE_SUCCESS)
7431 * ice_replay_vsi_fltr - Replay filters for requested VSI
7432 * @hw: pointer to the hardware structure
7433 * @vsi_handle: driver VSI handle
7434 * @recp_id: Recipe ID for which rules need to be replayed
7435 * @list_head: list for which filters need to be replayed
7437 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
7438 * It is required to pass valid VSI handle.
7440 static enum ice_status
7441 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
7442 struct LIST_HEAD_TYPE *list_head)
7444 struct ice_fltr_mgmt_list_entry *itr;
7445 enum ice_status status = ICE_SUCCESS;
7446 struct ice_sw_recipe *recp_list;
7449 if (LIST_EMPTY(list_head))
7451 recp_list = &hw->switch_info->recp_list[recp_id];
/* Resolve once; reused for every filter whose source is a VSI number. */
7452 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
7454 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
7456 struct ice_fltr_list_entry f_entry;
7458 f_entry.fltr_info = itr->fltr_info;
/* Simple case: single-VSI, non-VLAN filter owned by this VSI. */
7459 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
7460 itr->fltr_info.vsi_handle == vsi_handle) {
7461 /* update the src in case it is VSI num */
7462 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
7463 f_entry.fltr_info.src = hw_vsi_id;
7464 status = ice_add_rule_internal(hw, recp_list,
7465 hw->port_info->lport,
7467 if (status != ICE_SUCCESS)
/* VSI-list case: skip entries whose list does not include this VSI. */
7471 if (!itr->vsi_list_info ||
7472 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
7474 /* Clearing it so that the logic can add it back */
7475 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
7476 f_entry.fltr_info.vsi_handle = vsi_handle;
7477 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
7478 /* update the src in case it is VSI num */
7479 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
7480 f_entry.fltr_info.src = hw_vsi_id;
/* VLAN filters go through the VLAN-specific add path. */
7481 if (recp_id == ICE_SW_LKUP_VLAN)
7482 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
7484 status = ice_add_rule_internal(hw, recp_list,
7485 hw->port_info->lport,
7487 if (status != ICE_SUCCESS)
7495 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
7496 * @hw: pointer to the hardware structure
7497 * @vsi_handle: driver VSI handle
7498 * @list_head: list for which filters need to be replayed
7500 * Replay the advanced rule for the given VSI.
7502 static enum ice_status
7503 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
7504 struct LIST_HEAD_TYPE *list_head)
7506 struct ice_rule_query_data added_entry = { 0 };
7507 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
7508 enum ice_status status = ICE_SUCCESS;
7510 if (LIST_EMPTY(list_head))
/* Re-add every stored advanced rule that targets this VSI, using its
 * saved lookups and rule info verbatim.
 */
7512 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
7514 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
7515 u16 lk_cnt = adv_fltr->lkups_cnt;
7517 if (vsi_handle != rinfo->sw_act.vsi_handle)
7519 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
7528 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
7529 * @hw: pointer to the hardware structure
7530 * @vsi_handle: driver VSI handle
7532 * Replays filters for requested VSI via vsi_handle.
7534 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
7536 struct ice_switch_info *sw = hw->switch_info;
7537 enum ice_status status;
7540 /* Update the recipes that were created */
7541 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7542 struct LIST_HEAD_TYPE *head;
7544 head = &sw->recp_list[i].filt_replay_rules;
/* Legacy recipes replay via ice_replay_vsi_fltr; recipes flagged
 * adv_rule replay via the advanced-rule path. Stop on first error.
 */
7545 if (!sw->recp_list[i].adv_rule)
7546 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
7548 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
7549 if (status != ICE_SUCCESS)
7557 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
7558 * @hw: pointer to the HW struct
7560 * Deletes the filter replay rules.
7562 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
7564 struct ice_switch_info *sw = hw->switch_info;
/* Free the replay-rule book-keeping of every recipe, choosing the
 * legacy or advanced cleanup routine by the recipe's adv_rule flag.
 */
7570 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7571 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
7572 struct LIST_HEAD_TYPE *l_head;
7574 l_head = &sw->recp_list[i].filt_replay_rules;
7575 if (!sw->recp_list[i].adv_rule)
7576 ice_rem_sw_rule_info(hw, l_head);
7578 ice_rem_adv_rule_info(hw, l_head);