1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020 Intel Corporation
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
12 #define ICE_MAX_VLAN_ID 0xFFF
13 #define ICE_IPV4_NVGRE_PROTO_ID 0x002F
14 #define ICE_PPP_IPV6_PROTO_ID 0x0057
15 #define ICE_IPV6_ETHER_ID 0x86DD
17 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
18 * struct to configure any switch filter rules.
19 * {DA (6 bytes), SA(6 bytes),
20 * Ether type (2 bytes for header without VLAN tag) OR
21 * VLAN tag (4 bytes for header with VLAN tag) }
23 * Word on Hardcoded values
24 * byte 0 = 0x2: to identify it as locally administered DA MAC
25 * byte 6 = 0x2: to identify it as locally administered SA MAC
26 * byte 12 = 0x81 & byte 13 = 0x00:
27 * In case of VLAN filter first two bytes defines ether type (0x8100)
28 * and remaining two bytes are placeholder for programming a given VLAN ID
29 * In case of Ether type filter it is treated as header without VLAN tag
30 * and byte 12 and 13 is used to program a given Ether type instead
32 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
36 struct ice_dummy_pkt_offsets {
37 enum ice_protocol_type type;
38 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
41 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
44 { ICE_IPV4_OFOS, 14 },
49 { ICE_PROTOCOL_LAST, 0 },
52 static const u8 dummy_gre_tcp_packet[] = {
53 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
54 0x00, 0x00, 0x00, 0x00,
55 0x00, 0x00, 0x00, 0x00,
57 0x08, 0x00, /* ICE_ETYPE_OL 12 */
59 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
60 0x00, 0x00, 0x00, 0x00,
61 0x00, 0x2F, 0x00, 0x00,
62 0x00, 0x00, 0x00, 0x00,
63 0x00, 0x00, 0x00, 0x00,
65 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
66 0x00, 0x00, 0x00, 0x00,
68 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
69 0x00, 0x00, 0x00, 0x00,
70 0x00, 0x00, 0x00, 0x00,
73 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
74 0x00, 0x00, 0x00, 0x00,
75 0x00, 0x06, 0x00, 0x00,
76 0x00, 0x00, 0x00, 0x00,
77 0x00, 0x00, 0x00, 0x00,
79 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
80 0x00, 0x00, 0x00, 0x00,
81 0x00, 0x00, 0x00, 0x00,
82 0x50, 0x02, 0x20, 0x00,
83 0x00, 0x00, 0x00, 0x00
86 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
89 { ICE_IPV4_OFOS, 14 },
94 { ICE_PROTOCOL_LAST, 0 },
97 static const u8 dummy_gre_udp_packet[] = {
98 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
99 0x00, 0x00, 0x00, 0x00,
100 0x00, 0x00, 0x00, 0x00,
102 0x08, 0x00, /* ICE_ETYPE_OL 12 */
104 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
105 0x00, 0x00, 0x00, 0x00,
106 0x00, 0x2F, 0x00, 0x00,
107 0x00, 0x00, 0x00, 0x00,
108 0x00, 0x00, 0x00, 0x00,
110 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
111 0x00, 0x00, 0x00, 0x00,
113 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
114 0x00, 0x00, 0x00, 0x00,
115 0x00, 0x00, 0x00, 0x00,
118 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
119 0x00, 0x00, 0x00, 0x00,
120 0x00, 0x11, 0x00, 0x00,
121 0x00, 0x00, 0x00, 0x00,
122 0x00, 0x00, 0x00, 0x00,
124 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
125 0x00, 0x08, 0x00, 0x00,
128 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
130 { ICE_ETYPE_OL, 12 },
131 { ICE_IPV4_OFOS, 14 },
135 { ICE_VXLAN_GPE, 42 },
139 { ICE_PROTOCOL_LAST, 0 },
142 static const u8 dummy_udp_tun_tcp_packet[] = {
143 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
144 0x00, 0x00, 0x00, 0x00,
145 0x00, 0x00, 0x00, 0x00,
147 0x08, 0x00, /* ICE_ETYPE_OL 12 */
149 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
150 0x00, 0x01, 0x00, 0x00,
151 0x40, 0x11, 0x00, 0x00,
152 0x00, 0x00, 0x00, 0x00,
153 0x00, 0x00, 0x00, 0x00,
155 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
156 0x00, 0x46, 0x00, 0x00,
158 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
159 0x00, 0x00, 0x00, 0x00,
161 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
162 0x00, 0x00, 0x00, 0x00,
163 0x00, 0x00, 0x00, 0x00,
166 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
167 0x00, 0x01, 0x00, 0x00,
168 0x40, 0x06, 0x00, 0x00,
169 0x00, 0x00, 0x00, 0x00,
170 0x00, 0x00, 0x00, 0x00,
172 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
173 0x00, 0x00, 0x00, 0x00,
174 0x00, 0x00, 0x00, 0x00,
175 0x50, 0x02, 0x20, 0x00,
176 0x00, 0x00, 0x00, 0x00
179 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
181 { ICE_ETYPE_OL, 12 },
182 { ICE_IPV4_OFOS, 14 },
186 { ICE_VXLAN_GPE, 42 },
189 { ICE_UDP_ILOS, 84 },
190 { ICE_PROTOCOL_LAST, 0 },
193 static const u8 dummy_udp_tun_udp_packet[] = {
194 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
195 0x00, 0x00, 0x00, 0x00,
196 0x00, 0x00, 0x00, 0x00,
198 0x08, 0x00, /* ICE_ETYPE_OL 12 */
200 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
201 0x00, 0x01, 0x00, 0x00,
202 0x00, 0x11, 0x00, 0x00,
203 0x00, 0x00, 0x00, 0x00,
204 0x00, 0x00, 0x00, 0x00,
206 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
207 0x00, 0x3a, 0x00, 0x00,
209 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
210 0x00, 0x00, 0x00, 0x00,
212 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
213 0x00, 0x00, 0x00, 0x00,
214 0x00, 0x00, 0x00, 0x00,
217 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
218 0x00, 0x01, 0x00, 0x00,
219 0x00, 0x11, 0x00, 0x00,
220 0x00, 0x00, 0x00, 0x00,
221 0x00, 0x00, 0x00, 0x00,
223 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
224 0x00, 0x08, 0x00, 0x00,
227 /* offset info for MAC + IPv4 + UDP dummy packet */
228 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
230 { ICE_ETYPE_OL, 12 },
231 { ICE_IPV4_OFOS, 14 },
232 { ICE_UDP_ILOS, 34 },
233 { ICE_PROTOCOL_LAST, 0 },
236 /* Dummy packet for MAC + IPv4 + UDP */
237 static const u8 dummy_udp_packet[] = {
238 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
239 0x00, 0x00, 0x00, 0x00,
240 0x00, 0x00, 0x00, 0x00,
242 0x08, 0x00, /* ICE_ETYPE_OL 12 */
244 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
245 0x00, 0x01, 0x00, 0x00,
246 0x00, 0x11, 0x00, 0x00,
247 0x00, 0x00, 0x00, 0x00,
248 0x00, 0x00, 0x00, 0x00,
250 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
251 0x00, 0x08, 0x00, 0x00,
253 0x00, 0x00, /* 2 bytes for 4 byte alignment */
256 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
257 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
259 { ICE_ETYPE_OL, 12 },
260 { ICE_VLAN_OFOS, 14 },
261 { ICE_IPV4_OFOS, 18 },
262 { ICE_UDP_ILOS, 38 },
263 { ICE_PROTOCOL_LAST, 0 },
/* C-tag (802.1Q), IPv4:UDP dummy packet */
267 static const u8 dummy_vlan_udp_packet[] = {
268 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
269 0x00, 0x00, 0x00, 0x00,
270 0x00, 0x00, 0x00, 0x00,
272 0x81, 0x00, /* ICE_ETYPE_OL 12 */
274 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
276 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
277 0x00, 0x01, 0x00, 0x00,
278 0x00, 0x11, 0x00, 0x00,
279 0x00, 0x00, 0x00, 0x00,
280 0x00, 0x00, 0x00, 0x00,
282 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
283 0x00, 0x08, 0x00, 0x00,
285 0x00, 0x00, /* 2 bytes for 4 byte alignment */
288 /* offset info for MAC + IPv4 + TCP dummy packet */
289 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
291 { ICE_ETYPE_OL, 12 },
292 { ICE_IPV4_OFOS, 14 },
294 { ICE_PROTOCOL_LAST, 0 },
297 /* Dummy packet for MAC + IPv4 + TCP */
298 static const u8 dummy_tcp_packet[] = {
299 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
300 0x00, 0x00, 0x00, 0x00,
301 0x00, 0x00, 0x00, 0x00,
303 0x08, 0x00, /* ICE_ETYPE_OL 12 */
305 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
306 0x00, 0x01, 0x00, 0x00,
307 0x00, 0x06, 0x00, 0x00,
308 0x00, 0x00, 0x00, 0x00,
309 0x00, 0x00, 0x00, 0x00,
311 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
312 0x00, 0x00, 0x00, 0x00,
313 0x00, 0x00, 0x00, 0x00,
314 0x50, 0x00, 0x00, 0x00,
315 0x00, 0x00, 0x00, 0x00,
317 0x00, 0x00, /* 2 bytes for 4 byte alignment */
320 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
321 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
323 { ICE_ETYPE_OL, 12 },
324 { ICE_VLAN_OFOS, 14 },
325 { ICE_IPV4_OFOS, 18 },
327 { ICE_PROTOCOL_LAST, 0 },
/* C-tag (802.1Q), IPv4:TCP dummy packet */
331 static const u8 dummy_vlan_tcp_packet[] = {
332 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
333 0x00, 0x00, 0x00, 0x00,
334 0x00, 0x00, 0x00, 0x00,
336 0x81, 0x00, /* ICE_ETYPE_OL 12 */
338 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
340 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
341 0x00, 0x01, 0x00, 0x00,
342 0x00, 0x06, 0x00, 0x00,
343 0x00, 0x00, 0x00, 0x00,
344 0x00, 0x00, 0x00, 0x00,
346 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
347 0x00, 0x00, 0x00, 0x00,
348 0x00, 0x00, 0x00, 0x00,
349 0x50, 0x00, 0x00, 0x00,
350 0x00, 0x00, 0x00, 0x00,
352 0x00, 0x00, /* 2 bytes for 4 byte alignment */
355 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
357 { ICE_ETYPE_OL, 12 },
358 { ICE_IPV6_OFOS, 14 },
360 { ICE_PROTOCOL_LAST, 0 },
/* IPv6 + TCP dummy packet (no VLAN tag) */
static const u8 dummy_tcp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 (offsets table above; TCP at 14 + 40) */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
390 /* C-tag (802.1Q): IPv6 + TCP */
391 static const struct ice_dummy_pkt_offsets
392 dummy_vlan_tcp_ipv6_packet_offsets[] = {
394 { ICE_ETYPE_OL, 12 },
395 { ICE_VLAN_OFOS, 14 },
396 { ICE_IPV6_OFOS, 18 },
398 { ICE_PROTOCOL_LAST, 0 },
401 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
402 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
403 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
404 0x00, 0x00, 0x00, 0x00,
405 0x00, 0x00, 0x00, 0x00,
407 0x81, 0x00, /* ICE_ETYPE_OL 12 */
409 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
411 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
412 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
413 0x00, 0x00, 0x00, 0x00,
414 0x00, 0x00, 0x00, 0x00,
415 0x00, 0x00, 0x00, 0x00,
416 0x00, 0x00, 0x00, 0x00,
417 0x00, 0x00, 0x00, 0x00,
418 0x00, 0x00, 0x00, 0x00,
419 0x00, 0x00, 0x00, 0x00,
420 0x00, 0x00, 0x00, 0x00,
422 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
423 0x00, 0x00, 0x00, 0x00,
424 0x00, 0x00, 0x00, 0x00,
425 0x50, 0x00, 0x00, 0x00,
426 0x00, 0x00, 0x00, 0x00,
428 0x00, 0x00, /* 2 bytes for 4 byte alignment */
432 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
434 { ICE_ETYPE_OL, 12 },
435 { ICE_IPV6_OFOS, 14 },
436 { ICE_UDP_ILOS, 54 },
437 { ICE_PROTOCOL_LAST, 0 },
440 /* IPv6 + UDP dummy packet */
static const u8 dummy_udp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 (offsets table above; UDP at 14 + 40) */
	0x00, 0x10, 0x11, 0x00, /* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
	0x00, 0x10, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
468 /* C-tag (802.1Q): IPv6 + UDP */
469 static const struct ice_dummy_pkt_offsets
470 dummy_vlan_udp_ipv6_packet_offsets[] = {
472 { ICE_ETYPE_OL, 12 },
473 { ICE_VLAN_OFOS, 14 },
474 { ICE_IPV6_OFOS, 18 },
475 { ICE_UDP_ILOS, 58 },
476 { ICE_PROTOCOL_LAST, 0 },
479 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
480 static const u8 dummy_vlan_udp_ipv6_packet[] = {
481 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
482 0x00, 0x00, 0x00, 0x00,
483 0x00, 0x00, 0x00, 0x00,
485 0x81, 0x00, /* ICE_ETYPE_OL 12 */
487 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
489 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
490 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
491 0x00, 0x00, 0x00, 0x00,
492 0x00, 0x00, 0x00, 0x00,
493 0x00, 0x00, 0x00, 0x00,
494 0x00, 0x00, 0x00, 0x00,
495 0x00, 0x00, 0x00, 0x00,
496 0x00, 0x00, 0x00, 0x00,
497 0x00, 0x00, 0x00, 0x00,
498 0x00, 0x00, 0x00, 0x00,
500 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
501 0x00, 0x08, 0x00, 0x00,
503 0x00, 0x00, /* 2 bytes for 4 byte alignment */
506 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
508 { ICE_IPV4_OFOS, 14 },
511 { ICE_PROTOCOL_LAST, 0 },
514 static const u8 dummy_udp_gtp_packet[] = {
515 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
516 0x00, 0x00, 0x00, 0x00,
517 0x00, 0x00, 0x00, 0x00,
520 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
521 0x00, 0x00, 0x00, 0x00,
522 0x00, 0x11, 0x00, 0x00,
523 0x00, 0x00, 0x00, 0x00,
524 0x00, 0x00, 0x00, 0x00,
526 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
527 0x00, 0x1c, 0x00, 0x00,
529 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
530 0x00, 0x00, 0x00, 0x00,
531 0x00, 0x00, 0x00, 0x85,
533 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
534 0x00, 0x00, 0x00, 0x00,
537 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
539 { ICE_ETYPE_OL, 12 },
540 { ICE_VLAN_OFOS, 14},
542 { ICE_PROTOCOL_LAST, 0 },
545 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv4_offsets[] = {
547 { ICE_ETYPE_OL, 12 },
548 { ICE_VLAN_OFOS, 14},
550 { ICE_IPV4_OFOS, 26 },
551 { ICE_PROTOCOL_LAST, 0 },
554 static const u8 dummy_pppoe_ipv4_packet[] = {
555 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
556 0x00, 0x00, 0x00, 0x00,
557 0x00, 0x00, 0x00, 0x00,
559 0x81, 0x00, /* ICE_ETYPE_OL 12 */
561 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
563 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
566 0x00, 0x21, /* PPP Link Layer 24 */
568 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
569 0x00, 0x00, 0x00, 0x00,
570 0x00, 0x00, 0x00, 0x00,
571 0x00, 0x00, 0x00, 0x00,
572 0x00, 0x00, 0x00, 0x00,
574 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
578 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_tcp_packet_offsets[] = {
580 { ICE_ETYPE_OL, 12 },
581 { ICE_VLAN_OFOS, 14},
583 { ICE_IPV4_OFOS, 26 },
585 { ICE_PROTOCOL_LAST, 0 },
588 static const u8 dummy_pppoe_ipv4_tcp_packet[] = {
589 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
590 0x00, 0x00, 0x00, 0x00,
591 0x00, 0x00, 0x00, 0x00,
593 0x81, 0x00, /* ICE_ETYPE_OL 12 */
595 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
597 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
600 0x00, 0x21, /* PPP Link Layer 24 */
602 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 26 */
603 0x00, 0x01, 0x00, 0x00,
604 0x00, 0x06, 0x00, 0x00,
605 0x00, 0x00, 0x00, 0x00,
606 0x00, 0x00, 0x00, 0x00,
608 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 46 */
609 0x00, 0x00, 0x00, 0x00,
610 0x00, 0x00, 0x00, 0x00,
611 0x50, 0x00, 0x00, 0x00,
612 0x00, 0x00, 0x00, 0x00,
614 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
618 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_udp_packet_offsets[] = {
620 { ICE_ETYPE_OL, 12 },
621 { ICE_VLAN_OFOS, 14},
623 { ICE_IPV4_OFOS, 26 },
624 { ICE_UDP_ILOS, 46 },
625 { ICE_PROTOCOL_LAST, 0 },
628 static const u8 dummy_pppoe_ipv4_udp_packet[] = {
629 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
630 0x00, 0x00, 0x00, 0x00,
631 0x00, 0x00, 0x00, 0x00,
633 0x81, 0x00, /* ICE_ETYPE_OL 12 */
635 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
637 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
640 0x00, 0x21, /* PPP Link Layer 24 */
642 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 26 */
643 0x00, 0x01, 0x00, 0x00,
644 0x00, 0x11, 0x00, 0x00,
645 0x00, 0x00, 0x00, 0x00,
646 0x00, 0x00, 0x00, 0x00,
648 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 46 */
649 0x00, 0x08, 0x00, 0x00,
651 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
654 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_offsets[] = {
656 { ICE_ETYPE_OL, 12 },
657 { ICE_VLAN_OFOS, 14},
659 { ICE_IPV6_OFOS, 26 },
660 { ICE_PROTOCOL_LAST, 0 },
663 static const u8 dummy_pppoe_ipv6_packet[] = {
664 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
665 0x00, 0x00, 0x00, 0x00,
666 0x00, 0x00, 0x00, 0x00,
668 0x81, 0x00, /* ICE_ETYPE_OL 12 */
670 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
672 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
675 0x00, 0x57, /* PPP Link Layer 24 */
677 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
678 0x00, 0x00, 0x3b, 0x00,
679 0x00, 0x00, 0x00, 0x00,
680 0x00, 0x00, 0x00, 0x00,
681 0x00, 0x00, 0x00, 0x00,
682 0x00, 0x00, 0x00, 0x00,
683 0x00, 0x00, 0x00, 0x00,
684 0x00, 0x00, 0x00, 0x00,
685 0x00, 0x00, 0x00, 0x00,
686 0x00, 0x00, 0x00, 0x00,
688 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
692 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_tcp_offsets[] = {
694 { ICE_ETYPE_OL, 12 },
695 { ICE_VLAN_OFOS, 14},
697 { ICE_IPV6_OFOS, 26 },
699 { ICE_PROTOCOL_LAST, 0 },
702 static const u8 dummy_pppoe_ipv6_tcp_packet[] = {
703 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
704 0x00, 0x00, 0x00, 0x00,
705 0x00, 0x00, 0x00, 0x00,
707 0x81, 0x00, /* ICE_ETYPE_OL 12 */
709 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
711 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
714 0x00, 0x57, /* PPP Link Layer 24 */
716 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
717 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
718 0x00, 0x00, 0x00, 0x00,
719 0x00, 0x00, 0x00, 0x00,
720 0x00, 0x00, 0x00, 0x00,
721 0x00, 0x00, 0x00, 0x00,
722 0x00, 0x00, 0x00, 0x00,
723 0x00, 0x00, 0x00, 0x00,
724 0x00, 0x00, 0x00, 0x00,
725 0x00, 0x00, 0x00, 0x00,
727 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 66 */
728 0x00, 0x00, 0x00, 0x00,
729 0x00, 0x00, 0x00, 0x00,
730 0x50, 0x00, 0x00, 0x00,
731 0x00, 0x00, 0x00, 0x00,
733 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
737 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_udp_offsets[] = {
739 { ICE_ETYPE_OL, 12 },
740 { ICE_VLAN_OFOS, 14},
742 { ICE_IPV6_OFOS, 26 },
743 { ICE_UDP_ILOS, 66 },
744 { ICE_PROTOCOL_LAST, 0 },
747 static const u8 dummy_pppoe_ipv6_udp_packet[] = {
748 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
749 0x00, 0x00, 0x00, 0x00,
750 0x00, 0x00, 0x00, 0x00,
752 0x81, 0x00, /* ICE_ETYPE_OL 12 */
754 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
756 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
759 0x00, 0x57, /* PPP Link Layer 24 */
761 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
762 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
763 0x00, 0x00, 0x00, 0x00,
764 0x00, 0x00, 0x00, 0x00,
765 0x00, 0x00, 0x00, 0x00,
766 0x00, 0x00, 0x00, 0x00,
767 0x00, 0x00, 0x00, 0x00,
768 0x00, 0x00, 0x00, 0x00,
769 0x00, 0x00, 0x00, 0x00,
770 0x00, 0x00, 0x00, 0x00,
772 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 66 */
773 0x00, 0x08, 0x00, 0x00,
775 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
778 static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
780 { ICE_IPV4_OFOS, 14 },
782 { ICE_PROTOCOL_LAST, 0 },
785 static const u8 dummy_ipv4_esp_pkt[] = {
786 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
787 0x00, 0x00, 0x00, 0x00,
788 0x00, 0x00, 0x00, 0x00,
791 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 14 */
792 0x00, 0x00, 0x40, 0x00,
793 0x40, 0x32, 0x00, 0x00,
794 0x00, 0x00, 0x00, 0x00,
795 0x00, 0x00, 0x00, 0x00,
797 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
798 0x00, 0x00, 0x00, 0x00,
799 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
802 static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
804 { ICE_IPV6_OFOS, 14 },
806 { ICE_PROTOCOL_LAST, 0 },
809 static const u8 dummy_ipv6_esp_pkt[] = {
810 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
811 0x00, 0x00, 0x00, 0x00,
812 0x00, 0x00, 0x00, 0x00,
815 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
816 0x00, 0x08, 0x32, 0x00, /* Next header ESP */
817 0x00, 0x00, 0x00, 0x00,
818 0x00, 0x00, 0x00, 0x00,
819 0x00, 0x00, 0x00, 0x00,
820 0x00, 0x00, 0x00, 0x00,
821 0x00, 0x00, 0x00, 0x00,
822 0x00, 0x00, 0x00, 0x00,
823 0x00, 0x00, 0x00, 0x00,
824 0x00, 0x00, 0x00, 0x00,
826 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
827 0x00, 0x00, 0x00, 0x00,
828 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
831 static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
833 { ICE_IPV4_OFOS, 14 },
835 { ICE_PROTOCOL_LAST, 0 },
838 static const u8 dummy_ipv4_ah_pkt[] = {
839 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
840 0x00, 0x00, 0x00, 0x00,
841 0x00, 0x00, 0x00, 0x00,
844 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
845 0x00, 0x00, 0x40, 0x00,
846 0x40, 0x33, 0x00, 0x00,
847 0x00, 0x00, 0x00, 0x00,
848 0x00, 0x00, 0x00, 0x00,
850 0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
851 0x00, 0x00, 0x00, 0x00,
852 0x00, 0x00, 0x00, 0x00,
853 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
856 static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
858 { ICE_IPV6_OFOS, 14 },
860 { ICE_PROTOCOL_LAST, 0 },
863 static const u8 dummy_ipv6_ah_pkt[] = {
864 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
865 0x00, 0x00, 0x00, 0x00,
866 0x00, 0x00, 0x00, 0x00,
869 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
870 0x00, 0x0c, 0x33, 0x00, /* Next header AH */
871 0x00, 0x00, 0x00, 0x00,
872 0x00, 0x00, 0x00, 0x00,
873 0x00, 0x00, 0x00, 0x00,
874 0x00, 0x00, 0x00, 0x00,
875 0x00, 0x00, 0x00, 0x00,
876 0x00, 0x00, 0x00, 0x00,
877 0x00, 0x00, 0x00, 0x00,
878 0x00, 0x00, 0x00, 0x00,
880 0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
881 0x00, 0x00, 0x00, 0x00,
882 0x00, 0x00, 0x00, 0x00,
883 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
886 static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
888 { ICE_IPV4_OFOS, 14 },
889 { ICE_UDP_ILOS, 34 },
891 { ICE_PROTOCOL_LAST, 0 },
894 static const u8 dummy_ipv4_nat_pkt[] = {
895 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
896 0x00, 0x00, 0x00, 0x00,
897 0x00, 0x00, 0x00, 0x00,
900 0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_IL 14 */
901 0x00, 0x00, 0x40, 0x00,
902 0x40, 0x11, 0x00, 0x00,
903 0x00, 0x00, 0x00, 0x00,
904 0x00, 0x00, 0x00, 0x00,
906 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34 */
907 0x00, 0x00, 0x00, 0x00,
909 0x00, 0x00, 0x00, 0x00,
910 0x00, 0x00, 0x00, 0x00,
911 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
914 static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
916 { ICE_IPV6_OFOS, 14 },
917 { ICE_UDP_ILOS, 54 },
919 { ICE_PROTOCOL_LAST, 0 },
922 static const u8 dummy_ipv6_nat_pkt[] = {
923 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
924 0x00, 0x00, 0x00, 0x00,
925 0x00, 0x00, 0x00, 0x00,
928 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
929 0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */
930 0x00, 0x00, 0x00, 0x00,
931 0x00, 0x00, 0x00, 0x00,
932 0x00, 0x00, 0x00, 0x00,
933 0x00, 0x00, 0x00, 0x00,
934 0x00, 0x00, 0x00, 0x00,
935 0x00, 0x00, 0x00, 0x00,
936 0x00, 0x00, 0x00, 0x00,
937 0x00, 0x00, 0x00, 0x00,
939 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54 */
940 0x00, 0x00, 0x00, 0x00,
942 0x00, 0x00, 0x00, 0x00,
943 0x00, 0x00, 0x00, 0x00,
944 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
948 static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
950 { ICE_IPV4_OFOS, 14 },
952 { ICE_PROTOCOL_LAST, 0 },
955 static const u8 dummy_ipv4_l2tpv3_pkt[] = {
956 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
957 0x00, 0x00, 0x00, 0x00,
958 0x00, 0x00, 0x00, 0x00,
961 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
962 0x00, 0x00, 0x40, 0x00,
963 0x40, 0x73, 0x00, 0x00,
964 0x00, 0x00, 0x00, 0x00,
965 0x00, 0x00, 0x00, 0x00,
967 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
968 0x00, 0x00, 0x00, 0x00,
969 0x00, 0x00, 0x00, 0x00,
970 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
973 static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
975 { ICE_IPV6_OFOS, 14 },
977 { ICE_PROTOCOL_LAST, 0 },
980 static const u8 dummy_ipv6_l2tpv3_pkt[] = {
981 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
982 0x00, 0x00, 0x00, 0x00,
983 0x00, 0x00, 0x00, 0x00,
986 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
987 0x00, 0x0c, 0x73, 0x40,
988 0x00, 0x00, 0x00, 0x00,
989 0x00, 0x00, 0x00, 0x00,
990 0x00, 0x00, 0x00, 0x00,
991 0x00, 0x00, 0x00, 0x00,
992 0x00, 0x00, 0x00, 0x00,
993 0x00, 0x00, 0x00, 0x00,
994 0x00, 0x00, 0x00, 0x00,
995 0x00, 0x00, 0x00, 0x00,
997 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
998 0x00, 0x00, 0x00, 0x00,
999 0x00, 0x00, 0x00, 0x00,
1000 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1003 /* this is a recipe to profile association bitmap */
1004 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
1005 ICE_MAX_NUM_PROFILES);
1007 /* this is a profile to recipe association bitmap */
1008 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
1009 ICE_MAX_NUM_RECIPES);
1011 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
1014 * ice_collect_result_idx - copy result index values
1015 * @buf: buffer that contains the result index
1016 * @recp: the recipe struct to copy data into
1018 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
1019 struct ice_sw_recipe *recp)
1021 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1022 ice_set_bit(buf->content.result_indx &
1023 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
1027 * ice_get_tun_type_for_recipe - get tunnel type for the recipe
1028 * @rid: recipe ID that we are populating
static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid)
	/* Hard-coded groups of HW profile IDs used to classify this recipe */
	u8 vxlan_profile[12] = {10, 11, 12, 16, 17, 18, 22, 23, 24, 25, 26, 27};
	u8 gre_profile[12] = {13, 14, 15, 19, 20, 21, 28, 29, 30, 31, 32, 33};
	u8 pppoe_profile[7] = {34, 35, 36, 37, 38, 39, 40};
	u8 non_tun_profile[6] = {4, 5, 6, 7, 8, 9};
	enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
	u16 i, j, profile_num = 0;
	bool non_tun_valid = false;
	bool pppoe_valid = false;
	bool vxlan_valid = false;
	bool gre_valid = false;
	bool gtp_valid = false;
	bool flag_valid = false;

	/* Walk every profile associated with this recipe (recipe_to_profile
	 * bitmap) and record which of the known profile groups it hits.
	 */
	for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
		if (!ice_is_bit_set(recipe_to_profile[rid], j))

		for (i = 0; i < 12; i++) {
			if (gre_profile[i] == j)

		for (i = 0; i < 12; i++) {
			if (vxlan_profile[i] == j)

		for (i = 0; i < 7; i++) {
			if (pppoe_profile[i] == j)

		for (i = 0; i < 6; i++) {
			if (non_tun_profile[i] == j)
				non_tun_valid = true;

		/* GTP profiles occupy a contiguous profile-ID range */
		if (j >= ICE_PROFID_IPV4_GTPC_TEID &&
		    j <= ICE_PROFID_IPV6_GTPU_IPV6_OTHER)

		/* ESP/AH/NAT-T/PFCP "flag" profiles occupy a contiguous range */
		if (j >= ICE_PROFID_IPV4_ESP &&
		    j <= ICE_PROFID_IPV6_PFCP_SESSION)

	/* Resolve the tunnel type from the combination of groups seen */
	if (!non_tun_valid && vxlan_valid)
		tun_type = ICE_SW_TUN_VXLAN;
	else if (!non_tun_valid && gre_valid)
		tun_type = ICE_SW_TUN_NVGRE;
	else if (!non_tun_valid && pppoe_valid)
		tun_type = ICE_SW_TUN_PPPOE;
	else if (!non_tun_valid && gtp_valid)
		tun_type = ICE_SW_TUN_GTP;
	else if ((non_tun_valid && vxlan_valid) ||
		 (non_tun_valid && gre_valid) ||
		 (non_tun_valid && gtp_valid) ||
		 (non_tun_valid && pppoe_valid))
		tun_type = ICE_SW_TUN_AND_NON_TUN;
	else if ((non_tun_valid && !vxlan_valid) ||
		 (non_tun_valid && !gre_valid) ||
		 (non_tun_valid && !gtp_valid) ||
		 (non_tun_valid && !pppoe_valid))
		tun_type = ICE_NON_TUN;

	/* Multiple PPPoE profiles: narrow down to IPv4 vs IPv6 PPPoE by
	 * probing the "other" profile bits.
	 */
	if (profile_num > 1 && tun_type == ICE_SW_TUN_PPPOE) {
		i = ice_is_bit_set(recipe_to_profile[rid],
				   ICE_PROFID_PPPOE_IPV4_OTHER);
		j = ice_is_bit_set(recipe_to_profile[rid],
				   ICE_PROFID_PPPOE_IPV6_OTHER);
			tun_type = ICE_SW_TUN_PPPOE_IPV4;
			tun_type = ICE_SW_TUN_PPPOE_IPV6;

	/* Exactly one profile: map the profile ID directly to a tun type */
	if (profile_num == 1 && (flag_valid || non_tun_valid)) {
		for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
			if (ice_is_bit_set(recipe_to_profile[rid], j)) {
				case ICE_PROFID_IPV4_TCP:
					tun_type = ICE_SW_IPV4_TCP;
				case ICE_PROFID_IPV4_UDP:
					tun_type = ICE_SW_IPV4_UDP;
				case ICE_PROFID_IPV6_TCP:
					tun_type = ICE_SW_IPV6_TCP;
				case ICE_PROFID_IPV6_UDP:
					tun_type = ICE_SW_IPV6_UDP;
				case ICE_PROFID_PPPOE_PAY:
					tun_type = ICE_SW_TUN_PPPOE_PAY;
				case ICE_PROFID_PPPOE_IPV4_TCP:
					tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
				case ICE_PROFID_PPPOE_IPV4_UDP:
					tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
				case ICE_PROFID_PPPOE_IPV4_OTHER:
					tun_type = ICE_SW_TUN_PPPOE_IPV4;
				case ICE_PROFID_PPPOE_IPV6_TCP:
					tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
				case ICE_PROFID_PPPOE_IPV6_UDP:
					/* NOTE(review): IPv6 UDP profile is assigned an
					 * IPV4 tunnel type here; every sibling case maps
					 * IPv6 profiles to IPv6 types, so this looks like
					 * a typo for ICE_SW_TUN_PPPOE_IPV6_UDP -- confirm
					 * against upstream before changing.
					 */
					tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
				case ICE_PROFID_PPPOE_IPV6_OTHER:
					tun_type = ICE_SW_TUN_PPPOE_IPV6;
				case ICE_PROFID_IPV4_ESP:
					tun_type = ICE_SW_TUN_IPV4_ESP;
				case ICE_PROFID_IPV6_ESP:
					tun_type = ICE_SW_TUN_IPV6_ESP;
				case ICE_PROFID_IPV4_AH:
					tun_type = ICE_SW_TUN_IPV4_AH;
				case ICE_PROFID_IPV6_AH:
					tun_type = ICE_SW_TUN_IPV6_AH;
				case ICE_PROFID_IPV4_NAT_T:
					tun_type = ICE_SW_TUN_IPV4_NAT_T;
				case ICE_PROFID_IPV6_NAT_T:
					tun_type = ICE_SW_TUN_IPV6_NAT_T;
				case ICE_PROFID_IPV4_PFCP_NODE:
					ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
				case ICE_PROFID_IPV6_PFCP_NODE:
					ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
				case ICE_PROFID_IPV4_PFCP_SESSION:
					ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
				case ICE_PROFID_IPV6_PFCP_SESSION:
					ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
				case ICE_PROFID_MAC_IPV4_L2TPV3:
					tun_type = ICE_SW_TUN_IPV4_L2TPV3;
				case ICE_PROFID_MAC_IPV6_L2TPV3:
					tun_type = ICE_SW_TUN_IPV6_L2TPV3;
1200 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
1201 * @hw: pointer to hardware structure
1202 * @recps: struct that we need to populate
1203 * @rid: recipe ID that we are populating
1204 * @refresh_required: true if we should get recipe to profile mapping from FW
1206 * This function is used to populate all the necessary entries into our
1207 * bookkeeping so that we have a current list of all the recipes that are
1208 * programmed in the firmware.
1210 static enum ice_status
/**
 * ice_get_recp_frm_fw - update SW bookkeeping from recipe(s) read from FW
 * @hw: pointer to hardware structure
 * @recps: software recipe bookkeeping table to populate for this recipe
 * @rid: root recipe ID to read back from firmware
 * @refresh_required: in/out; when set, re-read the cached recipe-to-profile
 *	map from FW and clear the flag
 *
 * NOTE(review): several original lines (opening brace, some local
 * declarations such as sub_recps/fv_word_idx, and error-path bodies) appear
 * elided in this chunk; code tokens are reproduced verbatim, comments only
 * were added.
 */
ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
		    bool *refresh_required)
	ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	u16 num_recps = ICE_MAX_NUM_RECIPES;
	struct ice_prot_lkup_ext *lkup_exts;
	enum ice_status status;
	/* accumulates result indexes seen across the whole recipe chain */
	ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
	/* we need a buffer big enough to accommodate all the recipes */
	tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
		ICE_MAX_NUM_RECIPES, sizeof(*tmp));
		return ICE_ERR_NO_MEMORY;
	tmp[0].recipe_indx = rid;
	status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
	/* non-zero status meaning recipe doesn't exist */
	/* Get recipe to profile map so that we can get the fv from lkups that
	 * we read for a recipe from FW. Since we want to minimize the number of
	 * times we make this FW call, just make one call and cache the copy
	 * until a new recipe is added. This operation is only required the
	 * first time to get the changes from FW. Then to search existing
	 * entries we don't need to update the cache again until another recipe
	 */
	if (*refresh_required) {
		ice_get_recp_to_prof_map(hw);
		*refresh_required = false;
	/* Start populating all the entries for recps[rid] based on lkups from
	 * firmware. Note that we are only creating the root recipe in our
	 */
	lkup_exts = &recps[rid].lkup_exts;
	for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
		struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
		struct ice_recp_grp_entry *rg_entry;
		u8 i, prof, idx, prot = 0;
		rg_entry = (struct ice_recp_grp_entry *)
			ice_malloc(hw, sizeof(*rg_entry));
			status = ICE_ERR_NO_MEMORY;
		idx = root_bufs.recipe_indx;
		is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
		/* Mark all result indices in this chain */
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
			ice_set_bit(root_bufs.content.result_indx &
				    ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
		/* get the first profile that is associated with rid */
		prof = ice_find_first_bit(recipe_to_profile[idx],
					  ICE_MAX_NUM_PROFILES);
		for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
			u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
			rg_entry->fv_idx[i] = lkup_indx;
			rg_entry->fv_mask[i] =
				LE16_TO_CPU(root_bufs.content.mask[i + 1]);
			/* If the recipe is a chained recipe then all its
			 * child recipe's result will have a result index.
			 * To fill fv_words we should not use those result
			 * index, we only need the protocol ids and offsets.
			 * We will skip all the fv_idx which stores result
			 * index in them. We also need to skip any fv_idx which
			 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
			 * valid offset value.
			 */
			if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
					   rg_entry->fv_idx[i]) ||
			    rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
			    rg_entry->fv_idx[i] == 0)
			/* translate the field-vector index to prot id/offset */
			ice_find_prot_off(hw, ICE_BLK_SW, prof,
					  rg_entry->fv_idx[i], &prot, &off);
			lkup_exts->fv_words[fv_word_idx].prot_id = prot;
			lkup_exts->fv_words[fv_word_idx].off = off;
			lkup_exts->field_mask[fv_word_idx] =
				rg_entry->fv_mask[i];
		/* populate rg_list with the data from the child entry of this
		 */
		LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
		/* Propagate some data to the recipe database */
		recps[idx].is_root = !!is_root;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
		ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
			recps[idx].chain_idx = root_bufs.content.result_indx &
				~ICE_AQ_RECIPE_RESULT_EN;
			ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
			recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
		/* Only do the following for root recipes entries */
		ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
			   sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
		recps[idx].root_rid = root_bufs.content.rid &
			~ICE_AQ_RECIPE_ID_IS_ROOT;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
	/* Complete initialization of the root recipe entry */
	lkup_exts->n_val_words = fv_word_idx;
	recps[rid].big_recp = (num_recps > 1);
	recps[rid].n_grp_count = (u8)num_recps;
	recps[rid].tun_type = ice_get_tun_type_for_recipe(rid);
	recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
		ice_memdup(hw, tmp, recps[rid].n_grp_count *
			   sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
	if (!recps[rid].root_buf)
	/* Copy result indexes */
	ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
	recps[rid].recp_created = true;
/**
 * ice_get_recp_to_prof_map - updates recipe to profile mapping
 * @hw: pointer to hardware structure
 *
 * This function is used to populate recipe_to_profile matrix where index to
 * this array is the recipe ID and the element is the mapping of which profiles
 * is this recipe mapped to.
 */
static void ice_get_recp_to_prof_map(struct ice_hw *hw)
	ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
	/* query FW once per profile and rebuild both direction maps */
	for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
		ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
		ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
		if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
		/* NOTE(review): error-path body appears elided above */
		ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
			      ICE_MAX_NUM_RECIPES);
		/* mirror the FW answer into the recipe -> profile map */
		for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
			if (ice_is_bit_set(r_bitmap, j))
				ice_set_bit(i, recipe_to_profile[j]);
/**
 * ice_init_def_sw_recp - initialize the recipe book keeping tables
 * @hw: pointer to the HW struct
 * @recp_list: pointer to sw recipe list
 *
 * Allocate memory for the entire recipe table and initialize the structures/
 * entries corresponding to basic recipes.
 */
ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
	struct ice_sw_recipe *recps;
	/* one zeroed entry per possible recipe ID */
	recps = (struct ice_sw_recipe *)
		ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
		return ICE_ERR_NO_MEMORY;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		recps[i].root_rid = i;
		INIT_LIST_HEAD(&recps[i].filt_rules);
		INIT_LIST_HEAD(&recps[i].filt_replay_rules);
		INIT_LIST_HEAD(&recps[i].rg_list);
		ice_init_lock(&recps[i].filt_rule_lock);
/**
 * ice_aq_get_sw_cfg - get switch configuration
 * @hw: pointer to the hardware structure
 * @buf: pointer to the result buffer
 * @buf_size: length of the buffer available for response
 * @req_desc: pointer to requested descriptor
 * @num_elems: pointer to number of elements
 * @cd: pointer to command details structure or NULL
 *
 * Get switch configuration (0x0200) to be placed in 'buff'.
 * This admin command returns information such as initial VSI/port number
 * and switch ID it belongs to.
 *
 * NOTE: *req_desc is both an input/output parameter.
 * The caller of this function first calls this function with *request_desc set
 * to 0. If the response from f/w has *req_desc set to 0, all the switch
 * configuration information has been returned; if non-zero (meaning not all
 * the information was returned), the caller should call this function again
 * with *req_desc set to the previous value returned by f/w to get the
 * next block of switch configuration information.
 *
 * *num_elems is output only parameter. This reflects the number of elements
 * in response buffer. The caller of this function to use *num_elems while
 * parsing the response buffer.
 */
static enum ice_status
ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
		  u16 buf_size, u16 *req_desc, u16 *num_elems,
		  struct ice_sq_cd *cd)
	struct ice_aqc_get_sw_cfg *cmd;
	enum ice_status status;
	struct ice_aq_desc desc;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
	cmd = &desc.params.get_sw_conf;
	cmd->element = CPU_TO_LE16(*req_desc);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	/* FW echoes back the continuation token and element count
	 * (NOTE(review): the success check guarding these appears elided)
	 */
		*req_desc = LE16_TO_CPU(cmd->element);
		*num_elems = LE16_TO_CPU(cmd->num_elems);
/**
 * ice_alloc_sw - allocate resources specific to switch
 * @hw: pointer to the HW struct
 * @ena_stats: true to turn on VEB stats
 * @shared_res: true for shared resource, false for dedicated resource
 * @sw_id: switch ID returned
 * @counter_id: VEB counter ID returned
 *
 * allocates switch resources (SWID and VEB counter) (0x0208)
 *
 * NOTE(review): error-guard lines (e.g. "if (!sw_buf)") appear elided in
 * this chunk; code reproduced verbatim.
 */
ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	struct ice_aqc_res_elem *sw_ele;
	enum ice_status status;
	buf_len = sizeof(*sw_buf);
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
		return ICE_ERR_NO_MEMORY;
	/* Prepare buffer for switch ID.
	 * The number of resource entries in buffer is passed as 1 since only a
	 * single switch/VEB instance is allocated, and hence a single sw_id
	 */
	sw_buf->num_elems = CPU_TO_LE16(1);
		CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
			    (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
			     ICE_AQC_RES_TYPE_FLAG_DEDICATED));
	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
		goto ice_alloc_sw_exit;
	/* pull the allocated SWID out of the response element */
	sw_ele = &sw_buf->elem[0];
	*sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
		/* Prepare buffer for VEB Counter */
		enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
		struct ice_aqc_alloc_free_res_elem *counter_buf;
		struct ice_aqc_res_elem *counter_ele;
		counter_buf = (struct ice_aqc_alloc_free_res_elem *)
			ice_malloc(hw, buf_len);
			status = ICE_ERR_NO_MEMORY;
			goto ice_alloc_sw_exit;
		/* The number of resource entries in buffer is passed as 1 since
		 * only a single switch/VEB instance is allocated, and hence a
		 * single VEB counter is requested.
		 */
		counter_buf->num_elems = CPU_TO_LE16(1);
		counter_buf->res_type =
			CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
				    ICE_AQC_RES_TYPE_FLAG_DEDICATED);
		status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
			ice_free(hw, counter_buf);
			goto ice_alloc_sw_exit;
		/* return the allocated VEB counter ID to the caller */
		counter_ele = &counter_buf->elem[0];
		*counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
		ice_free(hw, counter_buf);
	ice_free(hw, sw_buf);
/**
 * ice_free_sw - free resources specific to switch
 * @hw: pointer to the HW struct
 * @sw_id: switch ID returned
 * @counter_id: VEB counter ID returned
 *
 * free switch resources (SWID and VEB counter) (0x0209)
 *
 * NOTE: This function frees multiple resources. It continues
 * releasing other resources even after it encounters error.
 * The error code returned is the last error it encountered.
 */
enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
	struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
	enum ice_status status, ret_status;
	buf_len = sizeof(*sw_buf);
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
		return ICE_ERR_NO_MEMORY;
	/* Prepare buffer to free for switch ID res.
	 * The number of resource entries in buffer is passed as 1 since only a
	 * single switch/VEB instance is freed, and hence a single sw_id
	 */
	sw_buf->num_elems = CPU_TO_LE16(1);
	sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
	sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
	ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
					   ice_aqc_opc_free_res, NULL);
		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
	/* Prepare buffer to free for VEB Counter resource */
	counter_buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
		ice_free(hw, sw_buf);
		return ICE_ERR_NO_MEMORY;
	/* The number of resource entries in buffer is passed as 1 since only a
	 * single switch/VEB instance is freed, and hence a single VEB counter
	 */
	counter_buf->num_elems = CPU_TO_LE16(1);
	counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
	counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
	status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
				       ice_aqc_opc_free_res, NULL);
		ice_debug(hw, ICE_DBG_SW,
			  "VEB counter resource could not be freed\n");
		/* keep going, but report the last failure to the caller */
		ret_status = status;
	ice_free(hw, counter_buf);
	ice_free(hw, sw_buf);
/**
 * ice_aq_add_vsi
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware (0x0210)
 */
ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
	struct ice_aqc_add_update_free_vsi_resp *res;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	cmd = &desc.params.vsi_cmd;
	res = &desc.params.add_update_free_vsi_res;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
	/* caller supplies the VSI number only when not allocating from pool */
	if (!vsi_ctx->alloc_from_pool)
		cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
					   ICE_AQ_VSI_IS_VALID);
	cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
	/* command carries a write buffer (the VSI properties) */
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);
	/* on success FW returns the assigned VSI number and pool counters */
		vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
		vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
		vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
/**
 * ice_aq_free_vsi
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware (0x0213)
 */
ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		bool keep_vsi_alloc, struct ice_sq_cd *cd)
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
	cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
	/* NOTE(review): the keep_vsi_alloc conditional appears elided here */
		cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	/* on success FW reports updated pool usage counters */
		vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
		vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
/**
 * ice_aq_update_vsi
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Update VSI context in the hardware (0x0211)
 */
ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		  struct ice_sq_cd *cd)
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
	cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
	/* command carries a write buffer (the new VSI properties) */
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);
	/* on success FW reports updated pool usage counters */
		vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
		vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1732 * ice_is_vsi_valid - check whether the VSI is valid or not
1733 * @hw: pointer to the HW struct
1734 * @vsi_handle: VSI handle
1736 * check whether the VSI is valid or not
1738 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1740 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1744 * ice_get_hw_vsi_num - return the HW VSI number
1745 * @hw: pointer to the HW struct
1746 * @vsi_handle: VSI handle
1748 * return the HW VSI number
1749 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1751 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1753 return hw->vsi_ctx[vsi_handle]->vsi_num;
1757 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1758 * @hw: pointer to the HW struct
1759 * @vsi_handle: VSI handle
1761 * return the VSI context entry for a given VSI handle
1763 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1765 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1769 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1770 * @hw: pointer to the HW struct
1771 * @vsi_handle: VSI handle
1772 * @vsi: VSI context pointer
1774 * save the VSI context entry for a given VSI handle
1777 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1779 hw->vsi_ctx[vsi_handle] = vsi;
/**
 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 */
static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
	struct ice_vsi_ctx *vsi;
	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	/* NOTE(review): a NULL check on vsi appears elided here — confirm */
	ice_for_each_traffic_class(i) {
		if (vsi->lan_q_ctx[i]) {
			/* release the saved LAN queue context for this TC */
			ice_free(hw, vsi->lan_q_ctx[i]);
			vsi->lan_q_ctx[i] = NULL;
/**
 * ice_clear_vsi_ctx - clear the VSI context entry
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * clear the VSI context entry
 */
static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
	struct ice_vsi_ctx *vsi;
	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	/* NOTE(review): guard and free of the context struct appear elided */
		ice_clear_vsi_q_ctx(hw, vsi_handle);
		hw->vsi_ctx[vsi_handle] = NULL;
1823 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1824 * @hw: pointer to the HW struct
1826 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1830 for (i = 0; i < ICE_MAX_VSI; i++)
1831 ice_clear_vsi_ctx(hw, i);
/**
 * ice_add_vsi - add VSI context to the hardware and VSI handle list
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle provided by drivers
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware also add it into the VSI handle list.
 * If this function gets called after reset for existing VSIs then update
 * with the new HW VSI number in the corresponding VSI handle list entry.
 */
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	    struct ice_sq_cd *cd)
	struct ice_vsi_ctx *tmp_vsi_ctx;
	enum ice_status status;
	if (vsi_handle >= ICE_MAX_VSI)
		return ICE_ERR_PARAM;
	status = ice_aq_add_vsi(hw, vsi_ctx, cd);
	tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
		/* Create a new VSI context */
		tmp_vsi_ctx = (struct ice_vsi_ctx *)
			ice_malloc(hw, sizeof(*tmp_vsi_ctx));
			/* roll back the HW-side add on allocation failure */
			ice_aq_free_vsi(hw, vsi_ctx, false, cd);
			return ICE_ERR_NO_MEMORY;
		*tmp_vsi_ctx = *vsi_ctx;
		ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
		/* update with new HW VSI num */
		tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
/**
 * ice_free_vsi- free VSI context from hardware and VSI handle list
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware as well as from VSI handle list
 */
ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	     bool keep_vsi_alloc, struct ice_sq_cd *cd)
	enum ice_status status;
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	/* translate the driver handle to the HW VSI number FW expects */
	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
	status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
	/* clear the local bookkeeping entry as well */
		ice_clear_vsi_ctx(hw, vsi_handle);
1904 * @hw: pointer to the HW struct
1905 * @vsi_handle: unique VSI handle
1906 * @vsi_ctx: pointer to a VSI context struct
1907 * @cd: pointer to command details structure or NULL
1909 * Update VSI context in the hardware
1912 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1913 struct ice_sq_cd *cd)
1915 if (!ice_is_vsi_valid(hw, vsi_handle))
1916 return ICE_ERR_PARAM;
1917 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1918 return ice_aq_update_vsi(hw, vsi_ctx, cd);
/**
 * ice_aq_get_vsi_params
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Get VSI context info from hardware (0x0212)
 */
ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		      struct ice_sq_cd *cd)
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aqc_get_vsi_resp *resp;
	struct ice_aq_desc desc;
	enum ice_status status;
	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.get_vsi_resp;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
	cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);
	/* on success copy back the VSI number and pool usage counters */
		vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
		vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
		vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
/**
 * ice_aq_add_update_mir_rule - add/update a mirror rule
 * @hw: pointer to the HW struct
 * @rule_type: Rule Type
 * @dest_vsi: VSI number to which packets will be mirrored
 * @count: length of the list
 * @mr_buf: buffer for list of mirrored VSI numbers
 * @cd: pointer to command details structure or NULL
 * @rule_id: in/out rule ID; pass ICE_INVAL_MIRROR_RULE_ID to add a new rule
 *
 * Add/Update Mirror Rule (0x260).
 *
 * NOTE(review): several lines (buffer-size locals, loop guard bodies)
 * appear elided in this chunk; code reproduced verbatim.
 */
ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
			   u16 count, struct ice_mir_rule_buf *mr_buf,
			   struct ice_sq_cd *cd, u16 *rule_id)
	struct ice_aqc_add_update_mir_rule *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	__le16 *mr_list = NULL;
	switch (rule_type) {
	case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
	case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
		/* Make sure count and mr_buf are set for these rule_types */
		if (!(count && mr_buf))
			return ICE_ERR_PARAM;
		buf_size = count * sizeof(__le16);
		mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
			return ICE_ERR_NO_MEMORY;
	case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
	case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
		/* Make sure count and mr_buf are not set for these
		 */
		if (count || mr_buf)
			return ICE_ERR_PARAM;
		ice_debug(hw, ICE_DBG_SW,
			  "Error due to unsupported rule_type %u\n", rule_type);
		return ICE_ERR_OUT_OF_RANGE;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
	/* Pre-process 'mr_buf' items for add/update of virtual port
	 * ingress/egress mirroring (but not physical port ingress/egress
	 */
	for (i = 0; i < count; i++) {
		id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
		/* Validate specified VSI number, make sure it is less
		 * than ICE_MAX_VSI, if not return with error.
		 */
		if (id >= ICE_MAX_VSI) {
			ice_debug(hw, ICE_DBG_SW,
				  "Error VSI index (%u) out-of-range\n",
			ice_free(hw, mr_list);
			return ICE_ERR_OUT_OF_RANGE;
		/* add VSI to mirror rule */
			CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
		else /* remove VSI from mirror rule */
			mr_list[i] = CPU_TO_LE16(id);
	cmd = &desc.params.add_update_rule;
	/* an existing rule ID is re-validated for an update */
	if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
		cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
					   ICE_AQC_RULE_ID_VALID_M);
	cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
	cmd->num_entries = CPU_TO_LE16(count);
	cmd->dest = CPU_TO_LE16(dest_vsi);
	status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
	/* on success FW returns the (possibly new) rule ID */
		*rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
	ice_free(hw, mr_list);
/**
 * ice_aq_delete_mir_rule - delete a mirror rule
 * @hw: pointer to the HW struct
 * @rule_id: Mirror rule ID (to be deleted)
 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
 *		 otherwise it is returned to the shared pool
 * @cd: pointer to command details structure or NULL
 *
 * Delete Mirror Rule (0x261).
 */
ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
		       struct ice_sq_cd *cd)
	struct ice_aqc_delete_mir_rule *cmd;
	struct ice_aq_desc desc;
	/* rule_id should be in the range 0...63 */
	if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
		return ICE_ERR_OUT_OF_RANGE;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
	cmd = &desc.params.del_rule;
	rule_id |= ICE_AQC_RULE_ID_VALID_M;
	cmd->rule_id = CPU_TO_LE16(rule_id);
	/* NOTE(review): the keep_allocd conditional appears elided here */
		cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/**
 * ice_aq_alloc_free_vsi_list
 * @hw: pointer to the HW struct
 * @vsi_list_id: VSI list ID returned or used for lookup
 * @lkup_type: switch rule filter lookup type
 * @opc: switch rules population command type - pass in the command opcode
 *
 * allocates or free a VSI list resource
 */
static enum ice_status
ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
			   enum ice_sw_lkup_type lkup_type,
			   enum ice_adminq_opc opc)
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	struct ice_aqc_res_elem *vsi_ele;
	enum ice_status status;
	buf_len = sizeof(*sw_buf);
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
		return ICE_ERR_NO_MEMORY;
	sw_buf->num_elems = CPU_TO_LE16(1);
	/* pick the resource pool matching the lookup type: replication
	 * lists for MAC/ethertype/promisc lookups, prune lists for VLAN
	 */
	if (lkup_type == ICE_SW_LKUP_MAC ||
	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
	    lkup_type == ICE_SW_LKUP_PROMISC ||
	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
	    lkup_type == ICE_SW_LKUP_LAST) {
		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
	} else if (lkup_type == ICE_SW_LKUP_VLAN) {
			CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
		status = ICE_ERR_PARAM;
		goto ice_aq_alloc_free_vsi_list_exit;
	/* freeing an existing list requires its ID in the request */
	if (opc == ice_aqc_opc_free_res)
		sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
		goto ice_aq_alloc_free_vsi_list_exit;
	if (opc == ice_aqc_opc_alloc_res) {
		vsi_ele = &sw_buf->elem[0];
		*vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
ice_aq_alloc_free_vsi_list_exit:
	ice_free(hw, sw_buf);
/**
 * ice_aq_set_storm_ctrl - Sets storm control configuration
 * @hw: pointer to the HW struct
 * @bcast_thresh: represents the upper threshold for broadcast storm control
 * @mcast_thresh: represents the upper threshold for multicast storm control
 * @ctl_bitmask: storm control control knobs
 *
 * Sets the storm control configuration (0x0280)
 */
ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
	struct ice_aqc_storm_cfg *cmd;
	struct ice_aq_desc desc;
	cmd = &desc.params.storm_conf;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
	/* thresholds are masked to the field width before sending */
	cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
	cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
	cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
/**
 * ice_aq_get_storm_ctrl - gets storm control configuration
 * @hw: pointer to the HW struct
 * @bcast_thresh: represents the upper threshold for broadcast storm control
 * @mcast_thresh: represents the upper threshold for multicast storm control
 * @ctl_bitmask: storm control control knobs
 *
 * Gets the storm control configuration (0x0281)
 */
ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
	enum ice_status status;
	struct ice_aq_desc desc;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
		struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
		/* output pointers are only written on success */
		*bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
		*mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
		*ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
/**
 * ice_aq_sw_rules - add/update/remove switch rules
 * @hw: pointer to the HW struct
 * @rule_list: pointer to switch rule population list
 * @rule_list_sz: total size of the rule list in bytes
 * @num_rules: number of switch rules in the rule_list
 * @opc: switch rules population command type - pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
 */
static enum ice_status
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
		u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
	struct ice_aq_desc desc;
	enum ice_status status;
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	/* only the three sw-rules opcodes are legal here */
	if (opc != ice_aqc_opc_add_sw_rules &&
	    opc != ice_aqc_opc_update_sw_rules &&
	    opc != ice_aqc_opc_remove_sw_rules)
		return ICE_ERR_PARAM;
	ice_fill_dflt_direct_cmd_desc(&desc, opc);
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
	desc.params.sw_rules.num_rules_fltr_entry_index =
		CPU_TO_LE16(num_rules);
	status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
	/* map FW "entry not found" on update/remove to a distinct error */
	if (opc != ice_aqc_opc_add_sw_rules &&
	    hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
		status = ICE_ERR_DOES_NOT_EXIST;
/**
 * ice_aq_add_recipe - add switch recipe
 * @hw: pointer to the HW struct
 * @s_recipe_list: pointer to switch rule population list
 * @num_recipes: number of switch recipes in the list
 * @cd: pointer to command details structure or NULL
 *
 * Add recipe (0x0290)
 */
ice_aq_add_recipe(struct ice_hw *hw,
		  struct ice_aqc_recipe_data_elem *s_recipe_list,
		  u16 num_recipes, struct ice_sq_cd *cd)
	struct ice_aqc_add_get_recipe *cmd;
	struct ice_aq_desc desc;
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	cmd = &desc.params.add_get_recipe;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
	cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
	/* command carries a write buffer (the recipe list) */
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
	buf_size = num_recipes * sizeof(*s_recipe_list);
	return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
/**
 * ice_aq_get_recipe - get switch recipe
 * @hw: pointer to the HW struct
 * @s_recipe_list: pointer to switch rule population list
 * @num_recipes: pointer to the number of recipes (input and output)
 * @recipe_root: root recipe number of recipe(s) to retrieve
 * @cd: pointer to command details structure or NULL
 *
 * Get recipe (0x0292)
 *
 * On input, *num_recipes should equal the number of entries in s_recipe_list.
 * On output, *num_recipes will equal the number of entries returned in
 * s_recipe_list.
 *
 * The caller must supply enough space in s_recipe_list to hold all possible
 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
 */
ice_aq_get_recipe(struct ice_hw *hw,
		  struct ice_aqc_recipe_data_elem *s_recipe_list,
		  u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
	struct ice_aqc_add_get_recipe *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	if (*num_recipes != ICE_MAX_NUM_RECIPES)
		return ICE_ERR_PARAM;
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	cmd = &desc.params.add_get_recipe;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
	cmd->return_index = CPU_TO_LE16(recipe_root);
	cmd->num_sub_recipes = 0;
	buf_size = *num_recipes * sizeof(*s_recipe_list);
	status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
	/* FW reports how many sub-recipes it actually returned */
	/* cppcheck-suppress constArgument */
	*num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
/**
 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
 * @hw: pointer to the HW struct
 * @profile_id: package profile ID to associate the recipe with
 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
 * @cd: pointer to command details structure or NULL
 *
 * Recipe to profile association (0x0291)
 */
ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
			     struct ice_sq_cd *cd)
	struct ice_aqc_recipe_to_profile *cmd;
	struct ice_aq_desc desc;
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	cmd = &desc.params.recipe_to_profile;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
	cmd->profile_id = CPU_TO_LE16(profile_id);
	/* Set the recipe ID bit in the bitmask to let the device know which
	 * profile we are associating the recipe to
	 */
	ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
		   ICE_NONDMA_TO_NONDMA);
	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/**
 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
 * @hw: pointer to the HW struct
 * @profile_id: package profile ID to associate the recipe with
 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
 * @cd: pointer to command details structure or NULL
 *
 * Associate profile ID with given recipe (0x0293)
 */
ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
			     struct ice_sq_cd *cd)
	struct ice_aqc_recipe_to_profile *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	cmd = &desc.params.recipe_to_profile;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
	cmd->profile_id = CPU_TO_LE16(profile_id);
	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	/* on success copy the association bitmap back to the caller */
		ice_memcpy(r_bitmap, cmd->recipe_assoc,
			   sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
2383 * ice_alloc_recipe - add recipe resource
2384 * @hw: pointer to the hardware structure
2385 * @rid: recipe ID returned as response to AQ call
2387 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
2389 struct ice_aqc_alloc_free_res_elem *sw_buf;
2390 enum ice_status status;
2393 buf_len = sizeof(*sw_buf);
2394 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2396 return ICE_ERR_NO_MEMORY;
2398 sw_buf->num_elems = CPU_TO_LE16(1);
2399 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
2400 ICE_AQC_RES_TYPE_S) |
2401 ICE_AQC_RES_TYPE_FLAG_SHARED);
2402 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2403 ice_aqc_opc_alloc_res, NULL);
2405 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2406 ice_free(hw, sw_buf);
2411 /* ice_init_port_info - Initialize port_info with switch configuration data
2412 * @pi: pointer to port_info
2413 * @vsi_port_num: VSI number or port number
2414 * @type: Type of switch element (port or VSI)
2415 * @swid: switch ID of the switch the element is attached to
2416 * @pf_vf_num: PF or VF number
2417 * @is_vf: true if the element is a VF, false otherwise
2420 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2421 u16 swid, u16 pf_vf_num, bool is_vf)
2424 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2425 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2427 pi->pf_vf_num = pf_vf_num;
2429 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
2430 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
2433 ice_debug(pi->hw, ICE_DBG_SW,
2434 "incorrect VSI/port type received\n");
/**
 * ice_get_initial_sw_cfg - Get initial port and default VSI data
 * @hw: pointer to the hardware structure
 *
 * Queries the firmware for the switch configuration and initializes
 * hw->port_info from the physical/virtual port element(s) found in the
 * response. Returns ICE_SUCCESS, ICE_ERR_NO_MEMORY on allocation failure,
 * ICE_ERR_CFG if more ports than expected are reported, or the AQ status.
 */
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
{
	struct ice_aqc_get_sw_cfg_resp *rbuf;
	enum ice_status status;
	u8 num_total_ports;
	u16 req_desc = 0;
	u16 num_elems;
	u8 j = 0;
	u16 i;

	/* This device family exposes a single port per function */
	num_total_ports = 1;

	rbuf = (struct ice_aqc_get_sw_cfg_resp *)
		ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);

	if (!rbuf)
		return ICE_ERR_NO_MEMORY;

	/* Multiple calls to ice_aq_get_sw_cfg may be required
	 * to get all the switch configuration information. The need
	 * for additional calls is indicated by ice_aq_get_sw_cfg
	 * writing a non-zero value in req_desc
	 */
	do {
		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
					   &req_desc, &num_elems, NULL);

		if (status)
			break;

		for (i = 0; i < num_elems; i++) {
			struct ice_aqc_get_sw_cfg_resp_elem *ele;
			u16 pf_vf_num, swid, vsi_port_num;
			bool is_vf = false;
			u8 res_type;

			ele = rbuf[i].elements;
			/* Element number and type are packed into one LE16 */
			vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;

			pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;

			swid = LE16_TO_CPU(ele->swid);

			if (LE16_TO_CPU(ele->pf_vf_num) &
			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
				is_vf = true;

			res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
					ICE_AQC_GET_SW_CONF_RESP_TYPE_S);

			switch (res_type) {
			case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
			case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
				if (j == num_total_ports) {
					ice_debug(hw, ICE_DBG_SW,
						  "more ports than expected\n");
					status = ICE_ERR_CFG;
					goto out;
				}
				ice_init_port_info(hw->port_info,
						   vsi_port_num, res_type, swid,
						   pf_vf_num, is_vf);
				j++;
				break;
			default:
				/* VSI and other element types are ignored at
				 * this stage of initialization
				 */
				break;
			}
		}
	} while (req_desc && !status);

out:
	ice_free(hw, (void *)rbuf);
	return status;
}
2520 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2521 * @hw: pointer to the hardware structure
2522 * @fi: filter info structure to fill/update
2524 * This helper function populates the lb_en and lan_en elements of the provided
2525 * ice_fltr_info struct using the switch's type and characteristics of the
2526 * switch rule being configured.
2528 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
2530 if ((fi->flag & ICE_FLTR_RX) &&
2531 (fi->fltr_act == ICE_FWD_TO_VSI ||
2532 fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
2533 fi->lkup_type == ICE_SW_LKUP_LAST)
2537 if ((fi->flag & ICE_FLTR_TX) &&
2538 (fi->fltr_act == ICE_FWD_TO_VSI ||
2539 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2540 fi->fltr_act == ICE_FWD_TO_Q ||
2541 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2542 /* Setting LB for prune actions will result in replicated
2543 * packets to the internal switch that will be dropped.
2545 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2548 /* Set lan_en to TRUE if
2549 * 1. The switch is a VEB AND
2551 * 2.1 The lookup is a directional lookup like ethertype,
2552 * promiscuous, ethertype-MAC, promiscuous-VLAN
2553 * and default-port OR
2554 * 2.2 The lookup is VLAN, OR
2555 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2556 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
2560 * The switch is a VEPA.
2562 * In all other cases, the LAN enable has to be set to false.
2565 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2566 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
2567 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2568 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2569 fi->lkup_type == ICE_SW_LKUP_DFLT ||
2570 fi->lkup_type == ICE_SW_LKUP_VLAN ||
2571 (fi->lkup_type == ICE_SW_LKUP_MAC &&
2572 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
2573 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
2574 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
/**
 * ice_fill_sw_rule - Helper function to fill switch rule structure
 * @hw: pointer to the hardware structure
 * @f_info: entry containing packet forwarding information
 * @s_rule: switch rule structure to be filled in based on mac_entry
 * @opc: switch rules population command type - pass in the command opcode
 *
 * Builds the AQ lookup-rule element (type, recipe, source, action and the
 * dummy Ethernet match header) from @f_info. For a remove opcode only the
 * rule index is filled; the header is left empty.
 */
static void
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
		 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
{
	u16 vlan_id = ICE_MAX_VLAN_ID + 1;	/* sentinel: "no VLAN match" */
	void *daddr = NULL;
	u16 eth_hdr_sz;
	u8 *eth_hdr;
	u32 act = 0;
	__be16 *off;
	u8 q_rgn;

	if (opc == ice_aqc_opc_remove_sw_rules) {
		/* Removal only needs the rule index; no action or header */
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			CPU_TO_LE16(f_info->fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		return;
	}

	eth_hdr_sz = sizeof(dummy_eth_header);
	eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;

	/* initialize the ether header with a dummy header */
	ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
	ice_fill_sw_info(hw, f_info);

	/* Encode the forwarding action into the 32-bit action word */
	switch (f_info->fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
			ICE_SINGLE_ACT_VSI_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_VSI_LIST:
		act |= ICE_SINGLE_ACT_VSI_LIST;
		act |= (f_info->fwd_id.vsi_list_id <<
			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
			ICE_SINGLE_ACT_VSI_LIST_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
			ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_QGRP:
		/* Queue region size must be expressed as a power of two */
		q_rgn = f_info->qgrp_size > 0 ?
			(u8)ice_ilog2(f_info->qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
			ICE_SINGLE_ACT_Q_REGION_M;
		break;
	default:
		return;
	}

	if (f_info->lb_en)
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	if (f_info->lan_en)
		act |= ICE_SINGLE_ACT_LAN_ENABLE;

	/* Pick up the match fields (DA, VLAN, ethertype) per lookup type */
	switch (f_info->lkup_type) {
	case ICE_SW_LKUP_MAC:
		daddr = f_info->l_data.mac.mac_addr;
		break;
	case ICE_SW_LKUP_VLAN:
		vlan_id = f_info->l_data.vlan.vlan_id;
		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			/* VLAN rules forwarding to VSI/list are prune rules */
			act |= ICE_SINGLE_ACT_PRUNE;
			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
		}
		break;
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		daddr = f_info->l_data.ethertype_mac.mac_addr;
		/* fall-through */
	case ICE_SW_LKUP_ETHERTYPE:
		off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		break;
	case ICE_SW_LKUP_PROMISC_VLAN:
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		/* fall-through */
	case ICE_SW_LKUP_PROMISC:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		break;
	default:
		break;
	}

	s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
		CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
		CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);

	/* Recipe set depending on lookup type */
	s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
	s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
	s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);

	if (daddr)
		ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
			   ICE_NONDMA_TO_NONDMA);

	/* Only program the VLAN TCI when a valid (<= 0xFFF) ID was set */
	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
		off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
		*off = CPU_TO_BE16(vlan_id);
	}

	/* Create the switch rule with the final dummy Ethernet header */
	if (opc != ice_aqc_opc_update_sw_rules)
		s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
}
/**
 * ice_add_marker_act
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which sw marker needs to be added
 * @sw_marker: sw marker to tag the Rx descriptor with
 * @l_id: large action resource ID
 *
 * Create a large action to hold software marker and update the switch rule
 * entry pointed by m_ent with newly created large action
 */
static enum ice_status
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		   u16 sw_marker, u16 l_id)
{
	struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
	/* For software marker we need 3 large actions
	 * 1. FWD action: FWD TO VSI or VSI LIST
	 * 2. GENERIC VALUE action to hold the profile ID
	 * 3. GENERIC VALUE action to hold the software marker ID
	 */
	const u16 num_lg_acts = 3;
	enum ice_status status;
	u16 lg_act_size;
	u16 rules_size;
	u32 act;
	u16 id;

	/* Markers are only supported on MAC lookup rules */
	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 *    1. Large Action
	 *    2. Look up Tx Rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
	if (!lg_act)
		return ICE_ERR_NO_MEMORY;

	/* second element lives immediately after the large action */
	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
	lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs
	 */
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
		ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);

	/* Second action descriptor type */
	act = ICE_LG_ACT_GENERIC;

	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);

	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;

	/* Third action Marker value */
	act |= ICE_LG_ACT_GENERIC;
	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
		ICE_LG_ACT_GENERIC_VALUE_M;

	lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);

	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* Update the action to point to the large action ID */
	rx_tx->pdata.lkup_tx_rx.act =
		CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
			    ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
			     ICE_SINGLE_ACT_PTR_VAL_M));

	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	rx_tx->pdata.lkup_tx_rx.index =
		CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);

	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		/* Record the large action association on success only */
		m_ent->lg_act_idx = l_id;
		m_ent->sw_marker_id = sw_marker;
	}

	ice_free(hw, lg_act);
	return status;
}
/**
 * ice_add_counter_act - add/update filter rule with counter action
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which counter needs to be added
 * @counter_id: VLAN counter ID returned as part of allocate resource
 * @l_id: large action resource ID
 */
static enum ice_status
ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		    u16 counter_id, u16 l_id)
{
	struct ice_aqc_sw_rules_elem *lg_act;
	struct ice_aqc_sw_rules_elem *rx_tx;
	enum ice_status status;
	/* 2 actions will be added while adding a large action counter */
	const int num_acts = 2;
	u16 lg_act_size;
	u16 rules_size;
	u16 f_rule_id;
	u32 act;
	u16 id;

	/* Counters are only supported on MAC lookup rules */
	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 * 1. Large Action
	 * 2. Look up Tx Rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
							    rules_size);
	if (!lg_act)
		return ICE_ERR_NO_MEMORY;

	/* second element lives immediately after the large action */
	rx_tx = (struct ice_aqc_sw_rules_elem *)
		((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
	lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs
	 */
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
		ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);

	/* Second action counter ID */
	act = ICE_LG_ACT_STAT_COUNT;
	act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
		ICE_LG_ACT_STAT_COUNT_M;
	lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);

	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* Point the lookup rule at the large action just built */
	act = ICE_SINGLE_ACT_PTR;
	act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
	rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);

	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	f_rule_id = m_ent->fltr_info.fltr_rule_id;
	rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);

	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		/* Record the counter association on success only */
		m_ent->lg_act_idx = l_id;
		m_ent->counter_index = counter_id;
	}

	ice_free(hw, lg_act);
	return status;
}
/**
 * ice_create_vsi_list_map
 * @hw: pointer to the hardware structure
 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 *
 * Helper function to create a new entry of VSI list ID to VSI mapping
 * using the given VSI list ID. Returns the new map entry, or NULL on
 * allocation failure.
 */
static struct ice_vsi_list_map_info *
ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			u16 vsi_list_id)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_map;
	int i;

	v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
							   sizeof(*v_map));
	if (!v_map)
		return NULL;

	v_map->vsi_list_id = vsi_list_id;
	/* NOTE(review): reconstructed from upstream — confirm ref_cnt init */
	v_map->ref_cnt = 1;
	for (i = 0; i < num_vsi; i++)
		ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);

	/* Track the mapping on the switch-wide list */
	LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
	return v_map;
}
/**
 * ice_update_vsi_list_rule
 * @hw: pointer to the hardware structure
 * @vsi_handle_arr: array of VSI handles to form a VSI list
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 * @remove: Boolean value to indicate if this is a remove action
 * @opc: switch rules population command type - pass in the command opcode
 * @lkup_type: lookup type of the filter
 *
 * Call AQ command to add a new switch rule or update existing switch rule
 * using the given VSI list ID
 */
static enum ice_status
ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
			 enum ice_sw_lkup_type lkup_type)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_status status;
	u16 s_rule_size;
	u16 rule_type;
	int i;

	if (!num_vsi)
		return ICE_ERR_PARAM;

	/* VLAN lookups use prune lists; all other supported lookups use
	 * plain VSI lists
	 */
	if (lkup_type == ICE_SW_LKUP_MAC ||
	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
	    lkup_type == ICE_SW_LKUP_PROMISC ||
	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
	    lkup_type == ICE_SW_LKUP_LAST)
		rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
			ICE_AQC_SW_RULES_T_VSI_LIST_SET;
	else if (lkup_type == ICE_SW_LKUP_VLAN)
		rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
			ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
	else
		return ICE_ERR_PARAM;

	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
	s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;
	for (i = 0; i < num_vsi; i++) {
		if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
			status = ICE_ERR_PARAM;
			goto exit;
		}
		/* AQ call requires hw_vsi_id(s) */
		s_rule->pdata.vsi_list.vsi[i] =
			CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
	}

	s_rule->type = CPU_TO_LE16(rule_type);
	s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
	s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);

	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);

exit:
	ice_free(hw, s_rule);
	return status;
}
3011 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
3012 * @hw: pointer to the HW struct
3013 * @vsi_handle_arr: array of VSI handles to form a VSI list
3014 * @num_vsi: number of VSI handles in the array
3015 * @vsi_list_id: stores the ID of the VSI list to be created
3016 * @lkup_type: switch rule filter's lookup type
3018 static enum ice_status
3019 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3020 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
3022 enum ice_status status;
3024 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
3025 ice_aqc_opc_alloc_res);
3029 /* Update the newly created VSI list to include the specified VSIs */
3030 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
3031 *vsi_list_id, false,
3032 ice_aqc_opc_add_sw_rules, lkup_type);
/**
 * ice_create_pkt_fwd_rule
 * @hw: pointer to the hardware structure
 * @recp_list: corresponding filter management list
 * @f_entry: entry containing packet forwarding information
 *
 * Create switch rule with given filter information and add an entry
 * to the corresponding filter management list to track this switch rule
 */
static enum ice_status
ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
			struct ice_fltr_list_entry *f_entry)
{
	struct ice_fltr_mgmt_list_entry *fm_entry;
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_status status;

	s_rule = (struct ice_aqc_sw_rules_elem *)
		ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;
	fm_entry = (struct ice_fltr_mgmt_list_entry *)
		   ice_malloc(hw, sizeof(*fm_entry));
	if (!fm_entry) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_create_pkt_fwd_rule_exit;
	}

	fm_entry->fltr_info = f_entry->fltr_info;

	/* Initialize all the fields for the management entry */
	fm_entry->vsi_count = 1;
	fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
	fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
	fm_entry->counter_index = ICE_INVAL_COUNTER_ID;

	ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
			 ice_aqc_opc_add_sw_rules);

	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
				 ice_aqc_opc_add_sw_rules, NULL);
	if (status) {
		/* AQ add failed: drop the bookkeeping entry too */
		ice_free(hw, fm_entry);
		goto ice_create_pkt_fwd_rule_exit;
	}

	/* Firmware echoes back the rule index it assigned; keep it in both
	 * the caller's entry and the management entry.
	 */
	f_entry->fltr_info.fltr_rule_id =
		LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
	fm_entry->fltr_info.fltr_rule_id =
		LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);

	/* The book keeping entries will get removed when base driver
	 * calls remove filter AQ command
	 */
	LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);

ice_create_pkt_fwd_rule_exit:
	ice_free(hw, s_rule);
	return status;
}
3098 * ice_update_pkt_fwd_rule
3099 * @hw: pointer to the hardware structure
3100 * @f_info: filter information for switch rule
3102 * Call AQ command to update a previously created switch rule with a
3105 static enum ice_status
3106 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
3108 struct ice_aqc_sw_rules_elem *s_rule;
3109 enum ice_status status;
3111 s_rule = (struct ice_aqc_sw_rules_elem *)
3112 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
3114 return ICE_ERR_NO_MEMORY;
3116 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
3118 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
3120 /* Update switch rule with new rule set to forward VSI list */
3121 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3122 ice_aqc_opc_update_sw_rules, NULL);
3124 ice_free(hw, s_rule);
/**
 * ice_update_sw_rule_bridge_mode
 * @hw: pointer to the HW struct
 *
 * Updates unicast switch filter rules based on VEB/VEPA mode
 */
enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *fm_entry;
	enum ice_status status = ICE_SUCCESS;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */

	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;

	ice_acquire_lock(rule_lock);
	LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
			    list_entry) {
		struct ice_fltr_info *fi = &fm_entry->fltr_info;
		u8 *addr = fi->l_data.mac.mac_addr;

		/* Update unicast Tx rules to reflect the selected
		 * VEB/VEPA mode
		 */
		if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
		    (fi->fltr_act == ICE_FWD_TO_VSI ||
		     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
		     fi->fltr_act == ICE_FWD_TO_Q ||
		     fi->fltr_act == ICE_FWD_TO_QGRP)) {
			status = ice_update_pkt_fwd_rule(hw, fi);
			/* Stop on first failure; status is reported back */
			if (status)
				break;
		}
	}

	ice_release_lock(rule_lock);

	return status;
}
/**
 * ice_add_update_vsi_list
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with the new VSI to be added
 *
 * Call AQ command to add or update previously created VSI list with new VSI.
 *
 * Helper function to do book keeping associated with adding filter information
 * The algorithm to do the book keeping is described below :
 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
 * if only one VSI has been added till now
 *	Allocate a new VSI list and add two VSIs
 *	to this list using switch rule command
 *	Update the previously created switch rule with the
 *	newly created VSI list ID
 * if a VSI list was previously created
 *	Add the new VSI to the previously created VSI list set
 *	using the update switch rule command
 */
static enum ice_status
ice_add_update_vsi_list(struct ice_hw *hw,
			struct ice_fltr_mgmt_list_entry *m_entry,
			struct ice_fltr_info *cur_fltr,
			struct ice_fltr_info *new_fltr)
{
	enum ice_status status = ICE_SUCCESS;
	u16 vsi_list_id = 0;

	/* Queue/queue-group forwards cannot be combined into VSI lists */
	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
		return ICE_ERR_NOT_IMPL;

	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
		return ICE_ERR_NOT_IMPL;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 * new VSIs.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
			return ICE_ERR_ALREADY_EXISTS;

		vsi_handle_arr[0] = cur_fltr->vsi_handle;
		vsi_handle_arr[1] = new_fltr->vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id,
						  new_fltr->lkup_type);
		if (status)
			return status;

		tmp_fltr = *new_fltr;
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		/* Update the previous switch rule of "MAC forward to VSI" to
		 * "MAC fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			return status;

		/* Bookkeeping now reflects the list-based forward */
		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);

		/* If this entry was large action then the large action needs
		 * to be updated to point to FWD to VSI list
		 */
		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
			status =
			    ice_add_marker_act(hw, m_entry,
					       m_entry->sw_marker_id,
					       m_entry->lg_act_idx);
	} else {
		u16 vsi_handle = new_fltr->vsi_handle;
		enum ice_adminq_opc opcode;

		if (!m_entry->vsi_list_info)
			return ICE_ERR_DOES_NOT_EXIST;

		/* A rule already exists with the new VSI being added */
		if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
			return ICE_SUCCESS;

		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		 */
		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
		opcode = ice_aqc_opc_update_sw_rules;

		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false, opcode,
						  new_fltr->lkup_type);
		/* update VSI list mapping info with new VSI ID */
		if (!status)
			ice_set_bit(vsi_handle,
				    m_entry->vsi_list_info->vsi_map);
	}
	if (!status)
		m_entry->vsi_count++;
	return status;
}
3286 * ice_find_rule_entry - Search a rule entry
3287 * @list_head: head of rule list
3288 * @f_info: rule information
3290 * Helper function to search for a given rule entry
3291 * Returns pointer to entry storing the rule if found
3293 static struct ice_fltr_mgmt_list_entry *
3294 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
3295 struct ice_fltr_info *f_info)
3297 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
3299 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3301 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3302 sizeof(f_info->l_data)) &&
3303 f_info->flag == list_itr->fltr_info.flag) {
/**
 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
 * @recp_list: VSI lists needs to be searched
 * @vsi_handle: VSI handle to be found in VSI list
 * @vsi_list_id: VSI list ID found containing vsi_handle
 *
 * Helper function to search a VSI list with single entry containing given VSI
 * handle element. This can be extended further to search VSI list with more
 * than 1 vsi_count. Returns pointer to VSI list entry if found.
 */
static struct ice_vsi_list_map_info *
ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
			u16 *vsi_list_id)
{
	struct ice_vsi_list_map_info *map_info = NULL;
	struct LIST_HEAD_TYPE *list_head;

	list_head = &recp_list->filt_rules;
	/* Advanced recipes keep their rules in a different entry type */
	if (recp_list->adv_rule) {
		struct ice_adv_fltr_mgmt_list_entry *list_itr;

		LIST_FOR_EACH_ENTRY(list_itr, list_head,
				    ice_adv_fltr_mgmt_list_entry,
				    list_entry) {
			if (list_itr->vsi_list_info) {
				map_info = list_itr->vsi_list_info;
				if (ice_is_bit_set(map_info->vsi_map,
						   vsi_handle)) {
					*vsi_list_id = map_info->vsi_list_id;
					return map_info;
				}
			}
		}
	} else {
		struct ice_fltr_mgmt_list_entry *list_itr;

		LIST_FOR_EACH_ENTRY(list_itr, list_head,
				    ice_fltr_mgmt_list_entry,
				    list_entry) {
			/* only consider single-VSI lists here */
			if (list_itr->vsi_count == 1 &&
			    list_itr->vsi_list_info) {
				map_info = list_itr->vsi_list_info;
				if (ice_is_bit_set(map_info->vsi_map,
						   vsi_handle)) {
					*vsi_list_id = map_info->vsi_list_id;
					return map_info;
				}
			}
		}
	}
	return NULL;
}
/**
 * ice_add_rule_internal - add rule for a given lookup type
 * @hw: pointer to the hardware structure
 * @recp_list: recipe list for which rule has to be added
 * @lport: logic port number on which function add rule
 * @f_entry: structure containing MAC forwarding information
 *
 * Adds or updates the rule lists for a given recipe
 */
static enum ice_status
ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
		      u8 lport, struct ice_fltr_list_entry *f_entry)
{
	struct ice_fltr_info *new_fltr, *cur_fltr;
	struct ice_fltr_mgmt_list_entry *m_entry;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;

	/* Load the hw_vsi_id only if the fwd action is fwd to VSI */
	if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
		f_entry->fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &recp_list->filt_rule_lock;

	ice_acquire_lock(rule_lock);
	new_fltr = &f_entry->fltr_info;
	/* Rule source: port number for Rx rules, HW VSI number for Tx */
	if (new_fltr->flag & ICE_FLTR_RX)
		new_fltr->src = lport;
	else if (new_fltr->flag & ICE_FLTR_TX)
		new_fltr->src =
			ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	/* New lookup key: create a fresh rule. Existing key: fold the new
	 * VSI into the existing rule's VSI list.
	 */
	m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
	if (!m_entry) {
		status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
		goto exit_add_rule_internal;
	}

	cur_fltr = &m_entry->fltr_info;
	status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);

exit_add_rule_internal:
	ice_release_lock(rule_lock);
	return status;
}
3415 * ice_remove_vsi_list_rule
3416 * @hw: pointer to the hardware structure
3417 * @vsi_list_id: VSI list ID generated as part of allocate resource
3418 * @lkup_type: switch rule filter lookup type
3420 * The VSI list should be emptied before this function is called to remove the
3423 static enum ice_status
3424 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
3425 enum ice_sw_lkup_type lkup_type)
3427 /* Free the vsi_list resource that we allocated. It is assumed that the
3428 * list is empty at this point.
3430 return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
3431 ice_aqc_opc_free_res);
3435 * ice_rem_update_vsi_list
3436 * @hw: pointer to the hardware structure
3437 * @vsi_handle: VSI handle of the VSI to remove
3438 * @fm_list: filter management entry for which the VSI list management needs to
3441 static enum ice_status
3442 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
3443 struct ice_fltr_mgmt_list_entry *fm_list)
3445 enum ice_sw_lkup_type lkup_type;
3446 enum ice_status status = ICE_SUCCESS;
3449 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
3450 fm_list->vsi_count == 0)
3451 return ICE_ERR_PARAM;
3453 /* A rule with the VSI being removed does not exist */
3454 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
3455 return ICE_ERR_DOES_NOT_EXIST;
3457 lkup_type = fm_list->fltr_info.lkup_type;
3458 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
3459 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
3460 ice_aqc_opc_update_sw_rules,
3465 fm_list->vsi_count--;
3466 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
3468 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
3469 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
3470 struct ice_vsi_list_map_info *vsi_list_info =
3471 fm_list->vsi_list_info;
3474 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
3476 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
3477 return ICE_ERR_OUT_OF_RANGE;
3479 /* Make sure VSI list is empty before removing it below */
3480 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
3482 ice_aqc_opc_update_sw_rules,
3487 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
3488 tmp_fltr_info.fwd_id.hw_vsi_id =
3489 ice_get_hw_vsi_num(hw, rem_vsi_handle);
3490 tmp_fltr_info.vsi_handle = rem_vsi_handle;
3491 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
3493 ice_debug(hw, ICE_DBG_SW,
3494 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
3495 tmp_fltr_info.fwd_id.hw_vsi_id, status);
3499 fm_list->fltr_info = tmp_fltr_info;
3502 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
3503 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
3504 struct ice_vsi_list_map_info *vsi_list_info =
3505 fm_list->vsi_list_info;
3507 /* Remove the VSI list since it is no longer used */
3508 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
3510 ice_debug(hw, ICE_DBG_SW,
3511 "Failed to remove VSI list %d, error %d\n",
3512 vsi_list_id, status);
3516 LIST_DEL(&vsi_list_info->list_entry);
3517 ice_free(hw, vsi_list_info);
3518 fm_list->vsi_list_info = NULL;
3525 * ice_remove_rule_internal - Remove a filter rule of a given type
3527 * @hw: pointer to the hardware structure
3528 * @recp_list: recipe list for which the rule needs to removed
3529 * @f_entry: rule entry containing filter information
3531 static enum ice_status
3532 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3533 struct ice_fltr_list_entry *f_entry)
3535 struct ice_fltr_mgmt_list_entry *list_elem;
3536 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3537 enum ice_status status = ICE_SUCCESS;
3538 bool remove_rule = false;
3541 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3542 return ICE_ERR_PARAM;
3543 f_entry->fltr_info.fwd_id.hw_vsi_id =
3544 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3546 rule_lock = &recp_list->filt_rule_lock;
3547 ice_acquire_lock(rule_lock);
3548 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
3549 &f_entry->fltr_info);
3551 status = ICE_ERR_DOES_NOT_EXIST;
3555 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
3557 } else if (!list_elem->vsi_list_info) {
3558 status = ICE_ERR_DOES_NOT_EXIST;
3560 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
3561 /* a ref_cnt > 1 indicates that the vsi_list is being
3562 * shared by multiple rules. Decrement the ref_cnt and
3563 * remove this rule, but do not modify the list, as it
3564 * is in-use by other rules.
3566 list_elem->vsi_list_info->ref_cnt--;
3569 /* a ref_cnt of 1 indicates the vsi_list is only used
3570 * by one rule. However, the original removal request is only
3571 * for a single VSI. Update the vsi_list first, and only
3572 * remove the rule if there are no further VSIs in this list.
3574 vsi_handle = f_entry->fltr_info.vsi_handle;
3575 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
3578 /* if VSI count goes to zero after updating the VSI list */
3579 if (list_elem->vsi_count == 0)
3584 /* Remove the lookup rule */
3585 struct ice_aqc_sw_rules_elem *s_rule;
3587 s_rule = (struct ice_aqc_sw_rules_elem *)
3588 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
3590 status = ICE_ERR_NO_MEMORY;
3594 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3595 ice_aqc_opc_remove_sw_rules);
3597 status = ice_aq_sw_rules(hw, s_rule,
3598 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
3599 ice_aqc_opc_remove_sw_rules, NULL);
3601 /* Remove a book keeping from the list */
3602 ice_free(hw, s_rule);
3607 LIST_DEL(&list_elem->list_entry);
3608 ice_free(hw, list_elem);
3611 ice_release_lock(rule_lock);
3616 * ice_aq_get_res_alloc - get allocated resources
3617 * @hw: pointer to the HW struct
3618 * @num_entries: pointer to u16 to store the number of resource entries returned
3619 * @buf: pointer to user-supplied buffer
3620 * @buf_size: size of buff
3621 * @cd: pointer to command details structure or NULL
3623 * The user-supplied buffer must be large enough to store the resource
3624 * information for all resource types. Each resource type is an
3625 * ice_aqc_get_res_resp_data_elem structure.
3628 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
3629 u16 buf_size, struct ice_sq_cd *cd)
3631 struct ice_aqc_get_res_alloc *resp;
3632 enum ice_status status;
3633 struct ice_aq_desc desc;
3636 return ICE_ERR_BAD_PTR;
3638 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
3639 return ICE_ERR_INVAL_SIZE;
3641 resp = &desc.params.get_res;
3643 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
3644 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3646 if (!status && num_entries)
3647 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
3653 * ice_aq_get_res_descs - get allocated resource descriptors
3654 * @hw: pointer to the hardware structure
3655 * @num_entries: number of resource entries in buffer
3656 * @buf: Indirect buffer to hold data parameters and response
3657 * @buf_size: size of buffer for indirect commands
3658 * @res_type: resource type
3659 * @res_shared: is resource shared
3660 * @desc_id: input - first desc ID to start; output - next desc ID
3661 * @cd: pointer to command details structure or NULL
3664 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
3665 struct ice_aqc_get_allocd_res_desc_resp *buf,
3666 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
3667 struct ice_sq_cd *cd)
3669 struct ice_aqc_get_allocd_res_desc *cmd;
3670 struct ice_aq_desc desc;
3671 enum ice_status status;
3673 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3675 cmd = &desc.params.get_res_desc;
3678 return ICE_ERR_PARAM;
3680 if (buf_size != (num_entries * sizeof(*buf)))
3681 return ICE_ERR_PARAM;
3683 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
3685 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
3686 ICE_AQC_RES_TYPE_M) | (res_shared ?
3687 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
3688 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
3690 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3692 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
3698 * ice_add_mac_rule - Add a MAC address based filter rule
3699 * @hw: pointer to the hardware structure
3700 * @m_list: list of MAC addresses and forwarding information
3701 * @sw: pointer to switch info struct for which function add rule
3702 * @lport: logic port number on which function add rule
3704 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
3705 * multiple unicast addresses, the function assumes that all the
3706 * addresses are unique in a given add_mac call. It doesn't
3707 * check for duplicates in this case, removing duplicates from a given
3708 * list should be taken care of in the caller of this function.
3710 static enum ice_status
3711 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
3712 struct ice_switch_info *sw, u8 lport)
3714 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
3715 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3716 struct ice_fltr_list_entry *m_list_itr;
3717 struct LIST_HEAD_TYPE *rule_head;
3718 u16 total_elem_left, s_rule_size;
3719 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3720 enum ice_status status = ICE_SUCCESS;
3721 u16 num_unicast = 0;
3725 rule_lock = &recp_list->filt_rule_lock;
3726 rule_head = &recp_list->filt_rules;
3728 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3730 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3734 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3735 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3736 if (!ice_is_vsi_valid(hw, vsi_handle))
3737 return ICE_ERR_PARAM;
3738 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3739 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3740 /* update the src in case it is VSI num */
3741 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3742 return ICE_ERR_PARAM;
3743 m_list_itr->fltr_info.src = hw_vsi_id;
3744 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3745 IS_ZERO_ETHER_ADDR(add))
3746 return ICE_ERR_PARAM;
3747 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3748 /* Don't overwrite the unicast address */
3749 ice_acquire_lock(rule_lock);
3750 if (ice_find_rule_entry(rule_head,
3751 &m_list_itr->fltr_info)) {
3752 ice_release_lock(rule_lock);
3753 return ICE_ERR_ALREADY_EXISTS;
3755 ice_release_lock(rule_lock);
3757 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3758 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3759 m_list_itr->status =
3760 ice_add_rule_internal(hw, recp_list, lport,
3762 if (m_list_itr->status)
3763 return m_list_itr->status;
3767 ice_acquire_lock(rule_lock);
3768 /* Exit if no suitable entries were found for adding bulk switch rule */
3770 status = ICE_SUCCESS;
3771 goto ice_add_mac_exit;
3774 /* Allocate switch rule buffer for the bulk update for unicast */
3775 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3776 s_rule = (struct ice_aqc_sw_rules_elem *)
3777 ice_calloc(hw, num_unicast, s_rule_size);
3779 status = ICE_ERR_NO_MEMORY;
3780 goto ice_add_mac_exit;
3784 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3786 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3787 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3789 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3790 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3791 ice_aqc_opc_add_sw_rules);
3792 r_iter = (struct ice_aqc_sw_rules_elem *)
3793 ((u8 *)r_iter + s_rule_size);
3797 /* Call AQ bulk switch rule update for all unicast addresses */
3799 /* Call AQ switch rule in AQ_MAX chunk */
3800 for (total_elem_left = num_unicast; total_elem_left > 0;
3801 total_elem_left -= elem_sent) {
3802 struct ice_aqc_sw_rules_elem *entry = r_iter;
3804 elem_sent = MIN_T(u8, total_elem_left,
3805 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
3806 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3807 elem_sent, ice_aqc_opc_add_sw_rules,
3810 goto ice_add_mac_exit;
3811 r_iter = (struct ice_aqc_sw_rules_elem *)
3812 ((u8 *)r_iter + (elem_sent * s_rule_size));
3815 /* Fill up rule ID based on the value returned from FW */
3817 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3819 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3820 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3821 struct ice_fltr_mgmt_list_entry *fm_entry;
3823 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3824 f_info->fltr_rule_id =
3825 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3826 f_info->fltr_act = ICE_FWD_TO_VSI;
3827 /* Create an entry to track this MAC address */
3828 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3829 ice_malloc(hw, sizeof(*fm_entry));
3831 status = ICE_ERR_NO_MEMORY;
3832 goto ice_add_mac_exit;
3834 fm_entry->fltr_info = *f_info;
3835 fm_entry->vsi_count = 1;
3836 /* The book keeping entries will get removed when
3837 * base driver calls remove filter AQ command
3840 LIST_ADD(&fm_entry->list_entry, rule_head);
3841 r_iter = (struct ice_aqc_sw_rules_elem *)
3842 ((u8 *)r_iter + s_rule_size);
3847 ice_release_lock(rule_lock);
3849 ice_free(hw, s_rule);
3854 * ice_add_mac - Add a MAC address based filter rule
3855 * @hw: pointer to the hardware structure
3856 * @m_list: list of MAC addresses and forwarding information
3858 * Function add MAC rule for logical port from HW struct
3860 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3863 return ICE_ERR_PARAM;
3865 return ice_add_mac_rule(hw, m_list, hw->switch_info,
3866 hw->port_info->lport);
3870 * ice_add_vlan_internal - Add one VLAN based filter rule
3871 * @hw: pointer to the hardware structure
3872 * @recp_list: recipe list for which rule has to be added
3873 * @f_entry: filter entry containing one VLAN information
3875 static enum ice_status
3876 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3877 struct ice_fltr_list_entry *f_entry)
3879 struct ice_fltr_mgmt_list_entry *v_list_itr;
3880 struct ice_fltr_info *new_fltr, *cur_fltr;
3881 enum ice_sw_lkup_type lkup_type;
3882 u16 vsi_list_id = 0, vsi_handle;
3883 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3884 enum ice_status status = ICE_SUCCESS;
3886 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3887 return ICE_ERR_PARAM;
3889 f_entry->fltr_info.fwd_id.hw_vsi_id =
3890 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3891 new_fltr = &f_entry->fltr_info;
3893 /* VLAN ID should only be 12 bits */
3894 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3895 return ICE_ERR_PARAM;
3897 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3898 return ICE_ERR_PARAM;
3900 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3901 lkup_type = new_fltr->lkup_type;
3902 vsi_handle = new_fltr->vsi_handle;
3903 rule_lock = &recp_list->filt_rule_lock;
3904 ice_acquire_lock(rule_lock);
3905 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
3907 struct ice_vsi_list_map_info *map_info = NULL;
3909 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3910 /* All VLAN pruning rules use a VSI list. Check if
3911 * there is already a VSI list containing VSI that we
3912 * want to add. If found, use the same vsi_list_id for
3913 * this new VLAN rule or else create a new list.
3915 map_info = ice_find_vsi_list_entry(recp_list,
3919 status = ice_create_vsi_list_rule(hw,
3927 /* Convert the action to forwarding to a VSI list. */
3928 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3929 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3932 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3934 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
3937 status = ICE_ERR_DOES_NOT_EXIST;
3940 /* reuse VSI list for new rule and increment ref_cnt */
3942 v_list_itr->vsi_list_info = map_info;
3943 map_info->ref_cnt++;
3945 v_list_itr->vsi_list_info =
3946 ice_create_vsi_list_map(hw, &vsi_handle,
3950 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3951 /* Update existing VSI list to add new VSI ID only if it used
3954 cur_fltr = &v_list_itr->fltr_info;
3955 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3958 /* If VLAN rule exists and VSI list being used by this rule is
3959 * referenced by more than 1 VLAN rule. Then create a new VSI
3960 * list appending previous VSI with new VSI and update existing
3961 * VLAN rule to point to new VSI list ID
3963 struct ice_fltr_info tmp_fltr;
3964 u16 vsi_handle_arr[2];
3967 /* Current implementation only supports reusing VSI list with
3968 * one VSI count. We should never hit below condition
3970 if (v_list_itr->vsi_count > 1 &&
3971 v_list_itr->vsi_list_info->ref_cnt > 1) {
3972 ice_debug(hw, ICE_DBG_SW,
3973 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3974 status = ICE_ERR_CFG;
3979 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3982 /* A rule already exists with the new VSI being added */
3983 if (cur_handle == vsi_handle) {
3984 status = ICE_ERR_ALREADY_EXISTS;
3988 vsi_handle_arr[0] = cur_handle;
3989 vsi_handle_arr[1] = vsi_handle;
3990 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3991 &vsi_list_id, lkup_type);
3995 tmp_fltr = v_list_itr->fltr_info;
3996 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3997 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3998 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3999 /* Update the previous switch rule to a new VSI list which
4000 * includes current VSI that is requested
4002 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
4006 /* before overriding VSI list map info. decrement ref_cnt of
4009 v_list_itr->vsi_list_info->ref_cnt--;
4011 /* now update to newly created list */
4012 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
4013 v_list_itr->vsi_list_info =
4014 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4016 v_list_itr->vsi_count++;
4020 ice_release_lock(rule_lock);
4025 * ice_add_vlan_rule - Add VLAN based filter rule
4026 * @hw: pointer to the hardware structure
4027 * @v_list: list of VLAN entries and forwarding information
4028 * @sw: pointer to switch info struct for which function add rule
4030 static enum ice_status
4031 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4032 struct ice_switch_info *sw)
4034 struct ice_fltr_list_entry *v_list_itr;
4035 struct ice_sw_recipe *recp_list;
4037 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
4038 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
4040 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
4041 return ICE_ERR_PARAM;
4042 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
4043 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
4045 if (v_list_itr->status)
4046 return v_list_itr->status;
4052 * ice_add_vlan - Add a VLAN based filter rule
4053 * @hw: pointer to the hardware structure
4054 * @v_list: list of VLAN and forwarding information
4056 * Function add VLAN rule for logical port from HW struct
4058 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4061 return ICE_ERR_PARAM;
4063 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
4067 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
4068 * @hw: pointer to the hardware structure
4069 * @mv_list: list of MAC and VLAN filters
4070 * @sw: pointer to switch info struct for which function add rule
4071 * @lport: logic port number on which function add rule
4073 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
4074 * pruning bits enabled, then it is the responsibility of the caller to make
4075 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
4076 * VLAN won't be received on that VSI otherwise.
4078 static enum ice_status
4079 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
4080 struct ice_switch_info *sw, u8 lport)
4082 struct ice_fltr_list_entry *mv_list_itr;
4083 struct ice_sw_recipe *recp_list;
4085 if (!mv_list || !hw)
4086 return ICE_ERR_PARAM;
4088 recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
4089 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
4091 enum ice_sw_lkup_type l_type =
4092 mv_list_itr->fltr_info.lkup_type;
4094 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4095 return ICE_ERR_PARAM;
4096 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
4097 mv_list_itr->status =
4098 ice_add_rule_internal(hw, recp_list, lport,
4100 if (mv_list_itr->status)
4101 return mv_list_itr->status;
4107 * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
4108 * @hw: pointer to the hardware structure
4109 * @mv_list: list of MAC VLAN addresses and forwarding information
4111 * Function add MAC VLAN rule for logical port from HW struct
4114 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4116 if (!mv_list || !hw)
4117 return ICE_ERR_PARAM;
4119 return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
4120 hw->port_info->lport);
4124 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
4125 * @hw: pointer to the hardware structure
4126 * @em_list: list of ether type MAC filter, MAC is optional
4127 * @sw: pointer to switch info struct for which function add rule
4128 * @lport: logic port number on which function add rule
4130 * This function requires the caller to populate the entries in
4131 * the filter list with the necessary fields (including flags to
4132 * indicate Tx or Rx rules).
4134 static enum ice_status
4135 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
4136 struct ice_switch_info *sw, u8 lport)
4138 struct ice_fltr_list_entry *em_list_itr;
4140 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
4142 struct ice_sw_recipe *recp_list;
4143 enum ice_sw_lkup_type l_type;
4145 l_type = em_list_itr->fltr_info.lkup_type;
4146 recp_list = &sw->recp_list[l_type];
4148 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
4149 l_type != ICE_SW_LKUP_ETHERTYPE)
4150 return ICE_ERR_PARAM;
4152 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
4155 if (em_list_itr->status)
4156 return em_list_itr->status;
4162 * ice_add_eth_mac - Add a ethertype based filter rule
4163 * @hw: pointer to the hardware structure
4164 * @em_list: list of ethertype and forwarding information
4166 * Function add ethertype rule for logical port from HW struct
4169 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
4171 if (!em_list || !hw)
4172 return ICE_ERR_PARAM;
4174 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
4175 hw->port_info->lport);
4179 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
4180 * @hw: pointer to the hardware structure
4181 * @em_list: list of ethertype or ethertype MAC entries
4182 * @sw: pointer to switch info struct for which function add rule
4184 static enum ice_status
4185 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
4186 struct ice_switch_info *sw)
4188 struct ice_fltr_list_entry *em_list_itr, *tmp;
4190 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
4192 struct ice_sw_recipe *recp_list;
4193 enum ice_sw_lkup_type l_type;
4195 l_type = em_list_itr->fltr_info.lkup_type;
4197 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
4198 l_type != ICE_SW_LKUP_ETHERTYPE)
4199 return ICE_ERR_PARAM;
4201 recp_list = &sw->recp_list[l_type];
4202 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4204 if (em_list_itr->status)
4205 return em_list_itr->status;
4211 * ice_remove_eth_mac - remove a ethertype based filter rule
4212 * @hw: pointer to the hardware structure
4213 * @em_list: list of ethertype and forwarding information
4217 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
4219 if (!em_list || !hw)
4220 return ICE_ERR_PARAM;
4222 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
4226 * ice_rem_sw_rule_info
4227 * @hw: pointer to the hardware structure
4228 * @rule_head: pointer to the switch list structure that we want to delete
4231 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
4233 if (!LIST_EMPTY(rule_head)) {
4234 struct ice_fltr_mgmt_list_entry *entry;
4235 struct ice_fltr_mgmt_list_entry *tmp;
4237 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
4238 ice_fltr_mgmt_list_entry, list_entry) {
4239 LIST_DEL(&entry->list_entry);
4240 ice_free(hw, entry);
4246 * ice_rem_adv_rule_info
4247 * @hw: pointer to the hardware structure
4248 * @rule_head: pointer to the switch list structure that we want to delete
4251 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
4253 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
4254 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
4256 if (LIST_EMPTY(rule_head))
4259 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
4260 ice_adv_fltr_mgmt_list_entry, list_entry) {
4261 LIST_DEL(&lst_itr->list_entry);
4262 ice_free(hw, lst_itr->lkups);
4263 ice_free(hw, lst_itr);
4268 * ice_rem_all_sw_rules_info
4269 * @hw: pointer to the hardware structure
4271 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
4273 struct ice_switch_info *sw = hw->switch_info;
4276 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4277 struct LIST_HEAD_TYPE *rule_head;
4279 rule_head = &sw->recp_list[i].filt_rules;
4280 if (!sw->recp_list[i].adv_rule)
4281 ice_rem_sw_rule_info(hw, rule_head);
4283 ice_rem_adv_rule_info(hw, rule_head);
4284 if (sw->recp_list[i].adv_rule &&
4285 LIST_EMPTY(&sw->recp_list[i].filt_rules))
4286 sw->recp_list[i].adv_rule = false;
4291 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
4292 * @pi: pointer to the port_info structure
4293 * @vsi_handle: VSI handle to set as default
4294 * @set: true to add the above mentioned switch rule, false to remove it
4295 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
4297 * add filter rule to set/unset given VSI as default VSI for the switch
4298 * (represented by swid)
4301 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
4304 struct ice_aqc_sw_rules_elem *s_rule;
4305 struct ice_fltr_info f_info;
4306 struct ice_hw *hw = pi->hw;
4307 enum ice_adminq_opc opcode;
4308 enum ice_status status;
4312 if (!ice_is_vsi_valid(hw, vsi_handle))
4313 return ICE_ERR_PARAM;
4314 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4316 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
4317 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
4318 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
4320 return ICE_ERR_NO_MEMORY;
4322 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
4324 f_info.lkup_type = ICE_SW_LKUP_DFLT;
4325 f_info.flag = direction;
4326 f_info.fltr_act = ICE_FWD_TO_VSI;
4327 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
4329 if (f_info.flag & ICE_FLTR_RX) {
4330 f_info.src = pi->lport;
4331 f_info.src_id = ICE_SRC_ID_LPORT;
4333 f_info.fltr_rule_id =
4334 pi->dflt_rx_vsi_rule_id;
4335 } else if (f_info.flag & ICE_FLTR_TX) {
4336 f_info.src_id = ICE_SRC_ID_VSI;
4337 f_info.src = hw_vsi_id;
4339 f_info.fltr_rule_id =
4340 pi->dflt_tx_vsi_rule_id;
4344 opcode = ice_aqc_opc_add_sw_rules;
4346 opcode = ice_aqc_opc_remove_sw_rules;
4348 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
4350 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
4351 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
4354 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4356 if (f_info.flag & ICE_FLTR_TX) {
4357 pi->dflt_tx_vsi_num = hw_vsi_id;
4358 pi->dflt_tx_vsi_rule_id = index;
4359 } else if (f_info.flag & ICE_FLTR_RX) {
4360 pi->dflt_rx_vsi_num = hw_vsi_id;
4361 pi->dflt_rx_vsi_rule_id = index;
4364 if (f_info.flag & ICE_FLTR_TX) {
4365 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
4366 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
4367 } else if (f_info.flag & ICE_FLTR_RX) {
4368 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
4369 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
4374 ice_free(hw, s_rule);
4379 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
4380 * @list_head: head of rule list
4381 * @f_info: rule information
4383 * Helper function to search for a unicast rule entry - this is to be used
4384 * to remove unicast MAC filter that is not shared with other VSIs on the
4387 * Returns pointer to entry storing the rule if found
4389 static struct ice_fltr_mgmt_list_entry *
4390 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
4391 struct ice_fltr_info *f_info)
4393 struct ice_fltr_mgmt_list_entry *list_itr;
4395 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4397 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4398 sizeof(f_info->l_data)) &&
4399 f_info->fwd_id.hw_vsi_id ==
4400 list_itr->fltr_info.fwd_id.hw_vsi_id &&
4401 f_info->flag == list_itr->fltr_info.flag)
4408 * ice_remove_mac_rule - remove a MAC based filter rule
4409 * @hw: pointer to the hardware structure
4410 * @m_list: list of MAC addresses and forwarding information
4411 * @recp_list: list from which function remove MAC address
4413 * This function removes either a MAC filter rule or a specific VSI from a
4414 * VSI list for a multicast MAC address.
4416 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
4417 * ice_add_mac. Caller should be aware that this call will only work if all
4418 * the entries passed into m_list were added previously. It will not attempt to
4419 * do a partial remove of entries that were found.
4421 static enum ice_status
4422 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4423 struct ice_sw_recipe *recp_list)
4425 struct ice_fltr_list_entry *list_itr, *tmp;
4426 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4429 return ICE_ERR_PARAM;
4431 rule_lock = &recp_list->filt_rule_lock;
4432 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
4434 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
4435 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
4438 if (l_type != ICE_SW_LKUP_MAC)
4439 return ICE_ERR_PARAM;
4441 vsi_handle = list_itr->fltr_info.vsi_handle;
4442 if (!ice_is_vsi_valid(hw, vsi_handle))
4443 return ICE_ERR_PARAM;
4445 list_itr->fltr_info.fwd_id.hw_vsi_id =
4446 ice_get_hw_vsi_num(hw, vsi_handle);
4447 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4448 /* Don't remove the unicast address that belongs to
4449 * another VSI on the switch, since it is not being
4452 ice_acquire_lock(rule_lock);
4453 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
4454 &list_itr->fltr_info)) {
4455 ice_release_lock(rule_lock);
4456 return ICE_ERR_DOES_NOT_EXIST;
4458 ice_release_lock(rule_lock);
4460 list_itr->status = ice_remove_rule_internal(hw, recp_list,
4462 if (list_itr->status)
4463 return list_itr->status;
4469 * ice_remove_mac - remove a MAC address based filter rule
4470 * @hw: pointer to the hardware structure
4471 * @m_list: list of MAC addresses and forwarding information
4474 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4476 struct ice_sw_recipe *recp_list;
4478 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4479 return ice_remove_mac_rule(hw, m_list, recp_list);
4483 * ice_remove_vlan_rule - Remove VLAN based filter rule
4484 * @hw: pointer to the hardware structure
4485 * @v_list: list of VLAN entries and forwarding information
4486 * @recp_list: list from which function remove VLAN
4488 static enum ice_status
4489 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4490 struct ice_sw_recipe *recp_list)
4492 struct ice_fltr_list_entry *v_list_itr, *tmp;
4494 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4496 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4498 if (l_type != ICE_SW_LKUP_VLAN)
4499 return ICE_ERR_PARAM;
4500 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4502 if (v_list_itr->status)
4503 return v_list_itr->status;
4509 * ice_remove_vlan - remove a VLAN address based filter rule
4510 * @hw: pointer to the hardware structure
4511 * @v_list: list of VLAN and forwarding information
4515 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4517 struct ice_sw_recipe *recp_list;
4520 return ICE_ERR_PARAM;
4522 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
4523 return ice_remove_vlan_rule(hw, v_list, recp_list);
4527 * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
4528 * @hw: pointer to the hardware structure
4529 * @v_list: list of MAC VLAN entries and forwarding information
4530 * @recp_list: list from which function remove MAC VLAN
4532 static enum ice_status
4533 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4534 struct ice_sw_recipe *recp_list)
4536 struct ice_fltr_list_entry *v_list_itr, *tmp;
4538 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4539 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4541 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4543 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4544 return ICE_ERR_PARAM;
4545 v_list_itr->status =
4546 ice_remove_rule_internal(hw, recp_list,
4548 if (v_list_itr->status)
4549 return v_list_itr->status;
4555 * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
4556 * @hw: pointer to the hardware structure
4557 * @mv_list: list of MAC VLAN and forwarding information
4560 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4562 struct ice_sw_recipe *recp_list;
4564 if (!mv_list || !hw)
4565 return ICE_ERR_PARAM;
4567 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4568 return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
4572 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
4573 * @fm_entry: filter entry to inspect
4574 * @vsi_handle: VSI handle to compare with filter info
/* True when the filter forwards directly to this VSI, or forwards to a
 * VSI list whose bitmap includes this VSI handle.
 */
4577 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
4579 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
4580 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
4581 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
/* NOTE(review): the bit index argument to ice_is_bit_set() is on a line
 * not shown in this excerpt (presumably vsi_handle) — confirm upstream.
 */
4582 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
4587 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
4588 * @hw: pointer to the hardware structure
4589 * @vsi_handle: VSI handle to remove filters from
4590 * @vsi_list_head: pointer to the list to add entry to
4591 * @fi: pointer to fltr_info of filter entry to copy & add
4593 * Helper function, used when creating a list of filters to remove from
4594 * a specific VSI. The entry added to vsi_list_head is a COPY of the
4595 * original filter entry, with the exception of fltr_info.fltr_act and
4596 * fltr_info.fwd_id fields. These are set such that later logic can
4597 * extract which VSI to remove the fltr from, and pass on that information.
4599 static enum ice_status
4600 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4601 struct LIST_HEAD_TYPE *vsi_list_head,
4602 struct ice_fltr_info *fi)
4604 struct ice_fltr_list_entry *tmp;
4606 /* this memory is freed up in the caller function
4607 * once filters for this VSI are removed
4609 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
/* Allocation failure path — ICE_ERR_NO_MEMORY when tmp is NULL */
4611 return ICE_ERR_NO_MEMORY;
/* Shallow copy of the original filter info */
4613 tmp->fltr_info = *fi;
4615 /* Overwrite these fields to indicate which VSI to remove filter from,
4616 * so find and remove logic can extract the information from the
4617 * list entries. Note that original entries will still have proper
4620 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
4621 tmp->fltr_info.vsi_handle = vsi_handle;
4622 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4624 LIST_ADD(&tmp->list_entry, vsi_list_head);
4630 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4631 * @hw: pointer to the hardware structure
4632 * @vsi_handle: VSI handle to remove filters from
4633 * @lkup_list_head: pointer to the list that has certain lookup type filters
4634 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4636 * Locates all filters in lkup_list_head that are used by the given VSI,
4637 * and adds COPIES of those entries to vsi_list_head (intended to be used
4638 * to remove the listed filters).
4639 * Note that this means all entries in vsi_list_head must be explicitly
4640 * deallocated by the caller when done with list.
4642 static enum ice_status
4643 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4644 struct LIST_HEAD_TYPE *lkup_list_head,
4645 struct LIST_HEAD_TYPE *vsi_list_head)
4647 struct ice_fltr_mgmt_list_entry *fm_entry;
4648 enum ice_status status = ICE_SUCCESS;
4650 /* check to make sure VSI ID is valid and within boundary */
4651 if (!ice_is_vsi_valid(hw, vsi_handle))
4652 return ICE_ERR_PARAM;
4654 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
4655 ice_fltr_mgmt_list_entry, list_entry) {
4656 struct ice_fltr_info *fi;
4658 fi = &fm_entry->fltr_info;
/* Skip entries not owned by this VSI */
4659 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
/* Append a copy of this filter to the caller's removal list */
4662 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4671 * ice_determine_promisc_mask
4672 * @fi: filter info to parse
4674 * Helper function to determine which ICE_PROMISC_ mask corresponds
4675 * to given filter into.
4677 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4679 u16 vid = fi->l_data.mac_vlan.vlan_id;
4680 u8 *macaddr = fi->l_data.mac.mac_addr;
4681 bool is_tx_fltr = false;
4682 u8 promisc_mask = 0;
/* NOTE(review): the statement setting is_tx_fltr for the TX case is on
 * a line not shown in this excerpt.
 */
4684 if (fi->flag == ICE_FLTR_TX)
/* Classify DA: broadcast, multicast, or unicast — direction-specific */
4687 if (IS_BROADCAST_ETHER_ADDR(macaddr))
4688 promisc_mask |= is_tx_fltr ?
4689 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4690 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
4691 promisc_mask |= is_tx_fltr ?
4692 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4693 else if (IS_UNICAST_ETHER_ADDR(macaddr))
4694 promisc_mask |= is_tx_fltr ?
4695 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
/* NOTE(review): the guard for the VLAN bits (presumably "if (vid)") is
 * on a line not shown in this excerpt — confirm upstream.
 */
4697 promisc_mask |= is_tx_fltr ?
4698 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4700 return promisc_mask;
4704 * _ice_get_vsi_promisc - get promiscuous mode of given VSI
4705 * @hw: pointer to the hardware structure
4706 * @vsi_handle: VSI handle to retrieve info from
4707 * @promisc_mask: pointer to mask to be filled in
4708 * @vid: VLAN ID of promisc VLAN VSI
4709 * @sw: pointer to switch info struct for which function add rule
4711 static enum ice_status
4712 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4713 u16 *vid, struct ice_switch_info *sw)
4715 struct ice_fltr_mgmt_list_entry *itr;
4716 struct LIST_HEAD_TYPE *rule_head;
4717 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4719 if (!ice_is_vsi_valid(hw, vsi_handle))
4720 return ICE_ERR_PARAM;
/* Scan the PROMISC recipe's rule list and OR in the promisc flags of
 * every rule that applies to this VSI.
 */
4724 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
4725 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
4727 ice_acquire_lock(rule_lock);
4728 LIST_FOR_EACH_ENTRY(itr, rule_head,
4729 ice_fltr_mgmt_list_entry, list_entry) {
4730 /* Continue if this filter doesn't apply to this VSI or the
4731 * VSI ID is not in the VSI map for this filter
4733 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4736 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4738 ice_release_lock(rule_lock);
4744 * ice_get_vsi_promisc - get promiscuous mode of given VSI
4745 * @hw: pointer to the hardware structure
4746 * @vsi_handle: VSI handle to retrieve info from
4747 * @promisc_mask: pointer to mask to be filled in
4748 * @vid: VLAN ID of promisc VLAN VSI
/* Public wrapper around _ice_get_vsi_promisc() using hw->switch_info */
4751 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4754 return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
4755 vid, hw->switch_info);
4759 * _ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4760 * @hw: pointer to the hardware structure
4761 * @vsi_handle: VSI handle to retrieve info from
4762 * @promisc_mask: pointer to mask to be filled in
4763 * @vid: VLAN ID of promisc VLAN VSI
4764 * @sw: pointer to switch info struct for which function add rule
4766 static enum ice_status
4767 _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4768 u16 *vid, struct ice_switch_info *sw)
4770 struct ice_fltr_mgmt_list_entry *itr;
4771 struct LIST_HEAD_TYPE *rule_head;
4772 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4774 if (!ice_is_vsi_valid(hw, vsi_handle))
4775 return ICE_ERR_PARAM;
/* Same scan as _ice_get_vsi_promisc(), but over the PROMISC_VLAN
 * recipe's rule list.
 */
4779 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
4780 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
4782 ice_acquire_lock(rule_lock);
4783 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
4785 /* Continue if this filter doesn't apply to this VSI or the
4786 * VSI ID is not in the VSI map for this filter
4788 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4791 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4793 ice_release_lock(rule_lock);
4799 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4800 * @hw: pointer to the hardware structure
4801 * @vsi_handle: VSI handle to retrieve info from
4802 * @promisc_mask: pointer to mask to be filled in
4803 * @vid: VLAN ID of promisc VLAN VSI
/* Public wrapper around _ice_get_vsi_vlan_promisc() using hw->switch_info */
4806 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4809 return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
4810 vid, hw->switch_info);
4814 * ice_remove_promisc - Remove promisc based filter rules
4815 * @hw: pointer to the hardware structure
4816 * @recp_id: recipe ID for which the rule needs to removed
4817 * @v_list: list of promisc entries
4819 static enum ice_status
4820 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
4821 struct LIST_HEAD_TYPE *v_list)
4823 struct ice_fltr_list_entry *v_list_itr, *tmp;
4824 struct ice_sw_recipe *recp_list;
4826 recp_list = &hw->switch_info->recp_list[recp_id];
4827 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
/* Remove each entry; per-entry status is stored on the entry itself */
4829 v_list_itr->status =
4830 ice_remove_rule_internal(hw, recp_list, v_list_itr);
/* Abort on the first failure */
4831 if (v_list_itr->status)
4832 return v_list_itr->status;
4838 * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
4839 * @hw: pointer to the hardware structure
4840 * @vsi_handle: VSI handle to clear mode
4841 * @promisc_mask: mask of promiscuous config bits to clear
4842 * @vid: VLAN ID to clear VLAN promiscuous
4843 * @sw: pointer to switch info struct for which function add rule
4845 static enum ice_status
4846 _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4847 u16 vid, struct ice_switch_info *sw)
4849 struct ice_fltr_list_entry *fm_entry, *tmp;
4850 struct LIST_HEAD_TYPE remove_list_head;
4851 struct ice_fltr_mgmt_list_entry *itr;
4852 struct LIST_HEAD_TYPE *rule_head;
4853 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4854 enum ice_status status = ICE_SUCCESS;
4857 if (!ice_is_vsi_valid(hw, vsi_handle))
4858 return ICE_ERR_PARAM;
/* VLAN promisc bits select the PROMISC_VLAN recipe; otherwise PROMISC */
4860 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
4861 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4863 recipe_id = ICE_SW_LKUP_PROMISC;
4865 rule_head = &sw->recp_list[recipe_id].filt_rules;
4866 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4868 INIT_LIST_HEAD(&remove_list_head);
/* Pass 1 (under lock): collect copies of matching rules to remove */
4870 ice_acquire_lock(rule_lock);
4871 LIST_FOR_EACH_ENTRY(itr, rule_head,
4872 ice_fltr_mgmt_list_entry, list_entry) {
4873 struct ice_fltr_info *fltr_info;
4874 u8 fltr_promisc_mask = 0;
4876 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4878 fltr_info = &itr->fltr_info;
/* For VLAN promisc, only rules matching the requested VID qualify */
4880 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4881 vid != fltr_info->l_data.mac_vlan.vlan_id)
4884 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4886 /* Skip if filter is not completely specified by given mask */
4887 if (fltr_promisc_mask & ~promisc_mask)
4890 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
/* On list-build failure, drop the lock and free what was collected */
4894 ice_release_lock(rule_lock);
4895 goto free_fltr_list;
4898 ice_release_lock(rule_lock);
/* Pass 2 (lock released): actually remove the collected rules */
4900 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
/* Free the temporary copies regardless of removal outcome */
4903 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4904 ice_fltr_list_entry, list_entry) {
4905 LIST_DEL(&fm_entry->list_entry);
4906 ice_free(hw, fm_entry);
4913 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
4914 * @hw: pointer to the hardware structure
4915 * @vsi_handle: VSI handle to clear mode
4916 * @promisc_mask: mask of promiscuous config bits to clear
4917 * @vid: VLAN ID to clear VLAN promiscuous
/* Public wrapper around _ice_clear_vsi_promisc() using hw->switch_info */
4920 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
4921 u8 promisc_mask, u16 vid)
4923 return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
4924 vid, hw->switch_info);
4928 * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4929 * @hw: pointer to the hardware structure
4930 * @vsi_handle: VSI handle to configure
4931 * @promisc_mask: mask of promiscuous config bits
4932 * @vid: VLAN ID to set VLAN promiscuous
4933 * @lport: logical port number to configure promisc mode
4934 * @sw: pointer to switch info struct for which function add rule
4936 static enum ice_status
4937 _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4938 u16 vid, u8 lport, struct ice_switch_info *sw)
4940 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4941 struct ice_fltr_list_entry f_list_entry;
4942 struct ice_fltr_info new_fltr;
4943 enum ice_status status = ICE_SUCCESS;
4949 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4951 if (!ice_is_vsi_valid(hw, vsi_handle))
4952 return ICE_ERR_PARAM;
4953 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4955 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
/* VLAN promisc bits select the PROMISC_VLAN recipe and carry the VID */
4957 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4958 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4959 new_fltr.l_data.mac_vlan.vlan_id = vid;
4960 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4962 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4963 recipe_id = ICE_SW_LKUP_PROMISC;
4966 /* Separate filters must be set for each direction/packet type
4967 * combination, so we will loop over the mask value, store the
4968 * individual type, and clear it out in the input mask as it
4971 while (promisc_mask) {
4972 struct ice_sw_recipe *recp_list;
/* Consume exactly one direction/packet-type bit per iteration */
4978 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4979 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4980 pkt_type = UCAST_FLTR;
4981 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4982 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4983 pkt_type = UCAST_FLTR;
/* NOTE(review): the statements setting the TX direction flag for the
 * *_TX branches are on lines not shown in this excerpt.
 */
4985 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4986 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4987 pkt_type = MCAST_FLTR;
4988 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4989 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4990 pkt_type = MCAST_FLTR;
4992 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4993 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4994 pkt_type = BCAST_FLTR;
4995 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4996 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4997 pkt_type = BCAST_FLTR;
5001 /* Check for VLAN promiscuous flag */
5002 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
5003 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
5004 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
5005 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
5009 /* Set filter DA based on packet type */
5010 mac_addr = new_fltr.l_data.mac.mac_addr;
5011 if (pkt_type == BCAST_FLTR) {
5012 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
5013 } else if (pkt_type == MCAST_FLTR ||
5014 pkt_type == UCAST_FLTR) {
5015 /* Use the dummy ether header DA */
5016 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
5017 ICE_NONDMA_TO_NONDMA);
5018 if (pkt_type == MCAST_FLTR)
5019 mac_addr[0] |= 0x1; /* Set multicast bit */
5022 /* Need to reset this to zero for all iterations */
/* TX filters source from the VSI; RX filters source from the lport */
5025 new_fltr.flag |= ICE_FLTR_TX;
5026 new_fltr.src = hw_vsi_id;
5028 new_fltr.flag |= ICE_FLTR_RX;
5029 new_fltr.src = lport;
5032 new_fltr.fltr_act = ICE_FWD_TO_VSI;
5033 new_fltr.vsi_handle = vsi_handle;
5034 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
5035 f_list_entry.fltr_info = new_fltr;
5036 recp_list = &sw->recp_list[recipe_id];
5038 status = ice_add_rule_internal(hw, recp_list, lport,
5040 if (status != ICE_SUCCESS)
5041 goto set_promisc_exit;
5049 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5050 * @hw: pointer to the hardware structure
5051 * @vsi_handle: VSI handle to configure
5052 * @promisc_mask: mask of promiscuous config bits
5053 * @vid: VLAN ID to set VLAN promiscuous
/* Public wrapper: uses hw->switch_info and the port's logical port number */
5056 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5059 return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
5060 hw->port_info->lport,
5065 * _ice_set_vlan_vsi_promisc
5066 * @hw: pointer to the hardware structure
5067 * @vsi_handle: VSI handle to configure
5068 * @promisc_mask: mask of promiscuous config bits
5069 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5070 * @lport: logical port number to configure promisc mode
5071 * @sw: pointer to switch info struct for which function add rule
5073 * Configure VSI with all associated VLANs to given promiscuous mode(s)
5075 static enum ice_status
5076 _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5077 bool rm_vlan_promisc, u8 lport,
5078 struct ice_switch_info *sw)
5080 struct ice_fltr_list_entry *list_itr, *tmp;
5081 struct LIST_HEAD_TYPE vsi_list_head;
5082 struct LIST_HEAD_TYPE *vlan_head;
5083 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
5084 enum ice_status status;
/* Collect (under the VLAN rule lock) a copy of every VLAN filter that
 * belongs to this VSI.
 */
5087 INIT_LIST_HEAD(&vsi_list_head);
5088 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
5089 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
5090 ice_acquire_lock(vlan_lock);
5091 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
5093 ice_release_lock(vlan_lock);
5095 goto free_fltr_list;
/* Apply (or clear) the promisc mode per associated VLAN ID */
5097 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
5099 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
5100 if (rm_vlan_promisc)
5101 status = _ice_clear_vsi_promisc(hw, vsi_handle,
5105 status = _ice_set_vsi_promisc(hw, vsi_handle,
5106 promisc_mask, vlan_id,
/* Free the temporary filter copies */
5113 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
5114 ice_fltr_list_entry, list_entry) {
5115 LIST_DEL(&list_itr->list_entry);
5116 ice_free(hw, list_itr);
5122 * ice_set_vlan_vsi_promisc
5123 * @hw: pointer to the hardware structure
5124 * @vsi_handle: VSI handle to configure
5125 * @promisc_mask: mask of promiscuous config bits
5126 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5128 * Configure VSI with all associated VLANs to given promiscuous mode(s)
/* Public wrapper: uses hw->switch_info and the port's logical port number */
5131 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5132 bool rm_vlan_promisc)
5134 return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
5135 rm_vlan_promisc, hw->port_info->lport,
5140 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
5141 * @hw: pointer to the hardware structure
5142 * @vsi_handle: VSI handle to remove filters from
5143 * @recp_list: recipe list from which function remove fltr
5144 * @lkup: switch rule filter lookup type
5147 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
5148 struct ice_sw_recipe *recp_list,
5149 enum ice_sw_lkup_type lkup)
5151 struct ice_fltr_list_entry *fm_entry;
5152 struct LIST_HEAD_TYPE remove_list_head;
5153 struct LIST_HEAD_TYPE *rule_head;
5154 struct ice_fltr_list_entry *tmp;
5155 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5156 enum ice_status status;
/* Collect (under lock) copies of this lookup type's filters that
 * belong to the VSI, then dispatch removal by lookup type.
 */
5158 INIT_LIST_HEAD(&remove_list_head);
5159 rule_lock = &recp_list[lkup].filt_rule_lock;
5160 rule_head = &recp_list[lkup].filt_rules;
5161 ice_acquire_lock(rule_lock);
5162 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
5164 ice_release_lock(rule_lock);
5169 case ICE_SW_LKUP_MAC:
5170 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
5172 case ICE_SW_LKUP_VLAN:
5173 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
5175 case ICE_SW_LKUP_PROMISC:
5176 case ICE_SW_LKUP_PROMISC_VLAN:
5177 ice_remove_promisc(hw, lkup, &remove_list_head);
5179 case ICE_SW_LKUP_MAC_VLAN:
5180 ice_remove_mac_vlan(hw, &remove_list_head);
5182 case ICE_SW_LKUP_ETHERTYPE:
5183 case ICE_SW_LKUP_ETHERTYPE_MAC:
5184 ice_remove_eth_mac(hw, &remove_list_head);
5186 case ICE_SW_LKUP_DFLT:
5187 ice_debug(hw, ICE_DBG_SW,
5188 "Remove filters for this lookup type hasn't been implemented yet\n");
5190 case ICE_SW_LKUP_LAST:
5191 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
/* Free the temporary filter copies */
5195 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
5196 ice_fltr_list_entry, list_entry) {
5197 LIST_DEL(&fm_entry->list_entry);
5198 ice_free(hw, fm_entry);
5203 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
5204 * @hw: pointer to the hardware structure
5205 * @vsi_handle: VSI handle to remove filters from
5206 * @sw: pointer to switch info struct
/* Removes this VSI's filters for every supported lookup type in turn */
5209 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
5210 struct ice_switch_info *sw)
5212 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
5214 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5215 sw->recp_list, ICE_SW_LKUP_MAC);
5216 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5217 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
5218 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5219 sw->recp_list, ICE_SW_LKUP_PROMISC);
5220 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5221 sw->recp_list, ICE_SW_LKUP_VLAN);
5222 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5223 sw->recp_list, ICE_SW_LKUP_DFLT);
5224 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5225 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
5226 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5227 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
5228 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5229 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
5233 * ice_remove_vsi_fltr - Remove all filters for a VSI
5234 * @hw: pointer to the hardware structure
5235 * @vsi_handle: VSI handle to remove filters from
/* Public wrapper around ice_remove_vsi_fltr_rule() using hw->switch_info */
5237 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
5239 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
5243 * ice_alloc_res_cntr - allocating resource counter
5244 * @hw: pointer to the hardware structure
5245 * @type: type of resource
5246 * @alloc_shared: if set it is shared else dedicated
5247 * @num_items: number of entries requested for FD resource type
5248 * @counter_id: counter index returned by AQ call
5251 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
5254 struct ice_aqc_alloc_free_res_elem *buf;
5255 enum ice_status status;
5258 /* Allocate resource */
5259 buf_len = sizeof(*buf);
5260 buf = (struct ice_aqc_alloc_free_res_elem *)
5261 ice_malloc(hw, buf_len);
5263 return ICE_ERR_NO_MEMORY;
/* Fill the AQ buffer: element count plus (masked) resource type with
 * the shared/dedicated flag OR'd in.
 */
5265 buf->num_elems = CPU_TO_LE16(num_items);
5266 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
5267 ICE_AQC_RES_TYPE_M) | alloc_shared);
5269 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
5270 ice_aqc_opc_alloc_res, NULL);
/* On success the FW returns the allocated counter index in elem[0] */
5274 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
5282 * ice_free_res_cntr - free resource counter
5283 * @hw: pointer to the hardware structure
5284 * @type: type of resource
5285 * @alloc_shared: if set it is shared else dedicated
5286 * @num_items: number of entries to be freed for FD resource type
5287 * @counter_id: counter ID resource which needs to be freed
5290 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
5293 struct ice_aqc_alloc_free_res_elem *buf;
5294 enum ice_status status;
5298 buf_len = sizeof(*buf);
5299 buf = (struct ice_aqc_alloc_free_res_elem *)
5300 ice_malloc(hw, buf_len);
5302 return ICE_ERR_NO_MEMORY;
/* Mirror of ice_alloc_res_cntr(), but issues the free opcode and passes
 * the counter ID to release in elem[0].
 */
5304 buf->num_elems = CPU_TO_LE16(num_items);
5305 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
5306 ICE_AQC_RES_TYPE_M) | alloc_shared);
5307 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
5309 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
5310 ice_aqc_opc_free_res, NULL);
5312 ice_debug(hw, ICE_DBG_SW,
5313 "counter resource could not be freed\n");
5320 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
5321 * @hw: pointer to the hardware structure
5322 * @counter_id: returns counter index
/* Convenience wrapper: allocates one dedicated VLAN counter resource */
5324 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
5326 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
5327 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
5332 * ice_free_vlan_res_counter - Free counter resource for VLAN type
5333 * @hw: pointer to the hardware structure
5334 * @counter_id: counter index to be freed
/* Convenience wrapper: frees one dedicated VLAN counter resource */
5336 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
5338 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
5339 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
5344 * ice_alloc_res_lg_act - add large action resource
5345 * @hw: pointer to the hardware structure
5346 * @l_id: large action ID to fill it in
5347 * @num_acts: number of actions to hold with a large action entry
5349 static enum ice_status
5350 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
5352 struct ice_aqc_alloc_free_res_elem *sw_buf;
5353 enum ice_status status;
/* Reject requests that cannot fit in a large-action entry */
5356 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
5357 return ICE_ERR_PARAM;
5359 /* Allocate resource for large action */
5360 buf_len = sizeof(*sw_buf);
5361 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
5362 ice_malloc(hw, buf_len);
5364 return ICE_ERR_NO_MEMORY;
5366 sw_buf->num_elems = CPU_TO_LE16(1);
5368 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
5369 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
5370 * If num_acts is greater than 2, then use
5371 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
5372 * The num_acts cannot exceed 4. This was ensured at the
5373 * beginning of the function.
5376 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
5377 else if (num_acts == 2)
5378 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
5380 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
5382 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
5383 ice_aqc_opc_alloc_res, NULL);
/* FW returns the allocated large-action table index on success */
5385 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
5387 ice_free(hw, sw_buf);
5392 * ice_add_mac_with_sw_marker - add filter with sw marker
5393 * @hw: pointer to the hardware structure
5394 * @f_info: filter info structure containing the MAC filter information
5395 * @sw_marker: sw marker to tag the Rx descriptor with
5398 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
5401 struct ice_fltr_mgmt_list_entry *m_entry;
5402 struct ice_fltr_list_entry fl_info;
5403 struct ice_sw_recipe *recp_list;
5404 struct LIST_HEAD_TYPE l_head;
5405 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5406 enum ice_status ret;
/* Only forward-to-VSI MAC filters with a valid marker and VSI qualify */
5410 if (f_info->fltr_act != ICE_FWD_TO_VSI)
5411 return ICE_ERR_PARAM;
5413 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5414 return ICE_ERR_PARAM;
5416 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
5417 return ICE_ERR_PARAM;
5419 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5420 return ICE_ERR_PARAM;
5421 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5423 /* Add filter if it doesn't exist so then the adding of large
5424 * action always results in update
5427 INIT_LIST_HEAD(&l_head);
5428 fl_info.fltr_info = *f_info;
5429 LIST_ADD(&fl_info.list_entry, &l_head);
5431 entry_exists = false;
5432 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5433 hw->port_info->lport);
5434 if (ret == ICE_ERR_ALREADY_EXISTS)
5435 entry_exists = true;
5439 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5440 rule_lock = &recp_list->filt_rule_lock;
5441 ice_acquire_lock(rule_lock);
5442 /* Get the book keeping entry for the filter */
5443 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5447 /* If counter action was enabled for this rule then don't enable
5448 * sw marker large action
5450 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5451 ret = ICE_ERR_PARAM;
5455 /* if same marker was added before */
5456 if (m_entry->sw_marker_id == sw_marker) {
5457 ret = ICE_ERR_ALREADY_EXISTS;
5461 /* Allocate a hardware table entry to hold large act. Three actions
5462 * for marker based large action
5464 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
5468 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5471 /* Update the switch rule to add the marker action */
5472 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
5474 ice_release_lock(rule_lock);
/* Error/cleanup path: drop the lock, then undo the temporary add */
5479 ice_release_lock(rule_lock);
5480 /* only remove entry if it did not exist previously */
5482 ret = ice_remove_mac(hw, &l_head);
5488 * ice_add_mac_with_counter - add filter with counter enabled
5489 * @hw: pointer to the hardware structure
5490 * @f_info: pointer to filter info structure containing the MAC filter
/* Counterpart of ice_add_mac_with_sw_marker(): attaches a VLAN counter
 * (two-action large action) to a forward-to-VSI MAC filter.
 */
5494 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
5496 struct ice_fltr_mgmt_list_entry *m_entry;
5497 struct ice_fltr_list_entry fl_info;
5498 struct ice_sw_recipe *recp_list;
5499 struct LIST_HEAD_TYPE l_head;
5500 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5501 enum ice_status ret;
5506 if (f_info->fltr_act != ICE_FWD_TO_VSI)
5507 return ICE_ERR_PARAM;
5509 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5510 return ICE_ERR_PARAM;
5512 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5513 return ICE_ERR_PARAM;
5514 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5515 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5517 entry_exist = false;
5519 rule_lock = &recp_list->filt_rule_lock;
5521 /* Add filter if it doesn't exist so then the adding of large
5522 * action always results in update
5524 INIT_LIST_HEAD(&l_head);
5526 fl_info.fltr_info = *f_info;
5527 LIST_ADD(&fl_info.list_entry, &l_head);
5529 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5530 hw->port_info->lport);
5531 if (ret == ICE_ERR_ALREADY_EXISTS)
/* Look up the bookkeeping entry under the rule lock */
5536 ice_acquire_lock(rule_lock);
5537 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5539 ret = ICE_ERR_BAD_PTR;
5543 /* Don't enable counter for a filter for which sw marker was enabled */
5544 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
5545 ret = ICE_ERR_PARAM;
5549 /* If a counter was already enabled then don't need to add again */
5550 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5551 ret = ICE_ERR_ALREADY_EXISTS;
5555 /* Allocate a hardware table entry to VLAN counter */
5556 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
5560 /* Allocate a hardware table entry to hold large act. Two actions for
5561 * counter based large action
5563 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
5567 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5570 /* Update the switch rule to add the counter action */
5571 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
5573 ice_release_lock(rule_lock);
/* Error/cleanup path: drop the lock, then undo the temporary add */
5578 ice_release_lock(rule_lock);
5579 /* only remove entry if it did not exist previously */
5581 ret = ice_remove_mac(hw, &l_head);
5586 /* This is mapping table entry that maps every word within a given protocol
5587 * structure to the real byte offset as per the specification of that
5589 * for example dst address is 3 words in ethertype header and corresponding
5590 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
5591 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
5592 * matching entry describing its field. This needs to be updated if new
5593 * structure is added to that union.
/* Each entry: protocol type, then the byte offset of every 16-bit word
 * that can be extracted from that protocol header.
 */
5595 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
5596 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
5597 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
5598 { ICE_ETYPE_OL, { 0 } },
5599 { ICE_VLAN_OFOS, { 0, 2 } },
5600 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
5601 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
5602 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
5603 26, 28, 30, 32, 34, 36, 38 } },
5604 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
5605 26, 28, 30, 32, 34, 36, 38 } },
5606 { ICE_TCP_IL, { 0, 2 } },
5607 { ICE_UDP_OF, { 0, 2 } },
5608 { ICE_UDP_ILOS, { 0, 2 } },
5609 { ICE_SCTP_IL, { 0, 2 } },
5610 { ICE_VXLAN, { 8, 10, 12, 14 } },
5611 { ICE_GENEVE, { 8, 10, 12, 14 } },
5612 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
5613 { ICE_NVGRE, { 0, 2, 4, 6 } },
5614 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
5615 { ICE_PPPOE, { 0, 2, 4, 6 } },
5616 { ICE_PFCP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
5617 { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
5618 { ICE_ESP, { 0, 2, 4, 6 } },
5619 { ICE_AH, { 0, 2, 4, 6, 8, 10 } },
5620 { ICE_NAT_T, { 8, 10, 12, 14 } },
5623 /* The following table describes preferred grouping of recipes.
5624 * If a recipe that needs to be programmed is a superset or matches one of the
5625 * following combinations, then the recipe needs to be chained as per the
/* Maps each software protocol type to its hardware protocol ID. Note
 * that tunnel protocols map to their carrier's HW ID (e.g. VXLAN/GENEVE
 * map to the outer UDP protocol ID).
 */
5629 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
5630 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
5631 { ICE_MAC_IL, ICE_MAC_IL_HW },
5632 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
5633 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
5634 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
5635 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
5636 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
5637 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
5638 { ICE_TCP_IL, ICE_TCP_IL_HW },
5639 { ICE_UDP_OF, ICE_UDP_OF_HW },
5640 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
5641 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
5642 { ICE_VXLAN, ICE_UDP_OF_HW },
5643 { ICE_GENEVE, ICE_UDP_OF_HW },
5644 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
5645 { ICE_NVGRE, ICE_GRE_OF_HW },
5646 { ICE_GTP, ICE_UDP_OF_HW },
5647 { ICE_PPPOE, ICE_PPPOE_HW },
5648 { ICE_PFCP, ICE_UDP_ILOS_HW },
5649 { ICE_L2TPV3, ICE_L2TPV3_HW },
5650 { ICE_ESP, ICE_ESP_HW },
5651 { ICE_AH, ICE_AH_HW },
5652 { ICE_NAT_T, ICE_UDP_ILOS_HW },
5656 * ice_find_recp - find a recipe
5657 * @hw: pointer to the hardware structure
5658 * @lkup_exts: extension sequence to match
5660 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
5662 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
5663 enum ice_sw_tunnel_type tun_type)
5665 bool refresh_required = true;
5666 struct ice_sw_recipe *recp;
5669 /* Walk through existing recipes to find a match */
5670 recp = hw->switch_info->recp_list;
5671 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5672 /* If recipe was not created for this ID, in SW bookkeeping,
5673 * check if FW has an entry for this recipe. If the FW has an
5674 * entry update it in our SW bookkeeping and continue with the
5677 if (!recp[i].recp_created)
5678 if (ice_get_recp_frm_fw(hw,
5679 hw->switch_info->recp_list, i,
5683 /* Skip inverse action recipes */
5684 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
5685 ICE_AQ_RECIPE_ACT_INV_ACT)
5688 /* if number of words we are looking for match */
5689 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
5690 struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
5691 struct ice_fv_word *be = lkup_exts->fv_words;
5692 u16 *cr = recp[i].lkup_exts.field_mask;
5693 u16 *de = lkup_exts->field_mask;
5697 /* ar, cr, and qr are related to the recipe words, while
5698 * be, de, and pe are related to the lookup words
5700 for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
/* For each lookup word, search all recipe words for one with the same
 * offset and protocol ID (inner loop; O(n^2) over small word counts).
 */
5701 for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
5703 if (ar[qr].off == be[pe].off &&
5704 ar[qr].prot_id == be[pe].prot_id &&
5706 /* Found the "pe"th word in the
5711 /* After walking through all the words in the
5712 * "i"th recipe if "p"th word was not found then
5713 * this recipe is not what we are looking for.
5714 * So break out from this loop and try the next
5717 if (qr >= recp[i].lkup_exts.n_val_words) {
5722 /* If for "i"th recipe the found was never set to false
5723 * then it means we found our match
5725 if (tun_type == recp[i].tun_type && found)
5726 return i; /* Return the recipe ID */
5729 return ICE_MAX_NUM_RECIPES;
5733 * ice_prot_type_to_id - get protocol ID from protocol type
5734 * @type: protocol type
5735 * @id: pointer to variable that will receive the ID
5737 * Returns true if found, false otherwise
// Linear lookup of "type" in ice_prot_id_tbl; on a hit, writes the HW
// protocol ID through *id. Per the preceding doc comment it returns true
// on success, false otherwise (the return statements were dropped from
// this chunk — numbering jumps 5745 -> 5752).
5739 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
5743 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
5744 if (ice_prot_id_tbl[i].type == type) {
5745 *id = ice_prot_id_tbl[i].protocol_id;
5752 * ice_fill_valid_words - count valid words
5753 * @rule: advanced rule with lookup information
5754 * @lkup_exts: byte offset extractions of the words that are valid
5756 * calculate valid words in a lookup rule using mask value
// Scan the rule's mask union one u16 at a time; every non-zero mask word
// appends one (prot_id, offset, mask) entry to lkup_exts. Returns the
// number of words added (ret_val = new count - old count).
// NOTE(review): numbering jumps (5762->5764, 5773->5775, 5780->5784)
// indicate dropped lines (early return, "break", closing braces); the
// return statement after 5785 is also not visible here.
5759 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
5760 struct ice_prot_lkup_ext *lkup_exts)
5762 u8 j, word, prot_id, ret_val;
5764 if (!ice_prot_type_to_id(rule->type, &prot_id))
5767 word = lkup_exts->n_val_words;
// Iterate over the mask union in 16-bit granules; offsets come from the
// per-protocol ice_prot_ext table, masks are converted from big-endian.
5769 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
5770 if (((u16 *)&rule->m_u)[j] &&
5771 rule->type < ARRAY_SIZE(ice_prot_ext)) {
5772 /* No more space to accommodate */
5773 if (word >= ICE_MAX_CHAIN_WORDS)
5775 lkup_exts->fv_words[word].off =
5776 ice_prot_ext[rule->type].offs[j];
5777 lkup_exts->fv_words[word].prot_id =
5778 ice_prot_id_tbl[rule->type].protocol_id;
5779 lkup_exts->field_mask[word] =
5780 BE16_TO_CPU(((__be16 *)&rule->m_u)[j]);
5784 ret_val = word - lkup_exts->n_val_words;
5785 lkup_exts->n_val_words = word;
5791 * ice_create_first_fit_recp_def - Create a recipe grouping
5792 * @hw: pointer to the hardware structure
5793 * @lkup_exts: an array of protocol header extractions
5794 * @rg_list: pointer to a list that stores new recipe groups
5795 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
5797 * Using first fit algorithm, take all the words that are still not done
5798 * and start grouping them in 4-word groups. Each group makes up one
// First-fit grouping: walk all not-yet-done lookup words and pack them
// into ice_recp_grp_entry groups of up to ICE_NUM_WORDS_RECIPE pairs,
// appending each new group entry to rg_list. An empty lookup set still
// gets one (empty) group. Allocation failures return ICE_ERR_NO_MEMORY.
// NOTE(review): dropped lines throughout (e.g. the "if (!grp ||" half of
// the condition before 5831, null checks after each ice_malloc, the
// *recp_cnt accounting, and the final return) — numbering jumps confirm.
5801 static enum ice_status
5802 ice_create_first_fit_recp_def(struct ice_hw *hw,
5803 struct ice_prot_lkup_ext *lkup_exts,
5804 struct LIST_HEAD_TYPE *rg_list,
5807 struct ice_pref_recipe_group *grp = NULL;
// Special case: no valid words at all -> create a single empty group.
5812 if (!lkup_exts->n_val_words) {
5813 struct ice_recp_grp_entry *entry;
5815 entry = (struct ice_recp_grp_entry *)
5816 ice_malloc(hw, sizeof(*entry));
5818 return ICE_ERR_NO_MEMORY;
5819 LIST_ADD(&entry->l_entry, rg_list);
5820 grp = &entry->r_group;
5822 grp->n_val_pairs = 0;
5825 /* Walk through every word in the rule to check if it is not done. If so
5826 * then this word needs to be part of a new recipe.
5828 for (j = 0; j < lkup_exts->n_val_words; j++)
5829 if (!ice_is_bit_set(lkup_exts->done, j)) {
// Start a new group when the current one is full (or absent — the
// leading half of this condition was dropped from this chunk).
5831 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
5832 struct ice_recp_grp_entry *entry;
5834 entry = (struct ice_recp_grp_entry *)
5835 ice_malloc(hw, sizeof(*entry));
5837 return ICE_ERR_NO_MEMORY;
5838 LIST_ADD(&entry->l_entry, rg_list);
5839 grp = &entry->r_group;
// Copy this word's protocol/offset pair and mask into the current group.
5843 grp->pairs[grp->n_val_pairs].prot_id =
5844 lkup_exts->fv_words[j].prot_id;
5845 grp->pairs[grp->n_val_pairs].off =
5846 lkup_exts->fv_words[j].off;
5847 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
5855 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
5856 * @hw: pointer to the hardware structure
5857 * @fv_list: field vector with the extraction sequence information
5858 * @rg_list: recipe groupings with protocol-offset pairs
5860 * Helper function to fill in the field vector indices for protocol-offset
5861 * pairs. These indexes are then ultimately programmed into a recipe.
// For every protocol/offset pair in every recipe group, find the matching
// extraction word in the FIRST field vector of fv_list and record its
// index (and mask) in the group entry. A pair that cannot be located in
// the field vector is a caller error -> ICE_ERR_PARAM.
// NOTE(review): numbering jumps (5871->5874, 5891->5894, 5896->5900)
// show dropped lines: the early-return for an empty list, the fv_idx
// store paired with the "Store index" comment, break/brace lines, and
// the success return after 5904.
5863 static enum ice_status
5864 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
5865 struct LIST_HEAD_TYPE *rg_list)
5867 struct ice_sw_fv_list_entry *fv;
5868 struct ice_recp_grp_entry *rg;
5869 struct ice_fv_word *fv_ext;
5871 if (LIST_EMPTY(fv_list))
// Only the first field vector is consulted; its extraction words ("ew")
// define the index space used by all groups.
5874 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
5875 fv_ext = fv->fv_ptr->ew;
5877 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
5880 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
5881 struct ice_fv_word *pr;
5886 pr = &rg->r_group.pairs[i];
5887 mask = rg->r_group.mask[i];
// Scan the field vector width (es.fvw) for a word with the same
// protocol ID and offset as this pair.
5889 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
5890 if (fv_ext[j].prot_id == pr->prot_id &&
5891 fv_ext[j].off == pr->off) {
5894 /* Store index of field vector */
5896 rg->fv_mask[i] = mask;
5900 /* Protocol/offset could not be found, caller gave an
5904 return ICE_ERR_PARAM;
5912 * ice_find_free_recp_res_idx - find free result indexes for recipe
5913 * @hw: pointer to hardware structure
5914 * @profiles: bitmap of profiles that will be associated with the new recipe
5915 * @free_idx: pointer to variable to receive the free index bitmap
5917 * The algorithm used here is:
5918 * 1. When creating a new recipe, create a set P which contains all
5919 * Profiles that will be associated with our new recipe
5921 * 2. For each Profile p in set P:
5922 * a. Add all recipes associated with Profile p into set R
5923 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
5924 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
5925 * i. Or just assume they all have the same possible indexes:
5927 * i.e., PossibleIndexes = 0x0000F00000000000
5929 * 3. For each Recipe r in set R:
5930 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
5931 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
5933 * FreeIndexes will contain the bits indicating the indexes free for use,
5934 * then the code needs to update the recipe[r].used_result_idx_bits to
5935 * indicate which indexes were selected for use by this recipe.
// Compute the bitmap of result indexes free for a new recipe:
// possible_idx (ANDed across all profiles' prof_res_bm) XOR used_idx
// (ORed res_idxs of every recipe already tied to those profiles).
// The trailing loop counts the set bits; per the doc comment above,
// the function returns that count of free indexes.
// NOTE(review): several lines dropped (the return-type line before 5938,
// bit/count initializers, loop-body increments, final return) — the
// embedded numbering jumps confirm this chunk is incomplete.
5938 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
5939 ice_bitmap_t *free_idx)
5941 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5942 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5943 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
5947 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5948 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5949 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
5950 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
// Start from "all indexes possible", then intersect per-profile below.
5952 for (count = 0; count < ICE_MAX_FV_WORDS; count++)
5953 ice_set_bit(count, possible_idx);
5955 /* For each profile we are going to associate the recipe with, add the
5956 * recipes that are associated with that profile. This will give us
5957 * the set of recipes that our recipe may collide with. Also, determine
5958 * what possible result indexes are usable given this set of profiles.
5961 while (ICE_MAX_NUM_PROFILES >
5962 (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
5963 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
5964 ICE_MAX_NUM_RECIPES);
5965 ice_and_bitmap(possible_idx, possible_idx,
5966 hw->switch_info->prof_res_bm[bit],
5971 /* For each recipe that our new recipe may collide with, determine
5972 * which indexes have been used.
5974 for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
5975 if (ice_is_bit_set(recipes, bit)) {
5976 ice_or_bitmap(used_idx, used_idx,
5977 hw->switch_info->recp_list[bit].res_idxs,
// free = possible XOR used (used is a subset of possible, so XOR
// clears exactly the occupied indexes).
5981 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
5983 /* return number of free indexes */
5986 while (ICE_MAX_FV_WORDS >
5987 (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
5996 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
5997 * @hw: pointer to hardware structure
5998 * @rm: recipe management list entry
5999 * @match_tun_mask: tunnel mask that needs to be programmed
6000 * @profiles: bitmap of profiles that will be associated.
// Program a new switch recipe (possibly a chain of recipes) into firmware
// via the admin queue, then mirror the result into SW bookkeeping
// (hw->switch_info->recp_list). Outline, as visible in this chunk:
//   1. count free result indexes; reject if a multi-group recipe cannot
//      be chained (free slots or ICE_MAX_CHAIN_RECIPE exceeded);
//   2. fetch an existing recipe template with ice_aq_get_recipe();
//   3. per group: allocate a recipe ID, fill lookup indexes/masks from
//      the group's fv_idx/fv_mask, and for multi-group recipes claim a
//      chain result index;
//   4. single group -> mark root; multiple groups -> allocate one extra
//      root recipe that chains the others' result indexes (plus the
//      optional tunnel-metadata match word when match_tun_mask != 0);
//   5. ice_aq_add_recipe() under the change lock, then copy everything
//      into the SW recp_list entries (ext_words, r_bitmap, chain_idx,
//      priority, tun_type, recp_created).
// NOTE(review): this chunk is missing many lines (error-handling gotos,
// loop closers, several assignments such as the fv_idx store and
// "recps++" counters) — embedded numbering jumps throughout confirm.
// Do not modify without the complete file.
6002 static enum ice_status
6003 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
6004 u16 match_tun_mask, ice_bitmap_t *profiles)
6006 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6007 struct ice_aqc_recipe_data_elem *tmp;
6008 struct ice_aqc_recipe_data_elem *buf;
6009 struct ice_recp_grp_entry *entry;
6010 enum ice_status status;
6016 /* When more than one recipe are required, another recipe is needed to
6017 * chain them together. Matching a tunnel metadata ID takes up one of
6018 * the match fields in the chaining recipe reducing the number of
6019 * chained recipes by one.
6021 /* check number of free result indices */
6022 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6023 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
6025 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
6026 free_res_idx, rm->n_grp_count);
6028 if (rm->n_grp_count > 1) {
6029 if (rm->n_grp_count > free_res_idx)
6030 return ICE_ERR_MAX_LIMIT;
6035 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
6036 return ICE_ERR_MAX_LIMIT;
6038 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
6039 ICE_MAX_NUM_RECIPES,
6042 return ICE_ERR_NO_MEMORY;
6044 buf = (struct ice_aqc_recipe_data_elem *)
6045 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
6047 status = ICE_ERR_NO_MEMORY;
6051 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
6052 recipe_count = ICE_MAX_NUM_RECIPES;
6053 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
6055 if (status || recipe_count == 0)
6058 /* Allocate the recipe resources, and configure them according to the
6059 * match fields from protocol headers and extracted field vectors.
6061 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
6062 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
6065 status = ice_alloc_recipe(hw, &entry->rid);
6069 /* Clear the result index of the located recipe, as this will be
6070 * updated, if needed, later in the recipe creation process.
6072 tmp[0].content.result_indx = 0;
6074 buf[recps] = tmp[0];
6075 buf[recps].recipe_indx = (u8)entry->rid;
6076 /* if the recipe is a non-root recipe RID should be programmed
6077 * as 0 for the rules to be applied correctly.
6079 buf[recps].content.rid = 0;
6080 ice_memset(&buf[recps].content.lkup_indx, 0,
6081 sizeof(buf[recps].content.lkup_indx),
6084 /* All recipes use look-up index 0 to match switch ID. */
6085 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
6086 buf[recps].content.mask[0] =
6087 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
6088 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
6091 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
6092 buf[recps].content.lkup_indx[i] = 0x80;
6093 buf[recps].content.mask[i] = 0;
// Overwrite the "ignore" defaults with this group's real field-vector
// indexes and masks (slot 0 stays reserved for the switch ID).
6096 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
6097 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
6098 buf[recps].content.mask[i + 1] =
6099 CPU_TO_LE16(entry->fv_mask[i]);
6102 if (rm->n_grp_count > 1) {
6103 /* Checks to see if there really is a valid result index
6106 if (chain_idx >= ICE_MAX_FV_WORDS) {
6107 ice_debug(hw, ICE_DBG_SW,
6108 "No chain index available\n");
6109 status = ICE_ERR_MAX_LIMIT;
6113 entry->chain_idx = chain_idx;
6114 buf[recps].content.result_indx =
6115 ICE_AQ_RECIPE_RESULT_EN |
6116 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
6117 ICE_AQ_RECIPE_RESULT_DATA_M);
// Consume this result index and pre-fetch the next free one for the
// following group.
6118 ice_clear_bit(chain_idx, result_idx_bm);
6119 chain_idx = ice_find_first_bit(result_idx_bm,
6123 /* fill recipe dependencies */
6124 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
6125 ICE_MAX_NUM_RECIPES);
6126 ice_set_bit(buf[recps].recipe_indx,
6127 (ice_bitmap_t *)buf[recps].recipe_bitmap);
6128 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
6132 if (rm->n_grp_count == 1) {
6133 rm->root_rid = buf[0].recipe_indx;
6134 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
6135 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
6136 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
6137 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
6138 sizeof(buf[0].recipe_bitmap),
6139 ICE_NONDMA_TO_NONDMA);
6141 status = ICE_ERR_BAD_PTR;
6144 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
6145 * the recipe which is getting created if specified
6146 * by user. Usually any advanced switch filter, which results
6147 * into new extraction sequence, ended up creating a new recipe
6148 * of type ROOT and usually recipes are associated with profiles
6149 * Switch rule referreing newly created recipe, needs to have
6150 * either/or 'fwd' or 'join' priority, otherwise switch rule
6151 * evaluation will not happen correctly. In other words, if
6152 * switch rule to be evaluated on priority basis, then recipe
6153 * needs to have priority, otherwise it will be evaluated last.
6155 buf[0].content.act_ctrl_fwd_priority = rm->priority;
// Multi-group path: build one extra ROOT recipe that matches the chained
// result indexes of all the groups created above.
6157 struct ice_recp_grp_entry *last_chain_entry;
6160 /* Allocate the last recipe that will chain the outcomes of the
6161 * other recipes together
6163 status = ice_alloc_recipe(hw, &rid);
6167 buf[recps].recipe_indx = (u8)rid;
6168 buf[recps].content.rid = (u8)rid;
6169 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
6170 /* the new entry created should also be part of rg_list to
6171 * make sure we have complete recipe
6173 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
6174 sizeof(*last_chain_entry));
6175 if (!last_chain_entry) {
6176 status = ICE_ERR_NO_MEMORY;
6179 last_chain_entry->rid = rid;
6180 ice_memset(&buf[recps].content.lkup_indx, 0,
6181 sizeof(buf[recps].content.lkup_indx),
6183 /* All recipes use look-up index 0 to match switch ID. */
6184 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
6185 buf[recps].content.mask[0] =
6186 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
6187 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
6188 buf[recps].content.lkup_indx[i] =
6189 ICE_AQ_RECIPE_LKUP_IGNORE;
6190 buf[recps].content.mask[i] = 0;
6194 /* update r_bitmap with the recp that is used for chaining */
6195 ice_set_bit(rid, rm->r_bitmap);
6196 /* this is the recipe that chains all the other recipes so it
6197 * should not have a chaining ID to indicate the same
6199 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
// Each chained group's result index becomes a lookup word of the root
// recipe, matched with a full 0xFFFF mask.
6200 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
6202 last_chain_entry->fv_idx[i] = entry->chain_idx;
6203 buf[recps].content.lkup_indx[i] = entry->chain_idx;
6204 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
6205 ice_set_bit(entry->rid, rm->r_bitmap);
6207 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
6208 if (sizeof(buf[recps].recipe_bitmap) >=
6209 sizeof(rm->r_bitmap)) {
6210 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
6211 sizeof(buf[recps].recipe_bitmap),
6212 ICE_NONDMA_TO_NONDMA);
6214 status = ICE_ERR_BAD_PTR;
6217 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
6219 /* To differentiate among different UDP tunnels, a meta data ID
6222 if (match_tun_mask) {
6223 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
6224 buf[recps].content.mask[i] =
6225 CPU_TO_LE16(match_tun_mask);
6229 rm->root_rid = (u8)rid;
// Recipe writes to FW are serialized with the change lock.
6231 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
6235 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
6236 ice_release_change_lock(hw);
6240 /* Every recipe that just got created add it to the recipe
6243 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
6244 struct ice_switch_info *sw = hw->switch_info;
6245 bool is_root, idx_found = false;
6246 struct ice_sw_recipe *recp;
6247 u16 idx, buf_idx = 0;
6249 /* find buffer index for copying some data */
6250 for (idx = 0; idx < rm->n_grp_count; idx++)
6251 if (buf[idx].recipe_indx == entry->rid) {
6257 status = ICE_ERR_OUT_OF_RANGE;
6261 recp = &sw->recp_list[entry->rid];
6262 is_root = (rm->root_rid == entry->rid);
6263 recp->is_root = is_root;
6265 recp->root_rid = entry->rid;
6266 recp->big_recp = (is_root && rm->n_grp_count > 1);
6268 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
6269 entry->r_group.n_val_pairs *
6270 sizeof(struct ice_fv_word),
6271 ICE_NONDMA_TO_NONDMA);
6273 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
6274 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
6276 /* Copy non-result fv index values and masks to recipe. This
6277 * call will also update the result recipe bitmask.
6279 ice_collect_result_idx(&buf[buf_idx], recp);
6281 /* for non-root recipes, also copy to the root, this allows
6282 * easier matching of a complete chained recipe
6285 ice_collect_result_idx(&buf[buf_idx],
6286 &sw->recp_list[rm->root_rid]);
6288 recp->n_ext_words = entry->r_group.n_val_pairs;
6289 recp->chain_idx = entry->chain_idx;
6290 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
6291 recp->n_grp_count = rm->n_grp_count;
6292 recp->tun_type = rm->tun_type;
6293 recp->recp_created = true;
6307 * ice_create_recipe_group - creates recipe group
6308 * @hw: pointer to hardware structure
6309 * @rm: recipe management list entry
6310 * @lkup_exts: lookup elements
// Thin wrapper over ice_create_first_fit_recp_def(): build the recipe
// groups for rm, accumulate the group count into rm->n_grp_count, and
// copy the extraction words and masks from lkup_exts into rm.
// NOTE(review): dropped lines here include the recp_count declaration,
// the status check after the helper call, and the final return
// (numbering jumps 6316->6319, 6325->6327, 6332->6339).
6312 static enum ice_status
6313 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
6314 struct ice_prot_lkup_ext *lkup_exts)
6316 enum ice_status status;
6319 rm->n_grp_count = 0;
6321 /* Create recipes for words that are marked not done by packing them
6324 status = ice_create_first_fit_recp_def(hw, lkup_exts,
6325 &rm->rg_list, &recp_count);
6327 rm->n_grp_count += recp_count;
6328 rm->n_ext_words = lkup_exts->n_val_words;
6329 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
6330 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
6331 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
6332 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
6339 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
6340 * @hw: pointer to hardware structure
6341 * @lkups: lookup elements or match criteria for the advanced recipe, one
6342 * structure per protocol header
6343 * @lkups_cnt: number of protocols
6344 * @bm: bitmap of field vectors to consider
6345 * @fv_list: pointer to a list that holds the returned field vectors
// Convert each lookup element's protocol type to a HW protocol ID (via
// ice_prot_type_to_id), then ask ice_get_sw_fv_list() for all field
// vectors covering that ID set, restricted to the bm profile bitmap.
// The prot_ids scratch array is freed on all visible paths.
// NOTE(review): the early "if (!lkups_cnt)" exit, the prot_ids/i
// declarations, a "goto free_mem"-style label, and the final return are
// not visible in this chunk (numbering jumps 6351->6358, 6364->6368).
6347 static enum ice_status
6348 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6349 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
6351 enum ice_status status;
6358 prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
6360 return ICE_ERR_NO_MEMORY;
6362 for (i = 0; i < lkups_cnt; i++)
6363 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
6364 status = ICE_ERR_CFG;
6368 /* Find field vectors that include all specified protocol types */
6369 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
6372 ice_free(hw, prot_ids);
6377 * ice_tun_type_match_word - determine if tun type needs a match mask
6378 * @tun_type: tunnel type
6379 * @mask: mask to be used for the tunnel
// Decide whether a tunnel type needs a metadata match word; when it does,
// write the mask to use through *mask. VLAN tunnel variants exclude the
// VLAN bit from the flag mask. Per the doc comment above, non-tunnel
// types take the (dropped) default path.
// NOTE(review): the opening brace, "switch" line, "return true/false"
// statements and default case were dropped from this chunk (numbering
// jumps 6381->6384, 6390->6393, 6395->6405).
6381 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
6384 case ICE_SW_TUN_VXLAN_GPE:
6385 case ICE_SW_TUN_GENEVE:
6386 case ICE_SW_TUN_VXLAN:
6387 case ICE_SW_TUN_NVGRE:
6388 case ICE_SW_TUN_UDP:
6389 case ICE_ALL_TUNNELS:
6390 *mask = ICE_TUN_FLAG_MASK;
6393 case ICE_SW_TUN_GENEVE_VLAN:
6394 case ICE_SW_TUN_VXLAN_VLAN:
6395 *mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
6405 * ice_add_special_words - Add words that are not protocols, such as metadata
6406 * @rinfo: other information regarding the rule e.g. priority and action info
6407 * @lkup_exts: lookup word structure
// Append non-protocol ("metadata") match words to lkup_exts — currently
// just the tunnel flag word (ICE_META_DATA_ID_HW at ICE_TUN_FLAG_MDID_OFF)
// when the rule's tunnel type requires one. Fails with ICE_ERR_MAX_LIMIT
// if all ICE_MAX_CHAIN_WORDS slots are already used.
// NOTE(review): the "u16 mask" declaration, an "else" before 6426, and
// the final success return are not visible here (numbering jumps
// 6411->6415, 6424->6426).
6409 static enum ice_status
6410 ice_add_special_words(struct ice_adv_rule_info *rinfo,
6411 struct ice_prot_lkup_ext *lkup_exts)
6415 /* If this is a tunneled packet, then add recipe index to match the
6416 * tunnel bit in the packet metadata flags.
6418 if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
6419 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
6420 u8 word = lkup_exts->n_val_words++;
6422 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
6423 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
6424 lkup_exts->field_mask[word] = mask;
6426 return ICE_ERR_MAX_LIMIT;
6433 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
6434 * @hw: pointer to hardware structure
6435 * @rinfo: other information regarding the rule e.g. priority and action info
6436 * @bm: pointer to memory for returning the bitmap of field vectors
// Fill bm with the profiles (field vectors) compatible with the rule's
// tunnel type. Two styles are mixed in the switch: generic tunnel classes
// set prof_type and fall through to ice_get_sw_fv_bitmap(); profile-ID
// rules set explicit ICE_PROFID_* bits directly in bm.
// NOTE(review): every "break" (and several "return"s for the direct
// ice_set_bit cases), plus the closing braces, were dropped from this
// chunk — the embedded numbering jumps after each case confirm. The
// visible fallthroughs are an artifact of the extraction, not the code.
6439 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
6442 enum ice_prof_type prof_type;
6444 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
6446 switch (rinfo->tun_type) {
6448 prof_type = ICE_PROF_NON_TUN;
6450 case ICE_ALL_TUNNELS:
6451 prof_type = ICE_PROF_TUN_ALL;
6453 case ICE_SW_TUN_VXLAN_GPE:
6454 case ICE_SW_TUN_GENEVE:
6455 case ICE_SW_TUN_GENEVE_VLAN:
6456 case ICE_SW_TUN_VXLAN:
6457 case ICE_SW_TUN_VXLAN_VLAN:
6458 case ICE_SW_TUN_UDP:
6459 case ICE_SW_TUN_GTP:
6460 prof_type = ICE_PROF_TUN_UDP;
6462 case ICE_SW_TUN_NVGRE:
6463 prof_type = ICE_PROF_TUN_GRE;
6465 case ICE_SW_TUN_PPPOE:
6466 prof_type = ICE_PROF_TUN_PPPOE;
// From here on, cases select explicit profile IDs rather than a class.
6468 case ICE_SW_TUN_PPPOE_PAY:
6469 ice_set_bit(ICE_PROFID_PPPOE_PAY, bm);
6471 case ICE_SW_TUN_PPPOE_IPV4:
6472 ice_set_bit(ICE_PROFID_PPPOE_IPV4_OTHER, bm);
6473 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
6474 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
6476 case ICE_SW_TUN_PPPOE_IPV4_TCP:
6477 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
6479 case ICE_SW_TUN_PPPOE_IPV4_UDP:
6480 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
6482 case ICE_SW_TUN_PPPOE_IPV6:
6483 ice_set_bit(ICE_PROFID_PPPOE_IPV6_OTHER, bm);
6484 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
6485 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
6487 case ICE_SW_TUN_PPPOE_IPV6_TCP:
6488 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
6490 case ICE_SW_TUN_PPPOE_IPV6_UDP:
6491 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
6493 case ICE_SW_TUN_PROFID_IPV6_ESP:
6494 case ICE_SW_TUN_IPV6_ESP:
6495 ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
6497 case ICE_SW_TUN_PROFID_IPV6_AH:
6498 case ICE_SW_TUN_IPV6_AH:
6499 ice_set_bit(ICE_PROFID_IPV6_AH, bm);
6501 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6502 case ICE_SW_TUN_IPV6_L2TPV3:
6503 ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
6505 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6506 case ICE_SW_TUN_IPV6_NAT_T:
6507 ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
6509 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6510 ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
6512 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6513 ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
6515 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6516 ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
6518 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6519 ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
6521 case ICE_SW_TUN_IPV4_NAT_T:
6522 ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
6524 case ICE_SW_TUN_IPV4_L2TPV3:
6525 ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
6527 case ICE_SW_TUN_IPV4_ESP:
6528 ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
6530 case ICE_SW_TUN_IPV4_AH:
6531 ice_set_bit(ICE_PROFID_IPV4_AH, bm);
6533 case ICE_SW_IPV4_TCP:
6534 ice_set_bit(ICE_PROFID_IPV4_TCP, bm);
6536 case ICE_SW_IPV4_UDP:
6537 ice_set_bit(ICE_PROFID_IPV4_UDP, bm);
6539 case ICE_SW_IPV6_TCP:
6540 ice_set_bit(ICE_PROFID_IPV6_TCP, bm);
6542 case ICE_SW_IPV6_UDP:
6543 ice_set_bit(ICE_PROFID_IPV6_UDP, bm);
6545 case ICE_SW_TUN_AND_NON_TUN:
6547 prof_type = ICE_PROF_ALL;
6551 ice_get_sw_fv_bitmap(hw, prof_type, bm);
6555 * ice_is_prof_rule - determine if rule type is a profile rule
6556 * @type: the rule type
6558 * if the rule type is a profile rule, that means that there no field value
6559 * match required, in this case just a profile hit is required.
// True for "profile rules": tunnel types that match purely on a profile
// hit, with no field-value lookups required (see doc comment above).
// NOTE(review): the opening brace, "switch (type) {", "return true;",
// default case and "return false;" were dropped from this chunk
// (numbering jumps 6561->6564 and 6571->6581).
6561 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
6564 case ICE_SW_TUN_PROFID_IPV6_ESP:
6565 case ICE_SW_TUN_PROFID_IPV6_AH:
6566 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6567 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6568 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6569 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6570 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6571 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6581 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
6582 * @hw: pointer to hardware structure
6583 * @lkups: lookup elements or match criteria for the advanced recipe, one
6584 * structure per protocol header
6585 * @lkups_cnt: number of protocols
6586 * @rinfo: other information regarding the rule e.g. priority and action info
6587 * @rid: return the recipe ID of the recipe created
// Top-level entry for creating (or finding) the recipe backing an
// advanced switch rule. Visible flow:
//   1. validate: a non-profile rule must supply lookups;
//   2. ice_fill_valid_words() per lookup -> lkup_exts;
//   3. ice_get_compat_fv_bitmap() + ice_get_fv() -> candidate profiles;
//   4. ice_create_recipe_group() -> recipe groups; tunnel metadata mask
//      captured when the recipe is chained (n_grp_count > 1);
//   5. ice_fill_fv_word_index() -> field-vector indexes per group;
//   6. ice_add_special_words() then ice_find_recp() — reuse an existing
//      recipe when one matches, else ice_add_sw_recipe();
//   7. associate new recipes with every profile in fv_list via
//      get/map recipe-to-profile AQ calls (under the change lock) and
//      mirror into profile_to_recipe / recipe_to_profile;
//   8. cleanup: free rg_list entries, fv_list entries, rm and lkup_exts.
// NOTE(review): many lines are missing from this chunk (local i/j/count/
// mask declarations, several "if (status)" checks and goto labels such
// as err_unroll / err_free_lkup_exts targets, loop closers, and the
// final return) — embedded numbering jumps confirm. Treat as incomplete.
6589 static enum ice_status
6590 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6591 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
6593 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
6594 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
6595 struct ice_prot_lkup_ext *lkup_exts;
6596 struct ice_recp_grp_entry *r_entry;
6597 struct ice_sw_fv_list_entry *fvit;
6598 struct ice_recp_grp_entry *r_tmp;
6599 struct ice_sw_fv_list_entry *tmp;
6600 enum ice_status status = ICE_SUCCESS;
6601 struct ice_sw_recipe *rm;
6602 u16 match_tun_mask = 0;
6606 if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
6607 return ICE_ERR_PARAM;
6609 lkup_exts = (struct ice_prot_lkup_ext *)
6610 ice_malloc(hw, sizeof(*lkup_exts));
6612 return ICE_ERR_NO_MEMORY;
6614 /* Determine the number of words to be matched and if it exceeds a
6615 * recipe's restrictions
6617 for (i = 0; i < lkups_cnt; i++) {
6620 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
6621 status = ICE_ERR_CFG;
6622 goto err_free_lkup_exts;
6625 count = ice_fill_valid_words(&lkups[i], lkup_exts);
6627 status = ICE_ERR_CFG;
6628 goto err_free_lkup_exts;
6632 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
6634 status = ICE_ERR_NO_MEMORY;
6635 goto err_free_lkup_exts;
6638 /* Get field vectors that contain fields extracted from all the protocol
6639 * headers being programmed.
6641 INIT_LIST_HEAD(&rm->fv_list);
6642 INIT_LIST_HEAD(&rm->rg_list);
6644 /* Get bitmap of field vectors (profiles) that are compatible with the
6645 * rule request; only these will be searched in the subsequent call to
6648 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
6650 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
6654 /* Group match words into recipes using preferred recipe grouping
6657 status = ice_create_recipe_group(hw, rm, lkup_exts);
6661 /* For certain tunnel types it is necessary to use a metadata ID flag to
6662 * differentiate different tunnel types. A separate recipe needs to be
6663 * used for the metadata.
6665 if (ice_tun_type_match_word(rinfo->tun_type, &mask) &&
6666 rm->n_grp_count > 1)
6667 match_tun_mask = mask;
6669 /* set the recipe priority if specified */
6670 rm->priority = (u8)rinfo->priority;
6672 /* Find offsets from the field vector. Pick the first one for all the
6675 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
6679 /* An empty FV list means to use all the profiles returned in the
6682 if (LIST_EMPTY(&rm->fv_list)) {
6685 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++)
6686 if (ice_is_bit_set(fv_bitmap, j)) {
6687 struct ice_sw_fv_list_entry *fvl;
6689 fvl = (struct ice_sw_fv_list_entry *)
6690 ice_malloc(hw, sizeof(*fvl));
6694 fvl->profile_id = j;
6695 LIST_ADD(&fvl->list_entry, &rm->fv_list);
6699 /* get bitmap of all profiles the recipe will be associated with */
6700 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
6701 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
6703 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
6704 ice_set_bit((u16)fvit->profile_id, profiles);
6707 /* Create any special protocol/offset pairs, such as looking at tunnel
6708 * bits by extracting metadata
6710 status = ice_add_special_words(rinfo, lkup_exts);
6712 goto err_free_lkup_exts;
6714 /* Look for a recipe which matches our requested fv / mask list */
6715 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
6716 if (*rid < ICE_MAX_NUM_RECIPES)
6717 /* Success if found a recipe that match the existing criteria */
6720 rm->tun_type = rinfo->tun_type;
6721 /* Recipe we need does not exist, add a recipe */
6722 status = ice_add_sw_recipe(hw, rm, match_tun_mask, profiles);
6726 /* Associate all the recipes created with all the profiles in the
6727 * common field vector.
6729 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
6731 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
6734 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
6735 (u8 *)r_bitmap, NULL);
// Merge the recipes just created into the profile's existing recipe
// bitmap before writing it back under the change lock.
6739 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
6740 ICE_MAX_NUM_RECIPES);
6741 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
6745 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
6748 ice_release_change_lock(hw);
6753 /* Update profile to recipe bitmap array */
6754 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
6755 ICE_MAX_NUM_RECIPES);
6757 /* Update recipe to profile bitmap array */
6758 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
6759 if (ice_is_bit_set(r_bitmap, j))
6760 ice_set_bit((u16)fvit->profile_id,
6761 recipe_to_profile[j]);
6764 *rid = rm->root_rid;
// Cache the final lookup extraction set on the root recipe so later
// ice_find_recp() calls can match against it.
6765 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
6766 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
6768 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
6769 ice_recp_grp_entry, l_entry) {
6770 LIST_DEL(&r_entry->l_entry);
6771 ice_free(hw, r_entry);
6774 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
6776 LIST_DEL(&fvit->list_entry);
6781 ice_free(hw, rm->root_buf);
6786 ice_free(hw, lkup_exts);
6792 * ice_find_dummy_packet - find dummy packet by tunnel type
6794 * @lkups: lookup elements or match criteria for the advanced recipe, one
6795 * structure per protocol header
6796 * @lkups_cnt: number of protocols
6797 * @tun_type: tunnel type from the match criteria
6798 * @pkt: dummy packet to fill according to filter match criteria
6799 * @pkt_len: packet length of dummy packet
6800 * @offsets: pointer to receive the pointer to the offsets for the packet
6803 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6804 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
6806 const struct ice_dummy_pkt_offsets **offsets)
6808 bool tcp = false, udp = false, ipv6 = false, vlan = false;
6812 for (i = 0; i < lkups_cnt; i++) {
6813 if (lkups[i].type == ICE_UDP_ILOS)
6815 else if (lkups[i].type == ICE_TCP_IL)
6817 else if (lkups[i].type == ICE_IPV6_OFOS)
6819 else if (lkups[i].type == ICE_VLAN_OFOS)
6821 else if (lkups[i].type == ICE_IPV4_OFOS &&
6822 lkups[i].h_u.ipv4_hdr.protocol ==
6823 ICE_IPV4_NVGRE_PROTO_ID &&
6824 lkups[i].m_u.ipv4_hdr.protocol ==
6827 else if (lkups[i].type == ICE_PPPOE &&
6828 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
6829 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
6830 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
6833 else if (lkups[i].type == ICE_ETYPE_OL &&
6834 lkups[i].h_u.ethertype.ethtype_id ==
6835 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
6836 lkups[i].m_u.ethertype.ethtype_id ==
6841 if (tun_type == ICE_SW_TUN_IPV4_ESP) {
6842 *pkt = dummy_ipv4_esp_pkt;
6843 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
6844 *offsets = dummy_ipv4_esp_packet_offsets;
6848 if (tun_type == ICE_SW_TUN_IPV6_ESP) {
6849 *pkt = dummy_ipv6_esp_pkt;
6850 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
6851 *offsets = dummy_ipv6_esp_packet_offsets;
6855 if (tun_type == ICE_SW_TUN_IPV4_AH) {
6856 *pkt = dummy_ipv4_ah_pkt;
6857 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
6858 *offsets = dummy_ipv4_ah_packet_offsets;
6862 if (tun_type == ICE_SW_TUN_IPV6_AH) {
6863 *pkt = dummy_ipv6_ah_pkt;
6864 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
6865 *offsets = dummy_ipv6_ah_packet_offsets;
6869 if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
6870 *pkt = dummy_ipv4_nat_pkt;
6871 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
6872 *offsets = dummy_ipv4_nat_packet_offsets;
6876 if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
6877 *pkt = dummy_ipv6_nat_pkt;
6878 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
6879 *offsets = dummy_ipv6_nat_packet_offsets;
6883 if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
6884 *pkt = dummy_ipv4_l2tpv3_pkt;
6885 *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
6886 *offsets = dummy_ipv4_l2tpv3_packet_offsets;
6890 if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
6891 *pkt = dummy_ipv6_l2tpv3_pkt;
6892 *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
6893 *offsets = dummy_ipv6_l2tpv3_packet_offsets;
6897 if (tun_type == ICE_SW_TUN_GTP) {
6898 *pkt = dummy_udp_gtp_packet;
6899 *pkt_len = sizeof(dummy_udp_gtp_packet);
6900 *offsets = dummy_udp_gtp_packet_offsets;
6904 if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
6905 *pkt = dummy_pppoe_ipv6_packet;
6906 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
6907 *offsets = dummy_pppoe_packet_offsets;
6909 } else if (tun_type == ICE_SW_TUN_PPPOE ||
6910 tun_type == ICE_SW_TUN_PPPOE_PAY) {
6911 *pkt = dummy_pppoe_ipv4_packet;
6912 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
6913 *offsets = dummy_pppoe_packet_offsets;
6917 if (tun_type == ICE_SW_TUN_PPPOE_IPV4) {
6918 *pkt = dummy_pppoe_ipv4_packet;
6919 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
6920 *offsets = dummy_pppoe_packet_ipv4_offsets;
6924 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_TCP) {
6925 *pkt = dummy_pppoe_ipv4_tcp_packet;
6926 *pkt_len = sizeof(dummy_pppoe_ipv4_tcp_packet);
6927 *offsets = dummy_pppoe_ipv4_tcp_packet_offsets;
6931 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_UDP) {
6932 *pkt = dummy_pppoe_ipv4_udp_packet;
6933 *pkt_len = sizeof(dummy_pppoe_ipv4_udp_packet);
6934 *offsets = dummy_pppoe_ipv4_udp_packet_offsets;
6938 if (tun_type == ICE_SW_TUN_PPPOE_IPV6) {
6939 *pkt = dummy_pppoe_ipv6_packet;
6940 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
6941 *offsets = dummy_pppoe_packet_ipv6_offsets;
6945 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_TCP) {
6946 *pkt = dummy_pppoe_ipv6_tcp_packet;
6947 *pkt_len = sizeof(dummy_pppoe_ipv6_tcp_packet);
6948 *offsets = dummy_pppoe_packet_ipv6_tcp_offsets;
6952 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_UDP) {
6953 *pkt = dummy_pppoe_ipv6_udp_packet;
6954 *pkt_len = sizeof(dummy_pppoe_ipv6_udp_packet);
6955 *offsets = dummy_pppoe_packet_ipv6_udp_offsets;
6959 if (tun_type == ICE_SW_IPV4_TCP) {
6960 *pkt = dummy_tcp_packet;
6961 *pkt_len = sizeof(dummy_tcp_packet);
6962 *offsets = dummy_tcp_packet_offsets;
6966 if (tun_type == ICE_SW_IPV4_UDP) {
6967 *pkt = dummy_udp_packet;
6968 *pkt_len = sizeof(dummy_udp_packet);
6969 *offsets = dummy_udp_packet_offsets;
6973 if (tun_type == ICE_SW_IPV6_TCP) {
6974 *pkt = dummy_tcp_ipv6_packet;
6975 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
6976 *offsets = dummy_tcp_ipv6_packet_offsets;
6980 if (tun_type == ICE_SW_IPV6_UDP) {
6981 *pkt = dummy_udp_ipv6_packet;
6982 *pkt_len = sizeof(dummy_udp_ipv6_packet);
6983 *offsets = dummy_udp_ipv6_packet_offsets;
6987 if (tun_type == ICE_ALL_TUNNELS) {
6988 *pkt = dummy_gre_udp_packet;
6989 *pkt_len = sizeof(dummy_gre_udp_packet);
6990 *offsets = dummy_gre_udp_packet_offsets;
6994 if (tun_type == ICE_SW_TUN_NVGRE || gre) {
6996 *pkt = dummy_gre_tcp_packet;
6997 *pkt_len = sizeof(dummy_gre_tcp_packet);
6998 *offsets = dummy_gre_tcp_packet_offsets;
7002 *pkt = dummy_gre_udp_packet;
7003 *pkt_len = sizeof(dummy_gre_udp_packet);
7004 *offsets = dummy_gre_udp_packet_offsets;
7008 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
7009 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
7010 tun_type == ICE_SW_TUN_GENEVE_VLAN ||
7011 tun_type == ICE_SW_TUN_VXLAN_VLAN) {
7013 *pkt = dummy_udp_tun_tcp_packet;
7014 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
7015 *offsets = dummy_udp_tun_tcp_packet_offsets;
7019 *pkt = dummy_udp_tun_udp_packet;
7020 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
7021 *offsets = dummy_udp_tun_udp_packet_offsets;
7027 *pkt = dummy_vlan_udp_packet;
7028 *pkt_len = sizeof(dummy_vlan_udp_packet);
7029 *offsets = dummy_vlan_udp_packet_offsets;
7032 *pkt = dummy_udp_packet;
7033 *pkt_len = sizeof(dummy_udp_packet);
7034 *offsets = dummy_udp_packet_offsets;
7036 } else if (udp && ipv6) {
7038 *pkt = dummy_vlan_udp_ipv6_packet;
7039 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
7040 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
7043 *pkt = dummy_udp_ipv6_packet;
7044 *pkt_len = sizeof(dummy_udp_ipv6_packet);
7045 *offsets = dummy_udp_ipv6_packet_offsets;
7047 } else if ((tcp && ipv6) || ipv6) {
7049 *pkt = dummy_vlan_tcp_ipv6_packet;
7050 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
7051 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
7054 *pkt = dummy_tcp_ipv6_packet;
7055 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
7056 *offsets = dummy_tcp_ipv6_packet_offsets;
7061 *pkt = dummy_vlan_tcp_packet;
7062 *pkt_len = sizeof(dummy_vlan_tcp_packet);
7063 *offsets = dummy_vlan_tcp_packet_offsets;
7065 *pkt = dummy_tcp_packet;
7066 *pkt_len = sizeof(dummy_tcp_packet);
7067 *offsets = dummy_tcp_packet_offsets;
7072 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
7074 * @lkups: lookup elements or match criteria for the advanced recipe, one
7075 * structure per protocol header
7076 * @lkups_cnt: number of protocols
7077 * @s_rule: stores rule information from the match criteria
7078 * @dummy_pkt: dummy packet to fill according to filter match criteria
7079 * @pkt_len: packet length of dummy packet
7080 * @offsets: offset info for the dummy packet
7082 static enum ice_status
7083 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7084 struct ice_aqc_sw_rules_elem *s_rule,
7085 const u8 *dummy_pkt, u16 pkt_len,
7086 const struct ice_dummy_pkt_offsets *offsets)
7091 /* Start with a packet with a pre-defined/dummy content. Then, fill
7092 * in the header values to be looked up or matched.
7094 pkt = s_rule->pdata.lkup_tx_rx.hdr;
7096 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
7098 for (i = 0; i < lkups_cnt; i++) {
7099 enum ice_protocol_type type;
7100 u16 offset = 0, len = 0, j;
7103 /* find the start of this layer; it should be found since this
7104 * was already checked when search for the dummy packet
7106 type = lkups[i].type;
7107 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
7108 if (type == offsets[j].type) {
7109 offset = offsets[j].offset;
7114 /* this should never happen in a correct calling sequence */
7116 return ICE_ERR_PARAM;
7118 switch (lkups[i].type) {
7121 len = sizeof(struct ice_ether_hdr);
7124 len = sizeof(struct ice_ethtype_hdr);
7127 len = sizeof(struct ice_vlan_hdr);
7131 len = sizeof(struct ice_ipv4_hdr);
7135 len = sizeof(struct ice_ipv6_hdr);
7140 len = sizeof(struct ice_l4_hdr);
7143 len = sizeof(struct ice_sctp_hdr);
7146 len = sizeof(struct ice_nvgre);
7151 len = sizeof(struct ice_udp_tnl_hdr);
7155 len = sizeof(struct ice_udp_gtp_hdr);
7158 len = sizeof(struct ice_pppoe_hdr);
7161 len = sizeof(struct ice_esp_hdr);
7164 len = sizeof(struct ice_nat_t_hdr);
7167 len = sizeof(struct ice_ah_hdr);
7170 len = sizeof(struct ice_l2tpv3_sess_hdr);
7173 return ICE_ERR_PARAM;
7176 /* the length should be a word multiple */
7177 if (len % ICE_BYTES_PER_WORD)
7180 /* We have the offset to the header start, the length, the
7181 * caller's header values and mask. Use this information to
7182 * copy the data into the dummy packet appropriately based on
7183 * the mask. Note that we need to only write the bits as
7184 * indicated by the mask to make sure we don't improperly write
7185 * over any significant packet data.
7187 for (j = 0; j < len / sizeof(u16); j++)
7188 if (((u16 *)&lkups[i].m_u)[j])
7189 ((u16 *)(pkt + offset))[j] =
7190 (((u16 *)(pkt + offset))[j] &
7191 ~((u16 *)&lkups[i].m_u)[j]) |
7192 (((u16 *)&lkups[i].h_u)[j] &
7193 ((u16 *)&lkups[i].m_u)[j]);
7196 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
7202 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
7203 * @hw: pointer to the hardware structure
7204 * @tun_type: tunnel type
7205 * @pkt: dummy packet to fill in
7206 * @offsets: offset info for the dummy packet
7208 static enum ice_status
7209 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
7210 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
7215 case ICE_SW_TUN_AND_NON_TUN:
7216 case ICE_SW_TUN_VXLAN_GPE:
7217 case ICE_SW_TUN_VXLAN:
7218 case ICE_SW_TUN_VXLAN_VLAN:
7219 case ICE_SW_TUN_UDP:
7220 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
7224 case ICE_SW_TUN_GENEVE:
7225 case ICE_SW_TUN_GENEVE_VLAN:
7226 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
7231 /* Nothing needs to be done for this tunnel type */
7235 /* Find the outer UDP protocol header and insert the port number */
7236 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
7237 if (offsets[i].type == ICE_UDP_OF) {
7238 struct ice_l4_hdr *hdr;
7241 offset = offsets[i].offset;
7242 hdr = (struct ice_l4_hdr *)&pkt[offset];
7243 hdr->dst_port = CPU_TO_BE16(open_port);
7253 * ice_find_adv_rule_entry - Search a rule entry
7254 * @hw: pointer to the hardware structure
7255 * @lkups: lookup elements or match criteria for the advanced recipe, one
7256 * structure per protocol header
7257 * @lkups_cnt: number of protocols
7258 * @recp_id: recipe ID for which we are finding the rule
7259 * @rinfo: other information regarding the rule e.g. priority and action info
7261 * Helper function to search for a given advance rule entry
7262 * Returns pointer to entry storing the rule if found
7264 static struct ice_adv_fltr_mgmt_list_entry *
7265 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7266 u16 lkups_cnt, u16 recp_id,
7267 struct ice_adv_rule_info *rinfo)
7269 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7270 struct ice_switch_info *sw = hw->switch_info;
7273 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
7274 ice_adv_fltr_mgmt_list_entry, list_entry) {
7275 bool lkups_matched = true;
7277 if (lkups_cnt != list_itr->lkups_cnt)
7279 for (i = 0; i < list_itr->lkups_cnt; i++)
7280 if (memcmp(&list_itr->lkups[i], &lkups[i],
7282 lkups_matched = false;
7285 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
7286 rinfo->tun_type == list_itr->rule_info.tun_type &&
7294 * ice_adv_add_update_vsi_list
7295 * @hw: pointer to the hardware structure
7296 * @m_entry: pointer to current adv filter management list entry
7297 * @cur_fltr: filter information from the book keeping entry
7298 * @new_fltr: filter information with the new VSI to be added
7300 * Call AQ command to add or update previously created VSI list with new VSI.
7302 * Helper function to do book keeping associated with adding filter information
7303 * The algorithm to do the booking keeping is described below :
7304 * When a VSI needs to subscribe to a given advanced filter
7305 * if only one VSI has been added till now
7306 * Allocate a new VSI list and add two VSIs
7307 * to this list using switch rule command
7308 * Update the previously created switch rule with the
7309 * newly created VSI list ID
7310 * if a VSI list was previously created
7311 * Add the new VSI to the previously created VSI list set
7312 * using the update switch rule command
7314 static enum ice_status
7315 ice_adv_add_update_vsi_list(struct ice_hw *hw,
7316 struct ice_adv_fltr_mgmt_list_entry *m_entry,
7317 struct ice_adv_rule_info *cur_fltr,
7318 struct ice_adv_rule_info *new_fltr)
7320 enum ice_status status;
7321 u16 vsi_list_id = 0;
7323 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
7324 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
7325 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
7326 return ICE_ERR_NOT_IMPL;
7328 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
7329 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
7330 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
7331 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
7332 return ICE_ERR_NOT_IMPL;
7334 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
7335 /* Only one entry existed in the mapping and it was not already
7336 * a part of a VSI list. So, create a VSI list with the old and
7339 struct ice_fltr_info tmp_fltr;
7340 u16 vsi_handle_arr[2];
7342 /* A rule already exists with the new VSI being added */
7343 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
7344 new_fltr->sw_act.fwd_id.hw_vsi_id)
7345 return ICE_ERR_ALREADY_EXISTS;
7347 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
7348 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
7349 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
7355 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
7356 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
7357 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
7358 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
7359 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
7360 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
7362 /* Update the previous switch rule of "forward to VSI" to
7365 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
7369 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
7370 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
7371 m_entry->vsi_list_info =
7372 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
7375 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
7377 if (!m_entry->vsi_list_info)
7380 /* A rule already exists with the new VSI being added */
7381 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
7384 /* Update the previously created VSI list set with
7385 * the new VSI ID passed in
7387 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
7389 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
7391 ice_aqc_opc_update_sw_rules,
7393 /* update VSI list mapping info with new VSI ID */
7395 ice_set_bit(vsi_handle,
7396 m_entry->vsi_list_info->vsi_map);
7399 m_entry->vsi_count++;
7404 * ice_add_adv_rule - helper function to create an advanced switch rule
7405 * @hw: pointer to the hardware structure
7406 * @lkups: information on the words that needs to be looked up. All words
7407 * together makes one recipe
7408 * @lkups_cnt: num of entries in the lkups array
7409 * @rinfo: other information related to the rule that needs to be programmed
7410 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
7411 * ignored is case of error.
7413 * This function can program only 1 rule at a time. The lkups is used to
7414 * describe the all the words that forms the "lookup" portion of the recipe.
7415 * These words can span multiple protocols. Callers to this function need to
7416 * pass in a list of protocol headers with lookup information along and mask
7417 * that determines which words are valid from the given protocol header.
7418 * rinfo describes other information related to this rule such as forwarding
7419 * IDs, priority of this rule, etc.
7422 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7423 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
7424 struct ice_rule_query_data *added_entry)
7426 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
7427 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
7428 const struct ice_dummy_pkt_offsets *pkt_offsets;
7429 struct ice_aqc_sw_rules_elem *s_rule = NULL;
7430 struct LIST_HEAD_TYPE *rule_head;
7431 struct ice_switch_info *sw;
7432 enum ice_status status;
7433 const u8 *pkt = NULL;
7439 /* Initialize profile to result index bitmap */
7440 if (!hw->switch_info->prof_res_bm_init) {
7441 hw->switch_info->prof_res_bm_init = 1;
7442 ice_init_prof_result_bm(hw);
7445 prof_rule = ice_is_prof_rule(rinfo->tun_type);
7446 if (!prof_rule && !lkups_cnt)
7447 return ICE_ERR_PARAM;
7449 /* get # of words we need to match */
7451 for (i = 0; i < lkups_cnt; i++) {
7454 ptr = (u16 *)&lkups[i].m_u;
7455 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
7461 if (word_cnt > ICE_MAX_CHAIN_WORDS)
7462 return ICE_ERR_PARAM;
7464 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
7465 return ICE_ERR_PARAM;
7468 /* make sure that we can locate a dummy packet */
7469 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
7472 status = ICE_ERR_PARAM;
7473 goto err_ice_add_adv_rule;
7476 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
7477 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
7478 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
7479 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
7482 vsi_handle = rinfo->sw_act.vsi_handle;
7483 if (!ice_is_vsi_valid(hw, vsi_handle))
7484 return ICE_ERR_PARAM;
7486 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
7487 rinfo->sw_act.fwd_id.hw_vsi_id =
7488 ice_get_hw_vsi_num(hw, vsi_handle);
7489 if (rinfo->sw_act.flag & ICE_FLTR_TX)
7490 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
7492 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
7495 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
7497 /* we have to add VSI to VSI_LIST and increment vsi_count.
7498 * Also Update VSI list so that we can change forwarding rule
7499 * if the rule already exists, we will check if it exists with
7500 * same vsi_id, if not then add it to the VSI list if it already
7501 * exists if not then create a VSI list and add the existing VSI
7502 * ID and the new VSI ID to the list
7503 * We will add that VSI to the list
7505 status = ice_adv_add_update_vsi_list(hw, m_entry,
7506 &m_entry->rule_info,
7509 added_entry->rid = rid;
7510 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
7511 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
7515 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
7516 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
7518 return ICE_ERR_NO_MEMORY;
7519 act |= ICE_SINGLE_ACT_LAN_ENABLE;
7520 switch (rinfo->sw_act.fltr_act) {
7521 case ICE_FWD_TO_VSI:
7522 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
7523 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
7524 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
7527 act |= ICE_SINGLE_ACT_TO_Q;
7528 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
7529 ICE_SINGLE_ACT_Q_INDEX_M;
7531 case ICE_FWD_TO_QGRP:
7532 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
7533 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
7534 act |= ICE_SINGLE_ACT_TO_Q;
7535 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
7536 ICE_SINGLE_ACT_Q_INDEX_M;
7537 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
7538 ICE_SINGLE_ACT_Q_REGION_M;
7540 case ICE_DROP_PACKET:
7541 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
7542 ICE_SINGLE_ACT_VALID_BIT;
7545 status = ICE_ERR_CFG;
7546 goto err_ice_add_adv_rule;
7549 /* set the rule LOOKUP type based on caller specified 'RX'
7550 * instead of hardcoding it to be either LOOKUP_TX/RX
7552 * for 'RX' set the source to be the port number
7553 * for 'TX' set the source to be the source HW VSI number (determined
7557 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
7558 s_rule->pdata.lkup_tx_rx.src =
7559 CPU_TO_LE16(hw->port_info->lport);
7561 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
7562 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
7565 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
7566 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
7568 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
7569 pkt_len, pkt_offsets);
7571 goto err_ice_add_adv_rule;
7573 if (rinfo->tun_type != ICE_NON_TUN &&
7574 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
7575 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
7576 s_rule->pdata.lkup_tx_rx.hdr,
7579 goto err_ice_add_adv_rule;
7582 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
7583 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
7586 goto err_ice_add_adv_rule;
7587 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
7588 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
7590 status = ICE_ERR_NO_MEMORY;
7591 goto err_ice_add_adv_rule;
7594 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
7595 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
7596 ICE_NONDMA_TO_NONDMA);
7597 if (!adv_fltr->lkups && !prof_rule) {
7598 status = ICE_ERR_NO_MEMORY;
7599 goto err_ice_add_adv_rule;
7602 adv_fltr->lkups_cnt = lkups_cnt;
7603 adv_fltr->rule_info = *rinfo;
7604 adv_fltr->rule_info.fltr_rule_id =
7605 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
7606 sw = hw->switch_info;
7607 sw->recp_list[rid].adv_rule = true;
7608 rule_head = &sw->recp_list[rid].filt_rules;
7610 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
7611 adv_fltr->vsi_count = 1;
7613 /* Add rule entry to book keeping list */
7614 LIST_ADD(&adv_fltr->list_entry, rule_head);
7616 added_entry->rid = rid;
7617 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
7618 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
7620 err_ice_add_adv_rule:
7621 if (status && adv_fltr) {
7622 ice_free(hw, adv_fltr->lkups);
7623 ice_free(hw, adv_fltr);
7626 ice_free(hw, s_rule);
7632 * ice_adv_rem_update_vsi_list
7633 * @hw: pointer to the hardware structure
7634 * @vsi_handle: VSI handle of the VSI to remove
7635 * @fm_list: filter management entry for which the VSI list management needs to
7638 static enum ice_status
7639 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
7640 struct ice_adv_fltr_mgmt_list_entry *fm_list)
7642 struct ice_vsi_list_map_info *vsi_list_info;
7643 enum ice_sw_lkup_type lkup_type;
7644 enum ice_status status;
7647 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
7648 fm_list->vsi_count == 0)
7649 return ICE_ERR_PARAM;
7651 /* A rule with the VSI being removed does not exist */
7652 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
7653 return ICE_ERR_DOES_NOT_EXIST;
7655 lkup_type = ICE_SW_LKUP_LAST;
7656 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
7657 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
7658 ice_aqc_opc_update_sw_rules,
7663 fm_list->vsi_count--;
7664 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
7665 vsi_list_info = fm_list->vsi_list_info;
7666 if (fm_list->vsi_count == 1) {
7667 struct ice_fltr_info tmp_fltr;
7670 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
7672 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
7673 return ICE_ERR_OUT_OF_RANGE;
7675 /* Make sure VSI list is empty before removing it below */
7676 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
7678 ice_aqc_opc_update_sw_rules,
7683 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
7684 tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
7685 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
7686 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
7687 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
7688 tmp_fltr.fwd_id.hw_vsi_id =
7689 ice_get_hw_vsi_num(hw, rem_vsi_handle);
7690 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
7691 ice_get_hw_vsi_num(hw, rem_vsi_handle);
7692 fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
7694 /* Update the previous switch rule of "MAC forward to VSI" to
7695 * "MAC fwd to VSI list"
7697 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
7699 ice_debug(hw, ICE_DBG_SW,
7700 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
7701 tmp_fltr.fwd_id.hw_vsi_id, status);
7704 fm_list->vsi_list_info->ref_cnt--;
7706 /* Remove the VSI list since it is no longer used */
7707 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
7709 ice_debug(hw, ICE_DBG_SW,
7710 "Failed to remove VSI list %d, error %d\n",
7711 vsi_list_id, status);
7715 LIST_DEL(&vsi_list_info->list_entry);
7716 ice_free(hw, vsi_list_info);
7717 fm_list->vsi_list_info = NULL;
7724 * ice_rem_adv_rule - removes existing advanced switch rule
7725 * @hw: pointer to the hardware structure
7726 * @lkups: information on the words that needs to be looked up. All words
7727 * together makes one recipe
7728 * @lkups_cnt: num of entries in the lkups array
7729 * @rinfo: Its the pointer to the rule information for the rule
7731 * This function can be used to remove 1 rule at a time. The lkups is
7732 * used to describe all the words that forms the "lookup" portion of the
7733 * rule. These words can span multiple protocols. Callers to this function
7734 * need to pass in a list of protocol headers with lookup information along
7735 * and mask that determines which words are valid from the given protocol
7736 * header. rinfo describes other information related to this rule such as
7737 * forwarding IDs, priority of this rule, etc.
7740 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7741 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
7743 struct ice_adv_fltr_mgmt_list_entry *list_elem;
7744 struct ice_prot_lkup_ext lkup_exts;
7745 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
7746 enum ice_status status = ICE_SUCCESS;
7747 bool remove_rule = false;
7748 u16 i, rid, vsi_handle;
7750 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
7751 for (i = 0; i < lkups_cnt; i++) {
7754 if (lkups[i].type >= ICE_PROTOCOL_LAST)
7757 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
7762 /* Create any special protocol/offset pairs, such as looking at tunnel
7763 * bits by extracting metadata
7765 status = ice_add_special_words(rinfo, &lkup_exts);
7769 rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
7770 /* If did not find a recipe that match the existing criteria */
7771 if (rid == ICE_MAX_NUM_RECIPES)
7772 return ICE_ERR_PARAM;
7774 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
7775 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
7776 /* the rule is already removed */
7779 ice_acquire_lock(rule_lock);
7780 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
7782 } else if (list_elem->vsi_count > 1) {
7783 remove_rule = false;
7784 vsi_handle = rinfo->sw_act.vsi_handle;
7785 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
7787 vsi_handle = rinfo->sw_act.vsi_handle;
7788 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
7790 ice_release_lock(rule_lock);
7793 if (list_elem->vsi_count == 0)
7796 ice_release_lock(rule_lock);
7798 struct ice_aqc_sw_rules_elem *s_rule;
7801 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
7803 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
7806 return ICE_ERR_NO_MEMORY;
7807 s_rule->pdata.lkup_tx_rx.act = 0;
7808 s_rule->pdata.lkup_tx_rx.index =
7809 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
7810 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
7811 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
7813 ice_aqc_opc_remove_sw_rules, NULL);
7814 if (status == ICE_SUCCESS || status == ICE_ERR_DOES_NOT_EXIST) {
7815 struct ice_switch_info *sw = hw->switch_info;
7817 ice_acquire_lock(rule_lock);
7818 LIST_DEL(&list_elem->list_entry);
7819 ice_free(hw, list_elem->lkups);
7820 ice_free(hw, list_elem);
7821 ice_release_lock(rule_lock);
7822 if (LIST_EMPTY(&sw->recp_list[rid].filt_rules))
7823 sw->recp_list[rid].adv_rule = false;
7825 ice_free(hw, s_rule);
7831 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
7832 * @hw: pointer to the hardware structure
7833 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
7835 * This function is used to remove 1 rule at a time. The removal is based on
7836 * the remove_entry parameter. This function will remove rule for a given
7837 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
7840 ice_rem_adv_rule_by_id(struct ice_hw *hw,
7841 struct ice_rule_query_data *remove_entry)
7843 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7844 struct LIST_HEAD_TYPE *list_head;
7845 struct ice_adv_rule_info rinfo;
7846 struct ice_switch_info *sw;
7848 sw = hw->switch_info;
7849 if (!sw->recp_list[remove_entry->rid].recp_created)
7850 return ICE_ERR_PARAM;
7851 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
7852 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
7854 if (list_itr->rule_info.fltr_rule_id ==
7855 remove_entry->rule_id) {
7856 rinfo = list_itr->rule_info;
7857 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
7858 return ice_rem_adv_rule(hw, list_itr->lkups,
7859 list_itr->lkups_cnt, &rinfo);
7862 /* either list is empty or unable to find rule */
7863 return ICE_ERR_DOES_NOT_EXIST;
7867 * ice_rem_adv_for_vsi - removes existing advanced switch rules for a
7869 * @hw: pointer to the hardware structure
7870 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
7872 * This function is used to remove all the rules for a given VSI and as soon
7873 * as removing a rule fails, it will return immediately with the error code,
7874 * else it will return ICE_SUCCESS
7876 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
7878 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7879 struct ice_vsi_list_map_info *map_info;
7880 struct LIST_HEAD_TYPE *list_head;
7881 struct ice_adv_rule_info rinfo;
7882 struct ice_switch_info *sw;
7883 enum ice_status status;
7884 u16 vsi_list_id = 0;
7887 sw = hw->switch_info;
7888 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
7889 if (!sw->recp_list[rid].recp_created)
7891 if (!sw->recp_list[rid].adv_rule)
7893 list_head = &sw->recp_list[rid].filt_rules;
7895 LIST_FOR_EACH_ENTRY(list_itr, list_head,
7896 ice_adv_fltr_mgmt_list_entry, list_entry) {
7897 map_info = ice_find_vsi_list_entry(&sw->recp_list[rid],
7902 rinfo = list_itr->rule_info;
7903 rinfo.sw_act.vsi_handle = vsi_handle;
7904 status = ice_rem_adv_rule(hw, list_itr->lkups,
7905 list_itr->lkups_cnt, &rinfo);
7915 * ice_replay_fltr - Replay all the filters stored by a specific list head
7916 * @hw: pointer to the hardware structure
7917 * @list_head: list for which filters needs to be replayed
7918 * @recp_id: Recipe ID for which rules need to be replayed
 *
 * Return: ICE_SUCCESS when every rule was re-added, otherwise the first
 * failure status returned by the add helpers.
7920 static enum ice_status
7921 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
7923 struct ice_fltr_mgmt_list_entry *itr;
7924 enum ice_status status = ICE_SUCCESS;
7925 struct ice_sw_recipe *recp_list;
7926 u8 lport = hw->port_info->lport;
7927 struct LIST_HEAD_TYPE l_head;
 /* Nothing to replay for an empty list */
7929 if (LIST_EMPTY(list_head))
7932 recp_list = &hw->switch_info->recp_list[recp_id];
7933 /* Move entries from the given list_head to a temporary l_head so that
7934 * they can be replayed. Otherwise when trying to re-add the same
7935 * filter, the function will return already exists
7937 LIST_REPLACE_INIT(list_head, &l_head);
7939 /* Mark the given list_head empty by reinitializing it so filters
7940 * could be added again by *handler
7942 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
7944 struct ice_fltr_list_entry f_entry;
7946 f_entry.fltr_info = itr->fltr_info;
 /* Rules referenced by a single VSI (and not VLAN lookups) can be
  * re-added directly; shared rules and VLAN rules are re-added one
  * VSI at a time below.
  */
7947 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
7948 status = ice_add_rule_internal(hw, recp_list, lport,
7950 if (status != ICE_SUCCESS)
7955 /* Add a filter per VSI separately */
 /* Walk the rule's VSI bitmap: find the next set bit, stop once an
  * invalid handle is returned (bitmap exhausted).
  */
7960 ice_find_first_bit(itr->vsi_list_info->vsi_map,
7962 if (!ice_is_vsi_valid(hw, vsi_handle))
 /* Clear the bit so the next lookup advances to the next VSI */
7965 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
7966 f_entry.fltr_info.vsi_handle = vsi_handle;
7967 f_entry.fltr_info.fwd_id.hw_vsi_id =
7968 ice_get_hw_vsi_num(hw, vsi_handle);
7969 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
 /* VLAN lookups have dedicated re-add handling */
7970 if (recp_id == ICE_SW_LKUP_VLAN)
7971 status = ice_add_vlan_internal(hw, recp_list,
7974 status = ice_add_rule_internal(hw, recp_list,
7977 if (status != ICE_SUCCESS)
7982 /* Clear the filter management list */
7983 ice_rem_sw_rule_info(hw, &l_head);
7988 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
7989 * @hw: pointer to the hardware structure
7991 * NOTE: This function does not clean up partially added filters on error.
7992 * It is up to caller of the function to issue a reset or fail early.
 *
 * Return: ICE_SUCCESS, or the first error reported by ice_replay_fltr().
7994 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
7996 struct ice_switch_info *sw = hw->switch_info;
7997 enum ice_status status = ICE_SUCCESS;
 /* Replay the filt_rules list of every recipe; bail out on the first
  * failure (see NOTE above about partial replay).
  */
8000 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8001 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
8003 status = ice_replay_fltr(hw, i, head);
8004 if (status != ICE_SUCCESS)
8011 * ice_replay_vsi_fltr - Replay filters for requested VSI
8012 * @hw: pointer to the hardware structure
8013 * @pi: pointer to port information structure
8014 * @sw: pointer to switch info struct for which function replays filters
8015 * @vsi_handle: driver VSI handle
8016 * @recp_id: Recipe ID for which rules need to be replayed
8017 * @list_head: list for which filters need to be replayed
8019 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
8020 * It is required to pass valid VSI handle.
 *
 * Return: ICE_SUCCESS, or the first error reported by the add helpers.
8022 static enum ice_status
8023 ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
8024 struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
8025 struct LIST_HEAD_TYPE *list_head)
8027 struct ice_fltr_mgmt_list_entry *itr;
8028 enum ice_status status = ICE_SUCCESS;
8029 struct ice_sw_recipe *recp_list;
 /* Nothing to replay for an empty list */
8032 if (LIST_EMPTY(list_head))
8034 recp_list = &sw->recp_list[recp_id];
8035 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
8037 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
8039 struct ice_fltr_list_entry f_entry;
8041 f_entry.fltr_info = itr->fltr_info;
 /* Fast path: rule owned by exactly this VSI and not a VLAN
  * lookup — re-add it as-is.
  */
8042 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
8043 itr->fltr_info.vsi_handle == vsi_handle) {
8044 /* update the src in case it is VSI num */
8045 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
8046 f_entry.fltr_info.src = hw_vsi_id;
8047 status = ice_add_rule_internal(hw, recp_list,
8050 if (status != ICE_SUCCESS)
 /* Shared rule: skip it unless this VSI is a member of the
  * rule's VSI list.
  */
8054 if (!itr->vsi_list_info ||
8055 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
8057 /* Clearing it so that the logic can add it back */
8058 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
8059 f_entry.fltr_info.vsi_handle = vsi_handle;
8060 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
8061 /* update the src in case it is VSI num */
8062 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
8063 f_entry.fltr_info.src = hw_vsi_id;
 /* VLAN lookups have dedicated re-add handling */
8064 if (recp_id == ICE_SW_LKUP_VLAN)
8065 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
8067 status = ice_add_rule_internal(hw, recp_list,
8070 if (status != ICE_SUCCESS)
8078 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
8079 * @hw: pointer to the hardware structure
8080 * @vsi_handle: driver VSI handle
8081 * @list_head: list for which filters need to be replayed
8083 * Replay the advanced rule for the given VSI.
 *
 * Return: ICE_SUCCESS, or the first error reported by ice_add_adv_rule().
8085 static enum ice_status
8086 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
8087 struct LIST_HEAD_TYPE *list_head)
8089 struct ice_rule_query_data added_entry = { 0 };
8090 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
8091 enum ice_status status = ICE_SUCCESS;
 /* Nothing to replay for an empty list */
8093 if (LIST_EMPTY(list_head))
8095 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
8097 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
8098 u16 lk_cnt = adv_fltr->lkups_cnt;
 /* Only replay rules whose switch action targets this VSI */
8100 if (vsi_handle != rinfo->sw_act.vsi_handle)
8102 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
8111 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
8112 * @hw: pointer to the hardware structure
8113 * @pi: pointer to port information structure
8114 * @vsi_handle: driver VSI handle
8116 * Replays filters for requested VSI via vsi_handle.
 *
 * Return: ICE_SUCCESS, or the first error reported by a replay helper.
8119 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
8122 struct ice_switch_info *sw = hw->switch_info;
8123 enum ice_status status;
8126 /* Update the recipes that were created */
8127 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8128 struct LIST_HEAD_TYPE *head;
8130 head = &sw->recp_list[i].filt_replay_rules;
 /* Advanced-rule recipes use the advanced replay path;
  * everything else goes through the regular filter replay.
  */
8131 if (!sw->recp_list[i].adv_rule)
8132 status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
8135 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
8136 if (status != ICE_SUCCESS)
8144 * ice_rm_sw_replay_rule_info - helper function to delete filter replay rules
8145 * @hw: pointer to the HW struct
8146 * @sw: pointer to switch info struct for which function removes filters
8148 * Deletes the filter replay rules for given switch
8150 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
8157 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
 /* Only recipes with a non-empty replay list need cleanup */
8158 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
8159 struct LIST_HEAD_TYPE *l_head;
8161 l_head = &sw->recp_list[i].filt_replay_rules;
 /* Advanced-rule lists and regular filter lists have
  * distinct bookkeeping-removal helpers.
  */
8162 if (!sw->recp_list[i].adv_rule)
8163 ice_rem_sw_rule_info(hw, l_head);
8165 ice_rem_adv_rule_info(hw, l_head);
8171 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
8172 * @hw: pointer to the HW struct
8174 * Deletes the filter replay rules.
8176 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
 /* Convenience wrapper: operate on the HW's own switch_info */
8178 ice_rm_sw_replay_rule_info(hw, hw->switch_info);