1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020 Intel Corporation
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
12 #define ICE_MAX_VLAN_ID 0xFFF
13 #define ICE_IPV4_NVGRE_PROTO_ID 0x002F
14 #define ICE_PPP_IPV6_PROTO_ID 0x0057
15 #define ICE_IPV6_ETHER_ID 0x86DD
17 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
18 * struct to configure any switch filter rules.
19 * {DA (6 bytes), SA(6 bytes),
20 * Ether type (2 bytes for header without VLAN tag) OR
21 * VLAN tag (4 bytes for header with VLAN tag) }
23 * Word on Hardcoded values
24 * byte 0 = 0x2: to identify it as locally administered DA MAC
25 * byte 6 = 0x2: to identify it as locally administered SA MAC
26 * byte 12 = 0x81 & byte 13 = 0x00:
27 * In case of VLAN filter first two bytes defines ether type (0x8100)
28 * and remaining two bytes are placeholder for programming a given VLAN ID
29 * In case of Ether type filter it is treated as header without VLAN tag
30 * and byte 12 and 13 is used to program a given Ether type instead
32 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
36 struct ice_dummy_pkt_offsets {
37 enum ice_protocol_type type;
38 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
41 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
44 { ICE_IPV4_OFOS, 14 },
49 { ICE_PROTOCOL_LAST, 0 },
52 static const u8 dummy_gre_tcp_packet[] = {
53 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
54 0x00, 0x00, 0x00, 0x00,
55 0x00, 0x00, 0x00, 0x00,
57 0x08, 0x00, /* ICE_ETYPE_OL 12 */
59 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
60 0x00, 0x00, 0x00, 0x00,
61 0x00, 0x2F, 0x00, 0x00,
62 0x00, 0x00, 0x00, 0x00,
63 0x00, 0x00, 0x00, 0x00,
65 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
66 0x00, 0x00, 0x00, 0x00,
68 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
69 0x00, 0x00, 0x00, 0x00,
70 0x00, 0x00, 0x00, 0x00,
73 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
74 0x00, 0x00, 0x00, 0x00,
75 0x00, 0x06, 0x00, 0x00,
76 0x00, 0x00, 0x00, 0x00,
77 0x00, 0x00, 0x00, 0x00,
79 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
80 0x00, 0x00, 0x00, 0x00,
81 0x00, 0x00, 0x00, 0x00,
82 0x50, 0x02, 0x20, 0x00,
83 0x00, 0x00, 0x00, 0x00
86 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
89 { ICE_IPV4_OFOS, 14 },
94 { ICE_PROTOCOL_LAST, 0 },
97 static const u8 dummy_gre_udp_packet[] = {
98 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
99 0x00, 0x00, 0x00, 0x00,
100 0x00, 0x00, 0x00, 0x00,
102 0x08, 0x00, /* ICE_ETYPE_OL 12 */
104 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
105 0x00, 0x00, 0x00, 0x00,
106 0x00, 0x2F, 0x00, 0x00,
107 0x00, 0x00, 0x00, 0x00,
108 0x00, 0x00, 0x00, 0x00,
110 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
111 0x00, 0x00, 0x00, 0x00,
113 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
114 0x00, 0x00, 0x00, 0x00,
115 0x00, 0x00, 0x00, 0x00,
118 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
119 0x00, 0x00, 0x00, 0x00,
120 0x00, 0x11, 0x00, 0x00,
121 0x00, 0x00, 0x00, 0x00,
122 0x00, 0x00, 0x00, 0x00,
124 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
125 0x00, 0x08, 0x00, 0x00,
128 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
130 { ICE_ETYPE_OL, 12 },
131 { ICE_IPV4_OFOS, 14 },
135 { ICE_VXLAN_GPE, 42 },
139 { ICE_PROTOCOL_LAST, 0 },
142 static const u8 dummy_udp_tun_tcp_packet[] = {
143 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
144 0x00, 0x00, 0x00, 0x00,
145 0x00, 0x00, 0x00, 0x00,
147 0x08, 0x00, /* ICE_ETYPE_OL 12 */
149 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
150 0x00, 0x01, 0x00, 0x00,
151 0x40, 0x11, 0x00, 0x00,
152 0x00, 0x00, 0x00, 0x00,
153 0x00, 0x00, 0x00, 0x00,
155 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
156 0x00, 0x46, 0x00, 0x00,
158 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
159 0x00, 0x00, 0x00, 0x00,
161 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
162 0x00, 0x00, 0x00, 0x00,
163 0x00, 0x00, 0x00, 0x00,
166 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
167 0x00, 0x01, 0x00, 0x00,
168 0x40, 0x06, 0x00, 0x00,
169 0x00, 0x00, 0x00, 0x00,
170 0x00, 0x00, 0x00, 0x00,
172 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
173 0x00, 0x00, 0x00, 0x00,
174 0x00, 0x00, 0x00, 0x00,
175 0x50, 0x02, 0x20, 0x00,
176 0x00, 0x00, 0x00, 0x00
179 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
181 { ICE_ETYPE_OL, 12 },
182 { ICE_IPV4_OFOS, 14 },
186 { ICE_VXLAN_GPE, 42 },
189 { ICE_UDP_ILOS, 84 },
190 { ICE_PROTOCOL_LAST, 0 },
193 static const u8 dummy_udp_tun_udp_packet[] = {
194 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
195 0x00, 0x00, 0x00, 0x00,
196 0x00, 0x00, 0x00, 0x00,
198 0x08, 0x00, /* ICE_ETYPE_OL 12 */
200 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
201 0x00, 0x01, 0x00, 0x00,
202 0x00, 0x11, 0x00, 0x00,
203 0x00, 0x00, 0x00, 0x00,
204 0x00, 0x00, 0x00, 0x00,
206 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
207 0x00, 0x3a, 0x00, 0x00,
209 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
210 0x00, 0x00, 0x00, 0x00,
212 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
213 0x00, 0x00, 0x00, 0x00,
214 0x00, 0x00, 0x00, 0x00,
217 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
218 0x00, 0x01, 0x00, 0x00,
219 0x00, 0x11, 0x00, 0x00,
220 0x00, 0x00, 0x00, 0x00,
221 0x00, 0x00, 0x00, 0x00,
223 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
224 0x00, 0x08, 0x00, 0x00,
227 /* offset info for MAC + IPv4 + UDP dummy packet */
228 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
230 { ICE_ETYPE_OL, 12 },
231 { ICE_IPV4_OFOS, 14 },
232 { ICE_UDP_ILOS, 34 },
233 { ICE_PROTOCOL_LAST, 0 },
236 /* Dummy packet for MAC + IPv4 + UDP */
237 static const u8 dummy_udp_packet[] = {
238 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
239 0x00, 0x00, 0x00, 0x00,
240 0x00, 0x00, 0x00, 0x00,
242 0x08, 0x00, /* ICE_ETYPE_OL 12 */
244 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
245 0x00, 0x01, 0x00, 0x00,
246 0x00, 0x11, 0x00, 0x00,
247 0x00, 0x00, 0x00, 0x00,
248 0x00, 0x00, 0x00, 0x00,
250 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
251 0x00, 0x08, 0x00, 0x00,
253 0x00, 0x00, /* 2 bytes for 4 byte alignment */
256 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
257 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
259 { ICE_ETYPE_OL, 12 },
260 { ICE_VLAN_OFOS, 14 },
261 { ICE_IPV4_OFOS, 18 },
262 { ICE_UDP_ILOS, 38 },
263 { ICE_PROTOCOL_LAST, 0 },
266 /* C-tag (801.1Q), IPv4:UDP dummy packet */
267 static const u8 dummy_vlan_udp_packet[] = {
268 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
269 0x00, 0x00, 0x00, 0x00,
270 0x00, 0x00, 0x00, 0x00,
272 0x81, 0x00, /* ICE_ETYPE_OL 12 */
274 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
276 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
277 0x00, 0x01, 0x00, 0x00,
278 0x00, 0x11, 0x00, 0x00,
279 0x00, 0x00, 0x00, 0x00,
280 0x00, 0x00, 0x00, 0x00,
282 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
283 0x00, 0x08, 0x00, 0x00,
285 0x00, 0x00, /* 2 bytes for 4 byte alignment */
288 /* offset info for MAC + IPv4 + TCP dummy packet */
289 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
291 { ICE_ETYPE_OL, 12 },
292 { ICE_IPV4_OFOS, 14 },
294 { ICE_PROTOCOL_LAST, 0 },
297 /* Dummy packet for MAC + IPv4 + TCP */
298 static const u8 dummy_tcp_packet[] = {
299 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
300 0x00, 0x00, 0x00, 0x00,
301 0x00, 0x00, 0x00, 0x00,
303 0x08, 0x00, /* ICE_ETYPE_OL 12 */
305 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
306 0x00, 0x01, 0x00, 0x00,
307 0x00, 0x06, 0x00, 0x00,
308 0x00, 0x00, 0x00, 0x00,
309 0x00, 0x00, 0x00, 0x00,
311 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
312 0x00, 0x00, 0x00, 0x00,
313 0x00, 0x00, 0x00, 0x00,
314 0x50, 0x00, 0x00, 0x00,
315 0x00, 0x00, 0x00, 0x00,
317 0x00, 0x00, /* 2 bytes for 4 byte alignment */
320 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
321 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
323 { ICE_ETYPE_OL, 12 },
324 { ICE_VLAN_OFOS, 14 },
325 { ICE_IPV4_OFOS, 18 },
327 { ICE_PROTOCOL_LAST, 0 },
330 /* C-tag (801.1Q), IPv4:TCP dummy packet */
331 static const u8 dummy_vlan_tcp_packet[] = {
332 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
333 0x00, 0x00, 0x00, 0x00,
334 0x00, 0x00, 0x00, 0x00,
336 0x81, 0x00, /* ICE_ETYPE_OL 12 */
338 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
340 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
341 0x00, 0x01, 0x00, 0x00,
342 0x00, 0x06, 0x00, 0x00,
343 0x00, 0x00, 0x00, 0x00,
344 0x00, 0x00, 0x00, 0x00,
346 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
347 0x00, 0x00, 0x00, 0x00,
348 0x00, 0x00, 0x00, 0x00,
349 0x50, 0x00, 0x00, 0x00,
350 0x00, 0x00, 0x00, 0x00,
352 0x00, 0x00, /* 2 bytes for 4 byte alignment */
355 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
357 { ICE_ETYPE_OL, 12 },
358 { ICE_IPV6_OFOS, 14 },
360 { ICE_PROTOCOL_LAST, 0 },
363 static const u8 dummy_tcp_ipv6_packet[] = {
364 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
365 0x00, 0x00, 0x00, 0x00,
366 0x00, 0x00, 0x00, 0x00,
368 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
370 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
371 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
372 0x00, 0x00, 0x00, 0x00,
373 0x00, 0x00, 0x00, 0x00,
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00,
379 0x00, 0x00, 0x00, 0x00,
381 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
382 0x00, 0x00, 0x00, 0x00,
383 0x00, 0x00, 0x00, 0x00,
384 0x50, 0x00, 0x00, 0x00,
385 0x00, 0x00, 0x00, 0x00,
387 0x00, 0x00, /* 2 bytes for 4 byte alignment */
390 /* C-tag (802.1Q): IPv6 + TCP */
391 static const struct ice_dummy_pkt_offsets
392 dummy_vlan_tcp_ipv6_packet_offsets[] = {
394 { ICE_ETYPE_OL, 12 },
395 { ICE_VLAN_OFOS, 14 },
396 { ICE_IPV6_OFOS, 18 },
398 { ICE_PROTOCOL_LAST, 0 },
401 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
402 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
403 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
404 0x00, 0x00, 0x00, 0x00,
405 0x00, 0x00, 0x00, 0x00,
407 0x81, 0x00, /* ICE_ETYPE_OL 12 */
409 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
411 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
412 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
413 0x00, 0x00, 0x00, 0x00,
414 0x00, 0x00, 0x00, 0x00,
415 0x00, 0x00, 0x00, 0x00,
416 0x00, 0x00, 0x00, 0x00,
417 0x00, 0x00, 0x00, 0x00,
418 0x00, 0x00, 0x00, 0x00,
419 0x00, 0x00, 0x00, 0x00,
420 0x00, 0x00, 0x00, 0x00,
422 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
423 0x00, 0x00, 0x00, 0x00,
424 0x00, 0x00, 0x00, 0x00,
425 0x50, 0x00, 0x00, 0x00,
426 0x00, 0x00, 0x00, 0x00,
428 0x00, 0x00, /* 2 bytes for 4 byte alignment */
432 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
434 { ICE_ETYPE_OL, 12 },
435 { ICE_IPV6_OFOS, 14 },
436 { ICE_UDP_ILOS, 54 },
437 { ICE_PROTOCOL_LAST, 0 },
440 /* IPv6 + UDP dummy packet */
441 static const u8 dummy_udp_ipv6_packet[] = {
442 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
443 0x00, 0x00, 0x00, 0x00,
444 0x00, 0x00, 0x00, 0x00,
446 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
448 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
449 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
450 0x00, 0x00, 0x00, 0x00,
451 0x00, 0x00, 0x00, 0x00,
452 0x00, 0x00, 0x00, 0x00,
453 0x00, 0x00, 0x00, 0x00,
454 0x00, 0x00, 0x00, 0x00,
455 0x00, 0x00, 0x00, 0x00,
456 0x00, 0x00, 0x00, 0x00,
457 0x00, 0x00, 0x00, 0x00,
459 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
460 0x00, 0x08, 0x00, 0x00,
462 0x00, 0x00, /* 2 bytes for 4 byte alignment */
465 /* C-tag (802.1Q): IPv6 + UDP */
466 static const struct ice_dummy_pkt_offsets
467 dummy_vlan_udp_ipv6_packet_offsets[] = {
469 { ICE_ETYPE_OL, 12 },
470 { ICE_VLAN_OFOS, 14 },
471 { ICE_IPV6_OFOS, 18 },
472 { ICE_UDP_ILOS, 58 },
473 { ICE_PROTOCOL_LAST, 0 },
476 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
477 static const u8 dummy_vlan_udp_ipv6_packet[] = {
478 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
479 0x00, 0x00, 0x00, 0x00,
480 0x00, 0x00, 0x00, 0x00,
482 0x81, 0x00, /* ICE_ETYPE_OL 12 */
484 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
486 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
487 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
488 0x00, 0x00, 0x00, 0x00,
489 0x00, 0x00, 0x00, 0x00,
490 0x00, 0x00, 0x00, 0x00,
491 0x00, 0x00, 0x00, 0x00,
492 0x00, 0x00, 0x00, 0x00,
493 0x00, 0x00, 0x00, 0x00,
494 0x00, 0x00, 0x00, 0x00,
495 0x00, 0x00, 0x00, 0x00,
497 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
498 0x00, 0x08, 0x00, 0x00,
500 0x00, 0x00, /* 2 bytes for 4 byte alignment */
503 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
505 { ICE_IPV4_OFOS, 14 },
508 { ICE_PROTOCOL_LAST, 0 },
511 static const u8 dummy_udp_gtp_packet[] = {
512 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
513 0x00, 0x00, 0x00, 0x00,
514 0x00, 0x00, 0x00, 0x00,
517 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
518 0x00, 0x00, 0x00, 0x00,
519 0x00, 0x11, 0x00, 0x00,
520 0x00, 0x00, 0x00, 0x00,
521 0x00, 0x00, 0x00, 0x00,
523 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
524 0x00, 0x1c, 0x00, 0x00,
526 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
527 0x00, 0x00, 0x00, 0x00,
528 0x00, 0x00, 0x00, 0x85,
530 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
531 0x00, 0x00, 0x00, 0x00,
534 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
536 { ICE_ETYPE_OL, 12 },
537 { ICE_VLAN_OFOS, 14},
539 { ICE_PROTOCOL_LAST, 0 },
542 static const u8 dummy_pppoe_ipv4_packet[] = {
543 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
544 0x00, 0x00, 0x00, 0x00,
545 0x00, 0x00, 0x00, 0x00,
547 0x81, 0x00, /* ICE_ETYPE_OL 12 */
549 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
551 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
554 0x00, 0x21, /* PPP Link Layer 24 */
556 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
557 0x00, 0x00, 0x00, 0x00,
558 0x00, 0x00, 0x00, 0x00,
559 0x00, 0x00, 0x00, 0x00,
560 0x00, 0x00, 0x00, 0x00,
562 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
565 static const u8 dummy_pppoe_ipv6_packet[] = {
566 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
567 0x00, 0x00, 0x00, 0x00,
568 0x00, 0x00, 0x00, 0x00,
570 0x81, 0x00, /* ICE_ETYPE_OL 12 */
572 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
574 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
577 0x00, 0x57, /* PPP Link Layer 24 */
579 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
580 0x00, 0x00, 0x3b, 0x00,
581 0x00, 0x00, 0x00, 0x00,
582 0x00, 0x00, 0x00, 0x00,
583 0x00, 0x00, 0x00, 0x00,
584 0x00, 0x00, 0x00, 0x00,
585 0x00, 0x00, 0x00, 0x00,
586 0x00, 0x00, 0x00, 0x00,
587 0x00, 0x00, 0x00, 0x00,
588 0x00, 0x00, 0x00, 0x00,
590 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
593 static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
595 { ICE_IPV4_OFOS, 14 },
597 { ICE_PROTOCOL_LAST, 0 },
600 static const u8 dummy_ipv4_esp_pkt[] = {
601 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
602 0x00, 0x00, 0x00, 0x00,
603 0x00, 0x00, 0x00, 0x00,
606 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 14 */
607 0x00, 0x00, 0x40, 0x00,
608 0x40, 0x32, 0x00, 0x00,
609 0x00, 0x00, 0x00, 0x00,
610 0x00, 0x00, 0x00, 0x00,
612 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
613 0x00, 0x00, 0x00, 0x00,
614 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
617 static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
619 { ICE_IPV6_OFOS, 14 },
621 { ICE_PROTOCOL_LAST, 0 },
624 static const u8 dummy_ipv6_esp_pkt[] = {
625 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
626 0x00, 0x00, 0x00, 0x00,
627 0x00, 0x00, 0x00, 0x00,
630 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
631 0x00, 0x08, 0x32, 0x00, /* Next header ESP */
632 0x00, 0x00, 0x00, 0x00,
633 0x00, 0x00, 0x00, 0x00,
634 0x00, 0x00, 0x00, 0x00,
635 0x00, 0x00, 0x00, 0x00,
636 0x00, 0x00, 0x00, 0x00,
637 0x00, 0x00, 0x00, 0x00,
638 0x00, 0x00, 0x00, 0x00,
639 0x00, 0x00, 0x00, 0x00,
641 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
642 0x00, 0x00, 0x00, 0x00,
643 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
646 static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
648 { ICE_IPV4_OFOS, 14 },
650 { ICE_PROTOCOL_LAST, 0 },
653 static const u8 dummy_ipv4_ah_pkt[] = {
654 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
655 0x00, 0x00, 0x00, 0x00,
656 0x00, 0x00, 0x00, 0x00,
659 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
660 0x00, 0x00, 0x40, 0x00,
661 0x40, 0x33, 0x00, 0x00,
662 0x00, 0x00, 0x00, 0x00,
663 0x00, 0x00, 0x00, 0x00,
665 0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
666 0x00, 0x00, 0x00, 0x00,
667 0x00, 0x00, 0x00, 0x00,
668 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
671 static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
673 { ICE_IPV6_OFOS, 14 },
675 { ICE_PROTOCOL_LAST, 0 },
678 static const u8 dummy_ipv6_ah_pkt[] = {
679 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
680 0x00, 0x00, 0x00, 0x00,
681 0x00, 0x00, 0x00, 0x00,
684 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
685 0x00, 0x0c, 0x33, 0x00, /* Next header AH */
686 0x00, 0x00, 0x00, 0x00,
687 0x00, 0x00, 0x00, 0x00,
688 0x00, 0x00, 0x00, 0x00,
689 0x00, 0x00, 0x00, 0x00,
690 0x00, 0x00, 0x00, 0x00,
691 0x00, 0x00, 0x00, 0x00,
692 0x00, 0x00, 0x00, 0x00,
693 0x00, 0x00, 0x00, 0x00,
695 0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
696 0x00, 0x00, 0x00, 0x00,
697 0x00, 0x00, 0x00, 0x00,
698 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
701 static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
703 { ICE_IPV4_OFOS, 14 },
704 { ICE_UDP_ILOS, 34 },
706 { ICE_PROTOCOL_LAST, 0 },
709 static const u8 dummy_ipv4_nat_pkt[] = {
710 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
711 0x00, 0x00, 0x00, 0x00,
712 0x00, 0x00, 0x00, 0x00,
715 0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_IL 14 */
716 0x00, 0x00, 0x40, 0x00,
717 0x40, 0x11, 0x00, 0x00,
718 0x00, 0x00, 0x00, 0x00,
719 0x00, 0x00, 0x00, 0x00,
721 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34 */
722 0x00, 0x00, 0x00, 0x00,
724 0x00, 0x00, 0x00, 0x00,
725 0x00, 0x00, 0x00, 0x00,
726 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
729 static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
731 { ICE_IPV6_OFOS, 14 },
732 { ICE_UDP_ILOS, 54 },
734 { ICE_PROTOCOL_LAST, 0 },
737 static const u8 dummy_ipv6_nat_pkt[] = {
738 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
739 0x00, 0x00, 0x00, 0x00,
740 0x00, 0x00, 0x00, 0x00,
743 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
744 0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */
745 0x00, 0x00, 0x00, 0x00,
746 0x00, 0x00, 0x00, 0x00,
747 0x00, 0x00, 0x00, 0x00,
748 0x00, 0x00, 0x00, 0x00,
749 0x00, 0x00, 0x00, 0x00,
750 0x00, 0x00, 0x00, 0x00,
751 0x00, 0x00, 0x00, 0x00,
752 0x00, 0x00, 0x00, 0x00,
754 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54 */
755 0x00, 0x00, 0x00, 0x00,
757 0x00, 0x00, 0x00, 0x00,
758 0x00, 0x00, 0x00, 0x00,
759 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
763 static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
765 { ICE_IPV4_OFOS, 14 },
767 { ICE_PROTOCOL_LAST, 0 },
770 static const u8 dummy_ipv4_l2tpv3_pkt[] = {
771 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
772 0x00, 0x00, 0x00, 0x00,
773 0x00, 0x00, 0x00, 0x00,
776 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
777 0x00, 0x00, 0x40, 0x00,
778 0x40, 0x73, 0x00, 0x00,
779 0x00, 0x00, 0x00, 0x00,
780 0x00, 0x00, 0x00, 0x00,
782 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
783 0x00, 0x00, 0x00, 0x00,
784 0x00, 0x00, 0x00, 0x00,
785 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
788 static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
790 { ICE_IPV6_OFOS, 14 },
792 { ICE_PROTOCOL_LAST, 0 },
795 static const u8 dummy_ipv6_l2tpv3_pkt[] = {
796 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
797 0x00, 0x00, 0x00, 0x00,
798 0x00, 0x00, 0x00, 0x00,
801 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
802 0x00, 0x0c, 0x73, 0x40,
803 0x00, 0x00, 0x00, 0x00,
804 0x00, 0x00, 0x00, 0x00,
805 0x00, 0x00, 0x00, 0x00,
806 0x00, 0x00, 0x00, 0x00,
807 0x00, 0x00, 0x00, 0x00,
808 0x00, 0x00, 0x00, 0x00,
809 0x00, 0x00, 0x00, 0x00,
810 0x00, 0x00, 0x00, 0x00,
812 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
813 0x00, 0x00, 0x00, 0x00,
814 0x00, 0x00, 0x00, 0x00,
815 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
818 /* this is a recipe to profile association bitmap */
819 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
820 ICE_MAX_NUM_PROFILES);
822 /* this is a profile to recipe association bitmap */
823 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
824 ICE_MAX_NUM_RECIPES);
826 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
829 * ice_collect_result_idx - copy result index values
830 * @buf: buffer that contains the result index
831 * @recp: the recipe struct to copy data into
833 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
834 struct ice_sw_recipe *recp)
836 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
837 ice_set_bit(buf->content.result_indx &
838 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
842 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
843 * @hw: pointer to hardware structure
844 * @recps: struct that we need to populate
845 * @rid: recipe ID that we are populating
846 * @refresh_required: true if we should get recipe to profile mapping from FW
848 * This function is used to populate all the necessary entries into our
849 * bookkeeping so that we have a current list of all the recipes that are
850 * programmed in the firmware.
852 static enum ice_status
853 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
854 bool *refresh_required)
856 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
857 struct ice_aqc_recipe_data_elem *tmp;
858 u16 num_recps = ICE_MAX_NUM_RECIPES;
859 struct ice_prot_lkup_ext *lkup_exts;
860 enum ice_status status;
864 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
866 /* we need a buffer big enough to accommodate all the recipes */
867 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
868 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
870 return ICE_ERR_NO_MEMORY;
872 tmp[0].recipe_indx = rid;
873 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
874 /* non-zero status meaning recipe doesn't exist */
878 /* Get recipe to profile map so that we can get the fv from lkups that
879 * we read for a recipe from FW. Since we want to minimize the number of
880 * times we make this FW call, just make one call and cache the copy
881 * until a new recipe is added. This operation is only required the
882 * first time to get the changes from FW. Then to search existing
883 * entries we don't need to update the cache again until another recipe
886 if (*refresh_required) {
887 ice_get_recp_to_prof_map(hw);
888 *refresh_required = false;
891 /* Start populating all the entries for recps[rid] based on lkups from
892 * firmware. Note that we are only creating the root recipe in our
895 lkup_exts = &recps[rid].lkup_exts;
897 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
898 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
899 struct ice_recp_grp_entry *rg_entry;
900 u8 i, prof, idx, prot = 0;
904 rg_entry = (struct ice_recp_grp_entry *)
905 ice_malloc(hw, sizeof(*rg_entry));
907 status = ICE_ERR_NO_MEMORY;
911 idx = root_bufs.recipe_indx;
912 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
914 /* Mark all result indices in this chain */
915 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
916 ice_set_bit(root_bufs.content.result_indx &
917 ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
919 /* get the first profile that is associated with rid */
920 prof = ice_find_first_bit(recipe_to_profile[idx],
921 ICE_MAX_NUM_PROFILES);
922 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
923 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
925 rg_entry->fv_idx[i] = lkup_indx;
926 rg_entry->fv_mask[i] =
927 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
929 /* If the recipe is a chained recipe then all its
930 * child recipe's result will have a result index.
931 * To fill fv_words we should not use those result
932 * index, we only need the protocol ids and offsets.
933 * We will skip all the fv_idx which stores result
934 * index in them. We also need to skip any fv_idx which
935 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
936 * valid offset value.
938 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
939 rg_entry->fv_idx[i]) ||
940 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
941 rg_entry->fv_idx[i] == 0)
944 ice_find_prot_off(hw, ICE_BLK_SW, prof,
945 rg_entry->fv_idx[i], &prot, &off);
946 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
947 lkup_exts->fv_words[fv_word_idx].off = off;
948 lkup_exts->field_mask[fv_word_idx] =
949 rg_entry->fv_mask[i];
952 /* populate rg_list with the data from the child entry of this
955 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
957 /* Propagate some data to the recipe database */
958 recps[idx].is_root = !!is_root;
959 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
960 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
961 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
962 recps[idx].chain_idx = root_bufs.content.result_indx &
963 ~ICE_AQ_RECIPE_RESULT_EN;
964 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
966 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
972 /* Only do the following for root recipes entries */
973 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
974 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
975 recps[idx].root_rid = root_bufs.content.rid &
976 ~ICE_AQ_RECIPE_ID_IS_ROOT;
977 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
980 /* Complete initialization of the root recipe entry */
981 lkup_exts->n_val_words = fv_word_idx;
982 recps[rid].big_recp = (num_recps > 1);
983 recps[rid].n_grp_count = (u8)num_recps;
984 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
985 ice_memdup(hw, tmp, recps[rid].n_grp_count *
986 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
987 if (!recps[rid].root_buf)
990 /* Copy result indexes */
991 ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
992 recps[rid].recp_created = true;
1000 * ice_get_recp_to_prof_map - updates recipe to profile mapping
1001 * @hw: pointer to hardware structure
1003 * This function is used to populate recipe_to_profile matrix where index to
1004 * this array is the recipe ID and the element is the mapping of which profiles
1005 * is this recipe mapped to.
1008 ice_get_recp_to_prof_map(struct ice_hw *hw)
1010 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
1013 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
1016 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
1017 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
1018 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
1020 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
1021 ICE_MAX_NUM_RECIPES);
1022 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
1023 if (ice_is_bit_set(r_bitmap, j))
1024 ice_set_bit(i, recipe_to_profile[j]);
1029 * ice_init_def_sw_recp - initialize the recipe book keeping tables
1030 * @hw: pointer to the HW struct
1031 * @recp_list: pointer to sw recipe list
1033 * Allocate memory for the entire recipe table and initialize the structures/
1034 * entries corresponding to basic recipes.
1037 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
1039 struct ice_sw_recipe *recps;
1042 recps = (struct ice_sw_recipe *)
1043 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
1045 return ICE_ERR_NO_MEMORY;
1047 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
1048 recps[i].root_rid = i;
1049 INIT_LIST_HEAD(&recps[i].filt_rules);
1050 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
1051 INIT_LIST_HEAD(&recps[i].rg_list);
1052 ice_init_lock(&recps[i].filt_rule_lock);
1061 * ice_aq_get_sw_cfg - get switch configuration
1062 * @hw: pointer to the hardware structure
1063 * @buf: pointer to the result buffer
1064 * @buf_size: length of the buffer available for response
1065 * @req_desc: pointer to requested descriptor
1066 * @num_elems: pointer to number of elements
1067 * @cd: pointer to command details structure or NULL
1069 * Get switch configuration (0x0200) to be placed in 'buff'.
1070 * This admin command returns information such as initial VSI/port number
1071 * and switch ID it belongs to.
1073 * NOTE: *req_desc is both an input/output parameter.
1074 * The caller of this function first calls this function with *request_desc set
1075 * to 0. If the response from f/w has *req_desc set to 0, all the switch
1076 * configuration information has been returned; if non-zero (meaning not all
1077 * the information was returned), the caller should call this function again
1078 * with *req_desc set to the previous value returned by f/w to get the
1079 * next block of switch configuration information.
1081 * *num_elems is output only parameter. This reflects the number of elements
1082 * in response buffer. The caller of this function to use *num_elems while
1083 * parsing the response buffer.
1085 static enum ice_status
1086 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
1087 u16 buf_size, u16 *req_desc, u16 *num_elems,
1088 struct ice_sq_cd *cd)
1090 struct ice_aqc_get_sw_cfg *cmd;
1091 enum ice_status status;
1092 struct ice_aq_desc desc;
1094 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
1095 cmd = &desc.params.get_sw_conf;
1096 cmd->element = CPU_TO_LE16(*req_desc);
1098 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1100 *req_desc = LE16_TO_CPU(cmd->element);
1101 *num_elems = LE16_TO_CPU(cmd->num_elems);
1108 * ice_alloc_sw - allocate resources specific to switch
1109 * @hw: pointer to the HW struct
1110 * @ena_stats: true to turn on VEB stats
1111 * @shared_res: true for shared resource, false for dedicated resource
1112 * @sw_id: switch ID returned
1113 * @counter_id: VEB counter ID returned
1115 * allocates switch resources (SWID and VEB counter) (0x0208)
1118 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
1121 struct ice_aqc_alloc_free_res_elem *sw_buf;
1122 struct ice_aqc_res_elem *sw_ele;
1123 enum ice_status status;
1126 buf_len = sizeof(*sw_buf);
1127 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1128 ice_malloc(hw, buf_len);
1130 return ICE_ERR_NO_MEMORY;
1132 /* Prepare buffer for switch ID.
1133 * The number of resource entries in buffer is passed as 1 since only a
1134 * single switch/VEB instance is allocated, and hence a single sw_id
1137 sw_buf->num_elems = CPU_TO_LE16(1);
1139 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
1140 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
1141 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
1143 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1144 ice_aqc_opc_alloc_res, NULL);
1147 goto ice_alloc_sw_exit;
1149 sw_ele = &sw_buf->elem[0];
1150 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
1153 /* Prepare buffer for VEB Counter */
1154 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
1155 struct ice_aqc_alloc_free_res_elem *counter_buf;
1156 struct ice_aqc_res_elem *counter_ele;
1158 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1159 ice_malloc(hw, buf_len);
1161 status = ICE_ERR_NO_MEMORY;
1162 goto ice_alloc_sw_exit;
1165 /* The number of resource entries in buffer is passed as 1 since
1166 * only a single switch/VEB instance is allocated, and hence a
1167 * single VEB counter is requested.
1169 counter_buf->num_elems = CPU_TO_LE16(1);
1170 counter_buf->res_type =
1171 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
1172 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
1173 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1177 ice_free(hw, counter_buf);
1178 goto ice_alloc_sw_exit;
1180 counter_ele = &counter_buf->elem[0];
1181 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
1182 ice_free(hw, counter_buf);
1186 ice_free(hw, sw_buf);
1191 * ice_free_sw - free resources specific to switch
1192 * @hw: pointer to the HW struct
1193 * @sw_id: switch ID returned
1194 * @counter_id: VEB counter ID returned
1196 * free switch resources (SWID and VEB counter) (0x0209)
1198 * NOTE: This function frees multiple resources. It continues
1199 * releasing other resources even after it encounters error.
1200 * The error code returned is the last error it encountered.
1202 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
1204 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
1205 enum ice_status status, ret_status;
1208 buf_len = sizeof(*sw_buf);
1209 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1210 ice_malloc(hw, buf_len);
1212 return ICE_ERR_NO_MEMORY;
1214 /* Prepare buffer to free for switch ID res.
1215 * The number of resource entries in buffer is passed as 1 since only a
1216 * single switch/VEB instance is freed, and hence a single sw_id
1219 sw_buf->num_elems = CPU_TO_LE16(1);
1220 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
1221 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
1223 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1224 ice_aqc_opc_free_res, NULL);
1227 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1229 /* Prepare buffer to free for VEB Counter resource */
1230 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1231 ice_malloc(hw, buf_len);
1233 ice_free(hw, sw_buf);
1234 return ICE_ERR_NO_MEMORY;
1237 /* The number of resource entries in buffer is passed as 1 since only a
1238 * single switch/VEB instance is freed, and hence a single VEB counter
1241 counter_buf->num_elems = CPU_TO_LE16(1);
1242 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
1243 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
1245 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1246 ice_aqc_opc_free_res, NULL);
1248 ice_debug(hw, ICE_DBG_SW,
1249 "VEB counter resource could not be freed\n");
1250 ret_status = status;
1253 ice_free(hw, counter_buf);
1254 ice_free(hw, sw_buf);
1260 * @hw: pointer to the HW struct
1261 * @vsi_ctx: pointer to a VSI context struct
1262 * @cd: pointer to command details structure or NULL
1264 * Add a VSI context to the hardware (0x0210)
1267 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1268 struct ice_sq_cd *cd)
1270 struct ice_aqc_add_update_free_vsi_resp *res;
1271 struct ice_aqc_add_get_update_free_vsi *cmd;
1272 struct ice_aq_desc desc;
1273 enum ice_status status;
1275 cmd = &desc.params.vsi_cmd;
1276 res = &desc.params.add_update_free_vsi_res;
1278 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
1280 if (!vsi_ctx->alloc_from_pool)
1281 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
1282 ICE_AQ_VSI_IS_VALID);
1284 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
1286 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1288 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1289 sizeof(vsi_ctx->info), cd);
1292 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1293 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
1294 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
1302 * @hw: pointer to the HW struct
1303 * @vsi_ctx: pointer to a VSI context struct
1304 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1305 * @cd: pointer to command details structure or NULL
1307 * Free VSI context info from hardware (0x0213)
1310 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1311 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1313 struct ice_aqc_add_update_free_vsi_resp *resp;
1314 struct ice_aqc_add_get_update_free_vsi *cmd;
1315 struct ice_aq_desc desc;
1316 enum ice_status status;
1318 cmd = &desc.params.vsi_cmd;
1319 resp = &desc.params.add_update_free_vsi_res;
1321 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1323 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1325 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
1327 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1329 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1330 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1338 * @hw: pointer to the HW struct
1339 * @vsi_ctx: pointer to a VSI context struct
1340 * @cd: pointer to command details structure or NULL
1342 * Update VSI context in the hardware (0x0211)
1345 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1346 struct ice_sq_cd *cd)
1348 struct ice_aqc_add_update_free_vsi_resp *resp;
1349 struct ice_aqc_add_get_update_free_vsi *cmd;
1350 struct ice_aq_desc desc;
1351 enum ice_status status;
1353 cmd = &desc.params.vsi_cmd;
1354 resp = &desc.params.add_update_free_vsi_res;
1356 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1358 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1360 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1362 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1363 sizeof(vsi_ctx->info), cd);
1366 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1367 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1374 * ice_is_vsi_valid - check whether the VSI is valid or not
1375 * @hw: pointer to the HW struct
1376 * @vsi_handle: VSI handle
1378 * check whether the VSI is valid or not
1380 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1382 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1386 * ice_get_hw_vsi_num - return the HW VSI number
1387 * @hw: pointer to the HW struct
1388 * @vsi_handle: VSI handle
1390 * return the HW VSI number
1391 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1393 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1395 return hw->vsi_ctx[vsi_handle]->vsi_num;
1399 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1400 * @hw: pointer to the HW struct
1401 * @vsi_handle: VSI handle
1403 * return the VSI context entry for a given VSI handle
1405 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1407 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1411 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1412 * @hw: pointer to the HW struct
1413 * @vsi_handle: VSI handle
1414 * @vsi: VSI context pointer
1416 * save the VSI context entry for a given VSI handle
1419 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1421 hw->vsi_ctx[vsi_handle] = vsi;
1425 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1426 * @hw: pointer to the HW struct
1427 * @vsi_handle: VSI handle
1429 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1431 struct ice_vsi_ctx *vsi;
1434 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1437 ice_for_each_traffic_class(i) {
1438 if (vsi->lan_q_ctx[i]) {
1439 ice_free(hw, vsi->lan_q_ctx[i]);
1440 vsi->lan_q_ctx[i] = NULL;
1446 * ice_clear_vsi_ctx - clear the VSI context entry
1447 * @hw: pointer to the HW struct
1448 * @vsi_handle: VSI handle
1450 * clear the VSI context entry
1452 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1454 struct ice_vsi_ctx *vsi;
1456 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1458 ice_clear_vsi_q_ctx(hw, vsi_handle);
1460 hw->vsi_ctx[vsi_handle] = NULL;
1465 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1466 * @hw: pointer to the HW struct
1468 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1472 for (i = 0; i < ICE_MAX_VSI; i++)
1473 ice_clear_vsi_ctx(hw, i);
1477 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1478 * @hw: pointer to the HW struct
1479 * @vsi_handle: unique VSI handle provided by drivers
1480 * @vsi_ctx: pointer to a VSI context struct
1481 * @cd: pointer to command details structure or NULL
1483 * Add a VSI context to the hardware also add it into the VSI handle list.
1484 * If this function gets called after reset for existing VSIs then update
1485 * with the new HW VSI number in the corresponding VSI handle list entry.
1488 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1489 struct ice_sq_cd *cd)
1491 struct ice_vsi_ctx *tmp_vsi_ctx;
1492 enum ice_status status;
1494 if (vsi_handle >= ICE_MAX_VSI)
1495 return ICE_ERR_PARAM;
1496 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1499 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1501 /* Create a new VSI context */
1502 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1503 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
1505 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1506 return ICE_ERR_NO_MEMORY;
1508 *tmp_vsi_ctx = *vsi_ctx;
1510 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1512 /* update with new HW VSI num */
1513 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1520 * ice_free_vsi- free VSI context from hardware and VSI handle list
1521 * @hw: pointer to the HW struct
1522 * @vsi_handle: unique VSI handle
1523 * @vsi_ctx: pointer to a VSI context struct
1524 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1525 * @cd: pointer to command details structure or NULL
1527 * Free VSI context info from hardware as well as from VSI handle list
1530 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1531 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1533 enum ice_status status;
1535 if (!ice_is_vsi_valid(hw, vsi_handle))
1536 return ICE_ERR_PARAM;
1537 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1538 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1540 ice_clear_vsi_ctx(hw, vsi_handle);
1546 * @hw: pointer to the HW struct
1547 * @vsi_handle: unique VSI handle
1548 * @vsi_ctx: pointer to a VSI context struct
1549 * @cd: pointer to command details structure or NULL
1551 * Update VSI context in the hardware
1554 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1555 struct ice_sq_cd *cd)
1557 if (!ice_is_vsi_valid(hw, vsi_handle))
1558 return ICE_ERR_PARAM;
1559 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1560 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1564 * ice_aq_get_vsi_params
1565 * @hw: pointer to the HW struct
1566 * @vsi_ctx: pointer to a VSI context struct
1567 * @cd: pointer to command details structure or NULL
1569 * Get VSI context info from hardware (0x0212)
1572 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1573 struct ice_sq_cd *cd)
1575 struct ice_aqc_add_get_update_free_vsi *cmd;
1576 struct ice_aqc_get_vsi_resp *resp;
1577 struct ice_aq_desc desc;
1578 enum ice_status status;
1580 cmd = &desc.params.vsi_cmd;
1581 resp = &desc.params.get_vsi_resp;
1583 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1585 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1587 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1588 sizeof(vsi_ctx->info), cd);
1590 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1592 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1593 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1600 * ice_aq_add_update_mir_rule - add/update a mirror rule
1601 * @hw: pointer to the HW struct
1602 * @rule_type: Rule Type
1603 * @dest_vsi: VSI number to which packets will be mirrored
1604 * @count: length of the list
1605 * @mr_buf: buffer for list of mirrored VSI numbers
1606 * @cd: pointer to command details structure or NULL
1609 * Add/Update Mirror Rule (0x260).
1612 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1613 u16 count, struct ice_mir_rule_buf *mr_buf,
1614 struct ice_sq_cd *cd, u16 *rule_id)
1616 struct ice_aqc_add_update_mir_rule *cmd;
1617 struct ice_aq_desc desc;
1618 enum ice_status status;
1619 __le16 *mr_list = NULL;
1622 switch (rule_type) {
1623 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1624 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1625 /* Make sure count and mr_buf are set for these rule_types */
1626 if (!(count && mr_buf))
1627 return ICE_ERR_PARAM;
1629 buf_size = count * sizeof(__le16);
1630 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1632 return ICE_ERR_NO_MEMORY;
1634 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1635 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1636 /* Make sure count and mr_buf are not set for these
1639 if (count || mr_buf)
1640 return ICE_ERR_PARAM;
1643 ice_debug(hw, ICE_DBG_SW,
1644 "Error due to unsupported rule_type %u\n", rule_type);
1645 return ICE_ERR_OUT_OF_RANGE;
1648 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1650 /* Pre-process 'mr_buf' items for add/update of virtual port
1651 * ingress/egress mirroring (but not physical port ingress/egress
1657 for (i = 0; i < count; i++) {
1660 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1662 /* Validate specified VSI number, make sure it is less
1663 * than ICE_MAX_VSI, if not return with error.
1665 if (id >= ICE_MAX_VSI) {
1666 ice_debug(hw, ICE_DBG_SW,
1667 "Error VSI index (%u) out-of-range\n",
1669 ice_free(hw, mr_list);
1670 return ICE_ERR_OUT_OF_RANGE;
1673 /* add VSI to mirror rule */
1676 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1677 else /* remove VSI from mirror rule */
1678 mr_list[i] = CPU_TO_LE16(id);
1682 cmd = &desc.params.add_update_rule;
1683 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1684 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1685 ICE_AQC_RULE_ID_VALID_M);
1686 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1687 cmd->num_entries = CPU_TO_LE16(count);
1688 cmd->dest = CPU_TO_LE16(dest_vsi);
1690 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
1692 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1694 ice_free(hw, mr_list);
1700 * ice_aq_delete_mir_rule - delete a mirror rule
1701 * @hw: pointer to the HW struct
1702 * @rule_id: Mirror rule ID (to be deleted)
1703 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1704 * otherwise it is returned to the shared pool
1705 * @cd: pointer to command details structure or NULL
1707 * Delete Mirror Rule (0x261).
1710 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1711 struct ice_sq_cd *cd)
1713 struct ice_aqc_delete_mir_rule *cmd;
1714 struct ice_aq_desc desc;
1716 /* rule_id should be in the range 0...63 */
1717 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1718 return ICE_ERR_OUT_OF_RANGE;
1720 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1722 cmd = &desc.params.del_rule;
1723 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1724 cmd->rule_id = CPU_TO_LE16(rule_id);
1727 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1729 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1733 * ice_aq_alloc_free_vsi_list
1734 * @hw: pointer to the HW struct
1735 * @vsi_list_id: VSI list ID returned or used for lookup
1736 * @lkup_type: switch rule filter lookup type
1737 * @opc: switch rules population command type - pass in the command opcode
1739 * allocates or free a VSI list resource
1741 static enum ice_status
1742 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1743 enum ice_sw_lkup_type lkup_type,
1744 enum ice_adminq_opc opc)
1746 struct ice_aqc_alloc_free_res_elem *sw_buf;
1747 struct ice_aqc_res_elem *vsi_ele;
1748 enum ice_status status;
1751 buf_len = sizeof(*sw_buf);
1752 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1753 ice_malloc(hw, buf_len);
1755 return ICE_ERR_NO_MEMORY;
1756 sw_buf->num_elems = CPU_TO_LE16(1);
1758 if (lkup_type == ICE_SW_LKUP_MAC ||
1759 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1760 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1761 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1762 lkup_type == ICE_SW_LKUP_PROMISC ||
1763 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1764 lkup_type == ICE_SW_LKUP_LAST) {
1765 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1766 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1768 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1770 status = ICE_ERR_PARAM;
1771 goto ice_aq_alloc_free_vsi_list_exit;
1774 if (opc == ice_aqc_opc_free_res)
1775 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1777 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1779 goto ice_aq_alloc_free_vsi_list_exit;
1781 if (opc == ice_aqc_opc_alloc_res) {
1782 vsi_ele = &sw_buf->elem[0];
1783 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1786 ice_aq_alloc_free_vsi_list_exit:
1787 ice_free(hw, sw_buf);
1792 * ice_aq_set_storm_ctrl - Sets storm control configuration
1793 * @hw: pointer to the HW struct
1794 * @bcast_thresh: represents the upper threshold for broadcast storm control
1795 * @mcast_thresh: represents the upper threshold for multicast storm control
1796 * @ctl_bitmask: storm control control knobs
1798 * Sets the storm control configuration (0x0280)
1801 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1804 struct ice_aqc_storm_cfg *cmd;
1805 struct ice_aq_desc desc;
1807 cmd = &desc.params.storm_conf;
1809 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
1811 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1812 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1813 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1815 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1819 * ice_aq_get_storm_ctrl - gets storm control configuration
1820 * @hw: pointer to the HW struct
1821 * @bcast_thresh: represents the upper threshold for broadcast storm control
1822 * @mcast_thresh: represents the upper threshold for multicast storm control
1823 * @ctl_bitmask: storm control control knobs
1825 * Gets the storm control configuration (0x0281)
1828 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
1831 enum ice_status status;
1832 struct ice_aq_desc desc;
1834 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1836 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1838 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
1841 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1844 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1847 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1854 * ice_aq_sw_rules - add/update/remove switch rules
1855 * @hw: pointer to the HW struct
1856 * @rule_list: pointer to switch rule population list
1857 * @rule_list_sz: total size of the rule list in bytes
1858 * @num_rules: number of switch rules in the rule_list
1859 * @opc: switch rules population command type - pass in the command opcode
1860 * @cd: pointer to command details structure or NULL
1862 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1864 static enum ice_status
1865 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1866 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1868 struct ice_aq_desc desc;
1870 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1872 if (opc != ice_aqc_opc_add_sw_rules &&
1873 opc != ice_aqc_opc_update_sw_rules &&
1874 opc != ice_aqc_opc_remove_sw_rules)
1875 return ICE_ERR_PARAM;
1877 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1879 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1880 desc.params.sw_rules.num_rules_fltr_entry_index =
1881 CPU_TO_LE16(num_rules);
1882 return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1886 * ice_aq_add_recipe - add switch recipe
1887 * @hw: pointer to the HW struct
1888 * @s_recipe_list: pointer to switch rule population list
1889 * @num_recipes: number of switch recipes in the list
1890 * @cd: pointer to command details structure or NULL
1895 ice_aq_add_recipe(struct ice_hw *hw,
1896 struct ice_aqc_recipe_data_elem *s_recipe_list,
1897 u16 num_recipes, struct ice_sq_cd *cd)
1899 struct ice_aqc_add_get_recipe *cmd;
1900 struct ice_aq_desc desc;
1903 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1904 cmd = &desc.params.add_get_recipe;
1905 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1907 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
1908 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1910 buf_size = num_recipes * sizeof(*s_recipe_list);
1912 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1916 * ice_aq_get_recipe - get switch recipe
1917 * @hw: pointer to the HW struct
1918 * @s_recipe_list: pointer to switch rule population list
1919 * @num_recipes: pointer to the number of recipes (input and output)
1920 * @recipe_root: root recipe number of recipe(s) to retrieve
1921 * @cd: pointer to command details structure or NULL
1925 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1926 * On output, *num_recipes will equal the number of entries returned in
1929 * The caller must supply enough space in s_recipe_list to hold all possible
1930 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1933 ice_aq_get_recipe(struct ice_hw *hw,
1934 struct ice_aqc_recipe_data_elem *s_recipe_list,
1935 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1937 struct ice_aqc_add_get_recipe *cmd;
1938 struct ice_aq_desc desc;
1939 enum ice_status status;
1942 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1943 return ICE_ERR_PARAM;
1945 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1946 cmd = &desc.params.add_get_recipe;
1947 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1949 cmd->return_index = CPU_TO_LE16(recipe_root);
1950 cmd->num_sub_recipes = 0;
1952 buf_size = *num_recipes * sizeof(*s_recipe_list);
1954 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1955 /* cppcheck-suppress constArgument */
1956 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1962 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1963 * @hw: pointer to the HW struct
1964 * @profile_id: package profile ID to associate the recipe with
1965 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1966 * @cd: pointer to command details structure or NULL
1967 * Recipe to profile association (0x0291)
1970 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1971 struct ice_sq_cd *cd)
1973 struct ice_aqc_recipe_to_profile *cmd;
1974 struct ice_aq_desc desc;
1976 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1977 cmd = &desc.params.recipe_to_profile;
1978 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1979 cmd->profile_id = CPU_TO_LE16(profile_id);
1980 /* Set the recipe ID bit in the bitmask to let the device know which
1981 * profile we are associating the recipe to
1983 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1984 ICE_NONDMA_TO_NONDMA);
1986 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1990 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1991 * @hw: pointer to the HW struct
1992 * @profile_id: package profile ID to associate the recipe with
1993 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1994 * @cd: pointer to command details structure or NULL
1995 * Associate profile ID with given recipe (0x0293)
1998 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1999 struct ice_sq_cd *cd)
2001 struct ice_aqc_recipe_to_profile *cmd;
2002 struct ice_aq_desc desc;
2003 enum ice_status status;
2005 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2006 cmd = &desc.params.recipe_to_profile;
2007 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
2008 cmd->profile_id = CPU_TO_LE16(profile_id);
2010 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2012 ice_memcpy(r_bitmap, cmd->recipe_assoc,
2013 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
2019 * ice_alloc_recipe - add recipe resource
2020 * @hw: pointer to the hardware structure
2021 * @rid: recipe ID returned as response to AQ call
2023 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
2025 struct ice_aqc_alloc_free_res_elem *sw_buf;
2026 enum ice_status status;
2029 buf_len = sizeof(*sw_buf);
2030 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2032 return ICE_ERR_NO_MEMORY;
2034 sw_buf->num_elems = CPU_TO_LE16(1);
2035 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
2036 ICE_AQC_RES_TYPE_S) |
2037 ICE_AQC_RES_TYPE_FLAG_SHARED);
2038 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2039 ice_aqc_opc_alloc_res, NULL);
2041 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2042 ice_free(hw, sw_buf);
2047 /* ice_init_port_info - Initialize port_info with switch configuration data
2048 * @pi: pointer to port_info
2049 * @vsi_port_num: VSI number or port number
2050 * @type: Type of switch element (port or VSI)
2051 * @swid: switch ID of the switch the element is attached to
2052 * @pf_vf_num: PF or VF number
2053 * @is_vf: true if the element is a VF, false otherwise
2056 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2057 u16 swid, u16 pf_vf_num, bool is_vf)
2060 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2061 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2063 pi->pf_vf_num = pf_vf_num;
2065 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
2066 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
2069 ice_debug(pi->hw, ICE_DBG_SW,
2070 "incorrect VSI/port type received\n");
2075 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
2076 * @hw: pointer to the hardware structure
2078 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
2080 struct ice_aqc_get_sw_cfg_resp *rbuf;
2081 enum ice_status status;
2088 num_total_ports = 1;
2090 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
2091 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
2094 return ICE_ERR_NO_MEMORY;
2096 /* Multiple calls to ice_aq_get_sw_cfg may be required
2097 * to get all the switch configuration information. The need
2098 * for additional calls is indicated by ice_aq_get_sw_cfg
2099 * writing a non-zero value in req_desc
2102 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
2103 &req_desc, &num_elems, NULL);
2108 for (i = 0; i < num_elems; i++) {
2109 struct ice_aqc_get_sw_cfg_resp_elem *ele;
2110 u16 pf_vf_num, swid, vsi_port_num;
2114 ele = rbuf[i].elements;
2115 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
2116 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
2118 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
2119 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
2121 swid = LE16_TO_CPU(ele->swid);
2123 if (LE16_TO_CPU(ele->pf_vf_num) &
2124 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
2127 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
2128 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
2131 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2132 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
2133 if (j == num_total_ports) {
2134 ice_debug(hw, ICE_DBG_SW,
2135 "more ports than expected\n");
2136 status = ICE_ERR_CFG;
2139 ice_init_port_info(hw->port_info,
2140 vsi_port_num, res_type, swid,
2148 } while (req_desc && !status);
2151 ice_free(hw, (void *)rbuf);
2156 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2157 * @hw: pointer to the hardware structure
2158 * @fi: filter info structure to fill/update
2160 * This helper function populates the lb_en and lan_en elements of the provided
2161 * ice_fltr_info struct using the switch's type and characteristics of the
2162 * switch rule being configured.
2164 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
2169 if ((fi->flag & ICE_FLTR_RX) &&
2170 (fi->fltr_act == ICE_FWD_TO_VSI ||
2171 fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
2172 fi->lkup_type == ICE_SW_LKUP_LAST)
2175 if ((fi->flag & ICE_FLTR_TX) &&
2176 (fi->fltr_act == ICE_FWD_TO_VSI ||
2177 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2178 fi->fltr_act == ICE_FWD_TO_Q ||
2179 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2180 /* Setting LB for prune actions will result in replicated
2181 * packets to the internal switch that will be dropped.
2183 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2186 /* Set lan_en to TRUE if
2187 * 1. The switch is a VEB AND
2189 * 2.1 The lookup is a directional lookup like ethertype,
2190 * promiscuous, ethertype-MAC, promiscuous-VLAN
2191 * and default-port OR
2192 * 2.2 The lookup is VLAN, OR
2193 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2194 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
2198 * The switch is a VEPA.
2200 * In all other cases, the LAN enable has to be set to false.
2203 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2204 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
2205 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2206 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2207 fi->lkup_type == ICE_SW_LKUP_DFLT ||
2208 fi->lkup_type == ICE_SW_LKUP_VLAN ||
2209 (fi->lkup_type == ICE_SW_LKUP_MAC &&
2210 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
2211 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
2212 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
2221 * ice_fill_sw_rule - Helper function to fill switch rule structure
2222 * @hw: pointer to the hardware structure
2223 * @f_info: entry containing packet forwarding information
2224 * @s_rule: switch rule structure to be filled in based on mac_entry
2225 * @opc: switch rules population command type - pass in the command opcode
2228 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
2229 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
2231 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
2239 if (opc == ice_aqc_opc_remove_sw_rules) {
2240 s_rule->pdata.lkup_tx_rx.act = 0;
2241 s_rule->pdata.lkup_tx_rx.index =
2242 CPU_TO_LE16(f_info->fltr_rule_id);
2243 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
2247 eth_hdr_sz = sizeof(dummy_eth_header);
2248 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
2250 /* initialize the ether header with a dummy header */
2251 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
2252 ice_fill_sw_info(hw, f_info);
2254 switch (f_info->fltr_act) {
2255 case ICE_FWD_TO_VSI:
2256 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
2257 ICE_SINGLE_ACT_VSI_ID_M;
2258 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2259 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2260 ICE_SINGLE_ACT_VALID_BIT;
2262 case ICE_FWD_TO_VSI_LIST:
2263 act |= ICE_SINGLE_ACT_VSI_LIST;
2264 act |= (f_info->fwd_id.vsi_list_id <<
2265 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
2266 ICE_SINGLE_ACT_VSI_LIST_ID_M;
2267 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2268 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2269 ICE_SINGLE_ACT_VALID_BIT;
2272 act |= ICE_SINGLE_ACT_TO_Q;
2273 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2274 ICE_SINGLE_ACT_Q_INDEX_M;
2276 case ICE_DROP_PACKET:
2277 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
2278 ICE_SINGLE_ACT_VALID_BIT;
2280 case ICE_FWD_TO_QGRP:
2281 q_rgn = f_info->qgrp_size > 0 ?
2282 (u8)ice_ilog2(f_info->qgrp_size) : 0;
2283 act |= ICE_SINGLE_ACT_TO_Q;
2284 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2285 ICE_SINGLE_ACT_Q_INDEX_M;
2286 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
2287 ICE_SINGLE_ACT_Q_REGION_M;
2294 act |= ICE_SINGLE_ACT_LB_ENABLE;
2296 act |= ICE_SINGLE_ACT_LAN_ENABLE;
2298 switch (f_info->lkup_type) {
2299 case ICE_SW_LKUP_MAC:
2300 daddr = f_info->l_data.mac.mac_addr;
2302 case ICE_SW_LKUP_VLAN:
2303 vlan_id = f_info->l_data.vlan.vlan_id;
2304 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
2305 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
2306 act |= ICE_SINGLE_ACT_PRUNE;
2307 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
2310 case ICE_SW_LKUP_ETHERTYPE_MAC:
2311 daddr = f_info->l_data.ethertype_mac.mac_addr;
2313 case ICE_SW_LKUP_ETHERTYPE:
2314 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2315 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
2317 case ICE_SW_LKUP_MAC_VLAN:
2318 daddr = f_info->l_data.mac_vlan.mac_addr;
2319 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2321 case ICE_SW_LKUP_PROMISC_VLAN:
2322 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2324 case ICE_SW_LKUP_PROMISC:
2325 daddr = f_info->l_data.mac_vlan.mac_addr;
2331 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
2332 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
2333 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
2335 /* Recipe set depending on lookup type */
2336 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
2337 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
2338 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2341 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
2342 ICE_NONDMA_TO_NONDMA);
2344 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
2345 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
2346 *off = CPU_TO_BE16(vlan_id);
2349 /* Create the switch rule with the final dummy Ethernet header */
2350 if (opc != ice_aqc_opc_update_sw_rules)
2351 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
2355 * ice_add_marker_act
2356 * @hw: pointer to the hardware structure
2357 * @m_ent: the management entry for which sw marker needs to be added
2358 * @sw_marker: sw marker to tag the Rx descriptor with
2359 * @l_id: large action resource ID
2361 * Create a large action to hold software marker and update the switch rule
2362 * entry pointed by m_ent with newly created large action
2364 static enum ice_status
2365 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2366 u16 sw_marker, u16 l_id)
2368 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
2369 /* For software marker we need 3 large actions
2370 * 1. FWD action: FWD TO VSI or VSI LIST
2371 * 2. GENERIC VALUE action to hold the profile ID
2372 * 3. GENERIC VALUE action to hold the software marker ID
2374 const u16 num_lg_acts = 3;
2375 enum ice_status status;
2381 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2382 return ICE_ERR_PARAM;
2384 /* Create two back-to-back switch rules and submit them to the HW using
2385 * one memory buffer:
2389 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2390 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2391 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2393 return ICE_ERR_NO_MEMORY;
2395 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2397 /* Fill in the first switch rule i.e. large action */
2398 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2399 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2400 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2402 /* First action VSI forwarding or VSI list forwarding depending on how
2405 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2406 m_ent->fltr_info.fwd_id.hw_vsi_id;
2408 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2409 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2410 ICE_LG_ACT_VSI_LIST_ID_M;
2411 if (m_ent->vsi_count > 1)
2412 act |= ICE_LG_ACT_VSI_LIST;
2413 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2415 /* Second action descriptor type */
2416 act = ICE_LG_ACT_GENERIC;
2418 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2419 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2421 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2422 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2424 /* Third action Marker value */
2425 act |= ICE_LG_ACT_GENERIC;
2426 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2427 ICE_LG_ACT_GENERIC_VALUE_M;
2429 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
2431 /* call the fill switch rule to fill the lookup Tx Rx structure */
2432 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2433 ice_aqc_opc_update_sw_rules);
2435 /* Update the action to point to the large action ID */
2436 rx_tx->pdata.lkup_tx_rx.act =
2437 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
2438 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2439 ICE_SINGLE_ACT_PTR_VAL_M));
2441 /* Use the filter rule ID of the previously created rule with single
2442 * act. Once the update happens, hardware will treat this as large
2445 rx_tx->pdata.lkup_tx_rx.index =
2446 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
2448 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2449 ice_aqc_opc_update_sw_rules, NULL);
2451 m_ent->lg_act_idx = l_id;
2452 m_ent->sw_marker_id = sw_marker;
2455 ice_free(hw, lg_act);
2460 * ice_add_counter_act - add/update filter rule with counter action
2461 * @hw: pointer to the hardware structure
2462 * @m_ent: the management entry for which counter needs to be added
2463 * @counter_id: VLAN counter ID returned as part of allocate resource
2464 * @l_id: large action resource ID
2466 static enum ice_status
2467 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2468 u16 counter_id, u16 l_id)
2470 struct ice_aqc_sw_rules_elem *lg_act;
2471 struct ice_aqc_sw_rules_elem *rx_tx;
2472 enum ice_status status;
2473 /* 2 actions will be added while adding a large action counter */
2474 const int num_acts = 2;
2481 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2482 return ICE_ERR_PARAM;
2484 /* Create two back-to-back switch rules and submit them to the HW using
2485 * one memory buffer:
2489 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2490 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2491 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
2494 return ICE_ERR_NO_MEMORY;
2496 rx_tx = (struct ice_aqc_sw_rules_elem *)
2497 ((u8 *)lg_act + lg_act_size);
2499 /* Fill in the first switch rule i.e. large action */
2500 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2501 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2502 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2504 /* First action VSI forwarding or VSI list forwarding depending on how
2507 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2508 m_ent->fltr_info.fwd_id.hw_vsi_id;
2510 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2511 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2512 ICE_LG_ACT_VSI_LIST_ID_M;
2513 if (m_ent->vsi_count > 1)
2514 act |= ICE_LG_ACT_VSI_LIST;
2515 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2517 /* Second action counter ID */
2518 act = ICE_LG_ACT_STAT_COUNT;
2519 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2520 ICE_LG_ACT_STAT_COUNT_M;
2521 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2523 /* call the fill switch rule to fill the lookup Tx Rx structure */
2524 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2525 ice_aqc_opc_update_sw_rules);
2527 act = ICE_SINGLE_ACT_PTR;
2528 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2529 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2531 /* Use the filter rule ID of the previously created rule with single
2532 * act. Once the update happens, hardware will treat this as large
2535 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2536 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
2538 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2539 ice_aqc_opc_update_sw_rules, NULL);
2541 m_ent->lg_act_idx = l_id;
2542 m_ent->counter_index = counter_id;
2545 ice_free(hw, lg_act);
2550 * ice_create_vsi_list_map
2551 * @hw: pointer to the hardware structure
2552 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2553 * @num_vsi: number of VSI handles in the array
2554 * @vsi_list_id: VSI list ID generated as part of allocate resource
2556 * Helper function to create a new entry of VSI list ID to VSI mapping
2557 * using the given VSI list ID
2559 static struct ice_vsi_list_map_info *
2560 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2563 struct ice_switch_info *sw = hw->switch_info;
2564 struct ice_vsi_list_map_info *v_map;
2567 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2572 v_map->vsi_list_id = vsi_list_id;
2574 for (i = 0; i < num_vsi; i++)
2575 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
2577 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2582 * ice_update_vsi_list_rule
2583 * @hw: pointer to the hardware structure
2584 * @vsi_handle_arr: array of VSI handles to form a VSI list
2585 * @num_vsi: number of VSI handles in the array
2586 * @vsi_list_id: VSI list ID generated as part of allocate resource
2587 * @remove: Boolean value to indicate if this is a remove action
2588 * @opc: switch rules population command type - pass in the command opcode
2589 * @lkup_type: lookup type of the filter
2591 * Call AQ command to add a new switch rule or update existing switch rule
2592 * using the given VSI list ID
2594 static enum ice_status
2595 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2596 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2597 enum ice_sw_lkup_type lkup_type)
2599 struct ice_aqc_sw_rules_elem *s_rule;
2600 enum ice_status status;
2606 return ICE_ERR_PARAM;
2608 if (lkup_type == ICE_SW_LKUP_MAC ||
2609 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2610 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2611 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2612 lkup_type == ICE_SW_LKUP_PROMISC ||
2613 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2614 lkup_type == ICE_SW_LKUP_LAST)
2615 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2616 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2617 else if (lkup_type == ICE_SW_LKUP_VLAN)
2618 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2619 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2621 return ICE_ERR_PARAM;
2623 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2624 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2626 return ICE_ERR_NO_MEMORY;
2627 for (i = 0; i < num_vsi; i++) {
2628 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2629 status = ICE_ERR_PARAM;
2632 /* AQ call requires hw_vsi_id(s) */
2633 s_rule->pdata.vsi_list.vsi[i] =
2634 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2637 s_rule->type = CPU_TO_LE16(rule_type);
2638 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2639 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2641 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2644 ice_free(hw, s_rule);
2649 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2650 * @hw: pointer to the HW struct
2651 * @vsi_handle_arr: array of VSI handles to form a VSI list
2652 * @num_vsi: number of VSI handles in the array
2653 * @vsi_list_id: stores the ID of the VSI list to be created
2654 * @lkup_type: switch rule filter's lookup type
2656 static enum ice_status
2657 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2658 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2660 enum ice_status status;
2662 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2663 ice_aqc_opc_alloc_res);
2667 /* Update the newly created VSI list to include the specified VSIs */
2668 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2669 *vsi_list_id, false,
2670 ice_aqc_opc_add_sw_rules, lkup_type);
2674 * ice_create_pkt_fwd_rule
2675 * @hw: pointer to the hardware structure
2676 * @recp_list: corresponding filter management list
2677 * @f_entry: entry containing packet forwarding information
2679 * Create switch rule with given filter information and add an entry
2680 * to the corresponding filter management list to track this switch rule
2683 static enum ice_status
2684 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2685 struct ice_fltr_list_entry *f_entry)
2687 struct ice_fltr_mgmt_list_entry *fm_entry;
2688 struct ice_aqc_sw_rules_elem *s_rule;
2689 enum ice_status status;
2691 s_rule = (struct ice_aqc_sw_rules_elem *)
2692 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2694 return ICE_ERR_NO_MEMORY;
2695 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2696 ice_malloc(hw, sizeof(*fm_entry));
2698 status = ICE_ERR_NO_MEMORY;
2699 goto ice_create_pkt_fwd_rule_exit;
2702 fm_entry->fltr_info = f_entry->fltr_info;
2704 /* Initialize all the fields for the management entry */
2705 fm_entry->vsi_count = 1;
2706 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2707 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2708 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2710 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2711 ice_aqc_opc_add_sw_rules);
2713 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2714 ice_aqc_opc_add_sw_rules, NULL);
2716 ice_free(hw, fm_entry);
2717 goto ice_create_pkt_fwd_rule_exit;
2720 f_entry->fltr_info.fltr_rule_id =
2721 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2722 fm_entry->fltr_info.fltr_rule_id =
2723 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2725 /* The book keeping entries will get removed when base driver
2726 * calls remove filter AQ command
2728 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
2730 ice_create_pkt_fwd_rule_exit:
2731 ice_free(hw, s_rule);
2736 * ice_update_pkt_fwd_rule
2737 * @hw: pointer to the hardware structure
2738 * @f_info: filter information for switch rule
2740 * Call AQ command to update a previously created switch rule with a
2743 static enum ice_status
2744 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2746 struct ice_aqc_sw_rules_elem *s_rule;
2747 enum ice_status status;
2749 s_rule = (struct ice_aqc_sw_rules_elem *)
2750 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2752 return ICE_ERR_NO_MEMORY;
2754 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
2756 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2758 /* Update switch rule with new rule set to forward VSI list */
2759 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2760 ice_aqc_opc_update_sw_rules, NULL);
2762 ice_free(hw, s_rule);
2767 * ice_update_sw_rule_bridge_mode
2768 * @hw: pointer to the HW struct
2770 * Updates unicast switch filter rules based on VEB/VEPA mode
2772 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2774 struct ice_switch_info *sw = hw->switch_info;
2775 struct ice_fltr_mgmt_list_entry *fm_entry;
2776 enum ice_status status = ICE_SUCCESS;
2777 struct LIST_HEAD_TYPE *rule_head;
2778 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2780 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2781 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2783 ice_acquire_lock(rule_lock);
2784 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2786 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2787 u8 *addr = fi->l_data.mac.mac_addr;
2789 /* Update unicast Tx rules to reflect the selected
2792 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2793 (fi->fltr_act == ICE_FWD_TO_VSI ||
2794 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2795 fi->fltr_act == ICE_FWD_TO_Q ||
2796 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2797 status = ice_update_pkt_fwd_rule(hw, fi);
2803 ice_release_lock(rule_lock);
2809 * ice_add_update_vsi_list
2810 * @hw: pointer to the hardware structure
2811 * @m_entry: pointer to current filter management list entry
2812 * @cur_fltr: filter information from the book keeping entry
2813 * @new_fltr: filter information with the new VSI to be added
2815 * Call AQ command to add or update previously created VSI list with new VSI.
2817 * Helper function to do book keeping associated with adding filter information
2818 * The algorithm to do the book keeping is described below :
2819 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2820 * if only one VSI has been added till now
2821 * Allocate a new VSI list and add two VSIs
2822 * to this list using switch rule command
2823 * Update the previously created switch rule with the
2824 * newly created VSI list ID
2825 * if a VSI list was previously created
2826 * Add the new VSI to the previously created VSI list set
2827 * using the update switch rule command
2829 static enum ice_status
2830 ice_add_update_vsi_list(struct ice_hw *hw,
2831 struct ice_fltr_mgmt_list_entry *m_entry,
2832 struct ice_fltr_info *cur_fltr,
2833 struct ice_fltr_info *new_fltr)
2835 enum ice_status status = ICE_SUCCESS;
2836 u16 vsi_list_id = 0;
2838 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2839 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2840 return ICE_ERR_NOT_IMPL;
2842 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2843 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2844 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2845 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2846 return ICE_ERR_NOT_IMPL;
2848 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2849 /* Only one entry existed in the mapping and it was not already
2850 * a part of a VSI list. So, create a VSI list with the old and
2853 struct ice_fltr_info tmp_fltr;
2854 u16 vsi_handle_arr[2];
2856 /* A rule already exists with the new VSI being added */
2857 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2858 return ICE_ERR_ALREADY_EXISTS;
2860 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2861 vsi_handle_arr[1] = new_fltr->vsi_handle;
2862 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2864 new_fltr->lkup_type);
2868 tmp_fltr = *new_fltr;
2869 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2870 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2871 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2872 /* Update the previous switch rule of "MAC forward to VSI" to
2873 * "MAC fwd to VSI list"
2875 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
2879 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2880 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2881 m_entry->vsi_list_info =
2882 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2885 /* If this entry was large action then the large action needs
2886 * to be updated to point to FWD to VSI list
2888 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2890 ice_add_marker_act(hw, m_entry,
2891 m_entry->sw_marker_id,
2892 m_entry->lg_act_idx);
2894 u16 vsi_handle = new_fltr->vsi_handle;
2895 enum ice_adminq_opc opcode;
2897 if (!m_entry->vsi_list_info)
2900 /* A rule already exists with the new VSI being added */
2901 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
2904 /* Update the previously created VSI list set with
2905 * the new VSI ID passed in
2907 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2908 opcode = ice_aqc_opc_update_sw_rules;
2910 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2911 vsi_list_id, false, opcode,
2912 new_fltr->lkup_type);
2913 /* update VSI list mapping info with new VSI ID */
2915 ice_set_bit(vsi_handle,
2916 m_entry->vsi_list_info->vsi_map);
2919 m_entry->vsi_count++;
2924 * ice_find_rule_entry - Search a rule entry
2925 * @list_head: head of rule list
2926 * @f_info: rule information
2928 * Helper function to search for a given rule entry
2929 * Returns pointer to entry storing the rule if found
2931 static struct ice_fltr_mgmt_list_entry *
2932 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
2933 struct ice_fltr_info *f_info)
2935 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2937 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
2939 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2940 sizeof(f_info->l_data)) &&
2941 f_info->flag == list_itr->fltr_info.flag) {
2950 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2951 * @recp_list: VSI lists needs to be searched
2952 * @vsi_handle: VSI handle to be found in VSI list
2953 * @vsi_list_id: VSI list ID found containing vsi_handle
2955 * Helper function to search a VSI list with single entry containing given VSI
2956 * handle element. This can be extended further to search VSI list with more
2957 * than 1 vsi_count. Returns pointer to VSI list entry if found.
2959 static struct ice_vsi_list_map_info *
2960 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
2963 struct ice_vsi_list_map_info *map_info = NULL;
2964 struct LIST_HEAD_TYPE *list_head;
2966 list_head = &recp_list->filt_rules;
2967 if (recp_list->adv_rule) {
2968 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2970 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2971 ice_adv_fltr_mgmt_list_entry,
2973 if (list_itr->vsi_list_info) {
2974 map_info = list_itr->vsi_list_info;
2975 if (ice_is_bit_set(map_info->vsi_map,
2977 *vsi_list_id = map_info->vsi_list_id;
2983 struct ice_fltr_mgmt_list_entry *list_itr;
2985 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2986 ice_fltr_mgmt_list_entry,
2988 if (list_itr->vsi_count == 1 &&
2989 list_itr->vsi_list_info) {
2990 map_info = list_itr->vsi_list_info;
2991 if (ice_is_bit_set(map_info->vsi_map,
2993 *vsi_list_id = map_info->vsi_list_id;
3003 * ice_add_rule_internal - add rule for a given lookup type
3004 * @hw: pointer to the hardware structure
3005 * @recp_list: recipe list for which rule has to be added
3006 * @lport: logic port number on which function add rule
3007 * @f_entry: structure containing MAC forwarding information
3009 * Adds or updates the rule lists for a given recipe
3011 static enum ice_status
3012 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3013 u8 lport, struct ice_fltr_list_entry *f_entry)
3015 struct ice_fltr_info *new_fltr, *cur_fltr;
3016 struct ice_fltr_mgmt_list_entry *m_entry;
3017 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3018 enum ice_status status = ICE_SUCCESS;
3020 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3021 return ICE_ERR_PARAM;
3023 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
3024 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
3025 f_entry->fltr_info.fwd_id.hw_vsi_id =
3026 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3028 rule_lock = &recp_list->filt_rule_lock;
3030 ice_acquire_lock(rule_lock);
3031 new_fltr = &f_entry->fltr_info;
3032 if (new_fltr->flag & ICE_FLTR_RX)
3033 new_fltr->src = lport;
3034 else if (new_fltr->flag & ICE_FLTR_TX)
3036 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3038 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
3040 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3041 goto exit_add_rule_internal;
3044 cur_fltr = &m_entry->fltr_info;
3045 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
3047 exit_add_rule_internal:
3048 ice_release_lock(rule_lock);
3053 * ice_remove_vsi_list_rule
3054 * @hw: pointer to the hardware structure
3055 * @vsi_list_id: VSI list ID generated as part of allocate resource
3056 * @lkup_type: switch rule filter lookup type
3058 * The VSI list should be emptied before this function is called to remove the
3061 static enum ice_status
3062 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
3063 enum ice_sw_lkup_type lkup_type)
3065 struct ice_aqc_sw_rules_elem *s_rule;
3066 enum ice_status status;
3069 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
3070 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3072 return ICE_ERR_NO_MEMORY;
3074 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
3075 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
3077 /* Free the vsi_list resource that we allocated. It is assumed that the
3078 * list is empty at this point.
3080 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
3081 ice_aqc_opc_free_res);
3083 ice_free(hw, s_rule);
3088 * ice_rem_update_vsi_list
3089 * @hw: pointer to the hardware structure
3090 * @vsi_handle: VSI handle of the VSI to remove
3091 * @fm_list: filter management entry for which the VSI list management needs to
3094 static enum ice_status
3095 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
3096 struct ice_fltr_mgmt_list_entry *fm_list)
3098 enum ice_sw_lkup_type lkup_type;
3099 enum ice_status status = ICE_SUCCESS;
3102 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
3103 fm_list->vsi_count == 0)
3104 return ICE_ERR_PARAM;
3106 /* A rule with the VSI being removed does not exist */
3107 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
3108 return ICE_ERR_DOES_NOT_EXIST;
3110 lkup_type = fm_list->fltr_info.lkup_type;
3111 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
3112 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
3113 ice_aqc_opc_update_sw_rules,
3118 fm_list->vsi_count--;
3119 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
3121 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
3122 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
3123 struct ice_vsi_list_map_info *vsi_list_info =
3124 fm_list->vsi_list_info;
3127 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
3129 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
3130 return ICE_ERR_OUT_OF_RANGE;
3132 /* Make sure VSI list is empty before removing it below */
3133 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
3135 ice_aqc_opc_update_sw_rules,
3140 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
3141 tmp_fltr_info.fwd_id.hw_vsi_id =
3142 ice_get_hw_vsi_num(hw, rem_vsi_handle);
3143 tmp_fltr_info.vsi_handle = rem_vsi_handle;
3144 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
3146 ice_debug(hw, ICE_DBG_SW,
3147 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
3148 tmp_fltr_info.fwd_id.hw_vsi_id, status);
3152 fm_list->fltr_info = tmp_fltr_info;
3155 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
3156 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
3157 struct ice_vsi_list_map_info *vsi_list_info =
3158 fm_list->vsi_list_info;
3160 /* Remove the VSI list since it is no longer used */
3161 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
3163 ice_debug(hw, ICE_DBG_SW,
3164 "Failed to remove VSI list %d, error %d\n",
3165 vsi_list_id, status);
3169 LIST_DEL(&vsi_list_info->list_entry);
3170 ice_free(hw, vsi_list_info);
3171 fm_list->vsi_list_info = NULL;
3178 * ice_remove_rule_internal - Remove a filter rule of a given type
3180 * @hw: pointer to the hardware structure
3181 * @recp_list: recipe list for which the rule needs to removed
3182 * @f_entry: rule entry containing filter information
3184 static enum ice_status
3185 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3186 struct ice_fltr_list_entry *f_entry)
3188 struct ice_fltr_mgmt_list_entry *list_elem;
3189 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3190 enum ice_status status = ICE_SUCCESS;
3191 bool remove_rule = false;
3194 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3195 return ICE_ERR_PARAM;
3196 f_entry->fltr_info.fwd_id.hw_vsi_id =
3197 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3199 rule_lock = &recp_list->filt_rule_lock;
3200 ice_acquire_lock(rule_lock);
3201 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
3202 &f_entry->fltr_info);
3204 status = ICE_ERR_DOES_NOT_EXIST;
3208 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
3210 } else if (!list_elem->vsi_list_info) {
3211 status = ICE_ERR_DOES_NOT_EXIST;
3213 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
3214 /* a ref_cnt > 1 indicates that the vsi_list is being
3215 * shared by multiple rules. Decrement the ref_cnt and
3216 * remove this rule, but do not modify the list, as it
3217 * is in-use by other rules.
3219 list_elem->vsi_list_info->ref_cnt--;
3222 /* a ref_cnt of 1 indicates the vsi_list is only used
3223 * by one rule. However, the original removal request is only
3224 * for a single VSI. Update the vsi_list first, and only
3225 * remove the rule if there are no further VSIs in this list.
3227 vsi_handle = f_entry->fltr_info.vsi_handle;
3228 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
3231 /* if VSI count goes to zero after updating the VSI list */
3232 if (list_elem->vsi_count == 0)
3237 /* Remove the lookup rule */
3238 struct ice_aqc_sw_rules_elem *s_rule;
3240 s_rule = (struct ice_aqc_sw_rules_elem *)
3241 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
3243 status = ICE_ERR_NO_MEMORY;
3247 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3248 ice_aqc_opc_remove_sw_rules);
3250 status = ice_aq_sw_rules(hw, s_rule,
3251 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
3252 ice_aqc_opc_remove_sw_rules, NULL);
3254 /* Remove a book keeping from the list */
3255 ice_free(hw, s_rule);
3260 LIST_DEL(&list_elem->list_entry);
3261 ice_free(hw, list_elem);
3264 ice_release_lock(rule_lock);
3269 * ice_aq_get_res_alloc - get allocated resources
3270 * @hw: pointer to the HW struct
3271 * @num_entries: pointer to u16 to store the number of resource entries returned
3272 * @buf: pointer to user-supplied buffer
3273 * @buf_size: size of buff
3274 * @cd: pointer to command details structure or NULL
3276 * The user-supplied buffer must be large enough to store the resource
3277 * information for all resource types. Each resource type is an
3278 * ice_aqc_get_res_resp_data_elem structure.
3281 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
3282 u16 buf_size, struct ice_sq_cd *cd)
3284 struct ice_aqc_get_res_alloc *resp;
3285 enum ice_status status;
3286 struct ice_aq_desc desc;
3289 return ICE_ERR_BAD_PTR;
3291 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
3292 return ICE_ERR_INVAL_SIZE;
3294 resp = &desc.params.get_res;
3296 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
3297 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3299 if (!status && num_entries)
3300 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
3306 * ice_aq_get_res_descs - get allocated resource descriptors
3307 * @hw: pointer to the hardware structure
3308 * @num_entries: number of resource entries in buffer
3309 * @buf: Indirect buffer to hold data parameters and response
3310 * @buf_size: size of buffer for indirect commands
3311 * @res_type: resource type
3312 * @res_shared: is resource shared
3313 * @desc_id: input - first desc ID to start; output - next desc ID
3314 * @cd: pointer to command details structure or NULL
3317 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
3318 struct ice_aqc_get_allocd_res_desc_resp *buf,
3319 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
3320 struct ice_sq_cd *cd)
3322 struct ice_aqc_get_allocd_res_desc *cmd;
3323 struct ice_aq_desc desc;
3324 enum ice_status status;
3326 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3328 cmd = &desc.params.get_res_desc;
3331 return ICE_ERR_PARAM;
3333 if (buf_size != (num_entries * sizeof(*buf)))
3334 return ICE_ERR_PARAM;
3336 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
3338 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
3339 ICE_AQC_RES_TYPE_M) | (res_shared ?
3340 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
3341 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
3343 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3345 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
3351 * ice_add_mac_rule - Add a MAC address based filter rule
3352 * @hw: pointer to the hardware structure
3353 * @m_list: list of MAC addresses and forwarding information
3354 * @sw: pointer to switch info struct for which function add rule
3355 * @lport: logic port number on which function add rule
3357 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
3358 * multiple unicast addresses, the function assumes that all the
3359 * addresses are unique in a given add_mac call. It doesn't
3360 * check for duplicates in this case, removing duplicates from a given
3361 * list should be taken care of in the caller of this function.
3363 static enum ice_status
3364 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
3365 struct ice_switch_info *sw, u8 lport)
3367 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
3368 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3369 struct ice_fltr_list_entry *m_list_itr;
3370 struct LIST_HEAD_TYPE *rule_head;
3371 u16 total_elem_left, s_rule_size;
3372 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3373 enum ice_status status = ICE_SUCCESS;
3374 u16 num_unicast = 0;
3378 rule_lock = &recp_list->filt_rule_lock;
3379 rule_head = &recp_list->filt_rules;
3381 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3383 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3387 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3388 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3389 if (!ice_is_vsi_valid(hw, vsi_handle))
3390 return ICE_ERR_PARAM;
3391 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3392 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3393 /* update the src in case it is VSI num */
3394 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3395 return ICE_ERR_PARAM;
3396 m_list_itr->fltr_info.src = hw_vsi_id;
3397 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3398 IS_ZERO_ETHER_ADDR(add))
3399 return ICE_ERR_PARAM;
3400 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3401 /* Don't overwrite the unicast address */
3402 ice_acquire_lock(rule_lock);
3403 if (ice_find_rule_entry(rule_head,
3404 &m_list_itr->fltr_info)) {
3405 ice_release_lock(rule_lock);
3406 return ICE_ERR_ALREADY_EXISTS;
3408 ice_release_lock(rule_lock);
3410 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3411 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3412 m_list_itr->status =
3413 ice_add_rule_internal(hw, recp_list, lport,
3415 if (m_list_itr->status)
3416 return m_list_itr->status;
3420 ice_acquire_lock(rule_lock);
3421 /* Exit if no suitable entries were found for adding bulk switch rule */
3423 status = ICE_SUCCESS;
3424 goto ice_add_mac_exit;
3427 /* Allocate switch rule buffer for the bulk update for unicast */
3428 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3429 s_rule = (struct ice_aqc_sw_rules_elem *)
3430 ice_calloc(hw, num_unicast, s_rule_size);
3432 status = ICE_ERR_NO_MEMORY;
3433 goto ice_add_mac_exit;
3437 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3439 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3440 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3442 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3443 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3444 ice_aqc_opc_add_sw_rules);
3445 r_iter = (struct ice_aqc_sw_rules_elem *)
3446 ((u8 *)r_iter + s_rule_size);
3450 /* Call AQ bulk switch rule update for all unicast addresses */
3452 /* Call AQ switch rule in AQ_MAX chunk */
3453 for (total_elem_left = num_unicast; total_elem_left > 0;
3454 total_elem_left -= elem_sent) {
3455 struct ice_aqc_sw_rules_elem *entry = r_iter;
3457 elem_sent = MIN_T(u8, total_elem_left,
3458 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
3459 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3460 elem_sent, ice_aqc_opc_add_sw_rules,
3463 goto ice_add_mac_exit;
3464 r_iter = (struct ice_aqc_sw_rules_elem *)
3465 ((u8 *)r_iter + (elem_sent * s_rule_size));
3468 /* Fill up rule ID based on the value returned from FW */
3470 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3472 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3473 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3474 struct ice_fltr_mgmt_list_entry *fm_entry;
3476 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3477 f_info->fltr_rule_id =
3478 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3479 f_info->fltr_act = ICE_FWD_TO_VSI;
3480 /* Create an entry to track this MAC address */
3481 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3482 ice_malloc(hw, sizeof(*fm_entry));
3484 status = ICE_ERR_NO_MEMORY;
3485 goto ice_add_mac_exit;
3487 fm_entry->fltr_info = *f_info;
3488 fm_entry->vsi_count = 1;
3489 /* The book keeping entries will get removed when
3490 * base driver calls remove filter AQ command
3493 LIST_ADD(&fm_entry->list_entry, rule_head);
3494 r_iter = (struct ice_aqc_sw_rules_elem *)
3495 ((u8 *)r_iter + s_rule_size);
3500 ice_release_lock(rule_lock);
3502 ice_free(hw, s_rule);
3507 * ice_add_mac - Add a MAC address based filter rule
3508 * @hw: pointer to the hardware structure
3509 * @m_list: list of MAC addresses and forwarding information
3511 * Function add MAC rule for logical port from HW struct
3514 ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3517 return ICE_ERR_PARAM;
3519 return ice_add_mac_rule(hw, m_list, hw->switch_info,
3520 hw->port_info->lport);
3524 * ice_add_vlan_internal - Add one VLAN based filter rule
3525 * @hw: pointer to the hardware structure
3526 * @recp_list: recipe list for which rule has to be added
3527 * @f_entry: filter entry containing one VLAN information
3529 static enum ice_status
3530 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3531 struct ice_fltr_list_entry *f_entry)
3533 struct ice_fltr_mgmt_list_entry *v_list_itr;
3534 struct ice_fltr_info *new_fltr, *cur_fltr;
3535 enum ice_sw_lkup_type lkup_type;
3536 u16 vsi_list_id = 0, vsi_handle;
3537 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3538 enum ice_status status = ICE_SUCCESS;
3540 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3541 return ICE_ERR_PARAM;
3543 f_entry->fltr_info.fwd_id.hw_vsi_id =
3544 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3545 new_fltr = &f_entry->fltr_info;
3547 /* VLAN ID should only be 12 bits */
3548 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3549 return ICE_ERR_PARAM;
3551 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3552 return ICE_ERR_PARAM;
3554 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3555 lkup_type = new_fltr->lkup_type;
3556 vsi_handle = new_fltr->vsi_handle;
3557 rule_lock = &recp_list->filt_rule_lock;
3558 ice_acquire_lock(rule_lock);
3559 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
3561 struct ice_vsi_list_map_info *map_info = NULL;
3563 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3564 /* All VLAN pruning rules use a VSI list. Check if
3565 * there is already a VSI list containing VSI that we
3566 * want to add. If found, use the same vsi_list_id for
3567 * this new VLAN rule or else create a new list.
3569 map_info = ice_find_vsi_list_entry(recp_list,
3573 status = ice_create_vsi_list_rule(hw,
3581 /* Convert the action to forwarding to a VSI list. */
3582 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3583 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3586 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3588 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
3591 status = ICE_ERR_DOES_NOT_EXIST;
3594 /* reuse VSI list for new rule and increment ref_cnt */
3596 v_list_itr->vsi_list_info = map_info;
3597 map_info->ref_cnt++;
3599 v_list_itr->vsi_list_info =
3600 ice_create_vsi_list_map(hw, &vsi_handle,
3604 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3605 /* Update existing VSI list to add new VSI ID only if it used
3608 cur_fltr = &v_list_itr->fltr_info;
3609 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3612 /* If VLAN rule exists and VSI list being used by this rule is
3613 * referenced by more than 1 VLAN rule. Then create a new VSI
3614 * list appending previous VSI with new VSI and update existing
3615 * VLAN rule to point to new VSI list ID
3617 struct ice_fltr_info tmp_fltr;
3618 u16 vsi_handle_arr[2];
3621 /* Current implementation only supports reusing VSI list with
3622 * one VSI count. We should never hit below condition
3624 if (v_list_itr->vsi_count > 1 &&
3625 v_list_itr->vsi_list_info->ref_cnt > 1) {
3626 ice_debug(hw, ICE_DBG_SW,
3627 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3628 status = ICE_ERR_CFG;
3633 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3636 /* A rule already exists with the new VSI being added */
3637 if (cur_handle == vsi_handle) {
3638 status = ICE_ERR_ALREADY_EXISTS;
3642 vsi_handle_arr[0] = cur_handle;
3643 vsi_handle_arr[1] = vsi_handle;
3644 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3645 &vsi_list_id, lkup_type);
3649 tmp_fltr = v_list_itr->fltr_info;
3650 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3651 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3652 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3653 /* Update the previous switch rule to a new VSI list which
3654 * includes current VSI that is requested
3656 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3660 /* before overriding VSI list map info. decrement ref_cnt of
3663 v_list_itr->vsi_list_info->ref_cnt--;
3665 /* now update to newly created list */
3666 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3667 v_list_itr->vsi_list_info =
3668 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3670 v_list_itr->vsi_count++;
3674 ice_release_lock(rule_lock);
3679 * ice_add_vlan_rule - Add VLAN based filter rule
3680 * @hw: pointer to the hardware structure
3681 * @v_list: list of VLAN entries and forwarding information
3682 * @sw: pointer to switch info struct for which function add rule
3684 static enum ice_status
3685 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
3686 struct ice_switch_info *sw)
3688 struct ice_fltr_list_entry *v_list_itr;
3689 struct ice_sw_recipe *recp_list;
3691 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
3692 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
3694 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3695 return ICE_ERR_PARAM;
3696 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3697 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
3699 if (v_list_itr->status)
3700 return v_list_itr->status;
3706 * ice_add_vlan - Add a VLAN based filter rule
3707 * @hw: pointer to the hardware structure
3708 * @v_list: list of VLAN and forwarding information
3710 * Function add VLAN rule for logical port from HW struct
3713 ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3716 return ICE_ERR_PARAM;
3718 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
3722 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3723 * @hw: pointer to the hardware structure
3724 * @mv_list: list of MAC and VLAN filters
3725 * @sw: pointer to switch info struct for which function add rule
3726 * @lport: logic port number on which function add rule
3728 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3729 * pruning bits enabled, then it is the responsibility of the caller to make
3730 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3731 * VLAN won't be received on that VSI otherwise.
3733 static enum ice_status
3734 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
3735 struct ice_switch_info *sw, u8 lport)
3737 struct ice_fltr_list_entry *mv_list_itr;
3738 struct ice_sw_recipe *recp_list;
3740 if (!mv_list || !hw)
3741 return ICE_ERR_PARAM;
3743 recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
3744 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3746 enum ice_sw_lkup_type l_type =
3747 mv_list_itr->fltr_info.lkup_type;
3749 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3750 return ICE_ERR_PARAM;
3751 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3752 mv_list_itr->status =
3753 ice_add_rule_internal(hw, recp_list, lport,
3755 if (mv_list_itr->status)
3756 return mv_list_itr->status;
3762 * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
3763 * @hw: pointer to the hardware structure
3764 * @mv_list: list of MAC VLAN addresses and forwarding information
3766 * Function add MAC VLAN rule for logical port from HW struct
3769 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3771 if (!mv_list || !hw)
3772 return ICE_ERR_PARAM;
3774 return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
3775 hw->port_info->lport);
3779 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
3780 * @hw: pointer to the hardware structure
3781 * @em_list: list of ether type MAC filter, MAC is optional
3782 * @sw: pointer to switch info struct for which function add rule
3783 * @lport: logic port number on which function add rule
3785 * This function requires the caller to populate the entries in
3786 * the filter list with the necessary fields (including flags to
3787 * indicate Tx or Rx rules).
3789 static enum ice_status
3790 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
3791 struct ice_switch_info *sw, u8 lport)
3793 struct ice_fltr_list_entry *em_list_itr;
3795 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3797 struct ice_sw_recipe *recp_list;
3798 enum ice_sw_lkup_type l_type;
3800 l_type = em_list_itr->fltr_info.lkup_type;
3801 recp_list = &sw->recp_list[l_type];
3803 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3804 l_type != ICE_SW_LKUP_ETHERTYPE)
3805 return ICE_ERR_PARAM;
3807 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
3810 if (em_list_itr->status)
3811 return em_list_itr->status;
3818 * ice_add_eth_mac - Add a ethertype based filter rule
3819 * @hw: pointer to the hardware structure
3820 * @em_list: list of ethertype and forwarding information
3822 * Function add ethertype rule for logical port from HW struct
3824 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3826 if (!em_list || !hw)
3827 return ICE_ERR_PARAM;
3829 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
3830 hw->port_info->lport);
3834 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
3835 * @hw: pointer to the hardware structure
3836 * @em_list: list of ethertype or ethertype MAC entries
3837 * @sw: pointer to switch info struct for which function add rule
3839 static enum ice_status
3840 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
3841 struct ice_switch_info *sw)
3843 struct ice_fltr_list_entry *em_list_itr, *tmp;
3845 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3847 struct ice_sw_recipe *recp_list;
3848 enum ice_sw_lkup_type l_type;
3850 l_type = em_list_itr->fltr_info.lkup_type;
3852 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3853 l_type != ICE_SW_LKUP_ETHERTYPE)
3854 return ICE_ERR_PARAM;
3856 recp_list = &sw->recp_list[l_type];
3857 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
3859 if (em_list_itr->status)
3860 return em_list_itr->status;
3866 * ice_remove_eth_mac - remove a ethertype based filter rule
3867 * @hw: pointer to the hardware structure
3868 * @em_list: list of ethertype and forwarding information
3872 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3874 if (!em_list || !hw)
3875 return ICE_ERR_PARAM;
3877 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
3881 * ice_rem_sw_rule_info
3882 * @hw: pointer to the hardware structure
3883 * @rule_head: pointer to the switch list structure that we want to delete
3886 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3888 if (!LIST_EMPTY(rule_head)) {
3889 struct ice_fltr_mgmt_list_entry *entry;
3890 struct ice_fltr_mgmt_list_entry *tmp;
3892 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3893 ice_fltr_mgmt_list_entry, list_entry) {
3894 LIST_DEL(&entry->list_entry);
3895 ice_free(hw, entry);
3901 * ice_rem_adv_rule_info
3902 * @hw: pointer to the hardware structure
3903 * @rule_head: pointer to the switch list structure that we want to delete
3906 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3908 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3909 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3911 if (LIST_EMPTY(rule_head))
3914 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3915 ice_adv_fltr_mgmt_list_entry, list_entry) {
3916 LIST_DEL(&lst_itr->list_entry);
3917 ice_free(hw, lst_itr->lkups);
3918 ice_free(hw, lst_itr);
3923 * ice_rem_all_sw_rules_info
3924 * @hw: pointer to the hardware structure
3926 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3928 struct ice_switch_info *sw = hw->switch_info;
3931 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3932 struct LIST_HEAD_TYPE *rule_head;
3934 rule_head = &sw->recp_list[i].filt_rules;
3935 if (!sw->recp_list[i].adv_rule)
3936 ice_rem_sw_rule_info(hw, rule_head);
3938 ice_rem_adv_rule_info(hw, rule_head);
3943 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3944 * @pi: pointer to the port_info structure
3945 * @vsi_handle: VSI handle to set as default
3946 * @set: true to add the above mentioned switch rule, false to remove it
3947 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3949 * add filter rule to set/unset given VSI as default VSI for the switch
3950 * (represented by swid)
3953 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3956 struct ice_aqc_sw_rules_elem *s_rule;
3957 struct ice_fltr_info f_info;
3958 struct ice_hw *hw = pi->hw;
3959 enum ice_adminq_opc opcode;
3960 enum ice_status status;
3964 if (!ice_is_vsi_valid(hw, vsi_handle))
3965 return ICE_ERR_PARAM;
3966 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3968 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3969 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3970 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3972 return ICE_ERR_NO_MEMORY;
3974 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3976 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3977 f_info.flag = direction;
3978 f_info.fltr_act = ICE_FWD_TO_VSI;
3979 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3981 if (f_info.flag & ICE_FLTR_RX) {
3982 f_info.src = pi->lport;
3983 f_info.src_id = ICE_SRC_ID_LPORT;
3985 f_info.fltr_rule_id =
3986 pi->dflt_rx_vsi_rule_id;
3987 } else if (f_info.flag & ICE_FLTR_TX) {
3988 f_info.src_id = ICE_SRC_ID_VSI;
3989 f_info.src = hw_vsi_id;
3991 f_info.fltr_rule_id =
3992 pi->dflt_tx_vsi_rule_id;
3996 opcode = ice_aqc_opc_add_sw_rules;
3998 opcode = ice_aqc_opc_remove_sw_rules;
4000 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
4002 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
4003 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
4006 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4008 if (f_info.flag & ICE_FLTR_TX) {
4009 pi->dflt_tx_vsi_num = hw_vsi_id;
4010 pi->dflt_tx_vsi_rule_id = index;
4011 } else if (f_info.flag & ICE_FLTR_RX) {
4012 pi->dflt_rx_vsi_num = hw_vsi_id;
4013 pi->dflt_rx_vsi_rule_id = index;
4016 if (f_info.flag & ICE_FLTR_TX) {
4017 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
4018 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
4019 } else if (f_info.flag & ICE_FLTR_RX) {
4020 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
4021 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
4026 ice_free(hw, s_rule);
4031 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
4032 * @list_head: head of rule list
4033 * @f_info: rule information
4035 * Helper function to search for a unicast rule entry - this is to be used
4036 * to remove unicast MAC filter that is not shared with other VSIs on the
4039 * Returns pointer to entry storing the rule if found
4041 static struct ice_fltr_mgmt_list_entry *
4042 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
4043 struct ice_fltr_info *f_info)
4045 struct ice_fltr_mgmt_list_entry *list_itr;
4047 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4049 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4050 sizeof(f_info->l_data)) &&
4051 f_info->fwd_id.hw_vsi_id ==
4052 list_itr->fltr_info.fwd_id.hw_vsi_id &&
4053 f_info->flag == list_itr->fltr_info.flag)
4060 * ice_remove_mac_rule - remove a MAC based filter rule
4061 * @hw: pointer to the hardware structure
4062 * @m_list: list of MAC addresses and forwarding information
4063 * @recp_list: list from which function remove MAC address
4065 * This function removes either a MAC filter rule or a specific VSI from a
4066 * VSI list for a multicast MAC address.
4068 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
4069 * ice_add_mac. Caller should be aware that this call will only work if all
4070 * the entries passed into m_list were added previously. It will not attempt to
4071 * do a partial remove of entries that were found.
4073 static enum ice_status
4074 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4075 struct ice_sw_recipe *recp_list)
4077 struct ice_fltr_list_entry *list_itr, *tmp;
4078 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4081 return ICE_ERR_PARAM;
4083 rule_lock = &recp_list->filt_rule_lock;
4084 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
4086 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
4087 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
4090 if (l_type != ICE_SW_LKUP_MAC)
4091 return ICE_ERR_PARAM;
4093 vsi_handle = list_itr->fltr_info.vsi_handle;
4094 if (!ice_is_vsi_valid(hw, vsi_handle))
4095 return ICE_ERR_PARAM;
4097 list_itr->fltr_info.fwd_id.hw_vsi_id =
4098 ice_get_hw_vsi_num(hw, vsi_handle);
4099 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4100 /* Don't remove the unicast address that belongs to
4101 * another VSI on the switch, since it is not being
4104 ice_acquire_lock(rule_lock);
4105 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
4106 &list_itr->fltr_info)) {
4107 ice_release_lock(rule_lock);
4108 return ICE_ERR_DOES_NOT_EXIST;
4110 ice_release_lock(rule_lock);
4112 list_itr->status = ice_remove_rule_internal(hw, recp_list,
4114 if (list_itr->status)
4115 return list_itr->status;
4121 * ice_remove_mac - remove a MAC address based filter rule
4122 * @hw: pointer to the hardware structure
4123 * @m_list: list of MAC addresses and forwarding information
4127 ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4129 struct ice_sw_recipe *recp_list;
4131 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4132 return ice_remove_mac_rule(hw, m_list, recp_list);
4136 * ice_remove_vlan_rule - Remove VLAN based filter rule
4137 * @hw: pointer to the hardware structure
4138 * @v_list: list of VLAN entries and forwarding information
4139 * @recp_list: list from which function remove VLAN
4141 static enum ice_status
4142 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4143 struct ice_sw_recipe *recp_list)
4145 struct ice_fltr_list_entry *v_list_itr, *tmp;
4147 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4149 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4151 if (l_type != ICE_SW_LKUP_VLAN)
4152 return ICE_ERR_PARAM;
4153 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4155 if (v_list_itr->status)
4156 return v_list_itr->status;
4162 * ice_remove_vlan - remove a VLAN address based filter rule
4163 * @hw: pointer to the hardware structure
4164 * @v_list: list of VLAN and forwarding information
4168 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4170 struct ice_sw_recipe *recp_list;
4173 return ICE_ERR_PARAM;
4175 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
4176 return ice_remove_vlan_rule(hw, v_list, recp_list);
4180 * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
4181 * @hw: pointer to the hardware structure
4182 * @v_list: list of MAC VLAN entries and forwarding information
4183 * @recp_list: list from which function remove MAC VLAN
4185 static enum ice_status
4186 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4187 struct ice_sw_recipe *recp_list)
4189 struct ice_fltr_list_entry *v_list_itr, *tmp;
4191 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4192 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4194 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4196 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4197 return ICE_ERR_PARAM;
4198 v_list_itr->status =
4199 ice_remove_rule_internal(hw, recp_list,
4201 if (v_list_itr->status)
4202 return v_list_itr->status;
4208 * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
4209 * @hw: pointer to the hardware structure
4210 * @mv_list: list of MAC VLAN and forwarding information
4213 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4215 struct ice_sw_recipe *recp_list;
4217 if (!mv_list || !hw)
4218 return ICE_ERR_PARAM;
4220 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4221 return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
4225 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
4226 * @fm_entry: filter entry to inspect
4227 * @vsi_handle: VSI handle to compare with filter info
4230 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
4232 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
4233 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
4234 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
4235 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
4240 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
4241 * @hw: pointer to the hardware structure
4242 * @vsi_handle: VSI handle to remove filters from
4243 * @vsi_list_head: pointer to the list to add entry to
4244 * @fi: pointer to fltr_info of filter entry to copy & add
4246 * Helper function, used when creating a list of filters to remove from
4247 * a specific VSI. The entry added to vsi_list_head is a COPY of the
4248 * original filter entry, with the exception of fltr_info.fltr_act and
4249 * fltr_info.fwd_id fields. These are set such that later logic can
4250 * extract which VSI to remove the fltr from, and pass on that information.
4252 static enum ice_status
4253 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4254 struct LIST_HEAD_TYPE *vsi_list_head,
4255 struct ice_fltr_info *fi)
4257 struct ice_fltr_list_entry *tmp;
4259 /* this memory is freed up in the caller function
4260 * once filters for this VSI are removed
4262 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
4264 return ICE_ERR_NO_MEMORY;
4266 tmp->fltr_info = *fi;
4268 /* Overwrite these fields to indicate which VSI to remove filter from,
4269 * so find and remove logic can extract the information from the
4270 * list entries. Note that original entries will still have proper
4273 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
4274 tmp->fltr_info.vsi_handle = vsi_handle;
4275 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4277 LIST_ADD(&tmp->list_entry, vsi_list_head);
4283 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4284 * @hw: pointer to the hardware structure
4285 * @vsi_handle: VSI handle to remove filters from
4286 * @lkup_list_head: pointer to the list that has certain lookup type filters
4287 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4289 * Locates all filters in lkup_list_head that are used by the given VSI,
4290 * and adds COPIES of those entries to vsi_list_head (intended to be used
4291 * to remove the listed filters).
4292 * Note that this means all entries in vsi_list_head must be explicitly
4293 * deallocated by the caller when done with list.
4295 static enum ice_status
4296 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4297 struct LIST_HEAD_TYPE *lkup_list_head,
4298 struct LIST_HEAD_TYPE *vsi_list_head)
4300 struct ice_fltr_mgmt_list_entry *fm_entry;
4301 enum ice_status status = ICE_SUCCESS;
4303 /* check to make sure VSI ID is valid and within boundary */
4304 if (!ice_is_vsi_valid(hw, vsi_handle))
4305 return ICE_ERR_PARAM;
4307 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
4308 ice_fltr_mgmt_list_entry, list_entry) {
4309 struct ice_fltr_info *fi;
4311 fi = &fm_entry->fltr_info;
4312 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
4315 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4324 * ice_determine_promisc_mask
4325 * @fi: filter info to parse
4327 * Helper function to determine which ICE_PROMISC_ mask corresponds
4328 * to given filter into.
4330 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4332 u16 vid = fi->l_data.mac_vlan.vlan_id;
4333 u8 *macaddr = fi->l_data.mac.mac_addr;
4334 bool is_tx_fltr = false;
4335 u8 promisc_mask = 0;
4337 if (fi->flag == ICE_FLTR_TX)
4340 if (IS_BROADCAST_ETHER_ADDR(macaddr))
4341 promisc_mask |= is_tx_fltr ?
4342 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4343 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
4344 promisc_mask |= is_tx_fltr ?
4345 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4346 else if (IS_UNICAST_ETHER_ADDR(macaddr))
4347 promisc_mask |= is_tx_fltr ?
4348 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
4350 promisc_mask |= is_tx_fltr ?
4351 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4353 return promisc_mask;
4357 * _ice_get_vsi_promisc - get promiscuous mode of given VSI
4358 * @hw: pointer to the hardware structure
4359 * @vsi_handle: VSI handle to retrieve info from
4360 * @promisc_mask: pointer to mask to be filled in
4361 * @vid: VLAN ID of promisc VLAN VSI
4362 * @sw: pointer to switch info struct for which function add rule
4364 static enum ice_status
4365 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4366 u16 *vid, struct ice_switch_info *sw)
4368 struct ice_fltr_mgmt_list_entry *itr;
4369 struct LIST_HEAD_TYPE *rule_head;
4370 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4372 if (!ice_is_vsi_valid(hw, vsi_handle))
4373 return ICE_ERR_PARAM;
4377 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
4378 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
4380 ice_acquire_lock(rule_lock);
4381 LIST_FOR_EACH_ENTRY(itr, rule_head,
4382 ice_fltr_mgmt_list_entry, list_entry) {
4383 /* Continue if this filter doesn't apply to this VSI or the
4384 * VSI ID is not in the VSI map for this filter
4386 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4389 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4391 ice_release_lock(rule_lock);
4397 * ice_get_vsi_promisc - get promiscuous mode of given VSI
4398 * @hw: pointer to the hardware structure
4399 * @vsi_handle: VSI handle to retrieve info from
4400 * @promisc_mask: pointer to mask to be filled in
4401 * @vid: VLAN ID of promisc VLAN VSI
4404 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4407 return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
4408 vid, hw->switch_info);
4412 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4413 * @hw: pointer to the hardware structure
4414 * @vsi_handle: VSI handle to retrieve info from
4415 * @promisc_mask: pointer to mask to be filled in
4416 * @vid: VLAN ID of promisc VLAN VSI
4417 * @sw: pointer to switch info struct for which function add rule
4419 static enum ice_status
4420 _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4421 u16 *vid, struct ice_switch_info *sw)
4423 struct ice_fltr_mgmt_list_entry *itr;
4424 struct LIST_HEAD_TYPE *rule_head;
4425 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4427 if (!ice_is_vsi_valid(hw, vsi_handle))
4428 return ICE_ERR_PARAM;
4432 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
4433 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
4435 ice_acquire_lock(rule_lock);
4436 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
4438 /* Continue if this filter doesn't apply to this VSI or the
4439 * VSI ID is not in the VSI map for this filter
4441 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4444 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4446 ice_release_lock(rule_lock);
4452 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4453 * @hw: pointer to the hardware structure
4454 * @vsi_handle: VSI handle to retrieve info from
4455 * @promisc_mask: pointer to mask to be filled in
4456 * @vid: VLAN ID of promisc VLAN VSI
4459 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4462 return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
4463 vid, hw->switch_info);
4467 * ice_remove_promisc - Remove promisc based filter rules
4468 * @hw: pointer to the hardware structure
4469 * @recp_id: recipe ID for which the rule needs to removed
4470 * @v_list: list of promisc entries
4472 static enum ice_status
4473 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
4474 struct LIST_HEAD_TYPE *v_list)
4476 struct ice_fltr_list_entry *v_list_itr, *tmp;
4477 struct ice_sw_recipe *recp_list;
4479 recp_list = &hw->switch_info->recp_list[recp_id];
4480 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4482 v_list_itr->status =
4483 ice_remove_rule_internal(hw, recp_list, v_list_itr);
4484 if (v_list_itr->status)
4485 return v_list_itr->status;
4491 * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
4492 * @hw: pointer to the hardware structure
4493 * @vsi_handle: VSI handle to clear mode
4494 * @promisc_mask: mask of promiscuous config bits to clear
4495 * @vid: VLAN ID to clear VLAN promiscuous
4496 * @sw: pointer to switch info struct for which function add rule
4498 static enum ice_status
4499 _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4500 u16 vid, struct ice_switch_info *sw)
4502 struct ice_fltr_list_entry *fm_entry, *tmp;
4503 struct LIST_HEAD_TYPE remove_list_head;
4504 struct ice_fltr_mgmt_list_entry *itr;
4505 struct LIST_HEAD_TYPE *rule_head;
4506 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4507 enum ice_status status = ICE_SUCCESS;
4510 if (!ice_is_vsi_valid(hw, vsi_handle))
4511 return ICE_ERR_PARAM;
4513 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
4514 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4516 recipe_id = ICE_SW_LKUP_PROMISC;
4518 rule_head = &sw->recp_list[recipe_id].filt_rules;
4519 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4521 INIT_LIST_HEAD(&remove_list_head);
4523 ice_acquire_lock(rule_lock);
4524 LIST_FOR_EACH_ENTRY(itr, rule_head,
4525 ice_fltr_mgmt_list_entry, list_entry) {
4526 struct ice_fltr_info *fltr_info;
4527 u8 fltr_promisc_mask = 0;
4529 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4531 fltr_info = &itr->fltr_info;
4533 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4534 vid != fltr_info->l_data.mac_vlan.vlan_id)
4537 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4539 /* Skip if filter is not completely specified by given mask */
4540 if (fltr_promisc_mask & ~promisc_mask)
4543 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4547 ice_release_lock(rule_lock);
4548 goto free_fltr_list;
4551 ice_release_lock(rule_lock);
4553 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
4556 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4557 ice_fltr_list_entry, list_entry) {
4558 LIST_DEL(&fm_entry->list_entry);
4559 ice_free(hw, fm_entry);
4566 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
4567 * @hw: pointer to the hardware structure
4568 * @vsi_handle: VSI handle to clear mode
4569 * @promisc_mask: mask of promiscuous config bits to clear
4570 * @vid: VLAN ID to clear VLAN promiscuous
4573 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
4574 u8 promisc_mask, u16 vid)
4576 return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
4577 vid, hw->switch_info);
4581 * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4582 * @hw: pointer to the hardware structure
4583 * @vsi_handle: VSI handle to configure
4584 * @promisc_mask: mask of promiscuous config bits
4585 * @vid: VLAN ID to set VLAN promiscuous
4586 * @lport: logical port number to configure promisc mode
4587 * @sw: pointer to switch info struct for which function add rule
4589 static enum ice_status
4590 _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4591 u16 vid, u8 lport, struct ice_switch_info *sw)
4593 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4594 struct ice_fltr_list_entry f_list_entry;
4595 struct ice_fltr_info new_fltr;
4596 enum ice_status status = ICE_SUCCESS;
4602 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4604 if (!ice_is_vsi_valid(hw, vsi_handle))
4605 return ICE_ERR_PARAM;
4606 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4608 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
4610 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4611 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4612 new_fltr.l_data.mac_vlan.vlan_id = vid;
4613 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4615 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4616 recipe_id = ICE_SW_LKUP_PROMISC;
4619 /* Separate filters must be set for each direction/packet type
4620 * combination, so we will loop over the mask value, store the
4621 * individual type, and clear it out in the input mask as it
4624 while (promisc_mask) {
4625 struct ice_sw_recipe *recp_list;
4631 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4632 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4633 pkt_type = UCAST_FLTR;
4634 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4635 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4636 pkt_type = UCAST_FLTR;
4638 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4639 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4640 pkt_type = MCAST_FLTR;
4641 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4642 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4643 pkt_type = MCAST_FLTR;
4645 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4646 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4647 pkt_type = BCAST_FLTR;
4648 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4649 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4650 pkt_type = BCAST_FLTR;
4654 /* Check for VLAN promiscuous flag */
4655 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4656 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4657 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4658 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4662 /* Set filter DA based on packet type */
4663 mac_addr = new_fltr.l_data.mac.mac_addr;
4664 if (pkt_type == BCAST_FLTR) {
4665 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
4666 } else if (pkt_type == MCAST_FLTR ||
4667 pkt_type == UCAST_FLTR) {
4668 /* Use the dummy ether header DA */
4669 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
4670 ICE_NONDMA_TO_NONDMA);
4671 if (pkt_type == MCAST_FLTR)
4672 mac_addr[0] |= 0x1; /* Set multicast bit */
4675 /* Need to reset this to zero for all iterations */
4678 new_fltr.flag |= ICE_FLTR_TX;
4679 new_fltr.src = hw_vsi_id;
4681 new_fltr.flag |= ICE_FLTR_RX;
4682 new_fltr.src = lport;
4685 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4686 new_fltr.vsi_handle = vsi_handle;
4687 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4688 f_list_entry.fltr_info = new_fltr;
4689 recp_list = &sw->recp_list[recipe_id];
4691 status = ice_add_rule_internal(hw, recp_list, lport,
4693 if (status != ICE_SUCCESS)
4694 goto set_promisc_exit;
4702 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4703 * @hw: pointer to the hardware structure
4704 * @vsi_handle: VSI handle to configure
4705 * @promisc_mask: mask of promiscuous config bits
4706 * @vid: VLAN ID to set VLAN promiscuous
4709 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4712 return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
4713 hw->port_info->lport,
4718 * _ice_set_vlan_vsi_promisc
4719 * @hw: pointer to the hardware structure
4720 * @vsi_handle: VSI handle to configure
4721 * @promisc_mask: mask of promiscuous config bits
4722 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4723 * @lport: logical port number to configure promisc mode
4724 * @sw: pointer to switch info struct for which function add rule
4726 * Configure VSI with all associated VLANs to given promiscuous mode(s)
4728 static enum ice_status
4729 _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4730 bool rm_vlan_promisc, u8 lport,
4731 struct ice_switch_info *sw)
4733 struct ice_fltr_list_entry *list_itr, *tmp;
4734 struct LIST_HEAD_TYPE vsi_list_head;
4735 struct LIST_HEAD_TYPE *vlan_head;
4736 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
4737 enum ice_status status;
4740 INIT_LIST_HEAD(&vsi_list_head);
4741 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4742 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4743 ice_acquire_lock(vlan_lock);
4744 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4746 ice_release_lock(vlan_lock);
4748 goto free_fltr_list;
4750 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
4752 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4753 if (rm_vlan_promisc)
4754 status = _ice_clear_vsi_promisc(hw, vsi_handle,
4758 status = _ice_set_vsi_promisc(hw, vsi_handle,
4759 promisc_mask, vlan_id,
4766 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
4767 ice_fltr_list_entry, list_entry) {
4768 LIST_DEL(&list_itr->list_entry);
4769 ice_free(hw, list_itr);
4775 * ice_set_vlan_vsi_promisc
4776 * @hw: pointer to the hardware structure
4777 * @vsi_handle: VSI handle to configure
4778 * @promisc_mask: mask of promiscuous config bits
4779 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4781 * Configure VSI with all associated VLANs to given promiscuous mode(s)
4784 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4785 bool rm_vlan_promisc)
4787 return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
4788 rm_vlan_promisc, hw->port_info->lport,
4793 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4794 * @hw: pointer to the hardware structure
4795 * @vsi_handle: VSI handle to remove filters from
4796 * @recp_list: recipe list from which function remove fltr
4797 * @lkup: switch rule filter lookup type
4800 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4801 struct ice_sw_recipe *recp_list,
4802 enum ice_sw_lkup_type lkup)
4804 struct ice_fltr_list_entry *fm_entry;
4805 struct LIST_HEAD_TYPE remove_list_head;
4806 struct LIST_HEAD_TYPE *rule_head;
4807 struct ice_fltr_list_entry *tmp;
4808 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4809 enum ice_status status;
4811 INIT_LIST_HEAD(&remove_list_head);
4812 rule_lock = &recp_list[lkup].filt_rule_lock;
4813 rule_head = &recp_list[lkup].filt_rules;
4814 ice_acquire_lock(rule_lock);
4815 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4817 ice_release_lock(rule_lock);
4822 case ICE_SW_LKUP_MAC:
4823 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
4825 case ICE_SW_LKUP_VLAN:
4826 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
4828 case ICE_SW_LKUP_PROMISC:
4829 case ICE_SW_LKUP_PROMISC_VLAN:
4830 ice_remove_promisc(hw, lkup, &remove_list_head);
4832 case ICE_SW_LKUP_MAC_VLAN:
4833 ice_remove_mac_vlan(hw, &remove_list_head);
4835 case ICE_SW_LKUP_ETHERTYPE:
4836 case ICE_SW_LKUP_ETHERTYPE_MAC:
4837 ice_remove_eth_mac(hw, &remove_list_head);
4839 case ICE_SW_LKUP_DFLT:
4840 ice_debug(hw, ICE_DBG_SW,
4841 "Remove filters for this lookup type hasn't been implemented yet\n");
4843 case ICE_SW_LKUP_LAST:
4844 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
4848 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4849 ice_fltr_list_entry, list_entry) {
4850 LIST_DEL(&fm_entry->list_entry);
4851 ice_free(hw, fm_entry);
4856 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
4857 * @hw: pointer to the hardware structure
4858 * @vsi_handle: VSI handle to remove filters from
4859 * @sw: pointer to switch info struct
4862 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
4863 struct ice_switch_info *sw)
4865 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4867 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4868 sw->recp_list, ICE_SW_LKUP_MAC);
4869 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4870 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
4871 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4872 sw->recp_list, ICE_SW_LKUP_PROMISC);
4873 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4874 sw->recp_list, ICE_SW_LKUP_VLAN);
4875 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4876 sw->recp_list, ICE_SW_LKUP_DFLT);
4877 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4878 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
4879 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4880 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
4881 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4882 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
4886 * ice_remove_vsi_fltr - Remove all filters for a VSI
4887 * @hw: pointer to the hardware structure
4888 * @vsi_handle: VSI handle to remove filters from
4890 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4892 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
4896 * ice_alloc_res_cntr - allocating resource counter
4897 * @hw: pointer to the hardware structure
4898 * @type: type of resource
4899 * @alloc_shared: if set it is shared else dedicated
4900 * @num_items: number of entries requested for FD resource type
4901 * @counter_id: counter index returned by AQ call
4904 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4907 struct ice_aqc_alloc_free_res_elem *buf;
4908 enum ice_status status;
4911 /* Allocate resource */
4912 buf_len = sizeof(*buf);
4913 buf = (struct ice_aqc_alloc_free_res_elem *)
4914 ice_malloc(hw, buf_len);
4916 return ICE_ERR_NO_MEMORY;
4918 buf->num_elems = CPU_TO_LE16(num_items);
4919 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4920 ICE_AQC_RES_TYPE_M) | alloc_shared);
4922 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4923 ice_aqc_opc_alloc_res, NULL);
4927 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
4935 * ice_free_res_cntr - free resource counter
4936 * @hw: pointer to the hardware structure
4937 * @type: type of resource
4938 * @alloc_shared: if set it is shared else dedicated
4939 * @num_items: number of entries to be freed for FD resource type
4940 * @counter_id: counter ID resource which needs to be freed
4943 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4946 struct ice_aqc_alloc_free_res_elem *buf;
4947 enum ice_status status;
4951 buf_len = sizeof(*buf);
4952 buf = (struct ice_aqc_alloc_free_res_elem *)
4953 ice_malloc(hw, buf_len);
4955 return ICE_ERR_NO_MEMORY;
4957 buf->num_elems = CPU_TO_LE16(num_items);
4958 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4959 ICE_AQC_RES_TYPE_M) | alloc_shared);
4960 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
4962 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4963 ice_aqc_opc_free_res, NULL);
4965 ice_debug(hw, ICE_DBG_SW,
4966 "counter resource could not be freed\n");
4973 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
4974 * @hw: pointer to the hardware structure
4975 * @counter_id: returns counter index
4977 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
4979 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4980 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4985 * ice_free_vlan_res_counter - Free counter resource for VLAN type
4986 * @hw: pointer to the hardware structure
4987 * @counter_id: counter index to be freed
4989 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
4991 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4992 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4997 * ice_alloc_res_lg_act - add large action resource
4998 * @hw: pointer to the hardware structure
4999 * @l_id: large action ID to fill it in
5000 * @num_acts: number of actions to hold with a large action entry
5002 static enum ice_status
5003 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
5005 struct ice_aqc_alloc_free_res_elem *sw_buf;
5006 enum ice_status status;
5009 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
5010 return ICE_ERR_PARAM;
5012 /* Allocate resource for large action */
5013 buf_len = sizeof(*sw_buf);
5014 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
5015 ice_malloc(hw, buf_len);
5017 return ICE_ERR_NO_MEMORY;
5019 sw_buf->num_elems = CPU_TO_LE16(1);
5021 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
5022 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
5023 * If num_acts is greater than 2, then use
5024 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
5025 * The num_acts cannot exceed 4. This was ensured at the
5026 * beginning of the function.
5029 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
5030 else if (num_acts == 2)
5031 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
5033 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
5035 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
5036 ice_aqc_opc_alloc_res, NULL);
5038 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
5040 ice_free(hw, sw_buf);
5045 * ice_add_mac_with_sw_marker - add filter with sw marker
5046 * @hw: pointer to the hardware structure
5047 * @f_info: filter info structure containing the MAC filter information
5048 * @sw_marker: sw marker to tag the Rx descriptor with
5051 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
5054 struct ice_fltr_mgmt_list_entry *m_entry;
5055 struct ice_fltr_list_entry fl_info;
5056 struct ice_sw_recipe *recp_list;
5057 struct LIST_HEAD_TYPE l_head;
5058 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5059 enum ice_status ret;
5063 if (f_info->fltr_act != ICE_FWD_TO_VSI)
5064 return ICE_ERR_PARAM;
5066 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5067 return ICE_ERR_PARAM;
5069 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
5070 return ICE_ERR_PARAM;
5072 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5073 return ICE_ERR_PARAM;
5074 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5076 /* Add filter if it doesn't exist so then the adding of large
5077 * action always results in update
5080 INIT_LIST_HEAD(&l_head);
5081 fl_info.fltr_info = *f_info;
5082 LIST_ADD(&fl_info.list_entry, &l_head);
5084 entry_exists = false;
5085 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5086 hw->port_info->lport);
5087 if (ret == ICE_ERR_ALREADY_EXISTS)
5088 entry_exists = true;
5092 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5093 rule_lock = &recp_list->filt_rule_lock;
5094 ice_acquire_lock(rule_lock);
5095 /* Get the book keeping entry for the filter */
5096 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5100 /* If counter action was enabled for this rule then don't enable
5101 * sw marker large action
5103 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5104 ret = ICE_ERR_PARAM;
5108 /* if same marker was added before */
5109 if (m_entry->sw_marker_id == sw_marker) {
5110 ret = ICE_ERR_ALREADY_EXISTS;
5114 /* Allocate a hardware table entry to hold large act. Three actions
5115 * for marker based large action
5117 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
5121 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5124 /* Update the switch rule to add the marker action */
5125 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
5127 ice_release_lock(rule_lock);
5132 ice_release_lock(rule_lock);
5133 /* only remove entry if it did not exist previously */
5135 ret = ice_remove_mac(hw, &l_head);
5141 * ice_add_mac_with_counter - add filter with counter enabled
5142 * @hw: pointer to the hardware structure
5143 * @f_info: pointer to filter info structure containing the MAC filter
5147 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
5149 struct ice_fltr_mgmt_list_entry *m_entry;
5150 struct ice_fltr_list_entry fl_info;
5151 struct ice_sw_recipe *recp_list;
5152 struct LIST_HEAD_TYPE l_head;
5153 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5154 enum ice_status ret;
5159 if (f_info->fltr_act != ICE_FWD_TO_VSI)
5160 return ICE_ERR_PARAM;
5162 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5163 return ICE_ERR_PARAM;
5165 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5166 return ICE_ERR_PARAM;
5167 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5168 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5170 entry_exist = false;
5172 rule_lock = &recp_list->filt_rule_lock;
5174 /* Add filter if it doesn't exist so then the adding of large
5175 * action always results in update
5177 INIT_LIST_HEAD(&l_head);
5179 fl_info.fltr_info = *f_info;
5180 LIST_ADD(&fl_info.list_entry, &l_head);
5182 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5183 hw->port_info->lport);
5184 if (ret == ICE_ERR_ALREADY_EXISTS)
5189 ice_acquire_lock(rule_lock);
5190 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5192 ret = ICE_ERR_BAD_PTR;
5196 /* Don't enable counter for a filter for which sw marker was enabled */
5197 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
5198 ret = ICE_ERR_PARAM;
5202 /* If a counter was already enabled then don't need to add again */
5203 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5204 ret = ICE_ERR_ALREADY_EXISTS;
5208 /* Allocate a hardware table entry to VLAN counter */
5209 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
5213 /* Allocate a hardware table entry to hold large act. Two actions for
5214 * counter based large action
5216 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
5220 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5223 /* Update the switch rule to add the counter action */
5224 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
5226 ice_release_lock(rule_lock);
5231 ice_release_lock(rule_lock);
5232 /* only remove entry if it did not exist previously */
5234 ret = ice_remove_mac(hw, &l_head);
5239 /* This is mapping table entry that maps every word within a given protocol
5240 * structure to the real byte offset as per the specification of that
5242 * for example dst address is 3 words in ethertype header and corresponding
5243 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
5244 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
5245 * matching entry describing its field. This needs to be updated if new
5246 * structure is added to that union.
5248 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
5249 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
5250 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
5251 { ICE_ETYPE_OL, { 0 } },
5252 { ICE_VLAN_OFOS, { 0, 2 } },
5253 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
5254 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
5255 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
5256 26, 28, 30, 32, 34, 36, 38 } },
5257 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
5258 26, 28, 30, 32, 34, 36, 38 } },
5259 { ICE_TCP_IL, { 0, 2 } },
5260 { ICE_UDP_OF, { 0, 2 } },
5261 { ICE_UDP_ILOS, { 0, 2 } },
5262 { ICE_SCTP_IL, { 0, 2 } },
5263 { ICE_VXLAN, { 8, 10, 12, 14 } },
5264 { ICE_GENEVE, { 8, 10, 12, 14 } },
5265 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
5266 { ICE_NVGRE, { 0, 2, 4, 6 } },
5267 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
5268 { ICE_PPPOE, { 0, 2, 4, 6 } },
5269 { ICE_PFCP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
5270 { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
5271 { ICE_ESP, { 0, 2, 4, 6 } },
5272 { ICE_AH, { 0, 2, 4, 6, 8, 10 } },
5273 { ICE_NAT_T, { 8, 10, 12, 14 } },
5276 /* The following table describes preferred grouping of recipes.
5277 * If a recipe that needs to be programmed is a superset or matches one of the
5278 * following combinations, then the recipe needs to be chained as per the
5282 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
5283 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
5284 { ICE_MAC_IL, ICE_MAC_IL_HW },
5285 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
5286 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
5287 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
5288 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
5289 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
5290 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
5291 { ICE_TCP_IL, ICE_TCP_IL_HW },
5292 { ICE_UDP_OF, ICE_UDP_OF_HW },
5293 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
5294 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
5295 { ICE_VXLAN, ICE_UDP_OF_HW },
5296 { ICE_GENEVE, ICE_UDP_OF_HW },
5297 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
5298 { ICE_NVGRE, ICE_GRE_OF_HW },
5299 { ICE_GTP, ICE_UDP_OF_HW },
5300 { ICE_PPPOE, ICE_PPPOE_HW },
5301 { ICE_PFCP, ICE_UDP_ILOS_HW },
5302 { ICE_L2TPV3, ICE_L2TPV3_HW },
5303 { ICE_ESP, ICE_ESP_HW },
5304 { ICE_AH, ICE_AH_HW },
5305 { ICE_NAT_T, ICE_UDP_ILOS_HW },
5309 * ice_find_recp - find a recipe
5310 * @hw: pointer to the hardware structure
5311 * @lkup_exts: extension sequence to match
5313 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
5315 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
5316 enum ice_sw_tunnel_type tun_type)
5318 bool refresh_required = true;
5319 struct ice_sw_recipe *recp;
5322 /* Walk through existing recipes to find a match */
5323 recp = hw->switch_info->recp_list;
5324 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5325 /* If recipe was not created for this ID, in SW bookkeeping,
5326 * check if FW has an entry for this recipe. If the FW has an
5327 * entry update it in our SW bookkeeping and continue with the
5330 if (!recp[i].recp_created)
5331 if (ice_get_recp_frm_fw(hw,
5332 hw->switch_info->recp_list, i,
5336 /* Skip inverse action recipes */
5337 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
5338 ICE_AQ_RECIPE_ACT_INV_ACT)
5341 /* if number of words we are looking for match */
5342 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
5343 struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
5344 struct ice_fv_word *be = lkup_exts->fv_words;
5345 u16 *cr = recp[i].lkup_exts.field_mask;
5346 u16 *de = lkup_exts->field_mask;
5350 /* ar, cr, and qr are related to the recipe words, while
5351 * be, de and pe are related to the lookup words
5353 for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
5354 for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
5356 if (ar[qr].off == be[pe].off &&
5357 ar[qr].prot_id == be[pe].prot_id &&
5359 /* Found the "pe"th word in the
5364 /* After walking through all the words in the
5365 * "i"th recipe if "p"th word was not found then
5366 * this recipe is not what we are looking for.
5367 * So break out from this loop and try the next
5370 if (qr >= recp[i].lkup_exts.n_val_words) {
5375 /* If for "i"th recipe the found was never set to false
5376 * then it means we found our match
5378 if ((tun_type == recp[i].tun_type ||
5379 tun_type == ICE_SW_TUN_AND_NON_TUN) && found)
5380 return i; /* Return the recipe ID */
5383 return ICE_MAX_NUM_RECIPES;
5387 * ice_prot_type_to_id - get protocol ID from protocol type
5388 * @type: protocol type
5389 * @id: pointer to variable that will receive the ID
5391 * Returns true if found, false otherwise
5393 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
5397 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
5398 if (ice_prot_id_tbl[i].type == type) {
5399 *id = ice_prot_id_tbl[i].protocol_id;
5406 * ice_find_valid_words - count valid words
5407 * @rule: advanced rule with lookup information
5408 * @lkup_exts: byte offset extractions of the words that are valid
5410 * calculate valid words in a lookup rule using mask value
5413 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
5414 struct ice_prot_lkup_ext *lkup_exts)
5416 u8 j, word, prot_id, ret_val;
5418 if (!ice_prot_type_to_id(rule->type, &prot_id))
5421 word = lkup_exts->n_val_words;
5423 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
5424 if (((u16 *)&rule->m_u)[j] &&
5425 rule->type < ARRAY_SIZE(ice_prot_ext)) {
5426 /* No more space to accommodate */
5427 if (word >= ICE_MAX_CHAIN_WORDS)
5429 lkup_exts->fv_words[word].off =
5430 ice_prot_ext[rule->type].offs[j];
5431 lkup_exts->fv_words[word].prot_id =
5432 ice_prot_id_tbl[rule->type].protocol_id;
5433 lkup_exts->field_mask[word] =
5434 BE16_TO_CPU(((__be16 *)&rule->m_u)[j]);
5438 ret_val = word - lkup_exts->n_val_words;
5439 lkup_exts->n_val_words = word;
5445 * ice_create_first_fit_recp_def - Create a recipe grouping
5446 * @hw: pointer to the hardware structure
5447 * @lkup_exts: an array of protocol header extractions
5448 * @rg_list: pointer to a list that stores new recipe groups
5449 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
5451 * Using first fit algorithm, take all the words that are still not done
5452 * and start grouping them in 4-word groups. Each group makes up one
5455 static enum ice_status
5456 ice_create_first_fit_recp_def(struct ice_hw *hw,
5457 struct ice_prot_lkup_ext *lkup_exts,
5458 struct LIST_HEAD_TYPE *rg_list,
5461 struct ice_pref_recipe_group *grp = NULL;
5466 if (!lkup_exts->n_val_words) {
5467 struct ice_recp_grp_entry *entry;
5469 entry = (struct ice_recp_grp_entry *)
5470 ice_malloc(hw, sizeof(*entry));
5472 return ICE_ERR_NO_MEMORY;
5473 LIST_ADD(&entry->l_entry, rg_list);
5474 grp = &entry->r_group;
5476 grp->n_val_pairs = 0;
5479 /* Walk through every word in the rule to check if it is not done. If so
5480 * then this word needs to be part of a new recipe.
5482 for (j = 0; j < lkup_exts->n_val_words; j++)
5483 if (!ice_is_bit_set(lkup_exts->done, j)) {
5485 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
5486 struct ice_recp_grp_entry *entry;
5488 entry = (struct ice_recp_grp_entry *)
5489 ice_malloc(hw, sizeof(*entry));
5491 return ICE_ERR_NO_MEMORY;
5492 LIST_ADD(&entry->l_entry, rg_list);
5493 grp = &entry->r_group;
5497 grp->pairs[grp->n_val_pairs].prot_id =
5498 lkup_exts->fv_words[j].prot_id;
5499 grp->pairs[grp->n_val_pairs].off =
5500 lkup_exts->fv_words[j].off;
5501 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
5509 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
5510 * @hw: pointer to the hardware structure
5511 * @fv_list: field vector with the extraction sequence information
5512 * @rg_list: recipe groupings with protocol-offset pairs
5514 * Helper function to fill in the field vector indices for protocol-offset
5515 * pairs. These indexes are then ultimately programmed into a recipe.
5517 static enum ice_status
5518 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
5519 struct LIST_HEAD_TYPE *rg_list)
5521 struct ice_sw_fv_list_entry *fv;
5522 struct ice_recp_grp_entry *rg;
5523 struct ice_fv_word *fv_ext;
5525 if (LIST_EMPTY(fv_list))
5528 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
5529 fv_ext = fv->fv_ptr->ew;
5531 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
5534 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
5535 struct ice_fv_word *pr;
5540 pr = &rg->r_group.pairs[i];
5541 mask = rg->r_group.mask[i];
5543 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
5544 if (fv_ext[j].prot_id == pr->prot_id &&
5545 fv_ext[j].off == pr->off) {
5548 /* Store index of field vector */
5550 rg->fv_mask[i] = mask;
5554 /* Protocol/offset could not be found, caller gave an
5558 return ICE_ERR_PARAM;
5566 * ice_find_free_recp_res_idx - find free result indexes for recipe
5567 * @hw: pointer to hardware structure
5568 * @profiles: bitmap of profiles that will be associated with the new recipe
5569 * @free_idx: pointer to variable to receive the free index bitmap
5571 * The algorithm used here is:
5572 * 1. When creating a new recipe, create a set P which contains all
5573 * Profiles that will be associated with our new recipe
5575 * 2. For each Profile p in set P:
5576 * a. Add all recipes associated with Profile p into set R
5577 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
5578 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
5579 * i. Or just assume they all have the same possible indexes:
5581 * i.e., PossibleIndexes = 0x0000F00000000000
5583 * 3. For each Recipe r in set R:
5584 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
5585 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
5587 * FreeIndexes will contain the bits indicating the indexes free for use,
5588 * then the code needs to update the recipe[r].used_result_idx_bits to
5589 * indicate which indexes were selected for use by this recipe.
5592 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
5593 ice_bitmap_t *free_idx)
5595 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5596 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5597 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
5601 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5602 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5603 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
5604 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
5606 for (count = 0; count < ICE_MAX_FV_WORDS; count++)
5607 ice_set_bit(count, possible_idx);
5609 /* For each profile we are going to associate the recipe with, add the
5610 * recipes that are associated with that profile. This will give us
5611 * the set of recipes that our recipe may collide with. Also, determine
5612 * what possible result indexes are usable given this set of profiles.
5615 while (ICE_MAX_NUM_PROFILES >
5616 (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
5617 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
5618 ICE_MAX_NUM_RECIPES);
5619 ice_and_bitmap(possible_idx, possible_idx,
5620 hw->switch_info->prof_res_bm[bit],
5625 /* For each recipe that our new recipe may collide with, determine
5626 * which indexes have been used.
5628 for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
5629 if (ice_is_bit_set(recipes, bit)) {
5630 ice_or_bitmap(used_idx, used_idx,
5631 hw->switch_info->recp_list[bit].res_idxs,
5635 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
5637 /* return number of free indexes */
5640 while (ICE_MAX_FV_WORDS >
5641 (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
5650 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
5651 * @hw: pointer to hardware structure
5652 * @rm: recipe management list entry
5653 * @match_tun: if field vector index for tunnel needs to be programmed
5654 * @profiles: bitmap of profiles that will be assocated.
5656 static enum ice_status
5657 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
5658 bool match_tun, ice_bitmap_t *profiles)
5660 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5661 struct ice_aqc_recipe_data_elem *tmp;
5662 struct ice_aqc_recipe_data_elem *buf;
5663 struct ice_recp_grp_entry *entry;
5664 enum ice_status status;
5670 /* When more than one recipe are required, another recipe is needed to
5671 * chain them together. Matching a tunnel metadata ID takes up one of
5672 * the match fields in the chaining recipe reducing the number of
5673 * chained recipes by one.
5675 /* check number of free result indices */
5676 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5677 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
5679 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
5680 free_res_idx, rm->n_grp_count);
5682 if (rm->n_grp_count > 1) {
5683 if (rm->n_grp_count > free_res_idx)
5684 return ICE_ERR_MAX_LIMIT;
5689 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
5690 return ICE_ERR_MAX_LIMIT;
5692 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
5693 ICE_MAX_NUM_RECIPES,
5696 return ICE_ERR_NO_MEMORY;
5698 buf = (struct ice_aqc_recipe_data_elem *)
5699 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
5701 status = ICE_ERR_NO_MEMORY;
5705 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
5706 recipe_count = ICE_MAX_NUM_RECIPES;
5707 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5709 if (status || recipe_count == 0)
5712 /* Allocate the recipe resources, and configure them according to the
5713 * match fields from protocol headers and extracted field vectors.
5715 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
5716 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5719 status = ice_alloc_recipe(hw, &entry->rid);
5723 /* Clear the result index of the located recipe, as this will be
5724 * updated, if needed, later in the recipe creation process.
5726 tmp[0].content.result_indx = 0;
5728 buf[recps] = tmp[0];
5729 buf[recps].recipe_indx = (u8)entry->rid;
5730 /* if the recipe is a non-root recipe RID should be programmed
5731 * as 0 for the rules to be applied correctly.
5733 buf[recps].content.rid = 0;
5734 ice_memset(&buf[recps].content.lkup_indx, 0,
5735 sizeof(buf[recps].content.lkup_indx),
5738 /* All recipes use look-up index 0 to match switch ID. */
5739 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5740 buf[recps].content.mask[0] =
5741 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5742 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5745 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5746 buf[recps].content.lkup_indx[i] = 0x80;
5747 buf[recps].content.mask[i] = 0;
5750 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5751 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5752 buf[recps].content.mask[i + 1] =
5753 CPU_TO_LE16(entry->fv_mask[i]);
5756 if (rm->n_grp_count > 1) {
5757 /* Checks to see if there really is a valid result index
5760 if (chain_idx >= ICE_MAX_FV_WORDS) {
5761 ice_debug(hw, ICE_DBG_SW,
5762 "No chain index available\n");
5763 status = ICE_ERR_MAX_LIMIT;
5767 entry->chain_idx = chain_idx;
5768 buf[recps].content.result_indx =
5769 ICE_AQ_RECIPE_RESULT_EN |
5770 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5771 ICE_AQ_RECIPE_RESULT_DATA_M);
5772 ice_clear_bit(chain_idx, result_idx_bm);
5773 chain_idx = ice_find_first_bit(result_idx_bm,
5777 /* fill recipe dependencies */
5778 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
5779 ICE_MAX_NUM_RECIPES);
5780 ice_set_bit(buf[recps].recipe_indx,
5781 (ice_bitmap_t *)buf[recps].recipe_bitmap);
5782 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5786 if (rm->n_grp_count == 1) {
5787 rm->root_rid = buf[0].recipe_indx;
5788 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
5789 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5790 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5791 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5792 sizeof(buf[0].recipe_bitmap),
5793 ICE_NONDMA_TO_NONDMA);
5795 status = ICE_ERR_BAD_PTR;
5798 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
5799 * the recipe which is getting created if specified
5800 * by user. Usually any advanced switch filter, which results
5801 * into new extraction sequence, ended up creating a new recipe
5802 * of type ROOT and usually recipes are associated with profiles
5803 * Switch rule referreing newly created recipe, needs to have
5804 * either/or 'fwd' or 'join' priority, otherwise switch rule
5805 * evaluation will not happen correctly. In other words, if
5806 * switch rule to be evaluated on priority basis, then recipe
5807 * needs to have priority, otherwise it will be evaluated last.
5809 buf[0].content.act_ctrl_fwd_priority = rm->priority;
5811 struct ice_recp_grp_entry *last_chain_entry;
5814 /* Allocate the last recipe that will chain the outcomes of the
5815 * other recipes together
5817 status = ice_alloc_recipe(hw, &rid);
5821 buf[recps].recipe_indx = (u8)rid;
5822 buf[recps].content.rid = (u8)rid;
5823 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5824 /* the new entry created should also be part of rg_list to
5825 * make sure we have complete recipe
5827 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
5828 sizeof(*last_chain_entry));
5829 if (!last_chain_entry) {
5830 status = ICE_ERR_NO_MEMORY;
5833 last_chain_entry->rid = rid;
5834 ice_memset(&buf[recps].content.lkup_indx, 0,
5835 sizeof(buf[recps].content.lkup_indx),
5837 /* All recipes use look-up index 0 to match switch ID. */
5838 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5839 buf[recps].content.mask[0] =
5840 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5841 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5842 buf[recps].content.lkup_indx[i] =
5843 ICE_AQ_RECIPE_LKUP_IGNORE;
5844 buf[recps].content.mask[i] = 0;
5848 /* update r_bitmap with the recp that is used for chaining */
5849 ice_set_bit(rid, rm->r_bitmap);
5850 /* this is the recipe that chains all the other recipes so it
5851 * should not have a chaining ID to indicate the same
5853 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
5854 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
5856 last_chain_entry->fv_idx[i] = entry->chain_idx;
5857 buf[recps].content.lkup_indx[i] = entry->chain_idx;
5858 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
5859 ice_set_bit(entry->rid, rm->r_bitmap);
5861 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
5862 if (sizeof(buf[recps].recipe_bitmap) >=
5863 sizeof(rm->r_bitmap)) {
5864 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5865 sizeof(buf[recps].recipe_bitmap),
5866 ICE_NONDMA_TO_NONDMA);
5868 status = ICE_ERR_BAD_PTR;
5871 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5873 /* To differentiate among different UDP tunnels, a meta data ID
5877 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
5878 buf[recps].content.mask[i] =
5879 CPU_TO_LE16(ICE_TUN_FLAG_MASK);
5883 rm->root_rid = (u8)rid;
5885 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5889 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5890 ice_release_change_lock(hw);
5894 /* Every recipe that just got created add it to the recipe
5897 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5898 struct ice_switch_info *sw = hw->switch_info;
5899 bool is_root, idx_found = false;
5900 struct ice_sw_recipe *recp;
5901 u16 idx, buf_idx = 0;
5903 /* find buffer index for copying some data */
5904 for (idx = 0; idx < rm->n_grp_count; idx++)
5905 if (buf[idx].recipe_indx == entry->rid) {
5911 status = ICE_ERR_OUT_OF_RANGE;
5915 recp = &sw->recp_list[entry->rid];
5916 is_root = (rm->root_rid == entry->rid);
5917 recp->is_root = is_root;
5919 recp->root_rid = entry->rid;
5920 recp->big_recp = (is_root && rm->n_grp_count > 1);
5922 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
5923 entry->r_group.n_val_pairs *
5924 sizeof(struct ice_fv_word),
5925 ICE_NONDMA_TO_NONDMA);
5927 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5928 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
5930 /* Copy non-result fv index values and masks to recipe. This
5931 * call will also update the result recipe bitmask.
5933 ice_collect_result_idx(&buf[buf_idx], recp);
5935 /* for non-root recipes, also copy to the root, this allows
5936 * easier matching of a complete chained recipe
5939 ice_collect_result_idx(&buf[buf_idx],
5940 &sw->recp_list[rm->root_rid]);
5942 recp->n_ext_words = entry->r_group.n_val_pairs;
5943 recp->chain_idx = entry->chain_idx;
5944 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5945 recp->n_grp_count = rm->n_grp_count;
5946 recp->tun_type = rm->tun_type;
5947 recp->recp_created = true;
5962 * ice_create_recipe_group - creates recipe group
5963 * @hw: pointer to hardware structure
5964 * @rm: recipe management list entry
5965 * @lkup_exts: lookup elements
5967 static enum ice_status
5968 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5969 struct ice_prot_lkup_ext *lkup_exts)
5971 enum ice_status status;
5974 rm->n_grp_count = 0;
5976 /* Create recipes for words that are marked not done by packing them
5979 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5980 &rm->rg_list, &recp_count);
5982 rm->n_grp_count += recp_count;
5983 rm->n_ext_words = lkup_exts->n_val_words;
5984 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
5985 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
5986 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
5987 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
5994 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
5995 * @hw: pointer to hardware structure
5996 * @lkups: lookup elements or match criteria for the advanced recipe, one
5997 * structure per protocol header
5998 * @lkups_cnt: number of protocols
5999 * @bm: bitmap of field vectors to consider
6000 * @fv_list: pointer to a list that holds the returned field vectors
6002 static enum ice_status
6003 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6004 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
6006 enum ice_status status;
6013 prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
6015 return ICE_ERR_NO_MEMORY;
6017 for (i = 0; i < lkups_cnt; i++)
6018 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
6019 status = ICE_ERR_CFG;
6023 /* Find field vectors that include all specified protocol types */
6024 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
6027 ice_free(hw, prot_ids);
6032 * ice_tun_type_match_mask - determine if tun type needs a match mask
6033 * @tun_type: tunnel type
6034 * @mask: mask to be used for the tunnel
6036 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
6039 case ICE_SW_TUN_VXLAN_GPE:
6040 case ICE_SW_TUN_NVGRE:
6041 case ICE_SW_TUN_UDP:
6042 case ICE_ALL_TUNNELS:
6043 *mask = ICE_TUN_FLAG_MASK;
6053 * ice_add_special_words - Add words that are not protocols, such as metadata
6054 * @rinfo: other information regarding the rule e.g. priority and action info
6055 * @lkup_exts: lookup word structure
6057 static enum ice_status
6058 ice_add_special_words(struct ice_adv_rule_info *rinfo,
6059 struct ice_prot_lkup_ext *lkup_exts)
6063 /* If this is a tunneled packet, then add recipe index to match the
6064 * tunnel bit in the packet metadata flags.
6066 if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
6067 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
6068 u8 word = lkup_exts->n_val_words++;
6070 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
6071 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
6072 lkup_exts->field_mask[word] = mask;
6074 return ICE_ERR_MAX_LIMIT;
6081 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
6082 * @hw: pointer to hardware structure
6083 * @rinfo: other information regarding the rule e.g. priority and action info
6084 * @bm: pointer to memory for returning the bitmap of field vectors
6087 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
6090 enum ice_prof_type prof_type;
6092 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
6094 switch (rinfo->tun_type) {
6096 prof_type = ICE_PROF_NON_TUN;
6098 case ICE_ALL_TUNNELS:
6099 prof_type = ICE_PROF_TUN_ALL;
6101 case ICE_SW_TUN_VXLAN_GPE:
6102 case ICE_SW_TUN_GENEVE:
6103 case ICE_SW_TUN_VXLAN:
6104 case ICE_SW_TUN_UDP:
6105 case ICE_SW_TUN_GTP:
6106 prof_type = ICE_PROF_TUN_UDP;
6108 case ICE_SW_TUN_NVGRE:
6109 prof_type = ICE_PROF_TUN_GRE;
6111 case ICE_SW_TUN_PPPOE:
6112 prof_type = ICE_PROF_TUN_PPPOE;
6114 case ICE_SW_TUN_PROFID_IPV6_ESP:
6115 case ICE_SW_TUN_IPV6_ESP:
6116 ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
6118 case ICE_SW_TUN_PROFID_IPV6_AH:
6119 case ICE_SW_TUN_IPV6_AH:
6120 ice_set_bit(ICE_PROFID_IPV6_AH, bm);
6122 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6123 case ICE_SW_TUN_IPV6_L2TPV3:
6124 ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
6126 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6127 case ICE_SW_TUN_IPV6_NAT_T:
6128 ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
6130 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6131 ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
6133 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6134 ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
6136 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6137 ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
6139 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6140 ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
6142 case ICE_SW_TUN_IPV4_NAT_T:
6143 ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
6145 case ICE_SW_TUN_IPV4_L2TPV3:
6146 ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
6148 case ICE_SW_TUN_IPV4_ESP:
6149 ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
6151 case ICE_SW_TUN_IPV4_AH:
6152 ice_set_bit(ICE_PROFID_IPV4_AH, bm);
6154 case ICE_SW_TUN_AND_NON_TUN:
6156 prof_type = ICE_PROF_ALL;
6160 ice_get_sw_fv_bitmap(hw, prof_type, bm);
6164 * ice_is_prof_rule - determine if rule type is a profile rule
6165 * @type: the rule type
6167 * if the rule type is a profile rule, that means that there no field value
6168 * match required, in this case just a profile hit is required.
6170 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
6173 case ICE_SW_TUN_PROFID_IPV6_ESP:
6174 case ICE_SW_TUN_PROFID_IPV6_AH:
6175 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6176 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6177 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6178 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6179 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6180 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6190 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
6191 * @hw: pointer to hardware structure
6192 * @lkups: lookup elements or match criteria for the advanced recipe, one
6193 * structure per protocol header
6194 * @lkups_cnt: number of protocols
6195 * @rinfo: other information regarding the rule e.g. priority and action info
6196 * @rid: return the recipe ID of the recipe created
6198 static enum ice_status
6199 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6200 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
6202 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
6203 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
6204 struct ice_prot_lkup_ext *lkup_exts;
6205 struct ice_recp_grp_entry *r_entry;
6206 struct ice_sw_fv_list_entry *fvit;
6207 struct ice_recp_grp_entry *r_tmp;
6208 struct ice_sw_fv_list_entry *tmp;
6209 enum ice_status status = ICE_SUCCESS;
6210 struct ice_sw_recipe *rm;
6211 bool match_tun = false;
6215 if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
6216 return ICE_ERR_PARAM;
6218 lkup_exts = (struct ice_prot_lkup_ext *)
6219 ice_malloc(hw, sizeof(*lkup_exts));
6221 return ICE_ERR_NO_MEMORY;
6223 /* Determine the number of words to be matched and if it exceeds a
6224 * recipe's restrictions
6226 for (i = 0; i < lkups_cnt; i++) {
6229 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
6230 status = ICE_ERR_CFG;
6231 goto err_free_lkup_exts;
6234 count = ice_fill_valid_words(&lkups[i], lkup_exts);
6236 status = ICE_ERR_CFG;
6237 goto err_free_lkup_exts;
6241 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
6243 status = ICE_ERR_NO_MEMORY;
6244 goto err_free_lkup_exts;
6247 /* Get field vectors that contain fields extracted from all the protocol
6248 * headers being programmed.
6250 INIT_LIST_HEAD(&rm->fv_list);
6251 INIT_LIST_HEAD(&rm->rg_list);
6253 /* Get bitmap of field vectors (profiles) that are compatible with the
6254 * rule request; only these will be searched in the subsequent call to
6257 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
6259 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
6263 /* Group match words into recipes using preferred recipe grouping
6266 status = ice_create_recipe_group(hw, rm, lkup_exts);
6270 /* For certain tunnel types it is necessary to use a metadata ID flag to
6271 * differentiate different tunnel types. A separate recipe needs to be
6272 * used for the metadata.
6274 if (ice_tun_type_match_word(rinfo->tun_type, &mask) &&
6275 rm->n_grp_count > 1)
6278 /* set the recipe priority if specified */
6279 rm->priority = (u8)rinfo->priority;
6281 /* Find offsets from the field vector. Pick the first one for all the
6284 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
6288 /* An empty FV list means to use all the profiles returned in the
6291 if (LIST_EMPTY(&rm->fv_list)) {
6294 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++)
6295 if (ice_is_bit_set(fv_bitmap, j)) {
6296 struct ice_sw_fv_list_entry *fvl;
6298 fvl = (struct ice_sw_fv_list_entry *)
6299 ice_malloc(hw, sizeof(*fvl));
6303 fvl->profile_id = j;
6304 LIST_ADD(&fvl->list_entry, &rm->fv_list);
6308 /* get bitmap of all profiles the recipe will be associated with */
6309 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
6310 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
6312 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
6313 ice_set_bit((u16)fvit->profile_id, profiles);
6316 /* Create any special protocol/offset pairs, such as looking at tunnel
6317 * bits by extracting metadata
6319 status = ice_add_special_words(rinfo, lkup_exts);
6321 goto err_free_lkup_exts;
6323 /* Look for a recipe which matches our requested fv / mask list */
6324 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
6325 if (*rid < ICE_MAX_NUM_RECIPES)
6326 /* Success if found a recipe that match the existing criteria */
6329 rm->tun_type = rinfo->tun_type;
6330 /* Recipe we need does not exist, add a recipe */
6331 status = ice_add_sw_recipe(hw, rm, match_tun, profiles);
6335 /* Associate all the recipes created with all the profiles in the
6336 * common field vector.
6338 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
6340 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
6343 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
6344 (u8 *)r_bitmap, NULL);
6348 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
6349 ICE_MAX_NUM_RECIPES);
6350 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
6354 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
6357 ice_release_change_lock(hw);
6362 /* Update profile to recipe bitmap array */
6363 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
6364 ICE_MAX_NUM_RECIPES);
6366 /* Update recipe to profile bitmap array */
6367 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
6368 if (ice_is_bit_set(r_bitmap, j))
6369 ice_set_bit((u16)fvit->profile_id,
6370 recipe_to_profile[j]);
6373 *rid = rm->root_rid;
6374 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
6375 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
6377 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
6378 ice_recp_grp_entry, l_entry) {
6379 LIST_DEL(&r_entry->l_entry);
6380 ice_free(hw, r_entry);
6383 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
6385 LIST_DEL(&fvit->list_entry);
6390 ice_free(hw, rm->root_buf);
6395 ice_free(hw, lkup_exts);
6401 * ice_find_dummy_packet - find dummy packet by tunnel type
6403 * @lkups: lookup elements or match criteria for the advanced recipe, one
6404 * structure per protocol header
6405 * @lkups_cnt: number of protocols
6406 * @tun_type: tunnel type from the match criteria
6407 * @pkt: dummy packet to fill according to filter match criteria
6408 * @pkt_len: packet length of dummy packet
6409 * @offsets: pointer to receive the pointer to the offsets for the packet
6412 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6413 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
6415 const struct ice_dummy_pkt_offsets **offsets)
6417 bool tcp = false, udp = false, ipv6 = false, vlan = false;
6421 for (i = 0; i < lkups_cnt; i++) {
6422 if (lkups[i].type == ICE_UDP_ILOS)
6424 else if (lkups[i].type == ICE_TCP_IL)
6426 else if (lkups[i].type == ICE_IPV6_OFOS)
6428 else if (lkups[i].type == ICE_VLAN_OFOS)
6430 else if (lkups[i].type == ICE_IPV4_OFOS &&
6431 lkups[i].h_u.ipv4_hdr.protocol ==
6432 ICE_IPV4_NVGRE_PROTO_ID &&
6433 lkups[i].m_u.ipv4_hdr.protocol ==
6436 else if (lkups[i].type == ICE_PPPOE &&
6437 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
6438 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
6439 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
6442 else if (lkups[i].type == ICE_ETYPE_OL &&
6443 lkups[i].h_u.ethertype.ethtype_id ==
6444 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
6445 lkups[i].m_u.ethertype.ethtype_id ==
6450 if (tun_type == ICE_SW_TUN_IPV4_ESP) {
6451 *pkt = dummy_ipv4_esp_pkt;
6452 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
6453 *offsets = dummy_ipv4_esp_packet_offsets;
6457 if (tun_type == ICE_SW_TUN_IPV6_ESP) {
6458 *pkt = dummy_ipv6_esp_pkt;
6459 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
6460 *offsets = dummy_ipv6_esp_packet_offsets;
6464 if (tun_type == ICE_SW_TUN_IPV4_AH) {
6465 *pkt = dummy_ipv4_ah_pkt;
6466 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
6467 *offsets = dummy_ipv4_ah_packet_offsets;
6471 if (tun_type == ICE_SW_TUN_IPV6_AH) {
6472 *pkt = dummy_ipv6_ah_pkt;
6473 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
6474 *offsets = dummy_ipv6_ah_packet_offsets;
6478 if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
6479 *pkt = dummy_ipv4_nat_pkt;
6480 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
6481 *offsets = dummy_ipv4_nat_packet_offsets;
6485 if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
6486 *pkt = dummy_ipv6_nat_pkt;
6487 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
6488 *offsets = dummy_ipv6_nat_packet_offsets;
6492 if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
6493 *pkt = dummy_ipv4_l2tpv3_pkt;
6494 *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
6495 *offsets = dummy_ipv4_l2tpv3_packet_offsets;
6499 if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
6500 *pkt = dummy_ipv6_l2tpv3_pkt;
6501 *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
6502 *offsets = dummy_ipv6_l2tpv3_packet_offsets;
6506 if (tun_type == ICE_SW_TUN_GTP) {
6507 *pkt = dummy_udp_gtp_packet;
6508 *pkt_len = sizeof(dummy_udp_gtp_packet);
6509 *offsets = dummy_udp_gtp_packet_offsets;
6512 if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
6513 *pkt = dummy_pppoe_ipv6_packet;
6514 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
6515 *offsets = dummy_pppoe_packet_offsets;
6517 } else if (tun_type == ICE_SW_TUN_PPPOE) {
6518 *pkt = dummy_pppoe_ipv4_packet;
6519 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
6520 *offsets = dummy_pppoe_packet_offsets;
6524 if (tun_type == ICE_ALL_TUNNELS) {
6525 *pkt = dummy_gre_udp_packet;
6526 *pkt_len = sizeof(dummy_gre_udp_packet);
6527 *offsets = dummy_gre_udp_packet_offsets;
6531 if (tun_type == ICE_SW_TUN_NVGRE || gre) {
6533 *pkt = dummy_gre_tcp_packet;
6534 *pkt_len = sizeof(dummy_gre_tcp_packet);
6535 *offsets = dummy_gre_tcp_packet_offsets;
6539 *pkt = dummy_gre_udp_packet;
6540 *pkt_len = sizeof(dummy_gre_udp_packet);
6541 *offsets = dummy_gre_udp_packet_offsets;
6545 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
6546 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP) {
6548 *pkt = dummy_udp_tun_tcp_packet;
6549 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
6550 *offsets = dummy_udp_tun_tcp_packet_offsets;
6554 *pkt = dummy_udp_tun_udp_packet;
6555 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
6556 *offsets = dummy_udp_tun_udp_packet_offsets;
6562 *pkt = dummy_vlan_udp_packet;
6563 *pkt_len = sizeof(dummy_vlan_udp_packet);
6564 *offsets = dummy_vlan_udp_packet_offsets;
6567 *pkt = dummy_udp_packet;
6568 *pkt_len = sizeof(dummy_udp_packet);
6569 *offsets = dummy_udp_packet_offsets;
6571 } else if (udp && ipv6) {
6573 *pkt = dummy_vlan_udp_ipv6_packet;
6574 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
6575 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
6578 *pkt = dummy_udp_ipv6_packet;
6579 *pkt_len = sizeof(dummy_udp_ipv6_packet);
6580 *offsets = dummy_udp_ipv6_packet_offsets;
6582 } else if ((tcp && ipv6) || ipv6) {
6584 *pkt = dummy_vlan_tcp_ipv6_packet;
6585 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
6586 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
6589 *pkt = dummy_tcp_ipv6_packet;
6590 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
6591 *offsets = dummy_tcp_ipv6_packet_offsets;
6596 *pkt = dummy_vlan_tcp_packet;
6597 *pkt_len = sizeof(dummy_vlan_tcp_packet);
6598 *offsets = dummy_vlan_tcp_packet_offsets;
6600 *pkt = dummy_tcp_packet;
6601 *pkt_len = sizeof(dummy_tcp_packet);
6602 *offsets = dummy_tcp_packet_offsets;
6607 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
6609 * @lkups: lookup elements or match criteria for the advanced recipe, one
6610 * structure per protocol header
6611 * @lkups_cnt: number of protocols
6612 * @s_rule: stores rule information from the match criteria
6613 * @dummy_pkt: dummy packet to fill according to filter match criteria
6614 * @pkt_len: packet length of dummy packet
6615 * @offsets: offset info for the dummy packet
6617 static enum ice_status
6618 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6619 struct ice_aqc_sw_rules_elem *s_rule,
6620 const u8 *dummy_pkt, u16 pkt_len,
6621 const struct ice_dummy_pkt_offsets *offsets)
6626 /* Start with a packet with a pre-defined/dummy content. Then, fill
6627 * in the header values to be looked up or matched.
6629 pkt = s_rule->pdata.lkup_tx_rx.hdr;
6631 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
6633 for (i = 0; i < lkups_cnt; i++) {
6634 enum ice_protocol_type type;
6635 u16 offset = 0, len = 0, j;
6638 /* find the start of this layer; it should be found since this
6639 * was already checked when search for the dummy packet
6641 type = lkups[i].type;
6642 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
6643 if (type == offsets[j].type) {
6644 offset = offsets[j].offset;
6649 /* this should never happen in a correct calling sequence */
6651 return ICE_ERR_PARAM;
6653 switch (lkups[i].type) {
6656 len = sizeof(struct ice_ether_hdr);
6659 len = sizeof(struct ice_ethtype_hdr);
6662 len = sizeof(struct ice_vlan_hdr);
6666 len = sizeof(struct ice_ipv4_hdr);
6670 len = sizeof(struct ice_ipv6_hdr);
6675 len = sizeof(struct ice_l4_hdr);
6678 len = sizeof(struct ice_sctp_hdr);
6681 len = sizeof(struct ice_nvgre);
6686 len = sizeof(struct ice_udp_tnl_hdr);
6690 len = sizeof(struct ice_udp_gtp_hdr);
6693 len = sizeof(struct ice_pppoe_hdr);
6696 len = sizeof(struct ice_esp_hdr);
6699 len = sizeof(struct ice_nat_t_hdr);
6702 len = sizeof(struct ice_ah_hdr);
6705 len = sizeof(struct ice_l2tpv3_sess_hdr);
6708 return ICE_ERR_PARAM;
6711 /* the length should be a word multiple */
6712 if (len % ICE_BYTES_PER_WORD)
6715 /* We have the offset to the header start, the length, the
6716 * caller's header values and mask. Use this information to
6717 * copy the data into the dummy packet appropriately based on
6718 * the mask. Note that we need to only write the bits as
6719 * indicated by the mask to make sure we don't improperly write
6720 * over any significant packet data.
6722 for (j = 0; j < len / sizeof(u16); j++)
6723 if (((u16 *)&lkups[i].m_u)[j])
6724 ((u16 *)(pkt + offset))[j] =
6725 (((u16 *)(pkt + offset))[j] &
6726 ~((u16 *)&lkups[i].m_u)[j]) |
6727 (((u16 *)&lkups[i].h_u)[j] &
6728 ((u16 *)&lkups[i].m_u)[j]);
6731 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
6737 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
6738 * @hw: pointer to the hardware structure
6739 * @tun_type: tunnel type
6740 * @pkt: dummy packet to fill in
6741 * @offsets: offset info for the dummy packet
6743 static enum ice_status
6744 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
6745 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
6750 case ICE_SW_TUN_AND_NON_TUN:
6751 case ICE_SW_TUN_VXLAN_GPE:
6752 case ICE_SW_TUN_VXLAN:
6753 case ICE_SW_TUN_UDP:
6754 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
6758 case ICE_SW_TUN_GENEVE:
6759 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
6764 /* Nothing needs to be done for this tunnel type */
6768 /* Find the outer UDP protocol header and insert the port number */
6769 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
6770 if (offsets[i].type == ICE_UDP_OF) {
6771 struct ice_l4_hdr *hdr;
6774 offset = offsets[i].offset;
6775 hdr = (struct ice_l4_hdr *)&pkt[offset];
6776 hdr->dst_port = CPU_TO_BE16(open_port);
6786 * ice_find_adv_rule_entry - Search a rule entry
6787 * @hw: pointer to the hardware structure
6788 * @lkups: lookup elements or match criteria for the advanced recipe, one
6789 * structure per protocol header
6790 * @lkups_cnt: number of protocols
6791 * @recp_id: recipe ID for which we are finding the rule
6792 * @rinfo: other information regarding the rule e.g. priority and action info
6794 * Helper function to search for a given advance rule entry
6795 * Returns pointer to entry storing the rule if found
6797 static struct ice_adv_fltr_mgmt_list_entry *
6798 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6799 u16 lkups_cnt, u16 recp_id,
6800 struct ice_adv_rule_info *rinfo)
6802 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6803 struct ice_switch_info *sw = hw->switch_info;
6806 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
6807 ice_adv_fltr_mgmt_list_entry, list_entry) {
6808 bool lkups_matched = true;
6810 if (lkups_cnt != list_itr->lkups_cnt)
6812 for (i = 0; i < list_itr->lkups_cnt; i++)
6813 if (memcmp(&list_itr->lkups[i], &lkups[i],
6815 lkups_matched = false;
6818 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
6819 rinfo->tun_type == list_itr->rule_info.tun_type &&
6827 * ice_adv_add_update_vsi_list
6828 * @hw: pointer to the hardware structure
6829 * @m_entry: pointer to current adv filter management list entry
6830 * @cur_fltr: filter information from the book keeping entry
6831 * @new_fltr: filter information with the new VSI to be added
6833 * Call AQ command to add or update previously created VSI list with new VSI.
6835 * Helper function to do book keeping associated with adding filter information
6836 * The algorithm to do the booking keeping is described below :
6837 * When a VSI needs to subscribe to a given advanced filter
6838 * if only one VSI has been added till now
6839 * Allocate a new VSI list and add two VSIs
6840 * to this list using switch rule command
6841 * Update the previously created switch rule with the
6842 * newly created VSI list ID
6843 * if a VSI list was previously created
6844 * Add the new VSI to the previously created VSI list set
6845 * using the update switch rule command
6847 static enum ice_status
6848 ice_adv_add_update_vsi_list(struct ice_hw *hw,
6849 struct ice_adv_fltr_mgmt_list_entry *m_entry,
6850 struct ice_adv_rule_info *cur_fltr,
6851 struct ice_adv_rule_info *new_fltr)
6853 enum ice_status status;
6854 u16 vsi_list_id = 0;
6856 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6857 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6858 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
6859 return ICE_ERR_NOT_IMPL;
6861 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6862 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
6863 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6864 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
6865 return ICE_ERR_NOT_IMPL;
6867 /* Workaround fix for unexpected rule deletion by kernel PF
6870 if (new_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI &&
6871 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI)
6872 return ICE_ERR_NOT_IMPL;
6874 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
6875 /* Only one entry existed in the mapping and it was not already
6876 * a part of a VSI list. So, create a VSI list with the old and
6879 struct ice_fltr_info tmp_fltr;
6880 u16 vsi_handle_arr[2];
6882 /* A rule already exists with the new VSI being added */
6883 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
6884 new_fltr->sw_act.fwd_id.hw_vsi_id)
6885 return ICE_ERR_ALREADY_EXISTS;
6887 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
6888 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
6889 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
6895 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
6896 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
6897 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
6898 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
6899 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
6900 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
6902 /* Update the previous switch rule of "forward to VSI" to
6905 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6909 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
6910 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
6911 m_entry->vsi_list_info =
6912 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
6915 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
6917 if (!m_entry->vsi_list_info)
6920 /* A rule already exists with the new VSI being added */
6921 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
6924 /* Update the previously created VSI list set with
6925 * the new VSI ID passed in
6927 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
6929 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
6931 ice_aqc_opc_update_sw_rules,
6933 /* update VSI list mapping info with new VSI ID */
6935 ice_set_bit(vsi_handle,
6936 m_entry->vsi_list_info->vsi_map);
6939 m_entry->vsi_count++;
6944 * ice_add_adv_rule - helper function to create an advanced switch rule
6945 * @hw: pointer to the hardware structure
6946 * @lkups: information on the words that needs to be looked up. All words
6947 * together makes one recipe
6948 * @lkups_cnt: num of entries in the lkups array
6949 * @rinfo: other information related to the rule that needs to be programmed
6950 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
6951 * ignored is case of error.
6953 * This function can program only 1 rule at a time. The lkups is used to
6954 * describe the all the words that forms the "lookup" portion of the recipe.
6955 * These words can span multiple protocols. Callers to this function need to
6956 * pass in a list of protocol headers with lookup information along and mask
6957 * that determines which words are valid from the given protocol header.
6958 * rinfo describes other information related to this rule such as forwarding
6959 * IDs, priority of this rule, etc.
6962 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6963 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
6964 struct ice_rule_query_data *added_entry)
6966 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
6967 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
6968 const struct ice_dummy_pkt_offsets *pkt_offsets;
6969 struct ice_aqc_sw_rules_elem *s_rule = NULL;
6970 struct LIST_HEAD_TYPE *rule_head;
6971 struct ice_switch_info *sw;
6972 enum ice_status status;
6973 const u8 *pkt = NULL;
6979 /* Initialize profile to result index bitmap */
6980 if (!hw->switch_info->prof_res_bm_init) {
6981 hw->switch_info->prof_res_bm_init = 1;
6982 ice_init_prof_result_bm(hw);
6985 prof_rule = ice_is_prof_rule(rinfo->tun_type);
6986 if (!prof_rule && !lkups_cnt)
6987 return ICE_ERR_PARAM;
6989 /* get # of words we need to match */
6991 for (i = 0; i < lkups_cnt; i++) {
6994 ptr = (u16 *)&lkups[i].m_u;
6995 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
7001 if (word_cnt > ICE_MAX_CHAIN_WORDS)
7002 return ICE_ERR_PARAM;
7004 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
7005 return ICE_ERR_PARAM;
7008 /* make sure that we can locate a dummy packet */
7009 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
7012 status = ICE_ERR_PARAM;
7013 goto err_ice_add_adv_rule;
7016 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
7017 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
7018 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
7019 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
7022 vsi_handle = rinfo->sw_act.vsi_handle;
7023 if (!ice_is_vsi_valid(hw, vsi_handle))
7024 return ICE_ERR_PARAM;
7026 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
7027 rinfo->sw_act.fwd_id.hw_vsi_id =
7028 ice_get_hw_vsi_num(hw, vsi_handle);
7029 if (rinfo->sw_act.flag & ICE_FLTR_TX)
7030 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
7032 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
7035 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
7037 /* we have to add VSI to VSI_LIST and increment vsi_count.
7038 * Also Update VSI list so that we can change forwarding rule
7039 * if the rule already exists, we will check if it exists with
7040 * same vsi_id, if not then add it to the VSI list if it already
7041 * exists if not then create a VSI list and add the existing VSI
7042 * ID and the new VSI ID to the list
7043 * We will add that VSI to the list
7045 status = ice_adv_add_update_vsi_list(hw, m_entry,
7046 &m_entry->rule_info,
7049 added_entry->rid = rid;
7050 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
7051 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
7055 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
7056 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
7058 return ICE_ERR_NO_MEMORY;
7059 act |= ICE_SINGLE_ACT_LAN_ENABLE;
7060 switch (rinfo->sw_act.fltr_act) {
7061 case ICE_FWD_TO_VSI:
7062 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
7063 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
7064 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
7067 act |= ICE_SINGLE_ACT_TO_Q;
7068 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
7069 ICE_SINGLE_ACT_Q_INDEX_M;
7071 case ICE_FWD_TO_QGRP:
7072 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
7073 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
7074 act |= ICE_SINGLE_ACT_TO_Q;
7075 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
7076 ICE_SINGLE_ACT_Q_INDEX_M;
7077 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
7078 ICE_SINGLE_ACT_Q_REGION_M;
7080 case ICE_DROP_PACKET:
7081 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
7082 ICE_SINGLE_ACT_VALID_BIT;
7085 status = ICE_ERR_CFG;
7086 goto err_ice_add_adv_rule;
7089 /* set the rule LOOKUP type based on caller specified 'RX'
7090 * instead of hardcoding it to be either LOOKUP_TX/RX
7092 * for 'RX' set the source to be the port number
7093 * for 'TX' set the source to be the source HW VSI number (determined
7097 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
7098 s_rule->pdata.lkup_tx_rx.src =
7099 CPU_TO_LE16(hw->port_info->lport);
7101 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
7102 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
7105 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
7106 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
7108 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
7109 pkt_len, pkt_offsets);
7111 goto err_ice_add_adv_rule;
7113 if (rinfo->tun_type != ICE_NON_TUN &&
7114 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
7115 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
7116 s_rule->pdata.lkup_tx_rx.hdr,
7119 goto err_ice_add_adv_rule;
7122 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
7123 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
7126 goto err_ice_add_adv_rule;
7127 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
7128 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
7130 status = ICE_ERR_NO_MEMORY;
7131 goto err_ice_add_adv_rule;
7134 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
7135 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
7136 ICE_NONDMA_TO_NONDMA);
7137 if (!adv_fltr->lkups && !prof_rule) {
7138 status = ICE_ERR_NO_MEMORY;
7139 goto err_ice_add_adv_rule;
7142 adv_fltr->lkups_cnt = lkups_cnt;
7143 adv_fltr->rule_info = *rinfo;
7144 adv_fltr->rule_info.fltr_rule_id =
7145 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
7146 sw = hw->switch_info;
7147 sw->recp_list[rid].adv_rule = true;
7148 rule_head = &sw->recp_list[rid].filt_rules;
7150 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
7151 adv_fltr->vsi_count = 1;
7153 /* Add rule entry to book keeping list */
7154 LIST_ADD(&adv_fltr->list_entry, rule_head);
7156 added_entry->rid = rid;
7157 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
7158 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
7160 err_ice_add_adv_rule:
7161 if (status && adv_fltr) {
7162 ice_free(hw, adv_fltr->lkups);
7163 ice_free(hw, adv_fltr);
7166 ice_free(hw, s_rule);
7172 * ice_adv_rem_update_vsi_list
7173 * @hw: pointer to the hardware structure
7174 * @vsi_handle: VSI handle of the VSI to remove
7175 * @fm_list: filter management entry for which the VSI list management needs to
7178 static enum ice_status
7179 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
7180 struct ice_adv_fltr_mgmt_list_entry *fm_list)
7182 struct ice_vsi_list_map_info *vsi_list_info;
7183 enum ice_sw_lkup_type lkup_type;
7184 enum ice_status status;
7187 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
7188 fm_list->vsi_count == 0)
7189 return ICE_ERR_PARAM;
7191 /* A rule with the VSI being removed does not exist */
7192 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
7193 return ICE_ERR_DOES_NOT_EXIST;
7195 lkup_type = ICE_SW_LKUP_LAST;
7196 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
7197 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
7198 ice_aqc_opc_update_sw_rules,
7203 fm_list->vsi_count--;
7204 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
7205 vsi_list_info = fm_list->vsi_list_info;
7206 if (fm_list->vsi_count == 1) {
7207 struct ice_fltr_info tmp_fltr;
7210 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
7212 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
7213 return ICE_ERR_OUT_OF_RANGE;
7215 /* Make sure VSI list is empty before removing it below */
7216 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
7218 ice_aqc_opc_update_sw_rules,
7223 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
7224 tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
7225 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
7226 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
7227 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
7228 tmp_fltr.fwd_id.hw_vsi_id =
7229 ice_get_hw_vsi_num(hw, rem_vsi_handle);
7230 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
7231 ice_get_hw_vsi_num(hw, rem_vsi_handle);
7233 /* Update the previous switch rule of "MAC forward to VSI" to
7234 * "MAC fwd to VSI list"
7236 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
7238 ice_debug(hw, ICE_DBG_SW,
7239 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
7240 tmp_fltr.fwd_id.hw_vsi_id, status);
7244 /* Remove the VSI list since it is no longer used */
7245 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
7247 ice_debug(hw, ICE_DBG_SW,
7248 "Failed to remove VSI list %d, error %d\n",
7249 vsi_list_id, status);
7253 LIST_DEL(&vsi_list_info->list_entry);
7254 ice_free(hw, vsi_list_info);
7255 fm_list->vsi_list_info = NULL;
7262 * ice_rem_adv_rule - removes existing advanced switch rule
7263 * @hw: pointer to the hardware structure
7264 * @lkups: information on the words that needs to be looked up. All words
7265 * together makes one recipe
7266 * @lkups_cnt: num of entries in the lkups array
7267 * @rinfo: Its the pointer to the rule information for the rule
7269 * This function can be used to remove 1 rule at a time. The lkups is
7270 * used to describe all the words that forms the "lookup" portion of the
7271 * rule. These words can span multiple protocols. Callers to this function
7272 * need to pass in a list of protocol headers with lookup information along
7273 * and mask that determines which words are valid from the given protocol
7274 * header. rinfo describes other information related to this rule such as
7275 * forwarding IDs, priority of this rule, etc.
7278 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7279 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
7281 struct ice_adv_fltr_mgmt_list_entry *list_elem;
7282 struct ice_prot_lkup_ext lkup_exts;
7283 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
7284 enum ice_status status = ICE_SUCCESS;
7285 bool remove_rule = false;
7286 u16 i, rid, vsi_handle;
7288 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
7289 for (i = 0; i < lkups_cnt; i++) {
7292 if (lkups[i].type >= ICE_PROTOCOL_LAST)
7295 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
7300 /* Create any special protocol/offset pairs, such as looking at tunnel
7301 * bits by extracting metadata
7303 status = ice_add_special_words(rinfo, &lkup_exts);
7307 rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
7308 /* If did not find a recipe that match the existing criteria */
7309 if (rid == ICE_MAX_NUM_RECIPES)
7310 return ICE_ERR_PARAM;
7312 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
7313 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
7314 /* the rule is already removed */
7317 ice_acquire_lock(rule_lock);
7318 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
7320 } else if (list_elem->vsi_count > 1) {
7321 list_elem->vsi_list_info->ref_cnt--;
7322 remove_rule = false;
7323 vsi_handle = rinfo->sw_act.vsi_handle;
7324 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
7326 vsi_handle = rinfo->sw_act.vsi_handle;
7327 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
7329 ice_release_lock(rule_lock);
7332 if (list_elem->vsi_count == 0)
7335 ice_release_lock(rule_lock);
7337 struct ice_aqc_sw_rules_elem *s_rule;
7340 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
7342 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
7345 return ICE_ERR_NO_MEMORY;
7346 s_rule->pdata.lkup_tx_rx.act = 0;
7347 s_rule->pdata.lkup_tx_rx.index =
7348 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
7349 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
7350 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
7352 ice_aqc_opc_remove_sw_rules, NULL);
7353 if (status == ICE_SUCCESS) {
7354 ice_acquire_lock(rule_lock);
7355 LIST_DEL(&list_elem->list_entry);
7356 ice_free(hw, list_elem->lkups);
7357 ice_free(hw, list_elem);
7358 ice_release_lock(rule_lock);
7360 ice_free(hw, s_rule);
7366 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
7367 * @hw: pointer to the hardware structure
7368 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
7370 * This function is used to remove 1 rule at a time. The removal is based on
7371 * the remove_entry parameter. This function will remove rule for a given
7372 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
7375 ice_rem_adv_rule_by_id(struct ice_hw *hw,
7376 struct ice_rule_query_data *remove_entry)
7378 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7379 struct LIST_HEAD_TYPE *list_head;
7380 struct ice_adv_rule_info rinfo;
7381 struct ice_switch_info *sw;
7383 sw = hw->switch_info;
7384 if (!sw->recp_list[remove_entry->rid].recp_created)
7385 return ICE_ERR_PARAM;
7386 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
7387 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
7389 if (list_itr->rule_info.fltr_rule_id ==
7390 remove_entry->rule_id) {
7391 rinfo = list_itr->rule_info;
7392 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
7393 return ice_rem_adv_rule(hw, list_itr->lkups,
7394 list_itr->lkups_cnt, &rinfo);
7397 return ICE_ERR_PARAM;
7401 * ice_rem_adv_for_vsi - removes existing advanced switch rules for a
7403 * @hw: pointer to the hardware structure
7404 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
7406 * This function is used to remove all the rules for a given VSI and as soon
7407 * as removing a rule fails, it will return immediately with the error code,
7408 * else it will return ICE_SUCCESS
7411 ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
7413 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7414 struct ice_vsi_list_map_info *map_info;
7415 struct LIST_HEAD_TYPE *list_head;
7416 struct ice_adv_rule_info rinfo;
7417 struct ice_switch_info *sw;
7418 enum ice_status status;
7419 u16 vsi_list_id = 0;
7422 sw = hw->switch_info;
7423 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
7424 if (!sw->recp_list[rid].recp_created)
7426 if (!sw->recp_list[rid].adv_rule)
7428 list_head = &sw->recp_list[rid].filt_rules;
7430 LIST_FOR_EACH_ENTRY(list_itr, list_head,
7431 ice_adv_fltr_mgmt_list_entry, list_entry) {
7432 map_info = ice_find_vsi_list_entry(&sw->recp_list[rid],
7437 rinfo = list_itr->rule_info;
7438 rinfo.sw_act.vsi_handle = vsi_handle;
7439 status = ice_rem_adv_rule(hw, list_itr->lkups,
7440 list_itr->lkups_cnt, &rinfo);
7450 * ice_replay_fltr - Replay all the filters stored by a specific list head
7451 * @hw: pointer to the hardware structure
7452 * @list_head: list for which filters needs to be replayed
7453 * @recp_id: Recipe ID for which rules need to be replayed
7455 static enum ice_status
7456 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
7458 struct ice_fltr_mgmt_list_entry *itr;
7459 enum ice_status status = ICE_SUCCESS;
7460 struct ice_sw_recipe *recp_list;
7461 u8 lport = hw->port_info->lport;
7462 struct LIST_HEAD_TYPE l_head;
7464 if (LIST_EMPTY(list_head))
7467 recp_list = &hw->switch_info->recp_list[recp_id];
7468 /* Move entries from the given list_head to a temporary l_head so that
7469 * they can be replayed. Otherwise when trying to re-add the same
7470 * filter, the function will return already exists
7472 LIST_REPLACE_INIT(list_head, &l_head);
7474 /* Mark the given list_head empty by reinitializing it so filters
7475 * could be added again by *handler
7477 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
7479 struct ice_fltr_list_entry f_entry;
7481 f_entry.fltr_info = itr->fltr_info;
7482 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
7483 status = ice_add_rule_internal(hw, recp_list, lport,
7485 if (status != ICE_SUCCESS)
7490 /* Add a filter per VSI separately */
7495 ice_find_first_bit(itr->vsi_list_info->vsi_map,
7497 if (!ice_is_vsi_valid(hw, vsi_handle))
7500 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
7501 f_entry.fltr_info.vsi_handle = vsi_handle;
7502 f_entry.fltr_info.fwd_id.hw_vsi_id =
7503 ice_get_hw_vsi_num(hw, vsi_handle);
7504 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
7505 if (recp_id == ICE_SW_LKUP_VLAN)
7506 status = ice_add_vlan_internal(hw, recp_list,
7509 status = ice_add_rule_internal(hw, recp_list,
7512 if (status != ICE_SUCCESS)
7517 /* Clear the filter management list */
7518 ice_rem_sw_rule_info(hw, &l_head);
7523 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
7524 * @hw: pointer to the hardware structure
7526 * NOTE: This function does not clean up partially added filters on error.
7527 * It is up to caller of the function to issue a reset or fail early.
7529 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
7531 struct ice_switch_info *sw = hw->switch_info;
7532 enum ice_status status = ICE_SUCCESS;
7535 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7536 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
7538 status = ice_replay_fltr(hw, i, head);
7539 if (status != ICE_SUCCESS)
7546 * ice_replay_vsi_fltr - Replay filters for requested VSI
7547 * @hw: pointer to the hardware structure
7548 * @pi: pointer to port information structure
7549 * @sw: pointer to switch info struct for which function replays filters
7550 * @vsi_handle: driver VSI handle
7551 * @recp_id: Recipe ID for which rules need to be replayed
7552 * @list_head: list for which filters need to be replayed
7554 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
7555 * It is required to pass valid VSI handle.
7557 static enum ice_status
7558 ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
7559 struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
7560 struct LIST_HEAD_TYPE *list_head)
7562 struct ice_fltr_mgmt_list_entry *itr;
7563 enum ice_status status = ICE_SUCCESS;
7564 struct ice_sw_recipe *recp_list;
7567 if (LIST_EMPTY(list_head))
7569 recp_list = &sw->recp_list[recp_id];
7570 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
7572 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
7574 struct ice_fltr_list_entry f_entry;
7576 f_entry.fltr_info = itr->fltr_info;
7577 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
7578 itr->fltr_info.vsi_handle == vsi_handle) {
7579 /* update the src in case it is VSI num */
7580 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
7581 f_entry.fltr_info.src = hw_vsi_id;
7582 status = ice_add_rule_internal(hw, recp_list,
7585 if (status != ICE_SUCCESS)
7589 if (!itr->vsi_list_info ||
7590 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
7592 /* Clearing it so that the logic can add it back */
7593 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
7594 f_entry.fltr_info.vsi_handle = vsi_handle;
7595 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
7596 /* update the src in case it is VSI num */
7597 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
7598 f_entry.fltr_info.src = hw_vsi_id;
7599 if (recp_id == ICE_SW_LKUP_VLAN)
7600 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
7602 status = ice_add_rule_internal(hw, recp_list,
7605 if (status != ICE_SUCCESS)
7613 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
7614 * @hw: pointer to the hardware structure
7615 * @vsi_handle: driver VSI handle
7616 * @list_head: list for which filters need to be replayed
7618 * Replay the advanced rule for the given VSI.
7620 static enum ice_status
7621 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
7622 struct LIST_HEAD_TYPE *list_head)
7624 struct ice_rule_query_data added_entry = { 0 };
7625 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
7626 enum ice_status status = ICE_SUCCESS;
7628 if (LIST_EMPTY(list_head))
7630 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
7632 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
7633 u16 lk_cnt = adv_fltr->lkups_cnt;
7635 if (vsi_handle != rinfo->sw_act.vsi_handle)
7637 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
7646 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
7647 * @hw: pointer to the hardware structure
7648 * @pi: pointer to port information structure
7649 * @vsi_handle: driver VSI handle
7651 * Replays filters for requested VSI via vsi_handle.
7654 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
7657 struct ice_switch_info *sw = hw->switch_info;
7658 enum ice_status status;
7661 /* Update the recipes that were created */
7662 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7663 struct LIST_HEAD_TYPE *head;
7665 head = &sw->recp_list[i].filt_replay_rules;
7666 if (!sw->recp_list[i].adv_rule)
7667 status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
7670 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
7671 if (status != ICE_SUCCESS)
7679 * ice_rm_all_sw_replay_rule - helper function to delete filter replay rules
7680 * @hw: pointer to the HW struct
7681 * @sw: pointer to switch info struct for which function removes filters
7683 * Deletes the filter replay rules for given switch
7685 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
7692 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7693 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
7694 struct LIST_HEAD_TYPE *l_head;
7696 l_head = &sw->recp_list[i].filt_replay_rules;
7697 if (!sw->recp_list[i].adv_rule)
7698 ice_rem_sw_rule_info(hw, l_head);
7700 ice_rem_adv_rule_info(hw, l_head);
7706 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
7707 * @hw: pointer to the HW struct
7709 * Deletes the filter replay rules.
7711 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
7713 ice_rm_sw_replay_rule_info(hw, hw->switch_info);