1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020 Intel Corporation
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
/* Byte offsets into the dummy Ethernet header used when building switch
 * filter rules, plus protocol/ethertype constants used by the dummy packets.
 */
#define ICE_ETH_DA_OFFSET 0
#define ICE_ETH_ETHTYPE_OFFSET 12
#define ICE_ETH_VLAN_TCI_OFFSET 14
#define ICE_MAX_VLAN_ID 0xFFF /* VLAN ID is a 12-bit field */
#define ICE_IPV4_NVGRE_PROTO_ID 0x002F /* IPv4 protocol number for GRE */
#define ICE_PPP_IPV6_PROTO_ID 0x0057 /* PPP protocol number for IPv6 */
#define ICE_IPV6_ETHER_ID 0x86DD /* Ethertype for IPv6 */
/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
 * struct to configure any switch filter rules.
 * {DA (6 bytes), SA(6 bytes),
 * Ether type (2 bytes for header without VLAN tag) OR
 * VLAN tag (4 bytes for header with VLAN tag) }
 *
 * Word on Hardcoded values
 * byte 0 = 0x2: to identify it as locally administered DA MAC
 * byte 6 = 0x2: to identify it as locally administered SA MAC
 * byte 12 = 0x81 & byte 13 = 0x00:
 * In case of VLAN filter first two bytes defines ether type (0x8100)
 * and remaining two bytes are placeholder for programming a given VLAN ID
 * In case of Ether type filter it is treated as header without VLAN tag
 * and byte 12 and 13 is used to program a given Ether type instead
 */
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* Describes where one protocol header starts within a dummy packet; the
 * offsets arrays below are terminated by an ICE_PROTOCOL_LAST entry.
 */
struct ice_dummy_pkt_offsets {
	enum ice_protocol_type type;
	u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
/* offset info for MAC + IPv4 + NVGRE + inner MAC/IPv4/TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + IPv4 + NVGRE + inner MAC/IPv4/TCP */
static const u8 dummy_gre_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,	/* protocol 0x2F = GRE */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,	/* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 76 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
/* offset info for MAC + IPv4 + NVGRE + inner MAC/IPv4/UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + IPv4 + NVGRE + inner MAC/IPv4/UDP */
static const u8 dummy_gre_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,	/* protocol 0x2F = GRE */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 76 */
	0x00, 0x08, 0x00, 0x00,	/* UDP length = 8 (header only) */
/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/GENEVE) + inner TCP packet */
static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_VXLAN_GPE, 42 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + IPv4 + UDP tunnel + inner MAC/IPv4/TCP */
static const u8 dummy_udp_tun_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x5a,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5,	/* ICE_UDP_OF 34 (dst port 0x12b5 = 4789) */
	0x00, 0x46, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58,	/* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x28,	/* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x06, 0x00, 0x00,	/* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 84 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/GENEVE) + inner UDP packet */
static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_VXLAN_GPE, 42 },
	{ ICE_UDP_ILOS, 84 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + IPv4 + UDP tunnel + inner MAC/IPv4/UDP */
static const u8 dummy_udp_tun_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x4e,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5,	/* ICE_UDP_OF 34 (dst port 0x12b5 = 4789) */
	0x00, 0x3a, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58,	/* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x1c,	/* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 84 */
	0x00, 0x08, 0x00, 0x00,	/* UDP length = 8 (header only) */
/* offset info for MAC + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_UDP_ILOS, 34 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + IPv4 + UDP */
static const u8 dummy_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x1c,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 34 */
	0x00, 0x08, 0x00, 0x00,	/* UDP length = 8 (header only) */

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14 },
	{ ICE_IPV4_OFOS, 18 },
	{ ICE_UDP_ILOS, 38 },
	{ ICE_PROTOCOL_LAST, 0 },

/* C-tag (802.1Q), IPv4:UDP dummy packet */
static const u8 dummy_vlan_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00,		/* ICE_ETYPE_OL 12 (0x8100 = C-tag TPID) */

	0x00, 0x00, 0x08, 0x00,	/* ICE_VLAN_OFOS 14 */

	0x45, 0x00, 0x00, 0x1c,	/* ICE_IPV4_OFOS 18 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 38 */
	0x00, 0x08, 0x00, 0x00,	/* UDP length = 8 (header only) */

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + IPv4 + TCP */
static const u8 dummy_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x28,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,	/* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,	/* data offset = 5 (20-byte TCP header) */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14 },
	{ ICE_IPV4_OFOS, 18 },
	{ ICE_PROTOCOL_LAST, 0 },

/* C-tag (802.1Q), IPv4:TCP dummy packet */
static const u8 dummy_vlan_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00,		/* ICE_ETYPE_OL 12 (0x8100 = C-tag TPID) */

	0x00, 0x00, 0x08, 0x00,	/* ICE_VLAN_OFOS 14 */

	0x45, 0x00, 0x00, 0x28,	/* ICE_IPV4_OFOS 18 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,	/* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 38 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,	/* data offset = 5 (20-byte TCP header) */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv6 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV6_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + IPv6 + TCP */
static const u8 dummy_tcp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 14 */
	0x00, 0x14, 0x06, 0x00,	/* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,	/* data offset = 5 (20-byte TCP header) */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* C-tag (802.1Q): IPv6 + TCP */
static const struct ice_dummy_pkt_offsets
dummy_vlan_tcp_ipv6_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14 },
	{ ICE_IPV6_OFOS, 18 },
	{ ICE_PROTOCOL_LAST, 0 },

/* C-tag (802.1Q), IPv6 + TCP dummy packet */
static const u8 dummy_vlan_tcp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00,		/* ICE_ETYPE_OL 12 (0x8100 = C-tag TPID) */

	0x00, 0x00, 0x86, 0xDD,	/* ICE_VLAN_OFOS 14 */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 18 */
	0x00, 0x14, 0x06, 0x00,	/* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 58 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,	/* data offset = 5 (20-byte TCP header) */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv6 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV6_OFOS, 14 },
	{ ICE_UDP_ILOS, 54 },
	{ ICE_PROTOCOL_LAST, 0 },

/* IPv6 + UDP dummy packet */
static const u8 dummy_udp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 14 */
	0x00, 0x10, 0x11, 0x00,	/* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 54 */
	0x00, 0x10, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* needed for ESP packets */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* C-tag (802.1Q): IPv6 + UDP */
static const struct ice_dummy_pkt_offsets
dummy_vlan_udp_ipv6_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14 },
	{ ICE_IPV6_OFOS, 18 },
	{ ICE_UDP_ILOS, 58 },
	{ ICE_PROTOCOL_LAST, 0 },

/* C-tag (802.1Q), IPv6 + UDP dummy packet */
static const u8 dummy_vlan_udp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00,		/* ICE_ETYPE_OL 12 (0x8100 = C-tag TPID) */

	0x00, 0x00, 0x86, 0xDD,	/* ICE_VLAN_OFOS 14 */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 18 */
	0x00, 0x08, 0x11, 0x00,	/* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 58 */
	0x00, 0x08, 0x00, 0x00,	/* UDP length = 8 (header only) */

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv4 + UDP + GTP-U dummy packet */
static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + IPv4 + UDP + GTP-U with PDU session extension */
static const u8 dummy_udp_gtp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x30,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68,	/* ICE_UDP_OF 34 (dst port 0x0868 = 2152, GTP-U) */
	0x00, 0x1c, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x0c,	/* ICE_GTP 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00,	/* PDU Session extension header */
	0x00, 0x00, 0x00, 0x00,
/* offset info for MAC + VLAN + PPPoE session dummy packets */
static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14},
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + VLAN + PPPoE session carrying IPv4 */
static const u8 dummy_pppoe_ipv4_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00,		/* ICE_ETYPE_OL 12 (0x8100 = C-tag TPID) */

	0x00, 0x00, 0x88, 0x64,	/* ICE_VLAN_OFOS 14 (0x8864 = PPPoE session) */

	0x11, 0x00, 0x00, 0x00,	/* ICE_PPPOE 18 */

	0x00, 0x21,		/* PPP Link Layer 24 (0x0021 = IPv4) */

	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 26 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */

/* Dummy packet for MAC + VLAN + PPPoE session carrying IPv6 */
static const u8 dummy_pppoe_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00,		/* ICE_ETYPE_OL 12 (0x8100 = C-tag TPID) */

	0x00, 0x00, 0x88, 0x64,	/* ICE_VLAN_OFOS 14 (0x8864 = PPPoE session) */

	0x11, 0x00, 0x00, 0x00,	/* ICE_PPPOE 18 */

	0x00, 0x57,		/* PPP Link Layer 24 (ICE_PPP_IPV6_PROTO_ID) */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 26 */
	0x00, 0x00, 0x3b, 0x00,	/* Next header 0x3b = No Next Header */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* offset info for MAC + IPv4 + ESP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + IPv4 + ESP */
static const u8 dummy_ipv4_esp_pkt[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x1c,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x32, 0x00, 0x00,	/* protocol 0x32 = ESP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_ESP 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* offset info for MAC + IPv6 + ESP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
	{ ICE_IPV6_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + IPv6 + ESP */
static const u8 dummy_ipv6_esp_pkt[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 14 */
	0x00, 0x08, 0x32, 0x00,	/* Next header ESP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_ESP 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* offset info for MAC + IPv4 + AH dummy packet */
static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + IPv4 + AH */
static const u8 dummy_ipv4_ah_pkt[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x20,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x33, 0x00, 0x00,	/* protocol 0x33 = AH */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_AH 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* offset info for MAC + IPv6 + AH dummy packet */
static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
	{ ICE_IPV6_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + IPv6 + AH */
static const u8 dummy_ipv6_ah_pkt[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 14 */
	0x00, 0x0c, 0x33, 0x00,	/* Next header AH */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_AH 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* offset info for MAC + IPv4 + UDP (IPsec NAT-T) dummy packet */
static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_UDP_ILOS, 34 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + IPv4 + NAT-T */
static const u8 dummy_ipv4_nat_pkt[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x24,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x11, 0x94,	/* ICE_NAT_T 34 (dst port 0x1194 = 4500) */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* offset info for MAC + IPv6 + UDP (IPsec NAT-T) dummy packet */
static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
	{ ICE_IPV6_OFOS, 14 },
	{ ICE_UDP_ILOS, 54 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + IPv6 + NAT-T */
static const u8 dummy_ipv6_nat_pkt[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 14 */
	0x00, 0x10, 0x11, 0x00,	/* Next header NAT_T */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x11, 0x94,	/* ICE_NAT_T 54 (dst port 0x1194 = 4500) */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* offset info for MAC + IPv4 + L2TPv3 dummy packet */
static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + IPv4 + L2TPv3 */
static const u8 dummy_ipv4_l2tpv3_pkt[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x20,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x73, 0x00, 0x00,	/* protocol 0x73 = L2TPv3 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_L2TPV3 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* offset info for MAC + IPv6 + L2TPv3 dummy packet */
static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
	{ ICE_IPV6_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + IPv6 + L2TPv3 */
static const u8 dummy_ipv6_l2tpv3_pkt[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 14 */
	0x00, 0x0c, 0x73, 0x40,	/* Next header 0x73 = L2TPv3 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_L2TPV3 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* this is a recipe to profile association bitmap */
static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
			  ICE_MAX_NUM_PROFILES);

/* this is a profile to recipe association bitmap */
static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
			  ICE_MAX_NUM_RECIPES);

/* forward declaration; definition appears later in this file */
static void ice_get_recp_to_prof_map(struct ice_hw *hw);
/**
 * ice_collect_result_idx - copy result index values
 * @buf: buffer that contains the result index
 * @recp: the recipe struct to copy data into
 *
 * If the recipe entry carries a valid result index (RESULT_EN set), record
 * that index in the recipe's result-index bitmap.
 */
static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
				   struct ice_sw_recipe *recp)
	if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
		ice_set_bit(buf->content.result_indx &
			    ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
/**
 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
 * @hw: pointer to hardware structure
 * @recps: struct that we need to populate
 * @rid: recipe ID that we are populating
 * @refresh_required: true if we should get recipe to profile mapping from FW
 *
 * This function is used to populate all the necessary entries into our
 * bookkeeping so that we have a current list of all the recipes that are
 * programmed in the firmware.
 */
static enum ice_status
ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
		    bool *refresh_required)
	ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	u16 num_recps = ICE_MAX_NUM_RECIPES;
	struct ice_prot_lkup_ext *lkup_exts;
	enum ice_status status;

	ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);

	/* we need a buffer big enough to accommodate all the recipes */
	tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
		ICE_MAX_NUM_RECIPES, sizeof(*tmp));
		return ICE_ERR_NO_MEMORY;

	tmp[0].recipe_indx = rid;
	status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
	/* non-zero status meaning recipe doesn't exist */

	/* Get recipe to profile map so that we can get the fv from lkups that
	 * we read for a recipe from FW. Since we want to minimize the number of
	 * times we make this FW call, just make one call and cache the copy
	 * until a new recipe is added. This operation is only required the
	 * first time to get the changes from FW. Then to search existing
	 * entries we don't need to update the cache again until another recipe
	 * gets added.
	 */
	if (*refresh_required) {
		ice_get_recp_to_prof_map(hw);
		*refresh_required = false;

	/* Start populating all the entries for recps[rid] based on lkups from
	 * firmware. Note that we are only creating the root recipe in our
	 * database.
	 */
	lkup_exts = &recps[rid].lkup_exts;

	for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
		struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
		struct ice_recp_grp_entry *rg_entry;
		u8 i, prof, idx, prot = 0;

		rg_entry = (struct ice_recp_grp_entry *)
			ice_malloc(hw, sizeof(*rg_entry));
			status = ICE_ERR_NO_MEMORY;

		idx = root_bufs.recipe_indx;
		is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;

		/* Mark all result indices in this chain */
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
			ice_set_bit(root_bufs.content.result_indx &
				    ~ICE_AQ_RECIPE_RESULT_EN, result_bm);

		/* get the first profile that is associated with rid */
		prof = ice_find_first_bit(recipe_to_profile[idx],
					  ICE_MAX_NUM_PROFILES);
		for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
			/* lkup_indx[0] is reserved; words start at index 1 */
			u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];

			rg_entry->fv_idx[i] = lkup_indx;
			rg_entry->fv_mask[i] =
				LE16_TO_CPU(root_bufs.content.mask[i + 1]);

			/* If the recipe is a chained recipe then all its
			 * child recipe's result will have a result index.
			 * To fill fv_words we should not use those result
			 * index, we only need the protocol ids and offsets.
			 * We will skip all the fv_idx which stores result
			 * index in them. We also need to skip any fv_idx which
			 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
			 * valid offset value.
			 */
			if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
					   rg_entry->fv_idx[i]) ||
			    rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
			    rg_entry->fv_idx[i] == 0)

			ice_find_prot_off(hw, ICE_BLK_SW, prof,
					  rg_entry->fv_idx[i], &prot, &off);
			lkup_exts->fv_words[fv_word_idx].prot_id = prot;
			lkup_exts->fv_words[fv_word_idx].off = off;
			lkup_exts->field_mask[fv_word_idx] =
				rg_entry->fv_mask[i];

		/* populate rg_list with the data from the child entry of this
		 * recipe
		 */
		LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);

		/* Propagate some data to the recipe database */
		recps[idx].is_root = !!is_root;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
		ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
			recps[idx].chain_idx = root_bufs.content.result_indx &
				~ICE_AQ_RECIPE_RESULT_EN;
			ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
			/* no valid result index for this sub-recipe */
			recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;

		/* Only do the following for root recipes entries */
		ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
			   sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
		recps[idx].root_rid = root_bufs.content.rid &
			~ICE_AQ_RECIPE_ID_IS_ROOT;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;

	/* Complete initialization of the root recipe entry */
	lkup_exts->n_val_words = fv_word_idx;
	recps[rid].big_recp = (num_recps > 1);
	recps[rid].n_grp_count = (u8)num_recps;
	/* keep a copy of the raw FW buffer for later replay/teardown */
	recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
		ice_memdup(hw, tmp, recps[rid].n_grp_count *
			   sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
	if (!recps[rid].root_buf)

	/* Copy result indexes */
	ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
	recps[rid].recp_created = true;
/**
 * ice_get_recp_to_prof_map - updates recipe to profile mapping
 * @hw: pointer to hardware structure
 *
 * This function is used to populate recipe_to_profile matrix where index to
 * this array is the recipe ID and the element is the mapping of which profiles
 * is this recipe mapped to.
 */
ice_get_recp_to_prof_map(struct ice_hw *hw)
	ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);

	for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
		/* reset cached mapping, then re-read it from firmware */
		ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
		ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
		if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
		ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
			      ICE_MAX_NUM_RECIPES);
		/* also maintain the inverse (recipe -> profile) mapping */
		for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
			if (ice_is_bit_set(r_bitmap, j))
				ice_set_bit(i, recipe_to_profile[j]);
/**
 * ice_init_def_sw_recp - initialize the recipe book keeping tables
 * @hw: pointer to the HW struct
 * @recp_list: pointer to sw recipe list
 *
 * Allocate memory for the entire recipe table and initialize the structures/
 * entries corresponding to basic recipes.
 */
ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
	struct ice_sw_recipe *recps;

	recps = (struct ice_sw_recipe *)
		ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
		return ICE_ERR_NO_MEMORY;

	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		recps[i].root_rid = i;
		INIT_LIST_HEAD(&recps[i].filt_rules);
		INIT_LIST_HEAD(&recps[i].filt_replay_rules);
		INIT_LIST_HEAD(&recps[i].rg_list);
		ice_init_lock(&recps[i].filt_rule_lock);
/**
 * ice_aq_get_sw_cfg - get switch configuration
 * @hw: pointer to the hardware structure
 * @buf: pointer to the result buffer
 * @buf_size: length of the buffer available for response
 * @req_desc: pointer to requested descriptor
 * @num_elems: pointer to number of elements
 * @cd: pointer to command details structure or NULL
 *
 * Get switch configuration (0x0200) to be placed in 'buff'.
 * This admin command returns information such as initial VSI/port number
 * and switch ID it belongs to.
 *
 * NOTE: *req_desc is both an input/output parameter.
 * The caller of this function first calls this function with *request_desc set
 * to 0. If the response from f/w has *req_desc set to 0, all the switch
 * configuration information has been returned; if non-zero (meaning not all
 * the information was returned), the caller should call this function again
 * with *req_desc set to the previous value returned by f/w to get the
 * next block of switch configuration information.
 *
 * *num_elems is output only parameter. This reflects the number of elements
 * in response buffer. The caller of this function to use *num_elems while
 * parsing the response buffer.
 */
static enum ice_status
ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
		  u16 buf_size, u16 *req_desc, u16 *num_elems,
		  struct ice_sq_cd *cd)
	struct ice_aqc_get_sw_cfg *cmd;
	enum ice_status status;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
	cmd = &desc.params.get_sw_conf;
	/* pass the continuation cookie from the previous call (0 first time) */
	cmd->element = CPU_TO_LE16(*req_desc);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	/* report the continuation cookie and element count back to caller */
	*req_desc = LE16_TO_CPU(cmd->element);
	*num_elems = LE16_TO_CPU(cmd->num_elems);
/**
 * ice_alloc_sw - allocate resources specific to switch
 * @hw: pointer to the HW struct
 * @ena_stats: true to turn on VEB stats
 * @shared_res: true for shared resource, false for dedicated resource
 * @sw_id: switch ID returned
 * @counter_id: VEB counter ID returned
 *
 * allocates switch resources (SWID and VEB counter) (0x0208)
 */
ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	struct ice_aqc_res_elem *sw_ele;
	enum ice_status status;

	buf_len = sizeof(*sw_buf);
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
		return ICE_ERR_NO_MEMORY;

	/* Prepare buffer for switch ID.
	 * The number of resource entries in buffer is passed as 1 since only a
	 * single switch/VEB instance is allocated, and hence a single sw_id
	 * is requested.
	 */
	sw_buf->num_elems = CPU_TO_LE16(1);
		CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
			    (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
			     ICE_AQC_RES_TYPE_FLAG_DEDICATED));

	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
		goto ice_alloc_sw_exit;

	sw_ele = &sw_buf->elem[0];
	*sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);

		/* Prepare buffer for VEB Counter */
		enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
		struct ice_aqc_alloc_free_res_elem *counter_buf;
		struct ice_aqc_res_elem *counter_ele;

		counter_buf = (struct ice_aqc_alloc_free_res_elem *)
			ice_malloc(hw, buf_len);
			status = ICE_ERR_NO_MEMORY;
			goto ice_alloc_sw_exit;

		/* The number of resource entries in buffer is passed as 1 since
		 * only a single switch/VEB instance is allocated, and hence a
		 * single VEB counter is requested.
		 */
		counter_buf->num_elems = CPU_TO_LE16(1);
		counter_buf->res_type =
			CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
				    ICE_AQC_RES_TYPE_FLAG_DEDICATED);
		status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
			/* free the counter buffer on AQ failure */
			ice_free(hw, counter_buf);
			goto ice_alloc_sw_exit;
		counter_ele = &counter_buf->elem[0];
		*counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
		ice_free(hw, counter_buf);

	ice_free(hw, sw_buf);
1194 * ice_free_sw - free resources specific to switch
1195 * @hw: pointer to the HW struct
1196 * @sw_id: switch ID returned
1197 * @counter_id: VEB counter ID returned
1199 * free switch resources (SWID and VEB counter) (0x0209)
1201 * NOTE: This function frees multiple resources. It continues
1202 * releasing other resources even after it encounters error.
1203 * The error code returned is the last error it encountered.
1205 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
1207 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
1208 enum ice_status status, ret_status;
1211 buf_len = sizeof(*sw_buf);
1212 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1213 ice_malloc(hw, buf_len);
1215 return ICE_ERR_NO_MEMORY;
1217 /* Prepare buffer to free for switch ID res.
1218 * The number of resource entries in buffer is passed as 1 since only a
1219 * single switch/VEB instance is freed, and hence a single sw_id
1222 sw_buf->num_elems = CPU_TO_LE16(1);
1223 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
1224 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
1226 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1227 ice_aqc_opc_free_res, NULL);
1230 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1232 /* Prepare buffer to free for VEB Counter resource */
1233 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1234 ice_malloc(hw, buf_len);
1236 ice_free(hw, sw_buf);
1237 return ICE_ERR_NO_MEMORY;
1240 /* The number of resource entries in buffer is passed as 1 since only a
1241 * single switch/VEB instance is freed, and hence a single VEB counter
1244 counter_buf->num_elems = CPU_TO_LE16(1);
1245 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
1246 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
1248 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1249 ice_aqc_opc_free_res, NULL);
1251 ice_debug(hw, ICE_DBG_SW,
1252 "VEB counter resource could not be freed\n");
1253 ret_status = status;
1256 ice_free(hw, counter_buf);
1257 ice_free(hw, sw_buf);
1263 * @hw: pointer to the HW struct
1264 * @vsi_ctx: pointer to a VSI context struct
1265 * @cd: pointer to command details structure or NULL
1267 * Add a VSI context to the hardware (0x0210)
1270 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1271 struct ice_sq_cd *cd)
1273 struct ice_aqc_add_update_free_vsi_resp *res;
1274 struct ice_aqc_add_get_update_free_vsi *cmd;
1275 struct ice_aq_desc desc;
1276 enum ice_status status;
1278 cmd = &desc.params.vsi_cmd;
1279 res = &desc.params.add_update_free_vsi_res;
1281 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
1283 if (!vsi_ctx->alloc_from_pool)
1284 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
1285 ICE_AQ_VSI_IS_VALID);
1287 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
1289 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1291 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1292 sizeof(vsi_ctx->info), cd);
1295 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1296 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
1297 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
1305 * @hw: pointer to the HW struct
1306 * @vsi_ctx: pointer to a VSI context struct
1307 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1308 * @cd: pointer to command details structure or NULL
1310 * Free VSI context info from hardware (0x0213)
1313 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1314 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1316 struct ice_aqc_add_update_free_vsi_resp *resp;
1317 struct ice_aqc_add_get_update_free_vsi *cmd;
1318 struct ice_aq_desc desc;
1319 enum ice_status status;
1321 cmd = &desc.params.vsi_cmd;
1322 resp = &desc.params.add_update_free_vsi_res;
1324 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1326 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1328 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
1330 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1332 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1333 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1341 * @hw: pointer to the HW struct
1342 * @vsi_ctx: pointer to a VSI context struct
1343 * @cd: pointer to command details structure or NULL
1345 * Update VSI context in the hardware (0x0211)
1348 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1349 struct ice_sq_cd *cd)
1351 struct ice_aqc_add_update_free_vsi_resp *resp;
1352 struct ice_aqc_add_get_update_free_vsi *cmd;
1353 struct ice_aq_desc desc;
1354 enum ice_status status;
1356 cmd = &desc.params.vsi_cmd;
1357 resp = &desc.params.add_update_free_vsi_res;
1359 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1361 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1363 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1365 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1366 sizeof(vsi_ctx->info), cd);
1369 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1370 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1377 * ice_is_vsi_valid - check whether the VSI is valid or not
1378 * @hw: pointer to the HW struct
1379 * @vsi_handle: VSI handle
1381 * check whether the VSI is valid or not
1383 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1385 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1389 * ice_get_hw_vsi_num - return the HW VSI number
1390 * @hw: pointer to the HW struct
1391 * @vsi_handle: VSI handle
1393 * return the HW VSI number
1394 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1396 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1398 return hw->vsi_ctx[vsi_handle]->vsi_num;
1402 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1403 * @hw: pointer to the HW struct
1404 * @vsi_handle: VSI handle
1406 * return the VSI context entry for a given VSI handle
1408 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1410 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1414 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1415 * @hw: pointer to the HW struct
1416 * @vsi_handle: VSI handle
1417 * @vsi: VSI context pointer
1419 * save the VSI context entry for a given VSI handle
1422 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1424 hw->vsi_ctx[vsi_handle] = vsi;
1428 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1429 * @hw: pointer to the HW struct
1430 * @vsi_handle: VSI handle
1432 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1434 struct ice_vsi_ctx *vsi;
1437 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1440 ice_for_each_traffic_class(i) {
1441 if (vsi->lan_q_ctx[i]) {
1442 ice_free(hw, vsi->lan_q_ctx[i]);
1443 vsi->lan_q_ctx[i] = NULL;
1449 * ice_clear_vsi_ctx - clear the VSI context entry
1450 * @hw: pointer to the HW struct
1451 * @vsi_handle: VSI handle
1453 * clear the VSI context entry
1455 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1457 struct ice_vsi_ctx *vsi;
1459 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1461 ice_clear_vsi_q_ctx(hw, vsi_handle);
1463 hw->vsi_ctx[vsi_handle] = NULL;
1468 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1469 * @hw: pointer to the HW struct
1471 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1475 for (i = 0; i < ICE_MAX_VSI; i++)
1476 ice_clear_vsi_ctx(hw, i);
1480 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1481 * @hw: pointer to the HW struct
1482 * @vsi_handle: unique VSI handle provided by drivers
1483 * @vsi_ctx: pointer to a VSI context struct
1484 * @cd: pointer to command details structure or NULL
1486 * Add a VSI context to the hardware also add it into the VSI handle list.
1487 * If this function gets called after reset for existing VSIs then update
1488 * with the new HW VSI number in the corresponding VSI handle list entry.
1491 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1492 struct ice_sq_cd *cd)
1494 struct ice_vsi_ctx *tmp_vsi_ctx;
1495 enum ice_status status;
1497 if (vsi_handle >= ICE_MAX_VSI)
1498 return ICE_ERR_PARAM;
1499 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1502 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1504 /* Create a new VSI context */
1505 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1506 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
1508 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1509 return ICE_ERR_NO_MEMORY;
1511 *tmp_vsi_ctx = *vsi_ctx;
1513 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1515 /* update with new HW VSI num */
1516 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1523 * ice_free_vsi- free VSI context from hardware and VSI handle list
1524 * @hw: pointer to the HW struct
1525 * @vsi_handle: unique VSI handle
1526 * @vsi_ctx: pointer to a VSI context struct
1527 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1528 * @cd: pointer to command details structure or NULL
1530 * Free VSI context info from hardware as well as from VSI handle list
1533 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1534 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1536 enum ice_status status;
1538 if (!ice_is_vsi_valid(hw, vsi_handle))
1539 return ICE_ERR_PARAM;
1540 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1541 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1543 ice_clear_vsi_ctx(hw, vsi_handle);
1549 * @hw: pointer to the HW struct
1550 * @vsi_handle: unique VSI handle
1551 * @vsi_ctx: pointer to a VSI context struct
1552 * @cd: pointer to command details structure or NULL
1554 * Update VSI context in the hardware
1557 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1558 struct ice_sq_cd *cd)
1560 if (!ice_is_vsi_valid(hw, vsi_handle))
1561 return ICE_ERR_PARAM;
1562 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1563 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1567 * ice_aq_get_vsi_params
1568 * @hw: pointer to the HW struct
1569 * @vsi_ctx: pointer to a VSI context struct
1570 * @cd: pointer to command details structure or NULL
1572 * Get VSI context info from hardware (0x0212)
1575 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1576 struct ice_sq_cd *cd)
1578 struct ice_aqc_add_get_update_free_vsi *cmd;
1579 struct ice_aqc_get_vsi_resp *resp;
1580 struct ice_aq_desc desc;
1581 enum ice_status status;
1583 cmd = &desc.params.vsi_cmd;
1584 resp = &desc.params.get_vsi_resp;
1586 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1588 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1590 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1591 sizeof(vsi_ctx->info), cd);
1593 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1595 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1596 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1603 * ice_aq_add_update_mir_rule - add/update a mirror rule
1604 * @hw: pointer to the HW struct
1605 * @rule_type: Rule Type
1606 * @dest_vsi: VSI number to which packets will be mirrored
1607 * @count: length of the list
1608 * @mr_buf: buffer for list of mirrored VSI numbers
1609 * @cd: pointer to command details structure or NULL
1612 * Add/Update Mirror Rule (0x260).
1615 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1616 u16 count, struct ice_mir_rule_buf *mr_buf,
1617 struct ice_sq_cd *cd, u16 *rule_id)
1619 struct ice_aqc_add_update_mir_rule *cmd;
1620 struct ice_aq_desc desc;
1621 enum ice_status status;
1622 __le16 *mr_list = NULL;
1625 switch (rule_type) {
1626 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1627 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1628 /* Make sure count and mr_buf are set for these rule_types */
1629 if (!(count && mr_buf))
1630 return ICE_ERR_PARAM;
1632 buf_size = count * sizeof(__le16);
1633 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1635 return ICE_ERR_NO_MEMORY;
1637 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1638 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1639 /* Make sure count and mr_buf are not set for these
1642 if (count || mr_buf)
1643 return ICE_ERR_PARAM;
1646 ice_debug(hw, ICE_DBG_SW,
1647 "Error due to unsupported rule_type %u\n", rule_type);
1648 return ICE_ERR_OUT_OF_RANGE;
1651 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1653 /* Pre-process 'mr_buf' items for add/update of virtual port
1654 * ingress/egress mirroring (but not physical port ingress/egress
1660 for (i = 0; i < count; i++) {
1663 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1665 /* Validate specified VSI number, make sure it is less
1666 * than ICE_MAX_VSI, if not return with error.
1668 if (id >= ICE_MAX_VSI) {
1669 ice_debug(hw, ICE_DBG_SW,
1670 "Error VSI index (%u) out-of-range\n",
1672 ice_free(hw, mr_list);
1673 return ICE_ERR_OUT_OF_RANGE;
1676 /* add VSI to mirror rule */
1679 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1680 else /* remove VSI from mirror rule */
1681 mr_list[i] = CPU_TO_LE16(id);
1685 cmd = &desc.params.add_update_rule;
1686 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1687 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1688 ICE_AQC_RULE_ID_VALID_M);
1689 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1690 cmd->num_entries = CPU_TO_LE16(count);
1691 cmd->dest = CPU_TO_LE16(dest_vsi);
1693 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
1695 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1697 ice_free(hw, mr_list);
1703 * ice_aq_delete_mir_rule - delete a mirror rule
1704 * @hw: pointer to the HW struct
1705 * @rule_id: Mirror rule ID (to be deleted)
1706 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1707 * otherwise it is returned to the shared pool
1708 * @cd: pointer to command details structure or NULL
1710 * Delete Mirror Rule (0x261).
1713 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1714 struct ice_sq_cd *cd)
1716 struct ice_aqc_delete_mir_rule *cmd;
1717 struct ice_aq_desc desc;
1719 /* rule_id should be in the range 0...63 */
1720 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1721 return ICE_ERR_OUT_OF_RANGE;
1723 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1725 cmd = &desc.params.del_rule;
1726 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1727 cmd->rule_id = CPU_TO_LE16(rule_id);
1730 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1732 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1736 * ice_aq_alloc_free_vsi_list
1737 * @hw: pointer to the HW struct
1738 * @vsi_list_id: VSI list ID returned or used for lookup
1739 * @lkup_type: switch rule filter lookup type
1740 * @opc: switch rules population command type - pass in the command opcode
1742 * allocates or free a VSI list resource
1744 static enum ice_status
1745 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1746 enum ice_sw_lkup_type lkup_type,
1747 enum ice_adminq_opc opc)
1749 struct ice_aqc_alloc_free_res_elem *sw_buf;
1750 struct ice_aqc_res_elem *vsi_ele;
1751 enum ice_status status;
1754 buf_len = sizeof(*sw_buf);
1755 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1756 ice_malloc(hw, buf_len);
1758 return ICE_ERR_NO_MEMORY;
1759 sw_buf->num_elems = CPU_TO_LE16(1);
1761 if (lkup_type == ICE_SW_LKUP_MAC ||
1762 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1763 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1764 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1765 lkup_type == ICE_SW_LKUP_PROMISC ||
1766 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1767 lkup_type == ICE_SW_LKUP_LAST) {
1768 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1769 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1771 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1773 status = ICE_ERR_PARAM;
1774 goto ice_aq_alloc_free_vsi_list_exit;
1777 if (opc == ice_aqc_opc_free_res)
1778 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1780 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1782 goto ice_aq_alloc_free_vsi_list_exit;
1784 if (opc == ice_aqc_opc_alloc_res) {
1785 vsi_ele = &sw_buf->elem[0];
1786 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1789 ice_aq_alloc_free_vsi_list_exit:
1790 ice_free(hw, sw_buf);
1795 * ice_aq_set_storm_ctrl - Sets storm control configuration
1796 * @hw: pointer to the HW struct
1797 * @bcast_thresh: represents the upper threshold for broadcast storm control
1798 * @mcast_thresh: represents the upper threshold for multicast storm control
1799 * @ctl_bitmask: storm control control knobs
1801 * Sets the storm control configuration (0x0280)
1804 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1807 struct ice_aqc_storm_cfg *cmd;
1808 struct ice_aq_desc desc;
1810 cmd = &desc.params.storm_conf;
1812 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
1814 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1815 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1816 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1818 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1822 * ice_aq_get_storm_ctrl - gets storm control configuration
1823 * @hw: pointer to the HW struct
1824 * @bcast_thresh: represents the upper threshold for broadcast storm control
1825 * @mcast_thresh: represents the upper threshold for multicast storm control
1826 * @ctl_bitmask: storm control control knobs
1828 * Gets the storm control configuration (0x0281)
1831 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
1834 enum ice_status status;
1835 struct ice_aq_desc desc;
1837 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1839 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1841 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
1844 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1847 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1850 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1857 * ice_aq_sw_rules - add/update/remove switch rules
1858 * @hw: pointer to the HW struct
1859 * @rule_list: pointer to switch rule population list
1860 * @rule_list_sz: total size of the rule list in bytes
1861 * @num_rules: number of switch rules in the rule_list
1862 * @opc: switch rules population command type - pass in the command opcode
1863 * @cd: pointer to command details structure or NULL
1865 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1867 static enum ice_status
1868 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1869 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1871 struct ice_aq_desc desc;
1873 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1875 if (opc != ice_aqc_opc_add_sw_rules &&
1876 opc != ice_aqc_opc_update_sw_rules &&
1877 opc != ice_aqc_opc_remove_sw_rules)
1878 return ICE_ERR_PARAM;
1880 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1882 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1883 desc.params.sw_rules.num_rules_fltr_entry_index =
1884 CPU_TO_LE16(num_rules);
1885 return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1889 * ice_aq_add_recipe - add switch recipe
1890 * @hw: pointer to the HW struct
1891 * @s_recipe_list: pointer to switch rule population list
1892 * @num_recipes: number of switch recipes in the list
1893 * @cd: pointer to command details structure or NULL
1898 ice_aq_add_recipe(struct ice_hw *hw,
1899 struct ice_aqc_recipe_data_elem *s_recipe_list,
1900 u16 num_recipes, struct ice_sq_cd *cd)
1902 struct ice_aqc_add_get_recipe *cmd;
1903 struct ice_aq_desc desc;
1906 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1907 cmd = &desc.params.add_get_recipe;
1908 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1910 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
1911 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1913 buf_size = num_recipes * sizeof(*s_recipe_list);
1915 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1919 * ice_aq_get_recipe - get switch recipe
1920 * @hw: pointer to the HW struct
1921 * @s_recipe_list: pointer to switch rule population list
1922 * @num_recipes: pointer to the number of recipes (input and output)
1923 * @recipe_root: root recipe number of recipe(s) to retrieve
1924 * @cd: pointer to command details structure or NULL
1928 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1929 * On output, *num_recipes will equal the number of entries returned in
1932 * The caller must supply enough space in s_recipe_list to hold all possible
1933 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1936 ice_aq_get_recipe(struct ice_hw *hw,
1937 struct ice_aqc_recipe_data_elem *s_recipe_list,
1938 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1940 struct ice_aqc_add_get_recipe *cmd;
1941 struct ice_aq_desc desc;
1942 enum ice_status status;
1945 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1946 return ICE_ERR_PARAM;
1948 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1949 cmd = &desc.params.add_get_recipe;
1950 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1952 cmd->return_index = CPU_TO_LE16(recipe_root);
1953 cmd->num_sub_recipes = 0;
1955 buf_size = *num_recipes * sizeof(*s_recipe_list);
1957 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1958 /* cppcheck-suppress constArgument */
1959 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1965 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1966 * @hw: pointer to the HW struct
1967 * @profile_id: package profile ID to associate the recipe with
1968 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1969 * @cd: pointer to command details structure or NULL
1970 * Recipe to profile association (0x0291)
1973 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1974 struct ice_sq_cd *cd)
1976 struct ice_aqc_recipe_to_profile *cmd;
1977 struct ice_aq_desc desc;
1979 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1980 cmd = &desc.params.recipe_to_profile;
1981 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1982 cmd->profile_id = CPU_TO_LE16(profile_id);
1983 /* Set the recipe ID bit in the bitmask to let the device know which
1984 * profile we are associating the recipe to
1986 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1987 ICE_NONDMA_TO_NONDMA);
1989 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1993 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1994 * @hw: pointer to the HW struct
1995 * @profile_id: package profile ID to associate the recipe with
1996 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1997 * @cd: pointer to command details structure or NULL
1998 * Associate profile ID with given recipe (0x0293)
2001 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2002 struct ice_sq_cd *cd)
2004 struct ice_aqc_recipe_to_profile *cmd;
2005 struct ice_aq_desc desc;
2006 enum ice_status status;
2008 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2009 cmd = &desc.params.recipe_to_profile;
2010 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
2011 cmd->profile_id = CPU_TO_LE16(profile_id);
2013 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2015 ice_memcpy(r_bitmap, cmd->recipe_assoc,
2016 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
2022 * ice_alloc_recipe - add recipe resource
2023 * @hw: pointer to the hardware structure
2024 * @rid: recipe ID returned as response to AQ call
2026 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
2028 struct ice_aqc_alloc_free_res_elem *sw_buf;
2029 enum ice_status status;
2032 buf_len = sizeof(*sw_buf);
2033 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2035 return ICE_ERR_NO_MEMORY;
2037 sw_buf->num_elems = CPU_TO_LE16(1);
2038 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
2039 ICE_AQC_RES_TYPE_S) |
2040 ICE_AQC_RES_TYPE_FLAG_SHARED);
2041 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2042 ice_aqc_opc_alloc_res, NULL);
2044 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2045 ice_free(hw, sw_buf);
2050 /* ice_init_port_info - Initialize port_info with switch configuration data
2051 * @pi: pointer to port_info
2052 * @vsi_port_num: VSI number or port number
2053 * @type: Type of switch element (port or VSI)
2054 * @swid: switch ID of the switch the element is attached to
2055 * @pf_vf_num: PF or VF number
2056 * @is_vf: true if the element is a VF, false otherwise
2059 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2060 u16 swid, u16 pf_vf_num, bool is_vf)
2063 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2064 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2066 pi->pf_vf_num = pf_vf_num;
2068 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
2069 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
2072 ice_debug(pi->hw, ICE_DBG_SW,
2073 "incorrect VSI/port type received\n");
2078 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
2079 * @hw: pointer to the hardware structure
2081 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
2083 struct ice_aqc_get_sw_cfg_resp *rbuf;
2084 enum ice_status status;
2091 num_total_ports = 1;
2093 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
2094 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
2097 return ICE_ERR_NO_MEMORY;
2099 /* Multiple calls to ice_aq_get_sw_cfg may be required
2100 * to get all the switch configuration information. The need
2101 * for additional calls is indicated by ice_aq_get_sw_cfg
2102 * writing a non-zero value in req_desc
2105 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
2106 &req_desc, &num_elems, NULL);
2111 for (i = 0; i < num_elems; i++) {
2112 struct ice_aqc_get_sw_cfg_resp_elem *ele;
2113 u16 pf_vf_num, swid, vsi_port_num;
2117 ele = rbuf[i].elements;
2118 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
2119 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
2121 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
2122 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
2124 swid = LE16_TO_CPU(ele->swid);
2126 if (LE16_TO_CPU(ele->pf_vf_num) &
2127 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
2130 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
2131 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
2134 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2135 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
2136 if (j == num_total_ports) {
2137 ice_debug(hw, ICE_DBG_SW,
2138 "more ports than expected\n");
2139 status = ICE_ERR_CFG;
2142 ice_init_port_info(hw->port_info,
2143 vsi_port_num, res_type, swid,
2151 } while (req_desc && !status);
2154 ice_free(hw, (void *)rbuf);
2159 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2160 * @hw: pointer to the hardware structure
2161 * @fi: filter info structure to fill/update
2163 * This helper function populates the lb_en and lan_en elements of the provided
2164 * ice_fltr_info struct using the switch's type and characteristics of the
2165 * switch rule being configured.
/* NOTE(review): this region is a lossy extract -- braces and the
 * assignments these conditions guard (presumably the fi->lb_en /
 * fi->lan_en updates, per the function's stated purpose above) are
 * missing. Restore the full body from upstream before compiling; the
 * comments below annotate only the logic that is visible here.
 */
2167 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
/* Rx-direction rule using the advanced (ICE_SW_LKUP_LAST) lookup that
 * forwards to a VSI or VSI list -- the statement this condition guards
 * is missing here; presumably it enables lan_en. TODO confirm upstream.
 */
2169 if ((fi->flag & ICE_FLTR_RX) &&
2170 (fi->fltr_act == ICE_FWD_TO_VSI ||
2171 fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
2172 fi->lkup_type == ICE_SW_LKUP_LAST)
/* Tx-direction rules with any forwarding action: decide loopback (lb_en)
 * and LAN enable (lan_en) below.
 */
2176 if ((fi->flag & ICE_FLTR_TX) &&
2177 (fi->fltr_act == ICE_FWD_TO_VSI ||
2178 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2179 fi->fltr_act == ICE_FWD_TO_Q ||
2180 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2181 /* Setting LB for prune actions will result in replicated
2182 * packets to the internal switch that will be dropped.
2184 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2187 /* Set lan_en to TRUE if
2188 * 1. The switch is a VEB AND
2190 * 2.1 The lookup is a directional lookup like ethertype,
2191 * promiscuous, ethertype-MAC, promiscuous-VLAN
2192 * and default-port OR
2193 * 2.2 The lookup is VLAN, OR
2194 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2195 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
2199 * The switch is a VEPA.
2201 * In all other cases, the LAN enable has to be set to false.
/* Directional lookups, VLAN, and non-unicast MAC / MAC_VLAN lookups get
 * LAN enable on a VEB (per the comment above); the assignment itself is
 * one of the missing lines.
 */
2204 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2205 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
2206 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2207 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2208 fi->lkup_type == ICE_SW_LKUP_DFLT ||
2209 fi->lkup_type == ICE_SW_LKUP_VLAN ||
2210 (fi->lkup_type == ICE_SW_LKUP_MAC &&
2211 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
2212 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
2213 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
2222 * ice_fill_sw_rule - Helper function to fill switch rule structure
2223 * @hw: pointer to the hardware structure
2224 * @f_info: entry containing packet forwarding information
2225 * @s_rule: switch rule structure to be filled in based on mac_entry
2226 * @opc: switch rules population command type - pass in the command opcode
2229 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
2230 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
2232 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
2240 if (opc == ice_aqc_opc_remove_sw_rules) {
2241 s_rule->pdata.lkup_tx_rx.act = 0;
2242 s_rule->pdata.lkup_tx_rx.index =
2243 CPU_TO_LE16(f_info->fltr_rule_id);
2244 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
2248 eth_hdr_sz = sizeof(dummy_eth_header);
2249 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
2251 /* initialize the ether header with a dummy header */
2252 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
2253 ice_fill_sw_info(hw, f_info);
2255 switch (f_info->fltr_act) {
2256 case ICE_FWD_TO_VSI:
2257 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
2258 ICE_SINGLE_ACT_VSI_ID_M;
2259 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2260 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2261 ICE_SINGLE_ACT_VALID_BIT;
2263 case ICE_FWD_TO_VSI_LIST:
2264 act |= ICE_SINGLE_ACT_VSI_LIST;
2265 act |= (f_info->fwd_id.vsi_list_id <<
2266 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
2267 ICE_SINGLE_ACT_VSI_LIST_ID_M;
2268 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2269 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2270 ICE_SINGLE_ACT_VALID_BIT;
2273 act |= ICE_SINGLE_ACT_TO_Q;
2274 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2275 ICE_SINGLE_ACT_Q_INDEX_M;
2277 case ICE_DROP_PACKET:
2278 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
2279 ICE_SINGLE_ACT_VALID_BIT;
2281 case ICE_FWD_TO_QGRP:
2282 q_rgn = f_info->qgrp_size > 0 ?
2283 (u8)ice_ilog2(f_info->qgrp_size) : 0;
2284 act |= ICE_SINGLE_ACT_TO_Q;
2285 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2286 ICE_SINGLE_ACT_Q_INDEX_M;
2287 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
2288 ICE_SINGLE_ACT_Q_REGION_M;
2295 act |= ICE_SINGLE_ACT_LB_ENABLE;
2297 act |= ICE_SINGLE_ACT_LAN_ENABLE;
2299 switch (f_info->lkup_type) {
2300 case ICE_SW_LKUP_MAC:
2301 daddr = f_info->l_data.mac.mac_addr;
2303 case ICE_SW_LKUP_VLAN:
2304 vlan_id = f_info->l_data.vlan.vlan_id;
2305 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
2306 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
2307 act |= ICE_SINGLE_ACT_PRUNE;
2308 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
2311 case ICE_SW_LKUP_ETHERTYPE_MAC:
2312 daddr = f_info->l_data.ethertype_mac.mac_addr;
2314 case ICE_SW_LKUP_ETHERTYPE:
2315 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2316 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
2318 case ICE_SW_LKUP_MAC_VLAN:
2319 daddr = f_info->l_data.mac_vlan.mac_addr;
2320 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2322 case ICE_SW_LKUP_PROMISC_VLAN:
2323 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2325 case ICE_SW_LKUP_PROMISC:
2326 daddr = f_info->l_data.mac_vlan.mac_addr;
2332 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
2333 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
2334 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
2336 /* Recipe set depending on lookup type */
2337 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
2338 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
2339 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2342 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
2343 ICE_NONDMA_TO_NONDMA);
2345 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
2346 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
2347 *off = CPU_TO_BE16(vlan_id);
2350 /* Create the switch rule with the final dummy Ethernet header */
2351 if (opc != ice_aqc_opc_update_sw_rules)
2352 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
2356 * ice_add_marker_act
2357 * @hw: pointer to the hardware structure
2358 * @m_ent: the management entry for which sw marker needs to be added
2359 * @sw_marker: sw marker to tag the Rx descriptor with
2360 * @l_id: large action resource ID
2362 * Create a large action to hold software marker and update the switch rule
2363 * entry pointed by m_ent with newly created large action
2365 static enum ice_status
2366 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2367 u16 sw_marker, u16 l_id)
2369 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
2370 /* For software marker we need 3 large actions
2371 * 1. FWD action: FWD TO VSI or VSI LIST
2372 * 2. GENERIC VALUE action to hold the profile ID
2373 * 3. GENERIC VALUE action to hold the software marker ID
2375 const u16 num_lg_acts = 3;
2376 enum ice_status status;
/* SW markers are only supported on MAC lookup rules */
2382 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2383 return ICE_ERR_PARAM;
2385 /* Create two back-to-back switch rules and submit them to the HW using
2386 * one memory buffer:
/* lg_act is followed in the same buffer by the lookup Tx/Rx rule */
2390 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2391 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2392 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2394 return ICE_ERR_NO_MEMORY;
2396 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2398 /* Fill in the first switch rule i.e. large action */
2399 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2400 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2401 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2403 /* First action VSI forwarding or VSI list forwarding depending on how
/* more than one subscribed VSI means a VSI list is in use */
2406 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2407 m_ent->fltr_info.fwd_id.hw_vsi_id;
2409 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2410 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2411 ICE_LG_ACT_VSI_LIST_ID_M;
2412 if (m_ent->vsi_count > 1)
2413 act |= ICE_LG_ACT_VSI_LIST;
2414 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2416 /* Second action descriptor type */
2417 act = ICE_LG_ACT_GENERIC;
2419 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2420 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
/* generic action writes at the Rx descriptor profile index offset */
2422 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2423 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2425 /* Third action Marker value */
2426 act |= ICE_LG_ACT_GENERIC;
2427 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2428 ICE_LG_ACT_GENERIC_VALUE_M;
2430 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
2432 /* call the fill switch rule to fill the lookup Tx Rx structure */
2433 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2434 ice_aqc_opc_update_sw_rules);
2436 /* Update the action to point to the large action ID */
2437 rx_tx->pdata.lkup_tx_rx.act =
2438 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
2439 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2440 ICE_SINGLE_ACT_PTR_VAL_M));
2442 /* Use the filter rule ID of the previously created rule with single
2443 * act. Once the update happens, hardware will treat this as large
2446 rx_tx->pdata.lkup_tx_rx.index =
2447 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
/* submit both back-to-back rules in a single AQ call */
2449 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2450 ice_aqc_opc_update_sw_rules, NULL);
/* remember marker/large-action bookkeeping on the management entry */
2452 m_ent->lg_act_idx = l_id;
2453 m_ent->sw_marker_id = sw_marker;
2456 ice_free(hw, lg_act);
2461 * ice_add_counter_act - add/update filter rule with counter action
2462 * @hw: pointer to the hardware structure
2463 * @m_ent: the management entry for which counter needs to be added
2464 * @counter_id: VLAN counter ID returned as part of allocate resource
2465 * @l_id: large action resource ID
2467 static enum ice_status
2468 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2469 u16 counter_id, u16 l_id)
2471 struct ice_aqc_sw_rules_elem *lg_act;
2472 struct ice_aqc_sw_rules_elem *rx_tx;
2473 enum ice_status status;
2474 /* 2 actions will be added while adding a large action counter */
2475 const int num_acts = 2;
/* counter large actions are only supported on MAC lookup rules */
2482 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2483 return ICE_ERR_PARAM;
2485 /* Create two back-to-back switch rules and submit them to the HW using
2486 * one memory buffer:
2490 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2491 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2492 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
2495 return ICE_ERR_NO_MEMORY;
/* second rule lives directly after the large action in the buffer */
2497 rx_tx = (struct ice_aqc_sw_rules_elem *)
2498 ((u8 *)lg_act + lg_act_size);
2500 /* Fill in the first switch rule i.e. large action */
2501 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2502 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2503 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2505 /* First action VSI forwarding or VSI list forwarding depending on how
/* more than one subscribed VSI means a VSI list is in use */
2508 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2509 m_ent->fltr_info.fwd_id.hw_vsi_id;
2511 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2512 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2513 ICE_LG_ACT_VSI_LIST_ID_M;
2514 if (m_ent->vsi_count > 1)
2515 act |= ICE_LG_ACT_VSI_LIST;
2516 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2518 /* Second action counter ID */
2519 act = ICE_LG_ACT_STAT_COUNT;
2520 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2521 ICE_LG_ACT_STAT_COUNT_M;
2522 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2524 /* call the fill switch rule to fill the lookup Tx Rx structure */
2525 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2526 ice_aqc_opc_update_sw_rules);
/* redirect the existing lookup rule to the new large action */
2528 act = ICE_SINGLE_ACT_PTR;
2529 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2530 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2532 /* Use the filter rule ID of the previously created rule with single
2533 * act. Once the update happens, hardware will treat this as large
2536 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2537 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
/* submit both back-to-back rules in a single AQ call */
2539 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2540 ice_aqc_opc_update_sw_rules, NULL);
/* record large-action index and counter on the management entry */
2542 m_ent->lg_act_idx = l_id;
2543 m_ent->counter_index = counter_id;
2546 ice_free(hw, lg_act);
2551 * ice_create_vsi_list_map
2552 * @hw: pointer to the hardware structure
2553 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2554 * @num_vsi: number of VSI handles in the array
2555 * @vsi_list_id: VSI list ID generated as part of allocate resource
2557 * Helper function to create a new entry of VSI list ID to VSI mapping
2558 * using the given VSI list ID
2560 static struct ice_vsi_list_map_info *
2561 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2564 struct ice_switch_info *sw = hw->switch_info;
2565 struct ice_vsi_list_map_info *v_map;
2568 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2573 v_map->vsi_list_id = vsi_list_id;
/* mark each given VSI handle as a member of this list's bitmap */
2575 for (i = 0; i < num_vsi; i++)
2576 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
/* track the mapping on the switch-wide list so it can be found later */
2578 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2583 * ice_update_vsi_list_rule
2584 * @hw: pointer to the hardware structure
2585 * @vsi_handle_arr: array of VSI handles to form a VSI list
2586 * @num_vsi: number of VSI handles in the array
2587 * @vsi_list_id: VSI list ID generated as part of allocate resource
2588 * @remove: Boolean value to indicate if this is a remove action
2589 * @opc: switch rules population command type - pass in the command opcode
2590 * @lkup_type: lookup type of the filter
2592 * Call AQ command to add a new switch rule or update existing switch rule
2593 * using the given VSI list ID
2595 static enum ice_status
2596 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2597 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2598 enum ice_sw_lkup_type lkup_type)
2600 struct ice_aqc_sw_rules_elem *s_rule;
2601 enum ice_status status;
2607 return ICE_ERR_PARAM;
/* pick the AQ rule type: VSI list set/clear for most lookups,
 * prune list set/clear for VLAN-based lookups
 */
2609 if (lkup_type == ICE_SW_LKUP_MAC ||
2610 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2611 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2612 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2613 lkup_type == ICE_SW_LKUP_PROMISC ||
2614 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2615 lkup_type == ICE_SW_LKUP_LAST)
2616 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2617 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2618 else if (lkup_type == ICE_SW_LKUP_VLAN)
2619 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2620 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2622 return ICE_ERR_PARAM;
2624 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2625 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2627 return ICE_ERR_NO_MEMORY;
2628 for (i = 0; i < num_vsi; i++) {
2629 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2630 status = ICE_ERR_PARAM;
2633 /* AQ call requires hw_vsi_id(s) */
2634 s_rule->pdata.vsi_list.vsi[i] =
2635 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2638 s_rule->type = CPU_TO_LE16(rule_type);
2639 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2640 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2642 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2645 ice_free(hw, s_rule);
2650 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2651 * @hw: pointer to the HW struct
2652 * @vsi_handle_arr: array of VSI handles to form a VSI list
2653 * @num_vsi: number of VSI handles in the array
2654 * @vsi_list_id: stores the ID of the VSI list to be created
2655 * @lkup_type: switch rule filter's lookup type
2657 static enum ice_status
2658 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2659 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2661 enum ice_status status;
/* first allocate a VSI list resource from firmware */
2663 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2664 ice_aqc_opc_alloc_res);
2668 /* Update the newly created VSI list to include the specified VSIs */
2669 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2670 *vsi_list_id, false,
2671 ice_aqc_opc_add_sw_rules, lkup_type);
2675 * ice_create_pkt_fwd_rule
2676 * @hw: pointer to the hardware structure
2677 * @recp_list: corresponding filter management list
2678 * @f_entry: entry containing packet forwarding information
2680 * Create switch rule with given filter information and add an entry
2681 * to the corresponding filter management list to track this switch rule
2684 static enum ice_status
2685 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2686 struct ice_fltr_list_entry *f_entry)
2688 struct ice_fltr_mgmt_list_entry *fm_entry;
2689 struct ice_aqc_sw_rules_elem *s_rule;
2690 enum ice_status status;
2692 s_rule = (struct ice_aqc_sw_rules_elem *)
2693 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2695 return ICE_ERR_NO_MEMORY;
2696 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2697 ice_malloc(hw, sizeof(*fm_entry));
2699 status = ICE_ERR_NO_MEMORY;
2700 goto ice_create_pkt_fwd_rule_exit;
2703 fm_entry->fltr_info = f_entry->fltr_info;
2705 /* Initialize all the fields for the management entry */
2706 fm_entry->vsi_count = 1;
2707 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2708 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2709 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2711 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2712 ice_aqc_opc_add_sw_rules);
2714 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2715 ice_aqc_opc_add_sw_rules, NULL);
/* on AQ failure, free the management entry before exiting */
2717 ice_free(hw, fm_entry);
2718 goto ice_create_pkt_fwd_rule_exit;
/* propagate the firmware-assigned rule ID to caller and bookkeeping */
2721 f_entry->fltr_info.fltr_rule_id =
2722 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2723 fm_entry->fltr_info.fltr_rule_id =
2724 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2726 /* The book keeping entries will get removed when base driver
2727 * calls remove filter AQ command
2729 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
2731 ice_create_pkt_fwd_rule_exit:
2732 ice_free(hw, s_rule);
2737 * ice_update_pkt_fwd_rule
2738 * @hw: pointer to the hardware structure
2739 * @f_info: filter information for switch rule
2741 * Call AQ command to update a previously created switch rule with a
2744 static enum ice_status
2745 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2747 struct ice_aqc_sw_rules_elem *s_rule;
2748 enum ice_status status;
2750 s_rule = (struct ice_aqc_sw_rules_elem *)
2751 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE)ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2753 return ICE_ERR_NO_MEMORY;
2755 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
/* target the existing rule by its firmware-assigned rule ID */
2757 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2759 /* Update switch rule with new rule set to forward VSI list */
2760 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2761 ice_aqc_opc_update_sw_rules, NULL);
2763 ice_free(hw, s_rule);
2768 * ice_update_sw_rule_bridge_mode
2769 * @hw: pointer to the HW struct
2771 * Updates unicast switch filter rules based on VEB/VEPA mode
2773 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2775 struct ice_switch_info *sw = hw->switch_info;
2776 struct ice_fltr_mgmt_list_entry *fm_entry;
2777 enum ice_status status = ICE_SUCCESS;
2778 struct LIST_HEAD_TYPE *rule_head;
2779 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
/* only MAC lookup rules are affected by bridge mode */
2781 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2782 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2784 ice_acquire_lock(rule_lock);
2785 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2787 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2788 u8 *addr = fi->l_data.mac.mac_addr;
2790 /* Update unicast Tx rules to reflect the selected
/* only forwarding-action Tx unicast rules need re-programming */
2793 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2794 (fi->fltr_act == ICE_FWD_TO_VSI ||
2795 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2796 fi->fltr_act == ICE_FWD_TO_Q ||
2797 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2798 status = ice_update_pkt_fwd_rule(hw, fi);
2804 ice_release_lock(rule_lock);
2810 * ice_add_update_vsi_list
2811 * @hw: pointer to the hardware structure
2812 * @m_entry: pointer to current filter management list entry
2813 * @cur_fltr: filter information from the book keeping entry
2814 * @new_fltr: filter information with the new VSI to be added
2816 * Call AQ command to add or update previously created VSI list with new VSI.
2818 * Helper function to do book keeping associated with adding filter information
2819 * The algorithm to do the book keeping is described below :
2820 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2821 * if only one VSI has been added till now
2822 * Allocate a new VSI list and add two VSIs
2823 * to this list using switch rule command
2824 * Update the previously created switch rule with the
2825 * newly created VSI list ID
2826 * if a VSI list was previously created
2827 * Add the new VSI to the previously created VSI list set
2828 * using the update switch rule command
2830 static enum ice_status
2831 ice_add_update_vsi_list(struct ice_hw *hw,
2832 struct ice_fltr_mgmt_list_entry *m_entry,
2833 struct ice_fltr_info *cur_fltr,
2834 struct ice_fltr_info *new_fltr)
2836 enum ice_status status = ICE_SUCCESS;
2837 u16 vsi_list_id = 0;
/* queue/queue-group destinations cannot be merged into a VSI list */
2839 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2840 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2841 return ICE_ERR_NOT_IMPL;
2843 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2844 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2845 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2846 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2847 return ICE_ERR_NOT_IMPL;
2849 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2850 /* Only one entry existed in the mapping and it was not already
2851 * a part of a VSI list. So, create a VSI list with the old and
2854 struct ice_fltr_info tmp_fltr;
2855 u16 vsi_handle_arr[2];
2857 /* A rule already exists with the new VSI being added */
2858 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2859 return ICE_ERR_ALREADY_EXISTS;
2861 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2862 vsi_handle_arr[1] = new_fltr->vsi_handle;
2863 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2865 new_fltr->lkup_type);
2869 tmp_fltr = *new_fltr;
2870 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2871 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2872 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2873 /* Update the previous switch rule of "MAC forward to VSI" to
2874 * "MAC fwd to VSI list"
2876 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* switch the bookkeeping entry over to the new VSI list */
2880 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2881 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2882 m_entry->vsi_list_info =
2883 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2886 /* If this entry was large action then the large action needs
2887 * to be updated to point to FWD to VSI list
2889 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2891 ice_add_marker_act(hw, m_entry,
2892 m_entry->sw_marker_id,
2893 m_entry->lg_act_idx)m_entry->lg_act_idx);
2895 u16 vsi_handle = new_fltr->vsi_handle;
2896 enum ice_adminq_opc opcode;
2898 if (!m_entry->vsi_list_info)
2901 /* A rule already exists with the new VSI being added */
2902 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
2905 /* Update the previously created VSI list set with
2906 * the new VSI ID passed in
2908 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2909 opcode = ice_aqc_opc_update_sw_rules;
2911 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2912 vsi_list_id, false, opcode,
2913 new_fltr->lkup_type);
2914 /* update VSI list mapping info with new VSI ID */
2916 ice_set_bit(vsi_handle,
2917 m_entry->vsi_list_info->vsi_map);
/* count the new subscriber regardless of which path was taken */
2920 m_entry->vsi_count++;
2925 * ice_find_rule_entry - Search a rule entry
2926 * @list_head: head of rule list
2927 * @f_info: rule information
2929 * Helper function to search for a given rule entry
2930 * Returns pointer to entry storing the rule if found
2932 static struct ice_fltr_mgmt_list_entry *
2933 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
2934 struct ice_fltr_info *f_info)
2936 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2938 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
/* match on both the lookup data and the Rx/Tx flag */
2940 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2941 sizeof(f_info->l_data)) &&
2942 f_info->flag == list_itr->fltr_info.flag) {
2951 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2952 * @recp_list: VSI lists needs to be searched
2953 * @vsi_handle: VSI handle to be found in VSI list
2954 * @vsi_list_id: VSI list ID found containing vsi_handle
2956 * Helper function to search a VSI list with single entry containing given VSI
2957 * handle element. This can be extended further to search VSI list with more
2958 * than 1 vsi_count. Returns pointer to VSI list entry if found.
2960 static struct ice_vsi_list_map_info *
2961 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
2964 struct ice_vsi_list_map_info *map_info = NULL;
2965 struct LIST_HEAD_TYPE *list_head;
2967 list_head = &recp_list->filt_rules;
/* advanced recipes store a different management entry type */
2968 if (recp_list->adv_rule) {
2969 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2971 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2972 ice_adv_fltr_mgmt_list_entry,
2974 if (list_itr->vsi_list_info) {
2975 map_info = list_itr->vsi_list_info;
2976 if (ice_is_bit_set(map_info->vsi_map,
2978 *vsi_list_id = map_info->vsi_list_id;
2984 struct ice_fltr_mgmt_list_entry *list_itr;
2986 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2987 ice_fltr_mgmt_list_entry,
/* non-advanced path only considers single-VSI entries with a list */
2989 if (list_itr->vsi_count == 1 &&
2990 list_itr->vsi_list_info) {
2991 map_info = list_itr->vsi_list_info;
2992 if (ice_is_bit_set(map_info->vsi_map,
2994 *vsi_list_id = map_info->vsi_list_id;
3004 * ice_add_rule_internal - add rule for a given lookup type
3005 * @hw: pointer to the hardware structure
3006 * @recp_list: recipe list for which rule has to be added
3007 * @lport: logic port number on which function add rule
3008 * @f_entry: structure containing MAC forwarding information
3010 * Adds or updates the rule lists for a given recipe
3012 static enum ice_status
3013 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3014 u8 lport, struct ice_fltr_list_entry *f_entry)
3016 struct ice_fltr_info *new_fltr, *cur_fltr;
3017 struct ice_fltr_mgmt_list_entry *m_entry;
3018 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3019 enum ice_status status = ICE_SUCCESS;
3021 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3022 return ICE_ERR_PARAM;
3024 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
3025 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
3026 f_entry->fltr_info.fwd_id.hw_vsi_id =
3027 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3029 rule_lock = &recp_list->filt_rule_lock;
3031 ice_acquire_lock(rule_lock);
3032 new_fltr = &f_entry->fltr_info;
/* Rx rules are sourced from the port, Tx rules from the HW VSI */
3033 if (new_fltr->flag & ICE_FLTR_RX)
3034 new_fltr->src = lport;
3035 else if (new_fltr->flag & ICE_FLTR_TX)
3037 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
/* no existing entry: create a fresh forwarding rule; otherwise
 * fold the new VSI into the existing rule's VSI list
 */
3039 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
3041 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3042 goto exit_add_rule_internal;
3045 cur_fltr = &m_entry->fltr_info;
3046 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
3048 exit_add_rule_internal:
3049 ice_release_lock(rule_lock);
3054 * ice_remove_vsi_list_rule
3055 * @hw: pointer to the hardware structure
3056 * @vsi_list_id: VSI list ID generated as part of allocate resource
3057 * @lkup_type: switch rule filter lookup type
3059 * The VSI list should be emptied before this function is called to remove the
3062 static enum ice_status
3063 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
3064 enum ice_sw_lkup_type lkup_type)
3066 struct ice_aqc_sw_rules_elem *s_rule;
3067 enum ice_status status;
/* zero-VSI sized rule: only the header/index is needed for a clear */
3070 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
3071 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3073 return ICE_ERR_NO_MEMORY;
3075 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
3076 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
3078 /* Free the vsi_list resource that we allocated. It is assumed that the
3079 * list is empty at this point.
3081 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
3082 ice_aqc_opc_free_res);
3084 ice_free(hw, s_rule);
3089 * ice_rem_update_vsi_list
3090 * @hw: pointer to the hardware structure
3091 * @vsi_handle: VSI handle of the VSI to remove
3092 * @fm_list: filter management entry for which the VSI list management needs to
3095 static enum ice_status
3096 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
3097 struct ice_fltr_mgmt_list_entry *fm_list)
3099 enum ice_sw_lkup_type lkup_type;
3100 enum ice_status status = ICE_SUCCESS;
/* entry must be a non-empty VSI-list rule to have anything to remove */
3103 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
3104 fm_list->vsi_count == 0)
3105 return ICE_ERR_PARAM;
3107 /* A rule with the VSI being removed does not exist */
3108 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
3109 return ICE_ERR_DOES_NOT_EXIST;
3111 lkup_type = fm_list->fltr_info.lkup_type;
3112 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
/* remove=true drops just this VSI from the HW VSI list */
3113 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
3114 ice_aqc_opc_update_sw_rules,
3119 fm_list->vsi_count--;
3120 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
/* with one VSI left (non-VLAN), collapse the list back to a direct
 * forward-to-VSI rule
 */
3122 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
3123 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
3124 struct ice_vsi_list_map_info *vsi_list_info =
3125 fm_list->vsi_list_info;
3128 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
3130 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
3131 return ICE_ERR_OUT_OF_RANGE;
3133 /* Make sure VSI list is empty before removing it below */
3134 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
3136 ice_aqc_opc_update_sw_rules,
3141 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
3142 tmp_fltr_info.fwd_id.hw_vsi_id =
3143 ice_get_hw_vsi_num(hw, rem_vsi_handle);
3144 tmp_fltr_info.vsi_handle = rem_vsi_handle;
3145 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
3147 ice_debug(hw, ICE_DBG_SW,
3148 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
3149 tmp_fltr_info.fwd_id.hw_vsi_id, status);
3153 fm_list->fltr_info = tmp_fltr_info;
/* release the now-unused VSI list resource and its mapping entry */
3156 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
3157 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
3158 struct ice_vsi_list_map_info *vsi_list_info =
3159 fm_list->vsi_list_info;
3161 /* Remove the VSI list since it is no longer used */
3162 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
3164 ice_debug(hw, ICE_DBG_SW,
3165 "Failed to remove VSI list %d, error %d\n",
3166 vsi_list_id, status);
3170 LIST_DEL(&vsi_list_info->list_entry);
3171 ice_free(hw, vsi_list_info);
3172 fm_list->vsi_list_info = NULL;
3179 * ice_remove_rule_internal - Remove a filter rule of a given type
3181 * @hw: pointer to the hardware structure
3182 * @recp_list: recipe list for which the rule needs to removed
3183 * @f_entry: rule entry containing filter information
3185 static enum ice_status
3186 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3187 struct ice_fltr_list_entry *f_entry)
3189 struct ice_fltr_mgmt_list_entry *list_elem;
3190 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3191 enum ice_status status = ICE_SUCCESS;
3192 bool remove_rule = false;
3195 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3196 return ICE_ERR_PARAM;
3197 f_entry->fltr_info.fwd_id.hw_vsi_id =
3198 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3200 rule_lock = &recp_list->filt_rule_lock;
3201 ice_acquire_lock(rule_lock);
3202 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
3203 &f_entry->fltr_info);
3205 status = ICE_ERR_DOES_NOT_EXIST;
/* non-list rules are removed outright; list rules need VSI-list
 * bookkeeping first
 */
3209 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
3211 } else if (!list_elem->vsi_list_info) {
3212 status = ICE_ERR_DOES_NOT_EXIST;
3214 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
3215 /* a ref_cnt > 1 indicates that the vsi_list is being
3216 * shared by multiple rules. Decrement the ref_cnt and
3217 * remove this rule, but do not modify the list, as it
3218 * is in-use by other rules.
3220 list_elem->vsi_list_info->ref_cnt--;
3223 /* a ref_cnt of 1 indicates the vsi_list is only used
3224 * by one rule. However, the original removal request is only
3225 * for a single VSI. Update the vsi_list first, and only
3226 * remove the rule if there are no further VSIs in this list.
3228 vsi_handle = f_entry->fltr_info.vsi_handle;
3229 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
3232 /* if VSI count goes to zero after updating the VSI list */
3233 if (list_elem->vsi_count == 0)
3238 /* Remove the lookup rule */
3239 struct ice_aqc_sw_rules_elem *s_rule;
3241 s_rule = (struct ice_aqc_sw_rules_elem *)
3242 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
3244 status = ICE_ERR_NO_MEMORY;
3248 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3249 ice_aqc_opc_remove_sw_rules);
3251 status = ice_aq_sw_rules(hw, s_rule,
3252 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
3253 ice_aqc_opc_remove_sw_rules, NULL);
3255 /* Remove a book keeping from the list */
3256 ice_free(hw, s_rule);
3261 LIST_DEL(&list_elem->list_entry);
3262 ice_free(hw, list_elem);
3265 ice_release_lock(rule_lock);
3270 * ice_aq_get_res_alloc - get allocated resources
3271 * @hw: pointer to the HW struct
3272 * @num_entries: pointer to u16 to store the number of resource entries returned
3273 * @buf: pointer to user-supplied buffer
3274 * @buf_size: size of buff
3275 * @cd: pointer to command details structure or NULL
3277 * The user-supplied buffer must be large enough to store the resource
3278 * information for all resource types. Each resource type is an
3279 * ice_aqc_get_res_resp_data_elem structure.
3282 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
3283 u16 buf_size, struct ice_sq_cd *cd)
3285 struct ice_aqc_get_res_alloc *resp;
3286 enum ice_status status;
3287 struct ice_aq_desc desc;
3290 return ICE_ERR_BAD_PTR;
/* buffer must cover every resource type's response element */
3292 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
3293 return ICE_ERR_INVAL_SIZE;
3295 resp = &desc.params.get_res;
3297 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
3298 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* num_entries is optional; only filled on successful AQ completion */
3300 if (!status && num_entries)
3301 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
3307 * ice_aq_get_res_descs - get allocated resource descriptors
3308 * @hw: pointer to the hardware structure
3309 * @num_entries: number of resource entries in buffer
3310 * @buf: Indirect buffer to hold data parameters and response
3311 * @buf_size: size of buffer for indirect commands
3312 * @res_type: resource type
3313 * @res_shared: is resource shared
3314 * @desc_id: input - first desc ID to start; output - next desc ID
3315 * @cd: pointer to command details structure or NULL
3318 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
3319 struct ice_aqc_get_allocd_res_desc_resp *buf,
3320 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
3321 struct ice_sq_cd *cd)
3323 struct ice_aqc_get_allocd_res_desc *cmd;
3324 struct ice_aq_desc desc;
3325 enum ice_status status;
3327 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3329 cmd = &desc.params.get_res_desc;
3332 return ICE_ERR_PARAM;
/* buffer size must exactly match the requested entry count */
3334 if (buf_size != (num_entries * sizeof(*buf)))
3335 return ICE_ERR_PARAM;
3337 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
/* encode resource type and shared flag into the command's res field */
3339 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
3340 ICE_AQC_RES_TYPE_M) | (res_shared ?
3341 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
3342 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
3344 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* hand back the next descriptor ID so the caller can continue paging */
3346 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
3352 * ice_add_mac_rule - Add a MAC address based filter rule
3353 * @hw: pointer to the hardware structure
3354 * @m_list: list of MAC addresses and forwarding information
3355 * @sw: pointer to switch info struct for which function add rule
3356 * @lport: logic port number on which function add rule
3358 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
3359 * multiple unicast addresses, the function assumes that all the
3360 * addresses are unique in a given add_mac call. It doesn't
3361 * check for duplicates in this case, removing duplicates from a given
3362 * list should be taken care of in the caller of this function.
3364 static enum ice_status
3365 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
3366 struct ice_switch_info *sw, u8 lport)
3368 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
3369 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3370 struct ice_fltr_list_entry *m_list_itr;
3371 struct LIST_HEAD_TYPE *rule_head;
3372 u16 total_elem_left, s_rule_size;
3373 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3374 enum ice_status status = ICE_SUCCESS;
3375 u16 num_unicast = 0;
3379 rule_lock = &recp_list->filt_rule_lock;
3380 rule_head = &recp_list->filt_rules;
3382 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3384 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3388 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3389 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3390 if (!ice_is_vsi_valid(hw, vsi_handle))
3391 return ICE_ERR_PARAM;
3392 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3393 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3394 /* update the src in case it is VSI num */
3395 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3396 return ICE_ERR_PARAM;
3397 m_list_itr->fltr_info.src = hw_vsi_id;
3398 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3399 IS_ZERO_ETHER_ADDR(add))
3400 return ICE_ERR_PARAM;
3401 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3402 /* Don't overwrite the unicast address */
3403 ice_acquire_lock(rule_lock);
3404 if (ice_find_rule_entry(rule_head,
3405 &m_list_itr->fltr_info)) {
3406 ice_release_lock(rule_lock);
3407 return ICE_ERR_ALREADY_EXISTS;
3409 ice_release_lock(rule_lock);
3411 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3412 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3413 m_list_itr->status =
3414 ice_add_rule_internal(hw, recp_list, lport,
3416 if (m_list_itr->status)
3417 return m_list_itr->status;
3421 ice_acquire_lock(rule_lock);
3422 /* Exit if no suitable entries were found for adding bulk switch rule */
3424 status = ICE_SUCCESS;
3425 goto ice_add_mac_exit;
3428 /* Allocate switch rule buffer for the bulk update for unicast */
3429 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3430 s_rule = (struct ice_aqc_sw_rules_elem *)
3431 ice_calloc(hw, num_unicast, s_rule_size);
3433 status = ICE_ERR_NO_MEMORY;
3434 goto ice_add_mac_exit;
3438 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3440 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3441 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3443 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3444 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3445 ice_aqc_opc_add_sw_rules);
3446 r_iter = (struct ice_aqc_sw_rules_elem *)
3447 ((u8 *)r_iter + s_rule_size);
3451 /* Call AQ bulk switch rule update for all unicast addresses */
3453 /* Call AQ switch rule in AQ_MAX chunk */
3454 for (total_elem_left = num_unicast; total_elem_left > 0;
3455 total_elem_left -= elem_sent) {
3456 struct ice_aqc_sw_rules_elem *entry = r_iter;
3458 elem_sent = MIN_T(u8, total_elem_left,
3459 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
3460 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3461 elem_sent, ice_aqc_opc_add_sw_rules,
3464 goto ice_add_mac_exit;
3465 r_iter = (struct ice_aqc_sw_rules_elem *)
3466 ((u8 *)r_iter + (elem_sent * s_rule_size));
3469 /* Fill up rule ID based on the value returned from FW */
3471 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3473 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3474 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3475 struct ice_fltr_mgmt_list_entry *fm_entry;
3477 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3478 f_info->fltr_rule_id =
3479 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3480 f_info->fltr_act = ICE_FWD_TO_VSI;
3481 /* Create an entry to track this MAC address */
3482 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3483 ice_malloc(hw, sizeof(*fm_entry));
3485 status = ICE_ERR_NO_MEMORY;
3486 goto ice_add_mac_exit;
3488 fm_entry->fltr_info = *f_info;
3489 fm_entry->vsi_count = 1;
3490 /* The book keeping entries will get removed when
3491 * base driver calls remove filter AQ command
3494 LIST_ADD(&fm_entry->list_entry, rule_head);
3495 r_iter = (struct ice_aqc_sw_rules_elem *)
3496 ((u8 *)r_iter + s_rule_size);
3501 ice_release_lock(rule_lock);
3503 ice_free(hw, s_rule);
3508 * ice_add_mac - Add a MAC address based filter rule
3509 * @hw: pointer to the hardware structure
3510 * @m_list: list of MAC addresses and forwarding information
3512 * Function add MAC rule for logical port from HW struct
/* Thin public wrapper: validates arguments (elided check returns
 * ICE_ERR_PARAM) and delegates to ice_add_mac_rule() for this
 * port's switch info and logical port number.
 */
3515 ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3518 return ICE_ERR_PARAM;
3520 return ice_add_mac_rule(hw, m_list, hw->switch_info,
3521 hw->port_info->lport);
3525 * ice_add_vlan_internal - Add one VLAN based filter rule
3526 * @hw: pointer to the hardware structure
3527 * @recp_list: recipe list for which rule has to be added
3528 * @f_entry: filter entry containing one VLAN information
3530 static enum ice_status
3531 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3532 struct ice_fltr_list_entry *f_entry)
3534 struct ice_fltr_mgmt_list_entry *v_list_itr;
3535 struct ice_fltr_info *new_fltr, *cur_fltr;
3536 enum ice_sw_lkup_type lkup_type;
3537 u16 vsi_list_id = 0, vsi_handle;
3538 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3539 enum ice_status status = ICE_SUCCESS;
3541 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3542 return ICE_ERR_PARAM;
3544 f_entry->fltr_info.fwd_id.hw_vsi_id =
3545 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3546 new_fltr = &f_entry->fltr_info;
3548 /* VLAN ID should only be 12 bits */
3549 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3550 return ICE_ERR_PARAM;
3552 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3553 return ICE_ERR_PARAM;
3555 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3556 lkup_type = new_fltr->lkup_type;
3557 vsi_handle = new_fltr->vsi_handle;
3558 rule_lock = &recp_list->filt_rule_lock;
3559 ice_acquire_lock(rule_lock);
/* Look for an existing rule that matches this exact VLAN filter */
3560 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
3562 struct ice_vsi_list_map_info *map_info = NULL;
3564 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3565 /* All VLAN pruning rules use a VSI list. Check if
3566 * there is already a VSI list containing VSI that we
3567 * want to add. If found, use the same vsi_list_id for
3568 * this new VLAN rule or else create a new list.
3570 map_info = ice_find_vsi_list_entry(recp_list,
3574 status = ice_create_vsi_list_rule(hw,
3582 /* Convert the action to forwarding to a VSI list. */
3583 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3584 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3587 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3589 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
/* newly created rule must be findable; treat a miss as an error */
3592 status = ICE_ERR_DOES_NOT_EXIST;
3595 /* reuse VSI list for new rule and increment ref_cnt */
3597 v_list_itr->vsi_list_info = map_info;
3598 map_info->ref_cnt++;
3600 v_list_itr->vsi_list_info =
3601 ice_create_vsi_list_map(hw, &vsi_handle,
3605 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3606 /* Update existing VSI list to add new VSI ID only if it used
3609 cur_fltr = &v_list_itr->fltr_info;
3610 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3613 /* If VLAN rule exists and VSI list being used by this rule is
3614 * referenced by more than 1 VLAN rule. Then create a new VSI
3615 * list appending previous VSI with new VSI and update existing
3616 * VLAN rule to point to new VSI list ID
3618 struct ice_fltr_info tmp_fltr;
3619 u16 vsi_handle_arr[2];
3622 /* Current implementation only supports reusing VSI list with
3623 * one VSI count. We should never hit below condition
3625 if (v_list_itr->vsi_count > 1 &&
3626 v_list_itr->vsi_list_info->ref_cnt > 1) {
3627 ice_debug(hw, ICE_DBG_SW,
3628 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3629 status = ICE_ERR_CFG;
/* the single VSI already on the shared list (vsi_count == 1 here) */
3634 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3637 /* A rule already exists with the new VSI being added */
3638 if (cur_handle == vsi_handle) {
3639 status = ICE_ERR_ALREADY_EXISTS;
3643 vsi_handle_arr[0] = cur_handle;
3644 vsi_handle_arr[1] = vsi_handle;
3645 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3646 &vsi_list_id, lkup_type);
3650 tmp_fltr = v_list_itr->fltr_info;
3651 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3652 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3653 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3654 /* Update the previous switch rule to a new VSI list which
3655 * includes current VSI that is requested
3657 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3661 /* before overriding VSI list map info. decrement ref_cnt of
3664 v_list_itr->vsi_list_info->ref_cnt--;
3666 /* now update to newly created list */
3667 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3668 v_list_itr->vsi_list_info =
3669 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3671 v_list_itr->vsi_count++;
3675 ice_release_lock(rule_lock);
3680 * ice_add_vlan_rule - Add VLAN based filter rule
3681 * @hw: pointer to the hardware structure
3682 * @v_list: list of VLAN entries and forwarding information
3683 * @sw: pointer to switch info struct for which function add rule
3685 static enum ice_status
3686 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
3687 struct ice_switch_info *sw)
3689 struct ice_fltr_list_entry *v_list_itr;
3690 struct ice_sw_recipe *recp_list;
3692 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
/* Program each VLAN entry individually; stop at the first failure
 * (earlier successful entries are NOT rolled back).
 */
3693 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
3695 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3696 return ICE_ERR_PARAM;
3697 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3698 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
3700 if (v_list_itr->status)
3701 return v_list_itr->status;
3707 * ice_add_vlan - Add a VLAN based filter rule
3708 * @hw: pointer to the hardware structure
3709 * @v_list: list of VLAN and forwarding information
3711 * Function add VLAN rule for logical port from HW struct
/* Thin public wrapper: validates arguments (elided check returns
 * ICE_ERR_PARAM) and delegates to ice_add_vlan_rule().
 */
3714 ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3717 return ICE_ERR_PARAM;
3719 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
3723 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3724 * @hw: pointer to the hardware structure
3725 * @mv_list: list of MAC and VLAN filters
3726 * @sw: pointer to switch info struct for which function add rule
3727 * @lport: logic port number on which function add rule
3729 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3730 * pruning bits enabled, then it is the responsibility of the caller to make
3731 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3732 * VLAN won't be received on that VSI otherwise.
3734 static enum ice_status
3735 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
3736 struct ice_switch_info *sw, u8 lport)
3738 struct ice_fltr_list_entry *mv_list_itr;
3739 struct ice_sw_recipe *recp_list;
3741 if (!mv_list || !hw)
3742 return ICE_ERR_PARAM;
3744 recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
/* Program each MAC-VLAN pair; first failure aborts the remainder
 * without rolling back entries already added.
 */
3745 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3747 enum ice_sw_lkup_type l_type =
3748 mv_list_itr->fltr_info.lkup_type;
3750 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3751 return ICE_ERR_PARAM;
3752 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3753 mv_list_itr->status =
3754 ice_add_rule_internal(hw, recp_list, lport,
3756 if (mv_list_itr->status)
3757 return mv_list_itr->status;
3763 * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
3764 * @hw: pointer to the hardware structure
3765 * @mv_list: list of MAC VLAN addresses and forwarding information
3767 * Function add MAC VLAN rule for logical port from HW struct
/* Thin public wrapper: validates arguments and delegates to
 * ice_add_mac_vlan_rule() for this port's switch info and lport.
 */
3770 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3772 if (!mv_list || !hw)
3773 return ICE_ERR_PARAM;
3775 return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
3776 hw->port_info->lport);
3780 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
3781 * @hw: pointer to the hardware structure
3782 * @em_list: list of ether type MAC filter, MAC is optional
3783 * @sw: pointer to switch info struct for which function add rule
3784 * @lport: logic port number on which function add rule
3786 * This function requires the caller to populate the entries in
3787 * the filter list with the necessary fields (including flags to
3788 * indicate Tx or Rx rules).
3790 static enum ice_status
3791 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
3792 struct ice_switch_info *sw, u8 lport)
3794 struct ice_fltr_list_entry *em_list_itr;
3796 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3798 struct ice_sw_recipe *recp_list;
3799 enum ice_sw_lkup_type l_type;
3801 l_type = em_list_itr->fltr_info.lkup_type;
/* recipe is selected by the entry's own lookup type, so a single
 * call can mix ETHERTYPE and ETHERTYPE_MAC entries
 */
3802 recp_list = &sw->recp_list[l_type];
3804 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3805 l_type != ICE_SW_LKUP_ETHERTYPE)
3806 return ICE_ERR_PARAM;
3808 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
3811 if (em_list_itr->status)
3812 return em_list_itr->status;
3819 * ice_add_eth_mac - Add a ethertype based filter rule
3820 * @hw: pointer to the hardware structure
3821 * @em_list: list of ethertype and forwarding information
3823 * Function add ethertype rule for logical port from HW struct
/* Thin public wrapper: validates arguments and delegates to
 * ice_add_eth_mac_rule() for this port's switch info and lport.
 */
3825 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3827 if (!em_list || !hw)
3828 return ICE_ERR_PARAM;
3830 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
3831 hw->port_info->lport);
3835 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
3836 * @hw: pointer to the hardware structure
3837 * @em_list: list of ethertype or ethertype MAC entries
3838 * @sw: pointer to switch info struct for which function add rule
3840 static enum ice_status
3841 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
3842 struct ice_switch_info *sw)
3844 struct ice_fltr_list_entry *em_list_itr, *tmp;
/* SAFE iteration: removal may unlink the current entry */
3846 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3848 struct ice_sw_recipe *recp_list;
3849 enum ice_sw_lkup_type l_type;
3851 l_type = em_list_itr->fltr_info.lkup_type;
3853 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3854 l_type != ICE_SW_LKUP_ETHERTYPE)
3855 return ICE_ERR_PARAM;
3857 recp_list = &sw->recp_list[l_type];
3858 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
3860 if (em_list_itr->status)
3861 return em_list_itr->status;
3867 * ice_remove_eth_mac - remove a ethertype based filter rule
3868 * @hw: pointer to the hardware structure
3869 * @em_list: list of ethertype and forwarding information
/* Thin public wrapper: validates arguments and delegates to
 * ice_remove_eth_mac_rule() for this HW's switch info.
 */
3873 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3875 if (!em_list || !hw)
3876 return ICE_ERR_PARAM;
3878 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
3882 * ice_rem_sw_rule_info
3883 * @hw: pointer to the hardware structure
3884 * @rule_head: pointer to the switch list structure that we want to delete
/* Frees every book-keeping entry on a basic (non-advanced) filter rule
 * list. Does NOT touch hardware - this only releases host memory.
 */
3887 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3889 if (!LIST_EMPTY(rule_head)) {
3890 struct ice_fltr_mgmt_list_entry *entry;
3891 struct ice_fltr_mgmt_list_entry *tmp;
3893 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3894 ice_fltr_mgmt_list_entry, list_entry) {
3895 LIST_DEL(&entry->list_entry);
3896 ice_free(hw, entry);
3902 * ice_rem_adv_rule_info
3903 * @hw: pointer to the hardware structure
3904 * @rule_head: pointer to the switch list structure that we want to delete
/* Frees every advanced-rule book-keeping entry, including the
 * separately-allocated lkups array each entry owns. Host memory only;
 * hardware rules are not removed here.
 */
3907 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3909 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3910 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3912 if (LIST_EMPTY(rule_head))
3915 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3916 ice_adv_fltr_mgmt_list_entry, list_entry) {
3917 LIST_DEL(&lst_itr->list_entry);
/* free the lookup array before its owning entry */
3918 ice_free(hw, lst_itr->lkups);
3919 ice_free(hw, lst_itr);
3924 * ice_rem_all_sw_rules_info
3925 * @hw: pointer to the hardware structure
/* Walks every recipe and releases its filter book-keeping list,
 * dispatching to the basic or advanced free helper depending on
 * whether the recipe is flagged as an advanced rule.
 */
3927 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3929 struct ice_switch_info *sw = hw->switch_info;
3932 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3933 struct LIST_HEAD_TYPE *rule_head;
3935 rule_head = &sw->recp_list[i].filt_rules;
3936 if (!sw->recp_list[i].adv_rule)
3937 ice_rem_sw_rule_info(hw, rule_head);
3939 ice_rem_adv_rule_info(hw, rule_head);
3944 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3945 * @pi: pointer to the port_info structure
3946 * @vsi_handle: VSI handle to set as default
3947 * @set: true to add the above mentioned switch rule, false to remove it
3948 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3950 * add filter rule to set/unset given VSI as default VSI for the switch
3951 * (represented by swid)
3954 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3957 struct ice_aqc_sw_rules_elem *s_rule;
3958 struct ice_fltr_info f_info;
3959 struct ice_hw *hw = pi->hw;
3960 enum ice_adminq_opc opcode;
3961 enum ice_status status;
3965 if (!ice_is_vsi_valid(hw, vsi_handle))
3966 return ICE_ERR_PARAM;
3967 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
/* add needs the full rule element with a dummy Ethernet header;
 * remove only needs the header-less element
 */
3969 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3970 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3971 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3973 return ICE_ERR_NO_MEMORY;
3975 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3977 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3978 f_info.flag = direction;
3979 f_info.fltr_act = ICE_FWD_TO_VSI;
3980 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
/* Rx default rules are sourced from the port; Tx default rules are
 * sourced from the VSI itself.
 */
3982 if (f_info.flag & ICE_FLTR_RX) {
3983 f_info.src = pi->lport;
3984 f_info.src_id = ICE_SRC_ID_LPORT;
3986 f_info.fltr_rule_id =
3987 pi->dflt_rx_vsi_rule_id;
3988 } else if (f_info.flag & ICE_FLTR_TX) {
3989 f_info.src_id = ICE_SRC_ID_VSI;
3990 f_info.src = hw_vsi_id;
3992 f_info.fltr_rule_id =
3993 pi->dflt_tx_vsi_rule_id;
3997 opcode = ice_aqc_opc_add_sw_rules;
3999 opcode = ice_aqc_opc_remove_sw_rules;
4001 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
4003 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
4004 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
/* On success, cache the FW-assigned rule index in the port_info so a
 * later clear can reference the same rule; on clear, reset the cached
 * VSI number and rule ID to their invalid sentinels.
 */
4007 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4009 if (f_info.flag & ICE_FLTR_TX) {
4010 pi->dflt_tx_vsi_num = hw_vsi_id;
4011 pi->dflt_tx_vsi_rule_id = index;
4012 } else if (f_info.flag & ICE_FLTR_RX) {
4013 pi->dflt_rx_vsi_num = hw_vsi_id;
4014 pi->dflt_rx_vsi_rule_id = index;
4017 if (f_info.flag & ICE_FLTR_TX) {
4018 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
4019 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
4020 } else if (f_info.flag & ICE_FLTR_RX) {
4021 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
4022 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
4027 ice_free(hw, s_rule);
4032 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
4033 * @list_head: head of rule list
4034 * @f_info: rule information
4036 * Helper function to search for a unicast rule entry - this is to be used
4037 * to remove unicast MAC filter that is not shared with other VSIs on the
4040 * Returns pointer to entry storing the rule if found
4042 static struct ice_fltr_mgmt_list_entry *
4043 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
4044 struct ice_fltr_info *f_info)
4046 struct ice_fltr_mgmt_list_entry *list_itr;
4048 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
/* match on lookup data, HW VSI ID and flag - unlike the generic
 * ice_find_rule_entry(), this also requires the same hw_vsi_id
 */
4050 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4051 sizeof(f_info->l_data)) &&
4052 f_info->fwd_id.hw_vsi_id ==
4053 list_itr->fltr_info.fwd_id.hw_vsi_id &&
4054 f_info->flag == list_itr->fltr_info.flag)
4061 * ice_remove_mac_rule - remove a MAC based filter rule
4062 * @hw: pointer to the hardware structure
4063 * @m_list: list of MAC addresses and forwarding information
4064 * @recp_list: list from which function remove MAC address
4066 * This function removes either a MAC filter rule or a specific VSI from a
4067 * VSI list for a multicast MAC address.
4069 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
4070 * ice_add_mac. Caller should be aware that this call will only work if all
4071 * the entries passed into m_list were added previously. It will not attempt to
4072 * do a partial remove of entries that were found.
4074 static enum ice_status
4075 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4076 struct ice_sw_recipe *recp_list)
4078 struct ice_fltr_list_entry *list_itr, *tmp;
4079 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4082 return ICE_ERR_PARAM;
4084 rule_lock = &recp_list->filt_rule_lock;
4085 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
4087 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
4088 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
4091 if (l_type != ICE_SW_LKUP_MAC)
4092 return ICE_ERR_PARAM;
4094 vsi_handle = list_itr->fltr_info.vsi_handle;
4095 if (!ice_is_vsi_valid(hw, vsi_handle))
4096 return ICE_ERR_PARAM;
4098 list_itr->fltr_info.fwd_id.hw_vsi_id =
4099 ice_get_hw_vsi_num(hw, vsi_handle);
4100 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4101 /* Don't remove the unicast address that belongs to
4102 * another VSI on the switch, since it is not being
4105 ice_acquire_lock(rule_lock);
/* ucast search also matches hw_vsi_id, so a rule owned by a
 * different VSI will not be found (and not removed)
 */
4106 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
4107 &list_itr->fltr_info)) {
4108 ice_release_lock(rule_lock);
4109 return ICE_ERR_DOES_NOT_EXIST;
4111 ice_release_lock(rule_lock);
4113 list_itr->status = ice_remove_rule_internal(hw, recp_list,
4115 if (list_itr->status)
4116 return list_itr->status;
4122 * ice_remove_mac - remove a MAC address based filter rule
4123 * @hw: pointer to the hardware structure
4124 * @m_list: list of MAC addresses and forwarding information
/* Thin public wrapper: resolves the MAC recipe list and delegates to
 * ice_remove_mac_rule().
 */
4128 ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4130 struct ice_sw_recipe *recp_list;
4132 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4133 return ice_remove_mac_rule(hw, m_list, recp_list);
4137 * ice_remove_vlan_rule - Remove VLAN based filter rule
4138 * @hw: pointer to the hardware structure
4139 * @v_list: list of VLAN entries and forwarding information
4140 * @recp_list: list from which function remove VLAN
4142 static enum ice_status
4143 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4144 struct ice_sw_recipe *recp_list)
4146 struct ice_fltr_list_entry *v_list_itr, *tmp;
/* SAFE iteration: removal may unlink the current entry; the first
 * failing entry aborts the remainder of the list.
 */
4148 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4150 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4152 if (l_type != ICE_SW_LKUP_VLAN)
4153 return ICE_ERR_PARAM;
4154 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4156 if (v_list_itr->status)
4157 return v_list_itr->status;
4163 * ice_remove_vlan - remove a VLAN address based filter rule
4164 * @hw: pointer to the hardware structure
4165 * @v_list: list of VLAN and forwarding information
/* Thin public wrapper: validates arguments, resolves the VLAN recipe
 * list and delegates to ice_remove_vlan_rule().
 */
4169 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4171 struct ice_sw_recipe *recp_list;
4174 return ICE_ERR_PARAM;
4176 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
4177 return ice_remove_vlan_rule(hw, v_list, recp_list);
4181 * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
4182 * @hw: pointer to the hardware structure
4183 * @v_list: list of MAC VLAN entries and forwarding information
4184 * @recp_list: list from which function remove MAC VLAN
4186 static enum ice_status
4187 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4188 struct ice_sw_recipe *recp_list)
4190 struct ice_fltr_list_entry *v_list_itr, *tmp;
/* NOTE(review): the recp_list parameter is immediately overwritten
 * here, so the caller's argument is effectively ignored. This is
 * harmless for the existing caller (ice_remove_mac_vlan passes the
 * same MAC_VLAN recipe) but the assignment looks redundant - confirm
 * intent before removing either the parameter or this line.
 */
4192 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4193 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4195 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4197 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4198 return ICE_ERR_PARAM;
4199 v_list_itr->status =
4200 ice_remove_rule_internal(hw, recp_list,
4202 if (v_list_itr->status)
4203 return v_list_itr->status;
4209 * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
4210 * @hw: pointer to the hardware structure
4211 * @mv_list: list of MAC VLAN and forwarding information
/* Thin public wrapper: validates arguments, resolves the MAC_VLAN
 * recipe list and delegates to ice_remove_mac_vlan_rule().
 */
4214 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4216 struct ice_sw_recipe *recp_list;
4218 if (!mv_list || !hw)
4219 return ICE_ERR_PARAM;
4221 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4222 return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
4226 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
4227 * @fm_entry: filter entry to inspect
4228 * @vsi_handle: VSI handle to compare with filter info
/* True when the filter forwards directly to this VSI, or forwards to a
 * VSI list whose bitmap contains this VSI handle.
 */
4231 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
4233 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
4234 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
4235 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
4236 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
4241 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
4242 * @hw: pointer to the hardware structure
4243 * @vsi_handle: VSI handle to remove filters from
4244 * @vsi_list_head: pointer to the list to add entry to
4245 * @fi: pointer to fltr_info of filter entry to copy & add
4247 * Helper function, used when creating a list of filters to remove from
4248 * a specific VSI. The entry added to vsi_list_head is a COPY of the
4249 * original filter entry, with the exception of fltr_info.fltr_act and
4250 * fltr_info.fwd_id fields. These are set such that later logic can
4251 * extract which VSI to remove the fltr from, and pass on that information.
4253 static enum ice_status
4254 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4255 struct LIST_HEAD_TYPE *vsi_list_head,
4256 struct ice_fltr_info *fi)
4258 struct ice_fltr_list_entry *tmp;
4260 /* this memory is freed up in the caller function
4261 * once filters for this VSI are removed
4263 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
4265 return ICE_ERR_NO_MEMORY;
/* struct copy - the original entry is left untouched */
4267 tmp->fltr_info = *fi;
4269 /* Overwrite these fields to indicate which VSI to remove filter from,
4270 * so find and remove logic can extract the information from the
4271 * list entries. Note that original entries will still have proper
4274 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
4275 tmp->fltr_info.vsi_handle = vsi_handle;
4276 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4278 LIST_ADD(&tmp->list_entry, vsi_list_head);
4284 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4285 * @hw: pointer to the hardware structure
4286 * @vsi_handle: VSI handle to remove filters from
4287 * @lkup_list_head: pointer to the list that has certain lookup type filters
4288 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4290 * Locates all filters in lkup_list_head that are used by the given VSI,
4291 * and adds COPIES of those entries to vsi_list_head (intended to be used
4292 * to remove the listed filters).
4293 * Note that this means all entries in vsi_list_head must be explicitly
4294 * deallocated by the caller when done with list.
4296 static enum ice_status
4297 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4298 struct LIST_HEAD_TYPE *lkup_list_head,
4299 struct LIST_HEAD_TYPE *vsi_list_head)
4301 struct ice_fltr_mgmt_list_entry *fm_entry;
4302 enum ice_status status = ICE_SUCCESS;
4304 /* check to make sure VSI ID is valid and within boundary */
4305 if (!ice_is_vsi_valid(hw, vsi_handle))
4306 return ICE_ERR_PARAM;
4308 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
4309 ice_fltr_mgmt_list_entry, list_entry) {
4310 struct ice_fltr_info *fi;
4312 fi = &fm_entry->fltr_info;
/* skip filters that do not involve this VSI */
4313 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
4316 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4325 * ice_determine_promisc_mask
4326 * @fi: filter info to parse
4328 * Helper function to determine which ICE_PROMISC_ mask corresponds
4329 * to given filter into.
4331 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4333 u16 vid = fi->l_data.mac_vlan.vlan_id;
4334 u8 *macaddr = fi->l_data.mac.mac_addr;
4335 bool is_tx_fltr = false;
4336 u8 promisc_mask = 0;
/* NOTE(review): the body that sets is_tx_fltr = true for ICE_FLTR_TX
 * is in an elided line here - confirm against the full source.
 */
4338 if (fi->flag == ICE_FLTR_TX)
/* classify address as broadcast / multicast / unicast and pick the
 * Tx or Rx variant of the corresponding promiscuous bit
 */
4341 if (IS_BROADCAST_ETHER_ADDR(macaddr))
4342 promisc_mask |= is_tx_fltr ?
4343 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4344 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
4345 promisc_mask |= is_tx_fltr ?
4346 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4347 else if (IS_UNICAST_ETHER_ADDR(macaddr))
4348 promisc_mask |= is_tx_fltr ?
4349 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
/* presumably guarded by a VLAN-id check (vid) on an elided line */
4351 promisc_mask |= is_tx_fltr ?
4352 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4354 return promisc_mask;
4358 * _ice_get_vsi_promisc - get promiscuous mode of given VSI
4359 * @hw: pointer to the hardware structure
4360 * @vsi_handle: VSI handle to retrieve info from
4361 * @promisc_mask: pointer to mask to be filled in
4362 * @vid: VLAN ID of promisc VLAN VSI
4363 * @sw: pointer to switch info struct to search for the promisc rules
4365 static enum ice_status
4366 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4367 u16 *vid, struct ice_switch_info *sw)
4369 struct ice_fltr_mgmt_list_entry *itr;
4370 struct LIST_HEAD_TYPE *rule_head;
4371 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4373 if (!ice_is_vsi_valid(hw, vsi_handle))
4374 return ICE_ERR_PARAM;
4378 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
4379 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
/* Accumulate the promisc bits of every PROMISC rule this VSI uses */
4381 ice_acquire_lock(rule_lock);
4382 LIST_FOR_EACH_ENTRY(itr, rule_head,
4383 ice_fltr_mgmt_list_entry, list_entry) {
4384 /* Continue if this filter doesn't apply to this VSI or the
4385 * VSI ID is not in the VSI map for this filter
4387 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4390 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4392 ice_release_lock(rule_lock);
4398 * ice_get_vsi_promisc - get promiscuous mode of given VSI
4399 * @hw: pointer to the hardware structure
4400 * @vsi_handle: VSI handle to retrieve info from
4401 * @promisc_mask: pointer to mask to be filled in
4402 * @vid: VLAN ID of promisc VLAN VSI
/* Thin public wrapper over _ice_get_vsi_promisc() using this HW's
 * switch info.
 */
4405 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4408 return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
4409 vid, hw->switch_info);
4413 * _ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4414 * @hw: pointer to the hardware structure
4415 * @vsi_handle: VSI handle to retrieve info from
4416 * @promisc_mask: pointer to mask to be filled in
4417 * @vid: VLAN ID of promisc VLAN VSI
4418 * @sw: pointer to switch info struct to search for the promisc rules
4420 static enum ice_status
4421 _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4422 u16 *vid, struct ice_switch_info *sw)
4424 struct ice_fltr_mgmt_list_entry *itr;
4425 struct LIST_HEAD_TYPE *rule_head;
4426 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4428 if (!ice_is_vsi_valid(hw, vsi_handle))
4429 return ICE_ERR_PARAM;
/* same walk as _ice_get_vsi_promisc(), but over PROMISC_VLAN rules */
4433 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
4434 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
4436 ice_acquire_lock(rule_lock);
4437 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
4439 /* Continue if this filter doesn't apply to this VSI or the
4440 * VSI ID is not in the VSI map for this filter
4442 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4445 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4447 ice_release_lock(rule_lock);
4453 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4454 * @hw: pointer to the hardware structure
4455 * @vsi_handle: VSI handle to retrieve info from
4456 * @promisc_mask: pointer to mask to be filled in
4457 * @vid: VLAN ID of promisc VLAN VSI
/* Thin public wrapper over _ice_get_vsi_vlan_promisc() using this HW's
 * switch info.
 */
4460 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4463 return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
4464 vid, hw->switch_info);
4468 * ice_remove_promisc - Remove promisc based filter rules
4469 * @hw: pointer to the hardware structure
4470 * @recp_id: recipe ID for which the rule needs to removed
4471 * @v_list: list of promisc entries
4473 static enum ice_status
4474 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
4475 struct LIST_HEAD_TYPE *v_list)
4477 struct ice_fltr_list_entry *v_list_itr, *tmp;
4478 struct ice_sw_recipe *recp_list;
/* recp_id selects PROMISC or PROMISC_VLAN (see _ice_clear_vsi_promisc) */
4480 recp_list = &hw->switch_info->recp_list[recp_id];
4481 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4483 v_list_itr->status =
4484 ice_remove_rule_internal(hw, recp_list, v_list_itr);
4485 if (v_list_itr->status)
4486 return v_list_itr->status;
4492 * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
4493 * @hw: pointer to the hardware structure
4494 * @vsi_handle: VSI handle to clear mode
4495 * @promisc_mask: mask of promiscuous config bits to clear
4496 * @vid: VLAN ID to clear VLAN promiscuous
4497 * @sw: pointer to switch info struct to clear the rules from
4499 static enum ice_status
4500 _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4501 u16 vid, struct ice_switch_info *sw)
4503 struct ice_fltr_list_entry *fm_entry, *tmp;
4504 struct LIST_HEAD_TYPE remove_list_head;
4505 struct ice_fltr_mgmt_list_entry *itr;
4506 struct LIST_HEAD_TYPE *rule_head;
4507 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4508 enum ice_status status = ICE_SUCCESS;
4511 if (!ice_is_vsi_valid(hw, vsi_handle))
4512 return ICE_ERR_PARAM;
/* VLAN promisc bits live under the PROMISC_VLAN recipe; everything
 * else under the plain PROMISC recipe
 */
4514 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
4515 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4517 recipe_id = ICE_SW_LKUP_PROMISC;
4519 rule_head = &sw->recp_list[recipe_id].filt_rules;
4520 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4522 INIT_LIST_HEAD(&remove_list_head);
/* Under the lock, collect COPIES of all matching rules into a local
 * remove list; actual removal happens after the lock is dropped.
 */
4524 ice_acquire_lock(rule_lock);
4525 LIST_FOR_EACH_ENTRY(itr, rule_head,
4526 ice_fltr_mgmt_list_entry, list_entry) {
4527 struct ice_fltr_info *fltr_info;
4528 u8 fltr_promisc_mask = 0;
4530 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4532 fltr_info = &itr->fltr_info;
4534 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4535 vid != fltr_info->l_data.mac_vlan.vlan_id)
4538 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4540 /* Skip if filter is not completely specified by given mask */
4541 if (fltr_promisc_mask & ~promisc_mask)
4544 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4548 ice_release_lock(rule_lock);
4549 goto free_fltr_list;
4552 ice_release_lock(rule_lock);
4554 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
/* free the temporary copies regardless of removal outcome */
4557 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4558 ice_fltr_list_entry, list_entry) {
4559 LIST_DEL(&fm_entry->list_entry);
4560 ice_free(hw, fm_entry);
/**
 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to clear mode
 * @promisc_mask: mask of promiscuous config bits to clear
 * @vid: VLAN ID to clear VLAN promiscuous
 *
 * Thin public wrapper around _ice_clear_vsi_promisc() using hw->switch_info.
 */
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
		      u8 promisc_mask, u16 vid)
	return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
				      vid, hw->switch_info);
/**
 * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @vid: VLAN ID to set VLAN promiscuous
 * @lport: logical port number to configure promisc mode
 * @sw: pointer to switch info struct for which function add rule
 *
 * Adds one switch rule per direction/packet-type bit set in @promisc_mask.
 * The mask is consumed bit-by-bit in the loop below until it is empty.
 */
static enum ice_status
_ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
		     u16 vid, u8 lport, struct ice_switch_info *sw)
	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
	struct ice_fltr_list_entry f_list_entry;
	struct ice_fltr_info new_fltr;
	enum ice_status status = ICE_SUCCESS;
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
	ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
	/* VLAN promisc bits use the PROMISC_VLAN lookup/recipe and carry @vid */
	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
		new_fltr.l_data.mac_vlan.vlan_id = vid;
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
		recipe_id = ICE_SW_LKUP_PROMISC;
	/* Separate filters must be set for each direction/packet type
	 * combination, so we will loop over the mask value, store the
	 * individual type, and clear it out in the input mask as it
	 */
	while (promisc_mask) {
		struct ice_sw_recipe *recp_list;
		/* Pick one pending direction/type bit and clear it from
		 * the mask; each iteration programs exactly one rule.
		 */
		if (promisc_mask & ICE_PROMISC_UCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_RX;
			pkt_type = UCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_TX;
			pkt_type = UCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_RX;
			pkt_type = MCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_TX;
			pkt_type = MCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_RX;
			pkt_type = BCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_TX;
			pkt_type = BCAST_FLTR;
		/* Check for VLAN promiscuous flag */
		if (promisc_mask & ICE_PROMISC_VLAN_RX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_RX;
		} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_TX;
		/* Set filter DA based on packet type */
		mac_addr = new_fltr.l_data.mac.mac_addr;
		if (pkt_type == BCAST_FLTR) {
			ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
		} else if (pkt_type == MCAST_FLTR ||
			   pkt_type == UCAST_FLTR) {
			/* Use the dummy ether header DA */
			ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
				   ICE_NONDMA_TO_NONDMA);
			if (pkt_type == MCAST_FLTR)
				mac_addr[0] |= 0x1; /* Set multicast bit */
		/* Need to reset this to zero for all iterations */
			new_fltr.flag |= ICE_FLTR_TX;
			new_fltr.src = hw_vsi_id;
			new_fltr.flag |= ICE_FLTR_RX;
			new_fltr.src = lport;
		new_fltr.fltr_act = ICE_FWD_TO_VSI;
		new_fltr.vsi_handle = vsi_handle;
		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
		f_list_entry.fltr_info = new_fltr;
		recp_list = &sw->recp_list[recipe_id];
		status = ice_add_rule_internal(hw, recp_list, lport,
		if (status != ICE_SUCCESS)
			goto set_promisc_exit;
/**
 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @vid: VLAN ID to set VLAN promiscuous
 *
 * Thin public wrapper: uses the port's lport and hw->switch_info.
 */
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
	return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
				    hw->port_info->lport,
/**
 * _ice_set_vlan_vsi_promisc
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
 * @lport: logical port number to configure promisc mode
 * @sw: pointer to switch info struct for which function add rule
 *
 * Configure VSI with all associated VLANs to given promiscuous mode(s)
 *
 * Builds (under the VLAN rule lock) the list of VLAN filters used by the
 * VSI, then sets or clears promisc mode per VLAN depending on
 * @rm_vlan_promisc, and finally frees the temporary list.
 */
static enum ice_status
_ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
			  bool rm_vlan_promisc, u8 lport,
			  struct ice_switch_info *sw)
	struct ice_fltr_list_entry *list_itr, *tmp;
	struct LIST_HEAD_TYPE vsi_list_head;
	struct LIST_HEAD_TYPE *vlan_head;
	struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
	enum ice_status status;
	INIT_LIST_HEAD(&vsi_list_head);
	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
	ice_acquire_lock(vlan_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
	ice_release_lock(vlan_lock);
		goto free_fltr_list;
	LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
		if (rm_vlan_promisc)
			status = _ice_clear_vsi_promisc(hw, vsi_handle,
			status = _ice_set_vsi_promisc(hw, vsi_handle,
						      promisc_mask, vlan_id,
	/* Free the temporary list built above */
	LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
				 ice_fltr_list_entry, list_entry) {
		LIST_DEL(&list_itr->list_entry);
		ice_free(hw, list_itr);
/**
 * ice_set_vlan_vsi_promisc
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
 *
 * Configure VSI with all associated VLANs to given promiscuous mode(s)
 *
 * Thin public wrapper: uses the port's lport and hw->switch_info.
 */
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
			 bool rm_vlan_promisc)
	return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
					 rm_vlan_promisc, hw->port_info->lport,
/**
 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @recp_list: recipe list from which function remove fltr
 * @lkup: switch rule filter lookup type
 *
 * Builds (under the rule lock) the list of filters of type @lkup used by
 * the VSI, dispatches to the type-specific removal routine, then frees
 * the temporary list entries.
 */
ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
			 struct ice_sw_recipe *recp_list,
			 enum ice_sw_lkup_type lkup)
	struct ice_fltr_list_entry *fm_entry;
	struct LIST_HEAD_TYPE remove_list_head;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_fltr_list_entry *tmp;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status;
	INIT_LIST_HEAD(&remove_list_head);
	rule_lock = &recp_list[lkup].filt_rule_lock;
	rule_head = &recp_list[lkup].filt_rules;
	ice_acquire_lock(rule_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
	ice_release_lock(rule_lock);
	/* Dispatch to the removal helper matching the lookup type */
	case ICE_SW_LKUP_MAC:
		ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
	case ICE_SW_LKUP_VLAN:
		ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
	case ICE_SW_LKUP_PROMISC:
	case ICE_SW_LKUP_PROMISC_VLAN:
		ice_remove_promisc(hw, lkup, &remove_list_head);
	case ICE_SW_LKUP_MAC_VLAN:
		ice_remove_mac_vlan(hw, &remove_list_head);
	case ICE_SW_LKUP_ETHERTYPE:
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		ice_remove_eth_mac(hw, &remove_list_head);
	case ICE_SW_LKUP_DFLT:
		ice_debug(hw, ICE_DBG_SW,
			  "Remove filters for this lookup type hasn't been implemented yet\n");
	case ICE_SW_LKUP_LAST:
		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
	/* Free the temporary list built above */
	LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
				 ice_fltr_list_entry, list_entry) {
		LIST_DEL(&fm_entry->list_entry);
		ice_free(hw, fm_entry);
/**
 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @sw: pointer to switch info struct
 *
 * Removes the VSI's filters for every supported lookup type, one
 * ice_remove_vsi_lkup_fltr() call per type.
 */
ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
			 struct ice_switch_info *sw)
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
				 sw->recp_list, ICE_SW_LKUP_MAC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
				 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
				 sw->recp_list, ICE_SW_LKUP_PROMISC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
				 sw->recp_list, ICE_SW_LKUP_VLAN);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
				 sw->recp_list, ICE_SW_LKUP_DFLT);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
				 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
				 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
				 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
/**
 * ice_remove_vsi_fltr - Remove all filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 *
 * Thin public wrapper around ice_remove_vsi_fltr_rule() using hw->switch_info.
 */
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
	ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
/**
 * ice_alloc_res_cntr - allocating resource counter
 * @hw: pointer to the hardware structure
 * @type: type of resource
 * @alloc_shared: if set it is shared else dedicated
 * @num_items: number of entries requested for FD resource type
 * @counter_id: counter index returned by AQ call
 *
 * Allocates a resource via the alloc_res admin queue command and returns
 * the firmware-assigned index through @counter_id.
 */
ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
	struct ice_aqc_alloc_free_res_elem *buf;
	enum ice_status status;
	/* Allocate resource */
	buf_len = sizeof(*buf);
	buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
		return ICE_ERR_NO_MEMORY;
	buf->num_elems = CPU_TO_LE16(num_items);
	/* Encode resource type plus the shared/dedicated flag */
	buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
				     ICE_AQC_RES_TYPE_M) | alloc_shared);
	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
	*counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
/**
 * ice_free_res_cntr - free resource counter
 * @hw: pointer to the hardware structure
 * @type: type of resource
 * @alloc_shared: if set it is shared else dedicated
 * @num_items: number of entries to be freed for FD resource type
 * @counter_id: counter ID resource which needs to be freed
 *
 * Frees a previously allocated resource via the free_res admin queue
 * command; logs (but still returns) any failure status.
 */
ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
	struct ice_aqc_alloc_free_res_elem *buf;
	enum ice_status status;
	buf_len = sizeof(*buf);
	buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
		return ICE_ERR_NO_MEMORY;
	buf->num_elems = CPU_TO_LE16(num_items);
	/* Encode resource type plus the shared/dedicated flag */
	buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
				     ICE_AQC_RES_TYPE_M) | alloc_shared);
	buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
				       ice_aqc_opc_free_res, NULL);
		ice_debug(hw, ICE_DBG_SW,
			  "counter resource could not be freed\n");
/**
 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
 * @hw: pointer to the hardware structure
 * @counter_id: returns counter index
 *
 * Convenience wrapper: allocates one dedicated VLAN counter.
 */
enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
	return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
				  ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
/**
 * ice_free_vlan_res_counter - Free counter resource for VLAN type
 * @hw: pointer to the hardware structure
 * @counter_id: counter index to be freed
 *
 * Convenience wrapper: frees one dedicated VLAN counter.
 */
enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
	return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
				 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
/**
 * ice_alloc_res_lg_act - add large action resource
 * @hw: pointer to the hardware structure
 * @l_id: large action ID to fill it in
 * @num_acts: number of actions to hold with a large action entry
 */
static enum ice_status
ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	enum ice_status status;
	if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
		return ICE_ERR_PARAM;
	/* Allocate resource for large action */
	buf_len = sizeof(*sw_buf);
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
		return ICE_ERR_NO_MEMORY;
	sw_buf->num_elems = CPU_TO_LE16(1);
	/* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
	 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
	 * If num_acts is greater than 2, then use
	 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
	 * The num_acts cannot exceed ICE_MAX_LG_ACT. This was ensured at the
	 * beginning of the function.
	 */
		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
	else if (num_acts == 2)
		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
		*l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
	ice_free(hw, sw_buf);
/**
 * ice_add_mac_with_sw_marker - add filter with sw marker
 * @hw: pointer to the hardware structure
 * @f_info: filter info structure containing the MAC filter information
 * @sw_marker: sw marker to tag the Rx descriptor with
 *
 * Adds (or reuses) a MAC forwarding filter and attaches a SW-marker large
 * action to it. On failure, the filter is removed only if this call
 * created it (entry_exists == false).
 */
ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
	struct ice_fltr_mgmt_list_entry *m_entry;
	struct ice_fltr_list_entry fl_info;
	struct ice_sw_recipe *recp_list;
	struct LIST_HEAD_TYPE l_head;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status ret;
	/* Parameter validation: only FWD_TO_VSI MAC filters with a valid
	 * marker and VSI are accepted.
	 */
	if (f_info->fltr_act != ICE_FWD_TO_VSI)
		return ICE_ERR_PARAM;
	if (f_info->lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;
	if (sw_marker == ICE_INVAL_SW_MARKER_ID)
		return ICE_ERR_PARAM;
	if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
		return ICE_ERR_PARAM;
	f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
	/* Add filter if it doesn't exist so then the adding of large
	 * action always results in update
	 */
	INIT_LIST_HEAD(&l_head);
	fl_info.fltr_info = *f_info;
	LIST_ADD(&fl_info.list_entry, &l_head);
	entry_exists = false;
	ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
			       hw->port_info->lport);
	if (ret == ICE_ERR_ALREADY_EXISTS)
		entry_exists = true;
	recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
	rule_lock = &recp_list->filt_rule_lock;
	ice_acquire_lock(rule_lock);
	/* Get the book keeping entry for the filter */
	m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
	/* If counter action was enabled for this rule then don't enable
	 * sw marker large action
	 */
	if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
		ret = ICE_ERR_PARAM;
	/* if same marker was added before */
	if (m_entry->sw_marker_id == sw_marker) {
		ret = ICE_ERR_ALREADY_EXISTS;
	/* Allocate a hardware table entry to hold large act. Three actions
	 * for marker based large action
	 */
	ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
	if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
	/* Update the switch rule to add the marker action */
	ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
		ice_release_lock(rule_lock);
	ice_release_lock(rule_lock);
	/* only remove entry if it did not exist previously */
		ret = ice_remove_mac(hw, &l_head);
/**
 * ice_add_mac_with_counter - add filter with counter enabled
 * @hw: pointer to the hardware structure
 * @f_info: pointer to filter info structure containing the MAC filter
 *
 * Adds (or reuses) a MAC forwarding filter and attaches a VLAN-counter
 * large action to it. On failure, the filter is removed only if this
 * call created it (entry_exist == false).
 */
ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
	struct ice_fltr_mgmt_list_entry *m_entry;
	struct ice_fltr_list_entry fl_info;
	struct ice_sw_recipe *recp_list;
	struct LIST_HEAD_TYPE l_head;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status ret;
	/* Parameter validation: only FWD_TO_VSI MAC filters on a valid VSI */
	if (f_info->fltr_act != ICE_FWD_TO_VSI)
		return ICE_ERR_PARAM;
	if (f_info->lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;
	if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
		return ICE_ERR_PARAM;
	f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
	recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
	entry_exist = false;
	rule_lock = &recp_list->filt_rule_lock;
	/* Add filter if it doesn't exist so then the adding of large
	 * action always results in update
	 */
	INIT_LIST_HEAD(&l_head);
	fl_info.fltr_info = *f_info;
	LIST_ADD(&fl_info.list_entry, &l_head);
	ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
			       hw->port_info->lport);
	if (ret == ICE_ERR_ALREADY_EXISTS)
	ice_acquire_lock(rule_lock);
	m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
		ret = ICE_ERR_BAD_PTR;
	/* Don't enable counter for a filter for which sw marker was enabled */
	if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
		ret = ICE_ERR_PARAM;
	/* If a counter was already enabled then don't need to add again */
	if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
		ret = ICE_ERR_ALREADY_EXISTS;
	/* Allocate a hardware table entry to VLAN counter */
	ret = ice_alloc_vlan_res_counter(hw, &counter_id);
	/* Allocate a hardware table entry to hold large act. Two actions for
	 * counter based large action
	 */
	ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
	if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
	/* Update the switch rule to add the counter action */
	ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
		ice_release_lock(rule_lock);
	ice_release_lock(rule_lock);
	/* only remove entry if it did not exist previously */
		ret = ice_remove_mac(hw, &l_head);
/* This is mapping table entry that maps every word within a given protocol
 * structure to the real byte offset as per the specification of that
 * protocol header.
 * for example dst address is 3 words in ethertype header and corresponding
 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
 * matching entry describing its field. This needs to be updated if new
 * structure is added to that union.
 */
static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_MAC_IL,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_ETYPE_OL,		{ 0 } },
	{ ICE_VLAN_OFOS,	{ 0, 2 } },
	{ ICE_IPV4_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV4_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV6_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_IPV6_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_TCP_IL,		{ 0, 2 } },
	{ ICE_UDP_OF,		{ 0, 2 } },
	{ ICE_UDP_ILOS,		{ 0, 2 } },
	{ ICE_SCTP_IL,		{ 0, 2 } },
	/* Tunnel headers start their extractable words at byte 8 */
	{ ICE_VXLAN,		{ 8, 10, 12, 14 } },
	{ ICE_GENEVE,		{ 8, 10, 12, 14 } },
	{ ICE_VXLAN_GPE,	{ 8, 10, 12, 14 } },
	{ ICE_NVGRE,		{ 0, 2, 4, 6 } },
	{ ICE_GTP,		{ 8, 10, 12, 14, 16, 18, 20 } },
	{ ICE_PPPOE,		{ 0, 2, 4, 6 } },
	{ ICE_PFCP,		{ 8, 10, 12, 14, 16, 18, 20, 22 } },
	{ ICE_L2TPV3,		{ 0, 2, 4, 6, 8, 10 } },
	{ ICE_ESP,		{ 0, 2, 4, 6 } },
	{ ICE_AH,		{ 0, 2, 4, 6, 8, 10 } },
	{ ICE_NAT_T,		{ 8, 10, 12, 14 } },
/* The following table describes preferred grouping of recipes.
 * If a recipe that needs to be programmed is a superset or matches one of the
 * following combinations, then the recipe needs to be chained as per the
 * preferred grouping.
 * Maps each software protocol type to the hardware protocol ID used in
 * field vectors; several tunnel types share the same HW protocol ID
 * (e.g. VXLAN/GENEVE/GTP all map to the outer-UDP HW ID).
 */
static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		ICE_MAC_OFOS_HW },
	{ ICE_MAC_IL,		ICE_MAC_IL_HW },
	{ ICE_ETYPE_OL,		ICE_ETYPE_OL_HW },
	{ ICE_VLAN_OFOS,	ICE_VLAN_OL_HW },
	{ ICE_IPV4_OFOS,	ICE_IPV4_OFOS_HW },
	{ ICE_IPV4_IL,		ICE_IPV4_IL_HW },
	{ ICE_IPV6_OFOS,	ICE_IPV6_OFOS_HW },
	{ ICE_IPV6_IL,		ICE_IPV6_IL_HW },
	{ ICE_TCP_IL,		ICE_TCP_IL_HW },
	{ ICE_UDP_OF,		ICE_UDP_OF_HW },
	{ ICE_UDP_ILOS,		ICE_UDP_ILOS_HW },
	{ ICE_SCTP_IL,		ICE_SCTP_IL_HW },
	{ ICE_VXLAN,		ICE_UDP_OF_HW },
	{ ICE_GENEVE,		ICE_UDP_OF_HW },
	{ ICE_VXLAN_GPE,	ICE_UDP_OF_HW },
	{ ICE_NVGRE,		ICE_GRE_OF_HW },
	{ ICE_GTP,		ICE_UDP_OF_HW },
	{ ICE_PPPOE,		ICE_PPPOE_HW },
	{ ICE_PFCP,		ICE_UDP_ILOS_HW },
	{ ICE_L2TPV3,		ICE_L2TPV3_HW },
	{ ICE_ESP,		ICE_ESP_HW },
	{ ICE_AH,		ICE_AH_HW },
	{ ICE_NAT_T,		ICE_UDP_ILOS_HW },
/**
 * ice_find_recp - find a recipe
 * @hw: pointer to the hardware structure
 * @lkup_exts: extension sequence to match
 * @tun_type: tunnel type the matching recipe must have been created for
 *
 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
 */
static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
			 enum ice_sw_tunnel_type tun_type)
	bool refresh_required = true;
	struct ice_sw_recipe *recp;
	/* Walk through existing recipes to find a match */
	recp = hw->switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		/* If recipe was not created for this ID, in SW bookkeeping,
		 * check if FW has an entry for this recipe. If the FW has an
		 * entry update it in our SW bookkeeping and continue with the
		 * matching.
		 */
		if (!recp[i].recp_created)
			if (ice_get_recp_frm_fw(hw,
						hw->switch_info->recp_list, i,
		/* Skip inverse action recipes */
		if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
		    ICE_AQ_RECIPE_ACT_INV_ACT)
		/* if number of words we are looking for match */
		if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
			struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
			struct ice_fv_word *be = lkup_exts->fv_words;
			u16 *cr = recp[i].lkup_exts.field_mask;
			u16 *de = lkup_exts->field_mask;
			/* ar, cr, and qr are related to the recipe words, while
			 * be, de, and pe are related to the lookup words
			 */
			for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
				for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
					if (ar[qr].off == be[pe].off &&
					    ar[qr].prot_id == be[pe].prot_id &&
						/* Found the "pe"th word in the
						 * given recipe
						 */
				/* After walking through all the words in the
				 * "i"th recipe if "p"th word was not found then
				 * this recipe is not what we are looking for.
				 * So break out from this loop and try the next
				 * recipe
				 */
				if (qr >= recp[i].lkup_exts.n_val_words) {
			/* If for "i"th recipe the found was never set to false
			 * then it means we found our match
			 */
			if ((tun_type == recp[i].tun_type ||
			     tun_type == ICE_SW_TUN_AND_NON_TUN) && found)
				return i; /* Return the recipe ID */
	return ICE_MAX_NUM_RECIPES;
/**
 * ice_prot_type_to_id - get protocol ID from protocol type
 * @type: protocol type
 * @id: pointer to variable that will receive the ID
 *
 * Returns true if found, false otherwise
 *
 * Linear scan of ice_prot_id_tbl; writes the HW protocol ID on a match.
 */
static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
		if (ice_prot_id_tbl[i].type == type) {
			*id = ice_prot_id_tbl[i].protocol_id;
/**
 * ice_fill_valid_words - count valid words
 * @rule: advanced rule with lookup information
 * @lkup_exts: byte offset extractions of the words that are valid
 *
 * calculate valid words in a lookup rule using mask value
 *
 * Appends each word of @rule whose mask is non-zero to @lkup_exts and
 * returns the number of words added in this call.
 */
static u8
ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
		     struct ice_prot_lkup_ext *lkup_exts)
	u8 j, word, prot_id, ret_val;
	if (!ice_prot_type_to_id(rule->type, &prot_id))
	word = lkup_exts->n_val_words;
	/* Walk the rule's mask one 16-bit word at a time */
	for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
		if (((u16 *)&rule->m_u)[j] &&
		    rule->type < ARRAY_SIZE(ice_prot_ext)) {
			/* No more space to accommodate */
			if (word >= ICE_MAX_CHAIN_WORDS)
			lkup_exts->fv_words[word].off =
				ice_prot_ext[rule->type].offs[j];
			lkup_exts->fv_words[word].prot_id =
				ice_prot_id_tbl[rule->type].protocol_id;
			lkup_exts->field_mask[word] =
				BE16_TO_CPU(((__be16 *)&rule->m_u)[j]);
	ret_val = word - lkup_exts->n_val_words;
	lkup_exts->n_val_words = word;
/**
 * ice_create_first_fit_recp_def - Create a recipe grouping
 * @hw: pointer to the hardware structure
 * @lkup_exts: an array of protocol header extractions
 * @rg_list: pointer to a list that stores new recipe groups
 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
 *
 * Using first fit algorithm, take all the words that are still not done
 * and start grouping them in 4-word groups. Each group makes up one
 * recipe.
 */
static enum ice_status
ice_create_first_fit_recp_def(struct ice_hw *hw,
			      struct ice_prot_lkup_ext *lkup_exts,
			      struct LIST_HEAD_TYPE *rg_list,
	struct ice_pref_recipe_group *grp = NULL;
	/* No valid words: still create one (empty) group entry */
	if (!lkup_exts->n_val_words) {
		struct ice_recp_grp_entry *entry;
		entry = (struct ice_recp_grp_entry *)
			ice_malloc(hw, sizeof(*entry));
			return ICE_ERR_NO_MEMORY;
		LIST_ADD(&entry->l_entry, rg_list);
		grp = &entry->r_group;
		grp->n_val_pairs = 0;
	/* Walk through every word in the rule to check if it is not done. If so
	 * then this word needs to be part of a new recipe.
	 */
	for (j = 0; j < lkup_exts->n_val_words; j++)
		if (!ice_is_bit_set(lkup_exts->done, j)) {
			/* Start a new group when the current one is full */
			    grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
				struct ice_recp_grp_entry *entry;
				entry = (struct ice_recp_grp_entry *)
					ice_malloc(hw, sizeof(*entry));
					return ICE_ERR_NO_MEMORY;
				LIST_ADD(&entry->l_entry, rg_list);
				grp = &entry->r_group;
			grp->pairs[grp->n_val_pairs].prot_id =
				lkup_exts->fv_words[j].prot_id;
			grp->pairs[grp->n_val_pairs].off =
				lkup_exts->fv_words[j].off;
			grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
/**
 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
 * @hw: pointer to the hardware structure
 * @fv_list: field vector with the extraction sequence information
 * @rg_list: recipe groupings with protocol-offset pairs
 *
 * Helper function to fill in the field vector indices for protocol-offset
 * pairs. These indexes are then ultimately programmed into a recipe.
 */
static enum ice_status
ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
		       struct LIST_HEAD_TYPE *rg_list)
	struct ice_sw_fv_list_entry *fv;
	struct ice_recp_grp_entry *rg;
	struct ice_fv_word *fv_ext;
	if (LIST_EMPTY(fv_list))
	/* Only the first field vector in the list is consulted */
	fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
	fv_ext = fv->fv_ptr->ew;
	LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
		for (i = 0; i < rg->r_group.n_val_pairs; i++) {
			struct ice_fv_word *pr;
			pr = &rg->r_group.pairs[i];
			mask = rg->r_group.mask[i];
			/* Scan the extraction words for a prot_id/off match */
			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
				if (fv_ext[j].prot_id == pr->prot_id &&
				    fv_ext[j].off == pr->off) {
					/* Store index of field vector */
					rg->fv_mask[i] = mask;
			/* Protocol/offset could not be found, caller gave an
			 * invalid pair
			 */
				return ICE_ERR_PARAM;
/**
 * ice_find_free_recp_res_idx - find free result indexes for recipe
 * @hw: pointer to hardware structure
 * @profiles: bitmap of profiles that will be associated with the new recipe
 * @free_idx: pointer to variable to receive the free index bitmap
 *
 * The algorithm used here is:
 * 1. When creating a new recipe, create a set P which contains all
 *    Profiles that will be associated with our new recipe
 *
 * 2. For each Profile p in set P:
 *    a. Add all recipes associated with Profile p into set R
 *    b. Optional : PossibleIndexes &= profile[p].possibleIndexes
 *	[initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
 *	i. Or just assume they all have the same possible indexes:
 *	i.e., PossibleIndexes = 0x0000F00000000000
 *
 * 3. For each Recipe r in set R:
 *    a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
 *    b. FreeIndexes = UsedIndexes ^ PossibleIndexes
 *
 * FreeIndexes will contain the bits indicating the indexes free for use,
 * then the code needs to update the recipe[r].used_result_idx_bits to
 * indicate which indexes were selected for use by this recipe.
 */
ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
			   ice_bitmap_t *free_idx)
	ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
	ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
	ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
	ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
	ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
	ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
	ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
	/* Start with every index marked possible */
	for (count = 0; count < ICE_MAX_FV_WORDS; count++)
		ice_set_bit(count, possible_idx);
	/* For each profile we are going to associate the recipe with, add the
	 * recipes that are associated with that profile. This will give us
	 * the set of recipes that our recipe may collide with. Also, determine
	 * what possible result indexes are usable given this set of profiles.
	 */
	while (ICE_MAX_NUM_PROFILES >
	       (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
		ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
			      ICE_MAX_NUM_RECIPES);
		ice_and_bitmap(possible_idx, possible_idx,
			       hw->switch_info->prof_res_bm[bit],
	/* For each recipe that our new recipe may collide with, determine
	 * which indexes have been used.
	 */
	for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
		if (ice_is_bit_set(recipes, bit)) {
			ice_or_bitmap(used_idx, used_idx,
				      hw->switch_info->recp_list[bit].res_idxs,
	/* free = possible XOR used (used is a subset of possible) */
	ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
	/* return number of free indexes */
	while (ICE_MAX_FV_WORDS >
	       (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
 * @hw: pointer to hardware structure
 * @rm: recipe management list entry
 * @match_tun: if field vector index for tunnel needs to be programmed
 * @profiles: bitmap of profiles that will be associated.
static enum ice_status
ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
		  bool match_tun, ice_bitmap_t *profiles)
	ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	struct ice_aqc_recipe_data_elem *buf;
	struct ice_recp_grp_entry *entry;
	enum ice_status status;

	/* When more than one recipe are required, another recipe is needed to
	 * chain them together. Matching a tunnel metadata ID takes up one of
	 * the match fields in the chaining recipe reducing the number of
	 * chained recipes by one.
	/* check number of free result indices */
	ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
	free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);

	ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
		  free_res_idx, rm->n_grp_count);

	if (rm->n_grp_count > 1) {
		/* A chained recipe needs a free result index for each
		 * sub-recipe so the pieces can be linked together.
		 */
		if (rm->n_grp_count > free_res_idx)
			return ICE_ERR_MAX_LIMIT;

	if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
		return ICE_ERR_MAX_LIMIT;

	tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
							    ICE_MAX_NUM_RECIPES,
		return ICE_ERR_NO_MEMORY;

	buf = (struct ice_aqc_recipe_data_elem *)
		ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
		status = ICE_ERR_NO_MEMORY;

	ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
	recipe_count = ICE_MAX_NUM_RECIPES;
	/* Read back the existing recipes from firmware into tmp */
	status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
	if (status || recipe_count == 0)

	/* Allocate the recipe resources, and configure them according to the
	 * match fields from protocol headers and extracted field vectors.
	chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
	LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
		status = ice_alloc_recipe(hw, &entry->rid);

		/* Clear the result index of the located recipe, as this will be
		 * updated, if needed, later in the recipe creation process.
		tmp[0].content.result_indx = 0;

		buf[recps] = tmp[0];
		buf[recps].recipe_indx = (u8)entry->rid;
		/* if the recipe is a non-root recipe RID should be programmed
		 * as 0 for the rules to be applied correctly.
		buf[recps].content.rid = 0;
		ice_memset(&buf[recps].content.lkup_indx, 0,
			   sizeof(buf[recps].content.lkup_indx),

		/* All recipes use look-up index 0 to match switch ID. */
		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		buf[recps].content.mask[0] =
			CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
		/* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			buf[recps].content.lkup_indx[i] = 0x80;
			buf[recps].content.mask[i] = 0;

		/* Program the extracted field-vector word index/mask pairs
		 * this group actually matches on.
		 */
		for (i = 0; i < entry->r_group.n_val_pairs; i++) {
			buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
			buf[recps].content.mask[i + 1] =
				CPU_TO_LE16(entry->fv_mask[i]);

		if (rm->n_grp_count > 1) {
			/* Checks to see if there really is a valid result index
			if (chain_idx >= ICE_MAX_FV_WORDS) {
				ice_debug(hw, ICE_DBG_SW,
					  "No chain index available\n");
				status = ICE_ERR_MAX_LIMIT;

			entry->chain_idx = chain_idx;
			buf[recps].content.result_indx =
				ICE_AQ_RECIPE_RESULT_EN |
				((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
				 ICE_AQ_RECIPE_RESULT_DATA_M);
			/* Consume this result index and advance to the next
			 * free one for the following sub-recipe.
			 */
			ice_clear_bit(chain_idx, result_idx_bm);
			chain_idx = ice_find_first_bit(result_idx_bm,

		/* fill recipe dependencies */
		ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
				ICE_MAX_NUM_RECIPES);
		ice_set_bit(buf[recps].recipe_indx,
			    (ice_bitmap_t *)buf[recps].recipe_bitmap);
		buf[recps].content.act_ctrl_fwd_priority = rm->priority;

	if (rm->n_grp_count == 1) {
		/* Single group: the lone recipe is also the root recipe */
		rm->root_rid = buf[0].recipe_indx;
		ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
		buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
		if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
			ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
				   sizeof(buf[0].recipe_bitmap),
				   ICE_NONDMA_TO_NONDMA);
			status = ICE_ERR_BAD_PTR;

		/* Applicable only for ROOT_RECIPE, set the fwd_priority for
		 * the recipe which is getting created if specified
		 * by user. Usually any advanced switch filter, which results
		 * into new extraction sequence, ended up creating a new recipe
		 * of type ROOT and usually recipes are associated with profiles
		 * Switch rule referring newly created recipe, needs to have
		 * either/or 'fwd' or 'join' priority, otherwise switch rule
		 * evaluation will not happen correctly. In other words, if
		 * switch rule to be evaluated on priority basis, then recipe
		 * needs to have priority, otherwise it will be evaluated last.
		buf[0].content.act_ctrl_fwd_priority = rm->priority;
		struct ice_recp_grp_entry *last_chain_entry;

		/* Allocate the last recipe that will chain the outcomes of the
		 * other recipes together
		status = ice_alloc_recipe(hw, &rid);

		buf[recps].recipe_indx = (u8)rid;
		buf[recps].content.rid = (u8)rid;
		buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
		/* the new entry created should also be part of rg_list to
		 * make sure we have complete recipe
		last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
			sizeof(*last_chain_entry));
		if (!last_chain_entry) {
			status = ICE_ERR_NO_MEMORY;

		last_chain_entry->rid = rid;
		ice_memset(&buf[recps].content.lkup_indx, 0,
			   sizeof(buf[recps].content.lkup_indx),
		/* All recipes use look-up index 0 to match switch ID. */
		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		buf[recps].content.mask[0] =
			CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			buf[recps].content.lkup_indx[i] =
				ICE_AQ_RECIPE_LKUP_IGNORE;
			buf[recps].content.mask[i] = 0;

		/* update r_bitmap with the recp that is used for chaining */
		ice_set_bit(rid, rm->r_bitmap);
		/* this is the recipe that chains all the other recipes so it
		 * should not have a chaining ID to indicate the same
		last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
		LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
			/* Each sub-recipe's chain (result) index becomes a
			 * lookup word of this chaining root recipe.
			 */
			last_chain_entry->fv_idx[i] = entry->chain_idx;
			buf[recps].content.lkup_indx[i] = entry->chain_idx;
			buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
			ice_set_bit(entry->rid, rm->r_bitmap);

		LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
		if (sizeof(buf[recps].recipe_bitmap) >=
		    sizeof(rm->r_bitmap)) {
			ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
				   sizeof(buf[recps].recipe_bitmap),
				   ICE_NONDMA_TO_NONDMA);
			status = ICE_ERR_BAD_PTR;

		buf[recps].content.act_ctrl_fwd_priority = rm->priority;
		/* To differentiate among different UDP tunnels, a meta data ID
		buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
		buf[recps].content.mask[i] =
			CPU_TO_LE16(ICE_TUN_FLAG_MASK);

		rm->root_rid = (u8)rid;

	/* Hold the change lock across the AQ call that writes the recipes */
	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);

	status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
	ice_release_change_lock(hw);

	/* Every recipe that just got created add it to the recipe
	LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
		struct ice_switch_info *sw = hw->switch_info;
		bool is_root, idx_found = false;
		struct ice_sw_recipe *recp;
		u16 idx, buf_idx = 0;

		/* find buffer index for copying some data */
		for (idx = 0; idx < rm->n_grp_count; idx++)
			if (buf[idx].recipe_indx == entry->rid) {
			status = ICE_ERR_OUT_OF_RANGE;

		/* Mirror the programmed recipe into the software book-keeping
		 * entry so later lookups can match against it.
		 */
		recp = &sw->recp_list[entry->rid];
		is_root = (rm->root_rid == entry->rid);
		recp->is_root = is_root;
		recp->root_rid = entry->rid;
		recp->big_recp = (is_root && rm->n_grp_count > 1);
		ice_memcpy(&recp->ext_words, entry->r_group.pairs,
			   entry->r_group.n_val_pairs *
			   sizeof(struct ice_fv_word),
			   ICE_NONDMA_TO_NONDMA);
		ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
			   sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);

		/* Copy non-result fv index values and masks to recipe. This
		 * call will also update the result recipe bitmask.
		ice_collect_result_idx(&buf[buf_idx], recp);

		/* for non-root recipes, also copy to the root, this allows
		 * easier matching of a complete chained recipe
		ice_collect_result_idx(&buf[buf_idx],
				       &sw->recp_list[rm->root_rid]);
		recp->n_ext_words = entry->r_group.n_val_pairs;
		recp->chain_idx = entry->chain_idx;
		recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
		recp->n_grp_count = rm->n_grp_count;
		recp->tun_type = rm->tun_type;
		recp->recp_created = true;
 * ice_create_recipe_group - creates recipe group
 * @hw: pointer to hardware structure
 * @rm: recipe management list entry
 * @lkup_exts: lookup elements
static enum ice_status
ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
			struct ice_prot_lkup_ext *lkup_exts)
	enum ice_status status;

	rm->n_grp_count = 0;

	/* Create recipes for words that are marked not done by packing them
	status = ice_create_first_fit_recp_def(hw, lkup_exts,
					       &rm->rg_list, &recp_count);
	/* Record how many groups were produced and cache the extraction
	 * words/masks on the recipe management entry for later matching.
	 */
	rm->n_grp_count += recp_count;
	rm->n_ext_words = lkup_exts->n_val_words;
	ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
		   sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
	ice_memcpy(rm->word_masks, lkup_exts->field_mask,
		   sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
 * @hw: pointer to hardware structure
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *         structure per protocol header
 * @lkups_cnt: number of protocols
 * @bm: bitmap of field vectors to consider
 * @fv_list: pointer to a list that holds the returned field vectors
static enum ice_status
ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
	   ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
	enum ice_status status;

	prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
		return ICE_ERR_NO_MEMORY;

	/* Translate each lookup's protocol type into a HW protocol ID;
	 * an untranslatable type makes the whole request invalid.
	 */
	for (i = 0; i < lkups_cnt; i++)
		if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
			status = ICE_ERR_CFG;

	/* Find field vectors that include all specified protocol types */
	status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);

	ice_free(hw, prot_ids);
 * ice_tun_type_match_word - determine if tun type needs a match mask
 * @tun_type: tunnel type
 * @mask: mask to be used for the tunnel
static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
	/* These tunnel types are matched via the tunnel-flag metadata
	 * word, so report the mask to apply to that word.
	 */
	case ICE_SW_TUN_VXLAN_GPE:
	case ICE_SW_TUN_NVGRE:
	case ICE_SW_TUN_UDP:
	case ICE_ALL_TUNNELS:
		*mask = ICE_TUN_FLAG_MASK;
 * ice_add_special_words - Add words that are not protocols, such as metadata
 * @rinfo: other information regarding the rule e.g. priority and action info
 * @lkup_exts: lookup word structure
static enum ice_status
ice_add_special_words(struct ice_adv_rule_info *rinfo,
		      struct ice_prot_lkup_ext *lkup_exts)
	/* If this is a tunneled packet, then add recipe index to match the
	 * tunnel bit in the packet metadata flags.
	if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
		if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
			/* Claim the next free lookup-word slot for the
			 * metadata match.
			 */
			u8 word = lkup_exts->n_val_words++;

			lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
			lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
			lkup_exts->field_mask[word] = mask;
			/* No lookup-word slots left for the metadata word */
			return ICE_ERR_MAX_LIMIT;
/* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
 * @hw: pointer to hardware structure
 * @rinfo: other information regarding the rule e.g. priority and action info
 * @bm: pointer to memory for returning the bitmap of field vectors
ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
	enum ice_prof_type prof_type;

	ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);

	/* Map the rule's tunnel type either to a broad profile category
	 * (resolved below via ice_get_sw_fv_bitmap) or, for profile-ID
	 * rules, directly to a specific profile bit in @bm.
	 */
	switch (rinfo->tun_type) {
		prof_type = ICE_PROF_NON_TUN;
	case ICE_ALL_TUNNELS:
		prof_type = ICE_PROF_TUN_ALL;
	case ICE_SW_TUN_VXLAN_GPE:
	case ICE_SW_TUN_GENEVE:
	case ICE_SW_TUN_VXLAN:
	case ICE_SW_TUN_UDP:
	case ICE_SW_TUN_GTP:
		prof_type = ICE_PROF_TUN_UDP;
	case ICE_SW_TUN_NVGRE:
		prof_type = ICE_PROF_TUN_GRE;
	case ICE_SW_TUN_PPPOE:
		prof_type = ICE_PROF_TUN_PPPOE;
	case ICE_SW_TUN_PROFID_IPV6_ESP:
	case ICE_SW_TUN_IPV6_ESP:
		ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
	case ICE_SW_TUN_PROFID_IPV6_AH:
	case ICE_SW_TUN_IPV6_AH:
		ice_set_bit(ICE_PROFID_IPV6_AH, bm);
	case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
	case ICE_SW_TUN_IPV6_L2TPV3:
		ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
	case ICE_SW_TUN_PROFID_IPV6_NAT_T:
	case ICE_SW_TUN_IPV6_NAT_T:
		ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
	case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
		ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
	case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
		ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
	case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
		ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
	case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
		ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
	case ICE_SW_TUN_IPV4_NAT_T:
		ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
	case ICE_SW_TUN_IPV4_L2TPV3:
		ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
	case ICE_SW_TUN_IPV4_ESP:
		ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
	case ICE_SW_TUN_IPV4_AH:
		ice_set_bit(ICE_PROFID_IPV4_AH, bm);
	case ICE_SW_TUN_AND_NON_TUN:
		prof_type = ICE_PROF_ALL;

	/* Expand the chosen profile category into the profile bitmap */
	ice_get_sw_fv_bitmap(hw, prof_type, bm);
 * ice_is_prof_rule - determine if rule type is a profile rule
 * @type: the rule type
 *
 * if the rule type is a profile rule, that means that there is no field value
 * match required, in this case just a profile hit is required.
bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
	/* All PROFID tunnel types match on profile hit alone */
	case ICE_SW_TUN_PROFID_IPV6_ESP:
	case ICE_SW_TUN_PROFID_IPV6_AH:
	case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
	case ICE_SW_TUN_PROFID_IPV6_NAT_T:
	case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
	case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
	case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
	case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
 * @hw: pointer to hardware structure
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *         structure per protocol header
 * @lkups_cnt: number of protocols
 * @rinfo: other information regarding the rule e.g. priority and action info
 * @rid: return the recipe ID of the recipe created
static enum ice_status
ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		   u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
	ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
	ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
	struct ice_prot_lkup_ext *lkup_exts;
	struct ice_recp_grp_entry *r_entry;
	struct ice_sw_fv_list_entry *fvit;
	struct ice_recp_grp_entry *r_tmp;
	struct ice_sw_fv_list_entry *tmp;
	enum ice_status status = ICE_SUCCESS;
	struct ice_sw_recipe *rm;
	bool match_tun = false;

	/* Profile rules need no lookup elements; any other rule type must
	 * supply at least one.
	 */
	if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
		return ICE_ERR_PARAM;

	lkup_exts = (struct ice_prot_lkup_ext *)
		ice_malloc(hw, sizeof(*lkup_exts));
		return ICE_ERR_NO_MEMORY;

	/* Determine the number of words to be matched and if it exceeds a
	 * recipe's restrictions
	for (i = 0; i < lkups_cnt; i++) {
		if (lkups[i].type >= ICE_PROTOCOL_LAST) {
			status = ICE_ERR_CFG;
			goto err_free_lkup_exts;

		count = ice_fill_valid_words(&lkups[i], lkup_exts);
			status = ICE_ERR_CFG;
			goto err_free_lkup_exts;

	rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
		status = ICE_ERR_NO_MEMORY;
		goto err_free_lkup_exts;

	/* Get field vectors that contain fields extracted from all the protocol
	 * headers being programmed.
	INIT_LIST_HEAD(&rm->fv_list);
	INIT_LIST_HEAD(&rm->rg_list);

	/* Get bitmap of field vectors (profiles) that are compatible with the
	 * rule request; only these will be searched in the subsequent call to
	ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
	status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);

	/* Group match words into recipes using preferred recipe grouping
	status = ice_create_recipe_group(hw, rm, lkup_exts);

	/* For certain tunnel types it is necessary to use a metadata ID flag to
	 * differentiate different tunnel types. A separate recipe needs to be
	 * used for the metadata.
	if (ice_tun_type_match_word(rinfo->tun_type, &mask) &&
	    rm->n_grp_count > 1)

	/* set the recipe priority if specified */
	rm->priority = (u8)rinfo->priority;

	/* Find offsets from the field vector. Pick the first one for all the
	status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);

	/* An empty FV list means to use all the profiles returned in the
	if (LIST_EMPTY(&rm->fv_list)) {
		/* Build one fv_list entry per profile bit that was set */
		for (j = 0; j < ICE_MAX_NUM_PROFILES; j++)
			if (ice_is_bit_set(fv_bitmap, j)) {
				struct ice_sw_fv_list_entry *fvl;

				fvl = (struct ice_sw_fv_list_entry *)
					ice_malloc(hw, sizeof(*fvl));
				fvl->profile_id = j;
				LIST_ADD(&fvl->list_entry, &rm->fv_list);

	/* get bitmap of all profiles the recipe will be associated with */
	ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
	LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
		ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
		ice_set_bit((u16)fvit->profile_id, profiles);

	/* Create any special protocol/offset pairs, such as looking at tunnel
	 * bits by extracting metadata
	status = ice_add_special_words(rinfo, lkup_exts);
		goto err_free_lkup_exts;

	/* Look for a recipe which matches our requested fv / mask list */
	*rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
	if (*rid < ICE_MAX_NUM_RECIPES)
		/* Success if found a recipe that matches the existing criteria */

	rm->tun_type = rinfo->tun_type;
	/* Recipe we need does not exist, add a recipe */
	status = ice_add_sw_recipe(hw, rm, match_tun, profiles);

	/* Associate all the recipes created with all the profiles in the
	 * common field vector.
	LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
		ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);

		status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
						      (u8 *)r_bitmap, NULL);

		/* Merge the newly created recipes into the profile's
		 * existing recipe association set.
		 */
		ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
			      ICE_MAX_NUM_RECIPES);
		status = ice_acquire_change_lock(hw, ICE_RES_WRITE);

		status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
		ice_release_change_lock(hw);

		/* Update profile to recipe bitmap array */
		ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
			      ICE_MAX_NUM_RECIPES);

		/* Update recipe to profile bitmap array */
		for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
			if (ice_is_bit_set(r_bitmap, j))
				ice_set_bit((u16)fvit->profile_id,
					    recipe_to_profile[j]);

	*rid = rm->root_rid;
	/* Cache the lookup extraction words on the created recipe */
	ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
		   lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
	/* Release the per-group entries accumulated on rg_list */
	LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
				 ice_recp_grp_entry, l_entry) {
		LIST_DEL(&r_entry->l_entry);
		ice_free(hw, r_entry);

	LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
		LIST_DEL(&fvit->list_entry);

	ice_free(hw, rm->root_buf);

	ice_free(hw, lkup_exts);
 * ice_find_dummy_packet - find dummy packet by tunnel type
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *         structure per protocol header
 * @lkups_cnt: number of protocols
 * @tun_type: tunnel type from the match criteria
 * @pkt: dummy packet to fill according to filter match criteria
 * @pkt_len: packet length of dummy packet
 * @offsets: pointer to receive the pointer to the offsets for the packet
ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
		      enum ice_sw_tunnel_type tun_type, const u8 **pkt,
		      const struct ice_dummy_pkt_offsets **offsets)
	bool tcp = false, udp = false, ipv6 = false, vlan = false;

	/* Scan the lookups once to learn which L3/L4/VLAN/tunnel hints
	 * should drive the template selection below.
	 */
	for (i = 0; i < lkups_cnt; i++) {
		if (lkups[i].type == ICE_UDP_ILOS)
		else if (lkups[i].type == ICE_TCP_IL)
		else if (lkups[i].type == ICE_IPV6_OFOS)
		else if (lkups[i].type == ICE_VLAN_OFOS)
		else if (lkups[i].type == ICE_IPV4_OFOS &&
			 lkups[i].h_u.ipv4_hdr.protocol ==
				ICE_IPV4_NVGRE_PROTO_ID &&
			 lkups[i].m_u.ipv4_hdr.protocol ==
		else if (lkups[i].type == ICE_PPPOE &&
			 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
				CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
			 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
		else if (lkups[i].type == ICE_ETYPE_OL &&
			 lkups[i].h_u.ethertype.ethtype_id ==
				CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
			 lkups[i].m_u.ethertype.ethtype_id ==

	/* Specialized tunnel types map directly to a fixed template */
	if (tun_type == ICE_SW_TUN_IPV4_ESP) {
		*pkt = dummy_ipv4_esp_pkt;
		*pkt_len = sizeof(dummy_ipv4_esp_pkt);
		*offsets = dummy_ipv4_esp_packet_offsets;

	if (tun_type == ICE_SW_TUN_IPV6_ESP) {
		*pkt = dummy_ipv6_esp_pkt;
		*pkt_len = sizeof(dummy_ipv6_esp_pkt);
		*offsets = dummy_ipv6_esp_packet_offsets;

	if (tun_type == ICE_SW_TUN_IPV4_AH) {
		*pkt = dummy_ipv4_ah_pkt;
		*pkt_len = sizeof(dummy_ipv4_ah_pkt);
		*offsets = dummy_ipv4_ah_packet_offsets;

	if (tun_type == ICE_SW_TUN_IPV6_AH) {
		*pkt = dummy_ipv6_ah_pkt;
		*pkt_len = sizeof(dummy_ipv6_ah_pkt);
		*offsets = dummy_ipv6_ah_packet_offsets;

	if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
		*pkt = dummy_ipv4_nat_pkt;
		*pkt_len = sizeof(dummy_ipv4_nat_pkt);
		*offsets = dummy_ipv4_nat_packet_offsets;

	if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
		*pkt = dummy_ipv6_nat_pkt;
		*pkt_len = sizeof(dummy_ipv6_nat_pkt);
		*offsets = dummy_ipv6_nat_packet_offsets;

	if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
		*pkt = dummy_ipv4_l2tpv3_pkt;
		*pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
		*offsets = dummy_ipv4_l2tpv3_packet_offsets;

	if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
		*pkt = dummy_ipv6_l2tpv3_pkt;
		*pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
		*offsets = dummy_ipv6_l2tpv3_packet_offsets;

	if (tun_type == ICE_SW_TUN_GTP) {
		*pkt = dummy_udp_gtp_packet;
		*pkt_len = sizeof(dummy_udp_gtp_packet);
		*offsets = dummy_udp_gtp_packet_offsets;

	/* PPPoE: payload protocol (IPv6 vs IPv4) selects the template */
	if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
		*pkt = dummy_pppoe_ipv6_packet;
		*pkt_len = sizeof(dummy_pppoe_ipv6_packet);
		*offsets = dummy_pppoe_packet_offsets;
	} else if (tun_type == ICE_SW_TUN_PPPOE) {
		*pkt = dummy_pppoe_ipv4_packet;
		*pkt_len = sizeof(dummy_pppoe_ipv4_packet);
		*offsets = dummy_pppoe_packet_offsets;

	if (tun_type == ICE_ALL_TUNNELS) {
		*pkt = dummy_gre_udp_packet;
		*pkt_len = sizeof(dummy_gre_udp_packet);
		*offsets = dummy_gre_udp_packet_offsets;

	/* GRE/NVGRE: inner TCP vs UDP selects the template */
	if (tun_type == ICE_SW_TUN_NVGRE || gre) {
		*pkt = dummy_gre_tcp_packet;
		*pkt_len = sizeof(dummy_gre_tcp_packet);
		*offsets = dummy_gre_tcp_packet_offsets;

		*pkt = dummy_gre_udp_packet;
		*pkt_len = sizeof(dummy_gre_udp_packet);
		*offsets = dummy_gre_udp_packet_offsets;

	/* Generic UDP tunnels (VXLAN/GENEVE/...): inner TCP vs UDP */
	if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
	    tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP) {
		*pkt = dummy_udp_tun_tcp_packet;
		*pkt_len = sizeof(dummy_udp_tun_tcp_packet);
		*offsets = dummy_udp_tun_tcp_packet_offsets;

		*pkt = dummy_udp_tun_udp_packet;
		*pkt_len = sizeof(dummy_udp_tun_udp_packet);
		*offsets = dummy_udp_tun_udp_packet_offsets;

	/* Non-tunneled: choose by UDP/TCP x IPv4/IPv6 x VLAN presence */
		*pkt = dummy_vlan_udp_packet;
		*pkt_len = sizeof(dummy_vlan_udp_packet);
		*offsets = dummy_vlan_udp_packet_offsets;

		*pkt = dummy_udp_packet;
		*pkt_len = sizeof(dummy_udp_packet);
		*offsets = dummy_udp_packet_offsets;
	} else if (udp && ipv6) {
		*pkt = dummy_vlan_udp_ipv6_packet;
		*pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
		*offsets = dummy_vlan_udp_ipv6_packet_offsets;

		*pkt = dummy_udp_ipv6_packet;
		*pkt_len = sizeof(dummy_udp_ipv6_packet);
		*offsets = dummy_udp_ipv6_packet_offsets;
	} else if ((tcp && ipv6) || ipv6) {
		*pkt = dummy_vlan_tcp_ipv6_packet;
		*pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
		*offsets = dummy_vlan_tcp_ipv6_packet_offsets;

		*pkt = dummy_tcp_ipv6_packet;
		*pkt_len = sizeof(dummy_tcp_ipv6_packet);
		*offsets = dummy_tcp_ipv6_packet_offsets;

		*pkt = dummy_vlan_tcp_packet;
		*pkt_len = sizeof(dummy_vlan_tcp_packet);
		*offsets = dummy_vlan_tcp_packet_offsets;
		*pkt = dummy_tcp_packet;
		*pkt_len = sizeof(dummy_tcp_packet);
		*offsets = dummy_tcp_packet_offsets;
 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *         structure per protocol header
 * @lkups_cnt: number of protocols
 * @s_rule: stores rule information from the match criteria
 * @dummy_pkt: dummy packet to fill according to filter match criteria
 * @pkt_len: packet length of dummy packet
 * @offsets: offset info for the dummy packet
static enum ice_status
ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
			  struct ice_aqc_sw_rules_elem *s_rule,
			  const u8 *dummy_pkt, u16 pkt_len,
			  const struct ice_dummy_pkt_offsets *offsets)
	/* Start with a packet with a pre-defined/dummy content. Then, fill
	 * in the header values to be looked up or matched.
	pkt = s_rule->pdata.lkup_tx_rx.hdr;

	ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);

	for (i = 0; i < lkups_cnt; i++) {
		enum ice_protocol_type type;
		u16 offset = 0, len = 0, j;

		/* find the start of this layer; it should be found since this
		 * was already checked when searching for the dummy packet
		type = lkups[i].type;
		for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
			if (type == offsets[j].type) {
				offset = offsets[j].offset;

		/* this should never happen in a correct calling sequence */
			return ICE_ERR_PARAM;

		/* Each protocol type maps to the size of its header struct;
		 * unknown types are rejected.
		 */
		switch (lkups[i].type) {
			len = sizeof(struct ice_ether_hdr);
			len = sizeof(struct ice_ethtype_hdr);
			len = sizeof(struct ice_vlan_hdr);
			len = sizeof(struct ice_ipv4_hdr);
			len = sizeof(struct ice_ipv6_hdr);
			len = sizeof(struct ice_l4_hdr);
			len = sizeof(struct ice_sctp_hdr);
			len = sizeof(struct ice_nvgre);
			len = sizeof(struct ice_udp_tnl_hdr);
			len = sizeof(struct ice_udp_gtp_hdr);
			len = sizeof(struct ice_pppoe_hdr);
			len = sizeof(struct ice_esp_hdr);
			len = sizeof(struct ice_nat_t_hdr);
			len = sizeof(struct ice_ah_hdr);
			len = sizeof(struct ice_l2tpv3_sess_hdr);
			return ICE_ERR_PARAM;

		/* the length should be a word multiple */
		if (len % ICE_BYTES_PER_WORD)

		/* We have the offset to the header start, the length, the
		 * caller's header values and mask. Use this information to
		 * copy the data into the dummy packet appropriately based on
		 * the mask. Note that we need to only write the bits as
		 * indicated by the mask to make sure we don't improperly write
		 * over any significant packet data.
		for (j = 0; j < len / sizeof(u16); j++)
			if (((u16 *)&lkups[i].m_u)[j])
				((u16 *)(pkt + offset))[j] =
					(((u16 *)(pkt + offset))[j] &
					 ~((u16 *)&lkups[i].m_u)[j]) |
					(((u16 *)&lkups[i].h_u)[j] &
					 ((u16 *)&lkups[i].m_u)[j]);

	s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
 * @hw: pointer to the hardware structure
 * @tun_type: tunnel type
 * @pkt: dummy packet to fill in
 * @offsets: offset info for the dummy packet
static enum ice_status
ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
			u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
	/* Resolve the currently-open tunnel port for this tunnel type */
	case ICE_SW_TUN_AND_NON_TUN:
	case ICE_SW_TUN_VXLAN_GPE:
	case ICE_SW_TUN_VXLAN:
	case ICE_SW_TUN_UDP:
		if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
	case ICE_SW_TUN_GENEVE:
		if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
		/* Nothing needs to be done for this tunnel type */

	/* Find the outer UDP protocol header and insert the port number */
	for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
		if (offsets[i].type == ICE_UDP_OF) {
			struct ice_l4_hdr *hdr;

			offset = offsets[i].offset;
			hdr = (struct ice_l4_hdr *)&pkt[offset];
			/* Tunnel port goes into the UDP destination port */
			hdr->dst_port = CPU_TO_BE16(open_port);
 * ice_find_adv_rule_entry - Search a rule entry
 * @hw: pointer to the hardware structure
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *         structure per protocol header
 * @lkups_cnt: number of protocols
 * @recp_id: recipe ID for which we are finding the rule
 * @rinfo: other information regarding the rule e.g. priority and action info
 *
 * Helper function to search for a given advanced rule entry
 * Returns pointer to entry storing the rule if found
static struct ice_adv_fltr_mgmt_list_entry *
ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
			u16 lkups_cnt, u16 recp_id,
			struct ice_adv_rule_info *rinfo)
	struct ice_adv_fltr_mgmt_list_entry *list_itr;
	struct ice_switch_info *sw = hw->switch_info;

	/* Walk the filter rules already installed for this recipe and look
	 * for one whose lookup elements compare equal to the request.
	 */
	LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
			    ice_adv_fltr_mgmt_list_entry, list_entry) {
		bool lkups_matched = true;

		if (lkups_cnt != list_itr->lkups_cnt)
		for (i = 0; i < list_itr->lkups_cnt; i++)
			if (memcmp(&list_itr->lkups[i], &lkups[i],
				lkups_matched = false;

		/* Besides the lookups, the action flag and tunnel type must
		 * also agree for the rule to be considered the same.
		 */
		if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
		    rinfo->tun_type == list_itr->rule_info.tun_type &&
6828 * ice_adv_add_update_vsi_list
6829 * @hw: pointer to the hardware structure
6830 * @m_entry: pointer to current adv filter management list entry
6831 * @cur_fltr: filter information from the book keeping entry
6832 * @new_fltr: filter information with the new VSI to be added
6834 * Call AQ command to add or update previously created VSI list with new VSI.
6836 * Helper function to do book keeping associated with adding filter information
6837 * The algorithm to do the booking keeping is described below :
6838 * When a VSI needs to subscribe to a given advanced filter
6839 * if only one VSI has been added till now
6840 * Allocate a new VSI list and add two VSIs
6841 * to this list using switch rule command
6842 * Update the previously created switch rule with the
6843 * newly created VSI list ID
6844 * if a VSI list was previously created
6845 * Add the new VSI to the previously created VSI list set
6846 * using the update switch rule command
6848 static enum ice_status
6849 ice_adv_add_update_vsi_list(struct ice_hw *hw,
6850 struct ice_adv_fltr_mgmt_list_entry *m_entry,
6851 struct ice_adv_rule_info *cur_fltr,
6852 struct ice_adv_rule_info *new_fltr)
6854 enum ice_status status;
6855 u16 vsi_list_id = 0;
/* Queue/queue-group/drop actions do not use VSI lists, so a second
 * subscriber cannot be folded into the existing rule.
 */
6857 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6858 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6859 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
6860 return ICE_ERR_NOT_IMPL;
/* Mixing a queue-directed action with an existing VSI-directed rule is
 * likewise unsupported.
 */
6862 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6863 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
6864 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6865 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
6866 return ICE_ERR_NOT_IMPL;
6868 /* Workaround fix for unexpected rule deletion by kernel PF
/* NOTE(review): this rejects VSI->VSI merging outright rather than
 * building a 2-entry VSI list — intentional workaround, confirm it is
 * still required before removing.
 */
6871 if (new_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI &&
6872 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI)
6873 return ICE_ERR_NOT_IMPL;
6875 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
6876 /* Only one entry existed in the mapping and it was not already
6877 * a part of a VSI list. So, create a VSI list with the old and
6880 struct ice_fltr_info tmp_fltr;
6881 u16 vsi_handle_arr[2];
6883 /* A rule already exists with the new VSI being added */
6884 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
6885 new_fltr->sw_act.fwd_id.hw_vsi_id)
6886 return ICE_ERR_ALREADY_EXISTS;
/* Create a 2-entry VSI list holding the existing and the new VSI */
6888 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
6889 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
6890 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
6896 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
6897 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
6898 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
6899 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
6900 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
6901 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
6903 /* Update the previous switch rule of "forward to VSI" to
6906 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Book-keeping: the rule now forwards to the list, not a single VSI */
6910 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
6911 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
6912 m_entry->vsi_list_info =
6913 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
6916 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
6918 if (!m_entry->vsi_list_info)
6921 /* A rule already exists with the new VSI being added */
6922 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
6925 /* Update the previously created VSI list set with
6926 * the new VSI ID passed in
6928 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
6930 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
6932 ice_aqc_opc_update_sw_rules,
6934 /* update VSI list mapping info with new VSI ID */
6936 ice_set_bit(vsi_handle,
6937 m_entry->vsi_list_info->vsi_map);
/* One more VSI now subscribes to this filter entry */
6940 m_entry->vsi_count++;
6945 * ice_add_adv_rule - helper function to create an advanced switch rule
6946 * @hw: pointer to the hardware structure
6947 * @lkups: information on the words that needs to be looked up. All words
6948 * together makes one recipe
6949 * @lkups_cnt: num of entries in the lkups array
6950 * @rinfo: other information related to the rule that needs to be programmed
6951 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
6952 * ignored is case of error.
6954 * This function can program only 1 rule at a time. The lkups is used to
6955 * describe the all the words that forms the "lookup" portion of the recipe.
6956 * These words can span multiple protocols. Callers to this function need to
6957 * pass in a list of protocol headers with lookup information along and mask
6958 * that determines which words are valid from the given protocol header.
6959 * rinfo describes other information related to this rule such as forwarding
6960 * IDs, priority of this rule, etc.
6963 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6964 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
6965 struct ice_rule_query_data *added_entry)
6967 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
6968 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
6969 const struct ice_dummy_pkt_offsets *pkt_offsets;
6970 struct ice_aqc_sw_rules_elem *s_rule = NULL;
6971 struct LIST_HEAD_TYPE *rule_head;
6972 struct ice_switch_info *sw;
6973 enum ice_status status;
6974 const u8 *pkt = NULL;
6980 /* Initialize profile to result index bitmap */
6981 if (!hw->switch_info->prof_res_bm_init) {
6982 hw->switch_info->prof_res_bm_init = 1;
6983 ice_init_prof_result_bm(hw);
/* Profile rules match on tunnel type alone and may carry zero lookups */
6986 prof_rule = ice_is_prof_rule(rinfo->tun_type);
6987 if (!prof_rule && !lkups_cnt)
6988 return ICE_ERR_PARAM;
6990 /* get # of words we need to match */
6992 for (i = 0; i < lkups_cnt; i++) {
6995 ptr = (u16 *)&lkups[i].m_u;
6996 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
/* HW recipes chain a bounded number of 16-bit match words; reject
 * requests that exceed that limit (and, for non-profile rules, requests
 * with no valid words at all).
 */
7002 if (word_cnt > ICE_MAX_CHAIN_WORDS)
7003 return ICE_ERR_PARAM;
7005 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
7006 return ICE_ERR_PARAM;
7009 /* make sure that we can locate a dummy packet */
7010 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
7013 status = ICE_ERR_PARAM;
7014 goto err_ice_add_adv_rule;
/* Only these four actions are supported for advanced rules */
7017 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
7018 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
7019 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
7020 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
7023 vsi_handle = rinfo->sw_act.vsi_handle;
7024 if (!ice_is_vsi_valid(hw, vsi_handle))
7025 return ICE_ERR_PARAM;
/* Resolve the software VSI handle to the HW VSI number the switch uses */
7027 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
7028 rinfo->sw_act.fwd_id.hw_vsi_id =
7029 ice_get_hw_vsi_num(hw, vsi_handle);
7030 if (rinfo->sw_act.flag & ICE_FLTR_TX)
7031 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
7033 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
7036 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
7038 /* we have to add VSI to VSI_LIST and increment vsi_count.
7039 * Also Update VSI list so that we can change forwarding rule
7040 * if the rule already exists, we will check if it exists with
7041 * same vsi_id, if not then add it to the VSI list if it already
7042 * exists if not then create a VSI list and add the existing VSI
7043 * ID and the new VSI ID to the list
7044 * We will add that VSI to the list
7046 status = ice_adv_add_update_vsi_list(hw, m_entry,
7047 &m_entry->rule_info,
/* Rule already existed — report its identifiers back to the caller */
7050 added_entry->rid = rid;
7051 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
7052 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* New rule: build the AQ switch-rule element (header + dummy packet) */
7056 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
7057 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
7059 return ICE_ERR_NO_MEMORY;
7060 act |= ICE_SINGLE_ACT_LAN_ENABLE;
/* Encode the caller's action into the single-action word */
7061 switch (rinfo->sw_act.fltr_act) {
7062 case ICE_FWD_TO_VSI:
7063 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
7064 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
7065 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
7068 act |= ICE_SINGLE_ACT_TO_Q;
7069 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
7070 ICE_SINGLE_ACT_Q_INDEX_M;
7072 case ICE_FWD_TO_QGRP:
/* Queue region size is encoded as log2 of the group size */
7073 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
7074 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
7075 act |= ICE_SINGLE_ACT_TO_Q;
7076 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
7077 ICE_SINGLE_ACT_Q_INDEX_M;
7078 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
7079 ICE_SINGLE_ACT_Q_REGION_M;
7081 case ICE_DROP_PACKET:
7082 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
7083 ICE_SINGLE_ACT_VALID_BIT;
7086 status = ICE_ERR_CFG;
7087 goto err_ice_add_adv_rule;
7090 /* set the rule LOOKUP type based on caller specified 'RX'
7091 * instead of hardcoding it to be either LOOKUP_TX/RX
7093 * for 'RX' set the source to be the port number
7094 * for 'TX' set the source to be the source HW VSI number (determined
7098 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
7099 s_rule->pdata.lkup_tx_rx.src =
7100 CPU_TO_LE16(hw->port_info->lport);
7102 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
7103 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
7106 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
7107 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* Copy the dummy packet into the rule and patch in the caller's
 * match values at the protocol offsets.
 */
7109 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
7110 pkt_len, pkt_offsets);
7112 goto err_ice_add_adv_rule;
/* Tunnel rules additionally need tunnel-specific header fixups */
7114 if (rinfo->tun_type != ICE_NON_TUN &&
7115 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
7116 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
7117 s_rule->pdata.lkup_tx_rx.hdr,
7120 goto err_ice_add_adv_rule;
/* Program the rule into HW via the admin queue */
7123 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
7124 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
7127 goto err_ice_add_adv_rule;
7128 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
7129 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
7131 status = ICE_ERR_NO_MEMORY;
7132 goto err_ice_add_adv_rule;
/* Keep a private copy of the lookups for later match/remove/replay */
7135 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
7136 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
7137 ICE_NONDMA_TO_NONDMA);
7138 if (!adv_fltr->lkups && !prof_rule) {
7139 status = ICE_ERR_NO_MEMORY;
7140 goto err_ice_add_adv_rule;
7143 adv_fltr->lkups_cnt = lkups_cnt;
7144 adv_fltr->rule_info = *rinfo;
7145 adv_fltr->rule_info.fltr_rule_id =
7146 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
7147 sw = hw->switch_info;
7148 sw->recp_list[rid].adv_rule = true;
7149 rule_head = &sw->recp_list[rid].filt_rules;
7151 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
7152 adv_fltr->vsi_count = 1;
7154 /* Add rule entry to book keeping list */
7155 LIST_ADD(&adv_fltr->list_entry, rule_head);
7157 added_entry->rid = rid;
7158 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
7159 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* goto-cleanup: free partial allocations on failure; s_rule is always
 * freed since HW has its own copy after the AQ call.
 */
7161 err_ice_add_adv_rule:
7162 if (status && adv_fltr) {
7163 ice_free(hw, adv_fltr->lkups);
7164 ice_free(hw, adv_fltr);
7167 ice_free(hw, s_rule);
7173 * ice_adv_rem_update_vsi_list
7174 * @hw: pointer to the hardware structure
7175 * @vsi_handle: VSI handle of the VSI to remove
7176 * @fm_list: filter management entry for which the VSI list management needs to
/* Removes one VSI from an advanced rule's VSI list; when only one VSI
 * remains afterwards, the rule is demoted back to a plain forward-to-VSI
 * rule and the now-unused VSI list is freed.
 */
7179 static enum ice_status
7180 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
7181 struct ice_adv_fltr_mgmt_list_entry *fm_list)
7183 struct ice_vsi_list_map_info *vsi_list_info;
7184 enum ice_sw_lkup_type lkup_type;
7185 enum ice_status status;
/* Only VSI-list rules with at least one subscriber can be trimmed */
7188 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
7189 fm_list->vsi_count == 0)
7190 return ICE_ERR_PARAM;
7192 /* A rule with the VSI being removed does not exist */
7193 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
7194 return ICE_ERR_DOES_NOT_EXIST;
7196 lkup_type = ICE_SW_LKUP_LAST;
7197 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
/* 'true' = remove: drop this VSI from the HW VSI list */
7198 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
7199 ice_aqc_opc_update_sw_rules,
7204 fm_list->vsi_count--;
7205 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
7206 vsi_list_info = fm_list->vsi_list_info;
7207 if (fm_list->vsi_count == 1) {
7208 struct ice_fltr_info tmp_fltr;
/* Exactly one VSI left — find it so the rule can forward to it
 * directly instead of through the list.
 */
7211 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
7213 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
7214 return ICE_ERR_OUT_OF_RANGE;
7216 /* Make sure VSI list is empty before removing it below */
7217 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
7219 ice_aqc_opc_update_sw_rules,
7224 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
7225 tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
7226 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
7227 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
7228 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
7229 tmp_fltr.fwd_id.hw_vsi_id =
7230 ice_get_hw_vsi_num(hw, rem_vsi_handle);
7231 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
7232 ice_get_hw_vsi_num(hw, rem_vsi_handle);
7234 /* Update the previous switch rule of "MAC forward to VSI" to
7235 * "MAC fwd to VSI list"
7237 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
7239 ice_debug(hw, ICE_DBG_SW,
7240 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
7241 tmp_fltr.fwd_id.hw_vsi_id, status);
7245 /* Remove the VSI list since it is no longer used */
7246 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
7248 ice_debug(hw, ICE_DBG_SW,
7249 "Failed to remove VSI list %d, error %d\n",
7250 vsi_list_id, status);
/* Drop the software map entry for the deleted HW VSI list */
7254 LIST_DEL(&vsi_list_info->list_entry);
7255 ice_free(hw, vsi_list_info);
7256 fm_list->vsi_list_info = NULL;
7263 * ice_rem_adv_rule - removes existing advanced switch rule
7264 * @hw: pointer to the hardware structure
7265 * @lkups: information on the words that needs to be looked up. All words
7266 * together makes one recipe
7267 * @lkups_cnt: num of entries in the lkups array
7268 * @rinfo: Its the pointer to the rule information for the rule
7270 * This function can be used to remove 1 rule at a time. The lkups is
7271 * used to describe all the words that forms the "lookup" portion of the
7272 * rule. These words can span multiple protocols. Callers to this function
7273 * need to pass in a list of protocol headers with lookup information along
7274 * and mask that determines which words are valid from the given protocol
7275 * header. rinfo describes other information related to this rule such as
7276 * forwarding IDs, priority of this rule, etc.
7279 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7280 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
7282 struct ice_adv_fltr_mgmt_list_entry *list_elem;
7283 struct ice_prot_lkup_ext lkup_exts;
7284 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
7285 enum ice_status status = ICE_SUCCESS;
7286 bool remove_rule = false;
7287 u16 i, rid, vsi_handle;
/* Re-derive the recipe from the lookups so the rule can be located */
7289 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
7290 for (i = 0; i < lkups_cnt; i++) {
7293 if (lkups[i].type >= ICE_PROTOCOL_LAST)
7296 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
7301 /* Create any special protocol/offset pairs, such as looking at tunnel
7302 * bits by extracting metadata
7304 status = ice_add_special_words(rinfo, &lkup_exts);
7308 rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
7309 /* If did not find a recipe that match the existing criteria */
7310 if (rid == ICE_MAX_NUM_RECIPES)
7311 return ICE_ERR_PARAM;
7313 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
7314 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
7315 /* the rule is already removed */
/* NOTE(review): ice_find_adv_rule_entry() is called before the lock is
 * taken on the next line — confirm a higher-level lock serializes
 * add/remove, otherwise this lookup can race with rule insertion.
 */
7318 ice_acquire_lock(rule_lock);
/* Not a VSI-list rule: the single HW rule itself must be deleted */
7319 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
7321 } else if (list_elem->vsi_count > 1) {
/* Multiple subscribers: just unsubscribe this VSI from the list */
7322 list_elem->vsi_list_info->ref_cnt--;
7323 remove_rule = false;
7324 vsi_handle = rinfo->sw_act.vsi_handle;
7325 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
7327 vsi_handle = rinfo->sw_act.vsi_handle;
7328 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
7330 ice_release_lock(rule_lock);
7333 if (list_elem->vsi_count == 0)
7336 ice_release_lock(rule_lock);
/* Delete the rule from HW by index, then drop the bookkeeping entry */
7338 struct ice_aqc_sw_rules_elem *s_rule;
7341 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
7343 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
7346 return ICE_ERR_NO_MEMORY;
7347 s_rule->pdata.lkup_tx_rx.act = 0;
7348 s_rule->pdata.lkup_tx_rx.index =
7349 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
7350 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
7351 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
7353 ice_aqc_opc_remove_sw_rules, NULL);
7354 if (status == ICE_SUCCESS) {
7355 ice_acquire_lock(rule_lock);
7356 LIST_DEL(&list_elem->list_entry);
7357 ice_free(hw, list_elem->lkups);
7358 ice_free(hw, list_elem);
7359 ice_release_lock(rule_lock);
7361 ice_free(hw, s_rule);
7367 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
7368 * @hw: pointer to the hardware structure
7369 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
7371 * This function is used to remove 1 rule at a time. The removal is based on
7372 * the remove_entry parameter. This function will remove rule for a given
7373 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
7376 ice_rem_adv_rule_by_id(struct ice_hw *hw,
7377 struct ice_rule_query_data *remove_entry)
7379 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7380 struct LIST_HEAD_TYPE *list_head;
7381 struct ice_adv_rule_info rinfo;
7382 struct ice_switch_info *sw;
7384 sw = hw->switch_info;
/* Unknown/uncreated recipe IDs cannot hold the requested rule */
7385 if (!sw->recp_list[remove_entry->rid].recp_created)
7386 return ICE_ERR_PARAM;
7387 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
/* Linear scan of the recipe's rule list for the matching rule ID;
 * delegate the actual removal to ice_rem_adv_rule() using the stored
 * lookups, with the VSI handle overridden by the caller's.
 */
7388 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
7390 if (list_itr->rule_info.fltr_rule_id ==
7391 remove_entry->rule_id) {
7392 rinfo = list_itr->rule_info;
7393 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
7394 return ice_rem_adv_rule(hw, list_itr->lkups,
7395 list_itr->lkups_cnt, &rinfo);
/* Rule ID not found in the recipe's list */
7398 return ICE_ERR_PARAM;
7402 * ice_rem_adv_for_vsi - removes existing advanced switch rules for a
7404 * @hw: pointer to the hardware structure
7405 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
7407 * This function is used to remove all the rules for a given VSI and as soon
7408 * as removing a rule fails, it will return immediately with the error code,
7409 * else it will return ICE_SUCCESS
7412 ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
7414 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7415 struct ice_vsi_list_map_info *map_info;
7416 struct LIST_HEAD_TYPE *list_head;
7417 struct ice_adv_rule_info rinfo;
7418 struct ice_switch_info *sw;
7419 enum ice_status status;
7420 u16 vsi_list_id = 0;
7423 sw = hw->switch_info;
/* Scan every recipe that holds advanced rules */
7424 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
7425 if (!sw->recp_list[rid].recp_created)
7427 if (!sw->recp_list[rid].adv_rule)
7429 list_head = &sw->recp_list[rid].filt_rules;
/* NOTE(review): ice_rem_adv_rule() may delete list_itr — confirm the
 * LIST_FOR_EACH_ENTRY variant used here is safe against removal of the
 * current entry, or that a _SAFE iterator is used in the elided code.
 */
7431 LIST_FOR_EACH_ENTRY(list_itr, list_head,
7432 ice_adv_fltr_mgmt_list_entry, list_entry) {
7433 map_info = ice_find_vsi_list_entry(&sw->recp_list[rid],
7438 rinfo = list_itr->rule_info;
7439 rinfo.sw_act.vsi_handle = vsi_handle;
7440 status = ice_rem_adv_rule(hw, list_itr->lkups,
7441 list_itr->lkups_cnt, &rinfo);
7451 * ice_replay_fltr - Replay all the filters stored by a specific list head
7452 * @hw: pointer to the hardware structure
7453 * @list_head: list for which filters needs to be replayed
7454 * @recp_id: Recipe ID for which rules need to be replayed
7456 static enum ice_status
7457 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
7459 struct ice_fltr_mgmt_list_entry *itr;
7460 enum ice_status status = ICE_SUCCESS;
7461 struct ice_sw_recipe *recp_list;
7462 u8 lport = hw->port_info->lport;
7463 struct LIST_HEAD_TYPE l_head;
7465 if (LIST_EMPTY(list_head))
7468 recp_list = &hw->switch_info->recp_list[recp_id];
7469 /* Move entries from the given list_head to a temporary l_head so that
7470 * they can be replayed. Otherwise when trying to re-add the same
7471 * filter, the function will return already exists
7473 LIST_REPLACE_INIT(list_head, &l_head);
7475 /* Mark the given list_head empty by reinitializing it so filters
7476 * could be added again by *handler
7478 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
7480 struct ice_fltr_list_entry f_entry;
7482 f_entry.fltr_info = itr->fltr_info;
/* Single-VSI, non-VLAN filters replay directly as one rule */
7483 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
7484 status = ice_add_rule_internal(hw, recp_list, lport,
7486 if (status != ICE_SUCCESS)
7491 /* Add a filter per VSI separately */
/* Pop VSI handles from the old VSI-list bitmap one at a time and
 * re-add an individual FWD_TO_VSI rule for each; the list logic in
 * ice_add_rule_internal rebuilds VSI lists as needed.
 */
7496 ice_find_first_bit(itr->vsi_list_info->vsi_map,
7498 if (!ice_is_vsi_valid(hw, vsi_handle))
7501 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
7502 f_entry.fltr_info.vsi_handle = vsi_handle;
7503 f_entry.fltr_info.fwd_id.hw_vsi_id =
7504 ice_get_hw_vsi_num(hw, vsi_handle);
7505 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
7506 if (recp_id == ICE_SW_LKUP_VLAN)
7507 status = ice_add_vlan_internal(hw, recp_list,
7510 status = ice_add_rule_internal(hw, recp_list,
7513 if (status != ICE_SUCCESS)
7518 /* Clear the filter management list */
7519 ice_rem_sw_rule_info(hw, &l_head);
7524 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
7525 * @hw: pointer to the hardware structure
7527 * NOTE: This function does not clean up partially added filters on error.
7528 * It is up to caller of the function to issue a reset or fail early.
7530 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
7532 struct ice_switch_info *sw = hw->switch_info;
7533 enum ice_status status = ICE_SUCCESS;
/* Replay each recipe's filter list in recipe-ID order; stop at the
 * first failure (see NOTE above about partial replay).
 */
7536 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7537 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
7539 status = ice_replay_fltr(hw, i, head);
7540 if (status != ICE_SUCCESS)
7547 * ice_replay_vsi_fltr - Replay filters for requested VSI
7548 * @hw: pointer to the hardware structure
7549 * @pi: pointer to port information structure
7550 * @sw: pointer to switch info struct for which function replays filters
7551 * @vsi_handle: driver VSI handle
7552 * @recp_id: Recipe ID for which rules need to be replayed
7553 * @list_head: list for which filters need to be replayed
7555 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
7556 * It is required to pass valid VSI handle.
7558 static enum ice_status
7559 ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
7560 struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
7561 struct LIST_HEAD_TYPE *list_head)
7563 struct ice_fltr_mgmt_list_entry *itr;
7564 enum ice_status status = ICE_SUCCESS;
7565 struct ice_sw_recipe *recp_list;
7568 if (LIST_EMPTY(list_head))
7570 recp_list = &sw->recp_list[recp_id];
7571 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
7573 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
7575 struct ice_fltr_list_entry f_entry;
7577 f_entry.fltr_info = itr->fltr_info;
/* Case 1: filter owned directly by this VSI (no VSI list involved) */
7578 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
7579 itr->fltr_info.vsi_handle == vsi_handle) {
7580 /* update the src in case it is VSI num */
7581 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
7582 f_entry.fltr_info.src = hw_vsi_id;
7583 status = ice_add_rule_internal(hw, recp_list,
7586 if (status != ICE_SUCCESS)
/* Case 2: filter uses a VSI list — only replay if this VSI is in it */
7590 if (!itr->vsi_list_info ||
7591 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
7593 /* Clearing it so that the logic can add it back */
7594 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
7595 f_entry.fltr_info.vsi_handle = vsi_handle;
7596 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
7597 /* update the src in case it is VSI num */
7598 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
7599 f_entry.fltr_info.src = hw_vsi_id;
/* VLAN rules have dedicated add logic (VLAN ID bookkeeping) */
7600 if (recp_id == ICE_SW_LKUP_VLAN)
7601 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
7603 status = ice_add_rule_internal(hw, recp_list,
7606 if (status != ICE_SUCCESS)
7614 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
7615 * @hw: pointer to the hardware structure
7616 * @vsi_handle: driver VSI handle
7617 * @list_head: list for which filters need to be replayed
7619 * Replay the advanced rule for the given VSI.
7621 static enum ice_status
7622 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
7623 struct LIST_HEAD_TYPE *list_head)
7625 struct ice_rule_query_data added_entry = { 0 };
7626 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
7627 enum ice_status status = ICE_SUCCESS;
7629 if (LIST_EMPTY(list_head))
/* Re-program each stored advanced rule that targets this VSI using
 * its saved lookups and rule info; added_entry (new IDs) is discarded.
 */
7631 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
7633 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
7634 u16 lk_cnt = adv_fltr->lkups_cnt;
7636 if (vsi_handle != rinfo->sw_act.vsi_handle)
7638 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
7647 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
7648 * @hw: pointer to the hardware structure
7649 * @pi: pointer to port information structure
7650 * @vsi_handle: driver VSI handle
7652 * Replays filters for requested VSI via vsi_handle.
7655 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
7658 struct ice_switch_info *sw = hw->switch_info;
7659 enum ice_status status;
7662 /* Update the recipes that were created */
7663 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7664 struct LIST_HEAD_TYPE *head;
7666 head = &sw->recp_list[i].filt_replay_rules;
/* Basic recipes use the legacy replay path; recipes flagged as
 * holding advanced rules go through the adv-rule replay instead.
 */
7667 if (!sw->recp_list[i].adv_rule)
7668 status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
7671 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
7672 if (status != ICE_SUCCESS)
7680 * ice_rm_all_sw_replay_rule - helper function to delete filter replay rules
7681 * @hw: pointer to the HW struct
7682 * @sw: pointer to switch info struct for which function removes filters
7684 * Deletes the filter replay rules for given switch
7686 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
/* Free every recipe's replay bookkeeping list, using the basic or
 * advanced teardown helper depending on the rule kind stored there.
 */
7693 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7694 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
7695 struct LIST_HEAD_TYPE *l_head;
7697 l_head = &sw->recp_list[i].filt_replay_rules;
7698 if (!sw->recp_list[i].adv_rule)
7699 ice_rem_sw_rule_info(hw, l_head);
7701 ice_rem_adv_rule_info(hw, l_head);
7707 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
7708 * @hw: pointer to the HW struct
7710 * Deletes the filter replay rules.
7712 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
7714 ice_rm_sw_replay_rule_info(hw, hw->switch_info);