1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020 Intel Corporation
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
12 #define ICE_MAX_VLAN_ID 0xFFF
13 #define ICE_IPV4_NVGRE_PROTO_ID 0x002F
14 #define ICE_PPP_IPV6_PROTO_ID 0x0057
15 #define ICE_IPV6_ETHER_ID 0x86DD
17 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
18 * struct to configure any switch filter rules.
19 * {DA (6 bytes), SA(6 bytes),
20 * Ether type (2 bytes for header without VLAN tag) OR
21 * VLAN tag (4 bytes for header with VLAN tag) }
23 * Word on Hardcoded values
24 * byte 0 = 0x2: to identify it as locally administered DA MAC
25 * byte 6 = 0x2: to identify it as locally administered SA MAC
26 * byte 12 = 0x81 & byte 13 = 0x00:
27 * In case of VLAN filter first two bytes defines ether type (0x8100)
28 * and remaining two bytes are placeholder for programming a given VLAN ID
29 * In case of Ether type filter it is treated as header without VLAN tag
30 * and byte 12 and 13 is used to program a given Ether type instead
32 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* Describes where one protocol header begins inside a dummy packet below.
 * Tables of these are terminated by an ICE_PROTOCOL_LAST entry.
 */
struct ice_dummy_pkt_offsets {
	enum ice_protocol_type type;
	u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
/* offset info for MAC + IPv4 + NVGRE + inner MAC + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv4 + NVGRE + inner MAC + IPv4 + TCP */
static const u8 dummy_gre_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,	/* protocol 0x2F = GRE */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,	/* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 76 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,	/* data offset 5, SYN */
	0x00, 0x00, 0x00, 0x00
/* offset info for MAC + IPv4 + NVGRE + inner MAC + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv4 + NVGRE + inner MAC + IPv4 + UDP */
static const u8 dummy_gre_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,	/* protocol 0x2F = GRE */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 76 */
	0x00, 0x08, 0x00, 0x00,	/* UDP length 8 (header only) */
/* offset info for MAC + IPv4 + UDP + VXLAN(-GPE) + inner MAC + IPv4 + TCP */
static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_VXLAN_GPE, 42 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv4 + UDP + VXLAN + inner MAC + IPv4 + TCP */
static const u8 dummy_udp_tun_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
	0x45, 0x00, 0x00, 0x5a,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x11, 0x00, 0x00,	/* TTL 64, protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x12, 0xb5,	/* ICE_UDP_OF 34, dst port 4789 (VXLAN) */
	0x00, 0x46, 0x00, 0x00,
	0x00, 0x00, 0x65, 0x58,	/* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x45, 0x00, 0x00, 0x28,	/* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x06, 0x00, 0x00,	/* TTL 64, protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 84 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,	/* data offset 5, SYN */
	0x00, 0x00, 0x00, 0x00
/* offset info for MAC + IPv4 + UDP + VXLAN(-GPE) + inner MAC + IPv4 + UDP */
static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_VXLAN_GPE, 42 },
	{ ICE_UDP_ILOS, 84 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv4 + UDP + VXLAN + inner MAC + IPv4 + UDP */
static const u8 dummy_udp_tun_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
	0x45, 0x00, 0x00, 0x4e,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x12, 0xb5,	/* ICE_UDP_OF 34, dst port 4789 (VXLAN) */
	0x00, 0x3a, 0x00, 0x00,
	0x00, 0x00, 0x65, 0x58,	/* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x45, 0x00, 0x00, 0x1c,	/* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 84 */
	0x00, 0x08, 0x00, 0x00,	/* UDP length 8 (header only) */
/* offset info for MAC + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_UDP_ILOS, 34 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv4 + UDP */
static const u8 dummy_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
	0x45, 0x00, 0x00, 0x1c,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 34 */
	0x00, 0x08, 0x00, 0x00,	/* UDP length 8 (header only) */
	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14 },
	{ ICE_IPV4_OFOS, 18 },
	{ ICE_UDP_ILOS, 38 },
	{ ICE_PROTOCOL_LAST, 0 },
/* C-tag (802.1Q), IPv4:UDP dummy packet */
static const u8 dummy_vlan_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x81, 0x00,		/* ICE_ETYPE_OL 12, 802.1Q TPID */
	0x00, 0x00, 0x08, 0x00,	/* ICE_VLAN_OFOS 14 */
	0x45, 0x00, 0x00, 0x1c,	/* ICE_IPV4_OFOS 18 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 38 */
	0x00, 0x08, 0x00, 0x00,	/* UDP length 8 (header only) */
	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv4 + TCP */
static const u8 dummy_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
	0x45, 0x00, 0x00, 0x28,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,	/* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,	/* data offset 5, no flags */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14 },
	{ ICE_IPV4_OFOS, 18 },
	{ ICE_PROTOCOL_LAST, 0 },
/* C-tag (802.1Q), IPv4:TCP dummy packet */
static const u8 dummy_vlan_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x81, 0x00,		/* ICE_ETYPE_OL 12, 802.1Q TPID */
	0x00, 0x00, 0x08, 0x00,	/* ICE_VLAN_OFOS 14 */
	0x45, 0x00, 0x00, 0x28,	/* ICE_IPV4_OFOS 18 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,	/* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 38 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,	/* data offset 5, no flags */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv6 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV6_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv6 + TCP */
static const u8 dummy_tcp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */
	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 14 */
	0x00, 0x14, 0x06, 0x00,	/* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,	/* data offset 5, no flags */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* C-tag (802.1Q): IPv6 + TCP */
static const struct ice_dummy_pkt_offsets
dummy_vlan_tcp_ipv6_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14 },
	{ ICE_IPV6_OFOS, 18 },
	{ ICE_PROTOCOL_LAST, 0 },
/* C-tag (802.1Q), IPv6 + TCP dummy packet */
static const u8 dummy_vlan_tcp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x81, 0x00,		/* ICE_ETYPE_OL 12, 802.1Q TPID */
	0x00, 0x00, 0x86, 0xDD,	/* ICE_VLAN_OFOS 14 */
	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 18 */
	0x00, 0x14, 0x06, 0x00,	/* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 58 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,	/* data offset 5, no flags */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv6 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV6_OFOS, 14 },
	{ ICE_UDP_ILOS, 54 },
	{ ICE_PROTOCOL_LAST, 0 },
/* IPv6 + UDP dummy packet */
static const u8 dummy_udp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */
	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 14 */
	0x00, 0x10, 0x11, 0x00,	/* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 54 */
	0x00, 0x10, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,	/* needed for ESP packets */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* C-tag (802.1Q): IPv6 + UDP */
static const struct ice_dummy_pkt_offsets
dummy_vlan_udp_ipv6_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14 },
	{ ICE_IPV6_OFOS, 18 },
	{ ICE_UDP_ILOS, 58 },
	{ ICE_PROTOCOL_LAST, 0 },
/* C-tag (802.1Q), IPv6 + UDP dummy packet */
static const u8 dummy_vlan_udp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x81, 0x00,		/* ICE_ETYPE_OL 12, 802.1Q TPID */
	0x00, 0x00, 0x86, 0xDD,	/* ICE_VLAN_OFOS 14 */
	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 18 */
	0x00, 0x08, 0x11, 0x00,	/* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 58 */
	0x00, 0x08, 0x00, 0x00,	/* UDP length 8 (header only) */
	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv4 + UDP + GTP-U dummy packet */
static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv4 + UDP + GTP-U + PDU session extension header */
static const u8 dummy_udp_gtp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x45, 0x00, 0x00, 0x30,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x08, 0x68,	/* ICE_UDP_OF 34, dst port 2152 (GTP-U) */
	0x00, 0x1c, 0x00, 0x00,
	0x34, 0xff, 0x00, 0x0c,	/* ICE_GTP 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,	/* 0x85: next ext hdr, presumably PDU session container - TODO confirm */
	0x02, 0x00, 0x00, 0x00,	/* PDU Session extension header */
	0x00, 0x00, 0x00, 0x00,
/* offset info for MAC + VLAN + PPPoE dummy packets */
static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14},
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + VLAN + PPPoE session + PPP (IPv4) + IPv4 */
static const u8 dummy_pppoe_ipv4_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x81, 0x00,		/* ICE_ETYPE_OL 12, 802.1Q TPID */
	0x00, 0x00, 0x88, 0x64,	/* ICE_VLAN_OFOS 14, PPPoE session ethertype */
	0x11, 0x00, 0x00, 0x00,	/* ICE_PPPOE 18 */
	0x00, 0x21,		/* PPP Link Layer 24, 0x0021 = IPv4 */
	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 26 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* Dummy packet for MAC + VLAN + PPPoE session + PPP (IPv6) + IPv6 */
static const u8 dummy_pppoe_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x81, 0x00,		/* ICE_ETYPE_OL 12, 802.1Q TPID */
	0x00, 0x00, 0x88, 0x64,	/* ICE_VLAN_OFOS 14, PPPoE session ethertype */
	0x11, 0x00, 0x00, 0x00,	/* ICE_PPPOE 18 */
	0x00, 0x57,		/* PPP Link Layer 24, 0x0057 = IPv6 (ICE_PPP_IPV6_PROTO_ID) */
	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 26 */
	0x00, 0x00, 0x3b, 0x00,	/* next header 0x3b = no next header */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* offset info for MAC + IPv4 + ESP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv4 + ESP */
static const u8 dummy_ipv4_esp_pkt[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x45, 0x00, 0x00, 0x1c,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x32, 0x00, 0x00,	/* TTL 64, protocol 0x32 = ESP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,	/* ICE_ESP 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* offset info for MAC + IPv6 + ESP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
	{ ICE_IPV6_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv6 + ESP */
static const u8 dummy_ipv6_esp_pkt[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 14 */
	0x00, 0x08, 0x32, 0x00,	/* Next header ESP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,	/* ICE_ESP 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* offset info for MAC + IPv4 + AH dummy packet */
static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv4 + AH */
static const u8 dummy_ipv4_ah_pkt[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x45, 0x00, 0x00, 0x20,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x33, 0x00, 0x00,	/* TTL 64, protocol 0x33 = AH */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,	/* ICE_AH 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* offset info for MAC + IPv6 + AH dummy packet */
static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
	{ ICE_IPV6_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv6 + AH */
static const u8 dummy_ipv6_ah_pkt[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 14 */
	0x00, 0x0c, 0x33, 0x00,	/* Next header AH */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,	/* ICE_AH 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* offset info for MAC + IPv4 + UDP (NAT-T) dummy packet */
static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_UDP_ILOS, 34 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv4 + UDP-encapsulated ESP (NAT traversal) */
static const u8 dummy_ipv4_nat_pkt[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x45, 0x00, 0x00, 0x24,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x11, 0x00, 0x00,	/* TTL 64, protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x11, 0x94,	/* ICE_NAT_T 34, dst port 4500 (IPsec NAT-T) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* offset info for MAC + IPv6 + UDP (NAT-T) dummy packet */
static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
	{ ICE_IPV6_OFOS, 14 },
	{ ICE_UDP_ILOS, 54 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv6 + UDP-encapsulated ESP (NAT traversal) */
static const u8 dummy_ipv6_nat_pkt[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 14 */
	0x00, 0x10, 0x11, 0x00,	/* Next header NAT_T */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x11, 0x94,	/* ICE_NAT_T 54, dst port 4500 (IPsec NAT-T) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* offset info for MAC + IPv4 + L2TPv3 dummy packet */
static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv4 + L2TPv3 */
static const u8 dummy_ipv4_l2tpv3_pkt[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x45, 0x00, 0x00, 0x20,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x73, 0x00, 0x00,	/* TTL 64, protocol 0x73 = L2TPv3 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,	/* ICE_L2TPV3 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* offset info for MAC + IPv6 + L2TPv3 dummy packet */
static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
	{ ICE_IPV6_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv6 + L2TPv3 */
static const u8 dummy_ipv6_l2tpv3_pkt[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 14 */
	0x00, 0x0c, 0x73, 0x40,	/* next header 0x73 = L2TPv3 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,	/* ICE_L2TPV3 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* this is a recipe to profile association bitmap */
static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
			  ICE_MAX_NUM_PROFILES);
/* this is a profile to recipe association bitmap */
static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
			  ICE_MAX_NUM_RECIPES);
/* forward declaration: refreshes both cached bitmaps above from FW */
static void ice_get_recp_to_prof_map(struct ice_hw *hw);
 * ice_collect_result_idx - copy result index values
 * @buf: buffer that contains the result index
 * @recp: the recipe struct to copy data into
static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
				   struct ice_sw_recipe *recp)
	/* record the result index (with the enable flag stripped) in the
	 * recipe's result-index bitmap; skipped when the index is not valid
	 */
	if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
		ice_set_bit(buf->content.result_indx &
			    ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
 * @hw: pointer to hardware structure
 * @recps: struct that we need to populate
 * @rid: recipe ID that we are populating
 * @refresh_required: true if we should get recipe to profile mapping from FW
 * This function is used to populate all the necessary entries into our
 * bookkeeping so that we have a current list of all the recipes that are
 * programmed in the firmware.
static enum ice_status
ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
		    bool *refresh_required)
	ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	u16 num_recps = ICE_MAX_NUM_RECIPES;
	struct ice_prot_lkup_ext *lkup_exts;
	enum ice_status status;
	ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
	/* we need a buffer big enough to accommodate all the recipes */
	tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
		ICE_MAX_NUM_RECIPES, sizeof(*tmp));
		return ICE_ERR_NO_MEMORY;
	/* seed the query with the recipe ID we want FW to report on */
	tmp[0].recipe_indx = rid;
	status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
	/* non-zero status meaning recipe doesn't exist */
	/* Get recipe to profile map so that we can get the fv from lkups that
	 * we read for a recipe from FW. Since we want to minimize the number of
	 * times we make this FW call, just make one call and cache the copy
	 * until a new recipe is added. This operation is only required the
	 * first time to get the changes from FW. Then to search existing
	 * entries we don't need to update the cache again until another recipe
	if (*refresh_required) {
		ice_get_recp_to_prof_map(hw);
		*refresh_required = false;
	/* Start populating all the entries for recps[rid] based on lkups from
	 * firmware. Note that we are only creating the root recipe in our
	lkup_exts = &recps[rid].lkup_exts;
	for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
		/* each returned element describes one recipe of the chain */
		struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
		struct ice_recp_grp_entry *rg_entry;
		u8 i, prof, idx, prot = 0;
		rg_entry = (struct ice_recp_grp_entry *)
			ice_malloc(hw, sizeof(*rg_entry));
			status = ICE_ERR_NO_MEMORY;
		idx = root_bufs.recipe_indx;
		is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
		/* Mark all result indices in this chain */
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
			ice_set_bit(root_bufs.content.result_indx &
				    ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
		/* get the first profile that is associated with rid */
		prof = ice_find_first_bit(recipe_to_profile[idx],
					  ICE_MAX_NUM_PROFILES);
		for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
			/* word 0 is skipped; lookup words start at index 1 */
			u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
			rg_entry->fv_idx[i] = lkup_indx;
			rg_entry->fv_mask[i] =
				LE16_TO_CPU(root_bufs.content.mask[i + 1]);
			/* If the recipe is a chained recipe then all its
			 * child recipe's result will have a result index.
			 * To fill fv_words we should not use those result
			 * index, we only need the protocol ids and offsets.
			 * We will skip all the fv_idx which stores result
			 * index in them. We also need to skip any fv_idx which
			 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
			 * valid offset value.
			if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
					   rg_entry->fv_idx[i]) ||
			    rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
			    rg_entry->fv_idx[i] == 0)
			/* translate the FV index into a protocol ID/offset pair */
			ice_find_prot_off(hw, ICE_BLK_SW, prof,
					  rg_entry->fv_idx[i], &prot, &off);
			lkup_exts->fv_words[fv_word_idx].prot_id = prot;
			lkup_exts->fv_words[fv_word_idx].off = off;
			lkup_exts->field_mask[fv_word_idx] =
				rg_entry->fv_mask[i];
		/* populate rg_list with the data from the child entry of this
		LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
		/* Propagate some data to the recipe database */
		recps[idx].is_root = !!is_root;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
		ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
			recps[idx].chain_idx = root_bufs.content.result_indx &
				~ICE_AQ_RECIPE_RESULT_EN;
			ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
			recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
		/* Only do the following for root recipes entries */
		ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
			   sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
		recps[idx].root_rid = root_bufs.content.rid &
			~ICE_AQ_RECIPE_ID_IS_ROOT;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
	/* Complete initialization of the root recipe entry */
	lkup_exts->n_val_words = fv_word_idx;
	recps[rid].big_recp = (num_recps > 1);
	recps[rid].n_grp_count = (u8)num_recps;
	/* keep a private copy of the raw FW buffer for later replay/compare */
	recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
		ice_memdup(hw, tmp, recps[rid].n_grp_count *
			   sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
	if (!recps[rid].root_buf)
	/* Copy result indexes */
	ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
	recps[rid].recp_created = true;
 * ice_get_recp_to_prof_map - updates recipe to profile mapping
 * @hw: pointer to hardware structure
 * This function is used to populate recipe_to_profile matrix where index to
 * this array is the recipe ID and the element is the mapping of which profiles
 * is this recipe mapped to.
static void ice_get_recp_to_prof_map(struct ice_hw *hw)
	ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
	/* query FW once per profile and mirror the association both ways */
	for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
		ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
		ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
		if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
		ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
			      ICE_MAX_NUM_RECIPES);
		/* set the inverse mapping: each recipe bit -> this profile */
		for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
			if (ice_is_bit_set(r_bitmap, j))
				ice_set_bit(i, recipe_to_profile[j]);
 * ice_init_def_sw_recp - initialize the recipe book keeping tables
 * @hw: pointer to the HW struct
 * @recp_list: pointer to sw recipe list
 * Allocate memory for the entire recipe table and initialize the structures/
 * entries corresponding to basic recipes.
ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
	struct ice_sw_recipe *recps;
	recps = (struct ice_sw_recipe *)
		ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
		return ICE_ERR_NO_MEMORY;
	/* seed every slot with its own ID, empty rule lists, and a lock */
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		recps[i].root_rid = i;
		INIT_LIST_HEAD(&recps[i].filt_rules);
		INIT_LIST_HEAD(&recps[i].filt_replay_rules);
		INIT_LIST_HEAD(&recps[i].rg_list);
		ice_init_lock(&recps[i].filt_rule_lock);
 * ice_aq_get_sw_cfg - get switch configuration
 * @hw: pointer to the hardware structure
 * @buf: pointer to the result buffer
 * @buf_size: length of the buffer available for response
 * @req_desc: pointer to requested descriptor
 * @num_elems: pointer to number of elements
 * @cd: pointer to command details structure or NULL
 * Get switch configuration (0x0200) to be placed in 'buff'.
 * This admin command returns information such as initial VSI/port number
 * and switch ID it belongs to.
 * NOTE: *req_desc is both an input/output parameter.
 * The caller of this function first calls this function with *request_desc set
 * to 0. If the response from f/w has *req_desc set to 0, all the switch
 * configuration information has been returned; if non-zero (meaning not all
 * the information was returned), the caller should call this function again
 * with *req_desc set to the previous value returned by f/w to get the
 * next block of switch configuration information.
 * *num_elems is output only parameter. This reflects the number of elements
 * in response buffer. The caller of this function to use *num_elems while
 * parsing the response buffer.
static enum ice_status
ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
		  u16 buf_size, u16 *req_desc, u16 *num_elems,
		  struct ice_sq_cd *cd)
	struct ice_aqc_get_sw_cfg *cmd;
	enum ice_status status;
	struct ice_aq_desc desc;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
	cmd = &desc.params.get_sw_conf;
	cmd->element = CPU_TO_LE16(*req_desc);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	/* echo back FW's continuation cookie and the element count */
	*req_desc = LE16_TO_CPU(cmd->element);
	*num_elems = LE16_TO_CPU(cmd->num_elems);
 * ice_alloc_sw - allocate resources specific to switch
 * @hw: pointer to the HW struct
 * @ena_stats: true to turn on VEB stats
 * @shared_res: true for shared resource, false for dedicated resource
 * @sw_id: switch ID returned
 * @counter_id: VEB counter ID returned
 * allocates switch resources (SWID and VEB counter) (0x0208)
ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	struct ice_aqc_res_elem *sw_ele;
	enum ice_status status;
	buf_len = sizeof(*sw_buf);
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
		return ICE_ERR_NO_MEMORY;
	/* Prepare buffer for switch ID.
	 * The number of resource entries in buffer is passed as 1 since only a
	 * single switch/VEB instance is allocated, and hence a single sw_id
	sw_buf->num_elems = CPU_TO_LE16(1);
		CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
			    (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
			     ICE_AQC_RES_TYPE_FLAG_DEDICATED));
	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
		goto ice_alloc_sw_exit;
	/* hand the FW-assigned switch ID back to the caller */
	sw_ele = &sw_buf->elem[0];
	*sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
	/* Prepare buffer for VEB Counter */
	enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
	struct ice_aqc_alloc_free_res_elem *counter_buf;
	struct ice_aqc_res_elem *counter_ele;
	counter_buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
		status = ICE_ERR_NO_MEMORY;
		goto ice_alloc_sw_exit;
	/* The number of resource entries in buffer is passed as 1 since
	 * only a single switch/VEB instance is allocated, and hence a
	 * single VEB counter is requested.
	counter_buf->num_elems = CPU_TO_LE16(1);
	counter_buf->res_type =
		CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
			    ICE_AQC_RES_TYPE_FLAG_DEDICATED);
	status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
		ice_free(hw, counter_buf);
		goto ice_alloc_sw_exit;
	/* hand the FW-assigned VEB counter ID back to the caller */
	counter_ele = &counter_buf->elem[0];
	*counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
	ice_free(hw, counter_buf);
	ice_free(hw, sw_buf);
1193 * ice_free_sw - free resources specific to switch
1194 * @hw: pointer to the HW struct
1195 * @sw_id: switch ID returned
1196 * @counter_id: VEB counter ID returned
1198 * free switch resources (SWID and VEB counter) (0x0209)
1200 * NOTE: This function frees multiple resources. It continues
1201 * releasing other resources even after it encounters error.
1202 * The error code returned is the last error it encountered.
1204 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
1206 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
1207 enum ice_status status, ret_status;
1210 buf_len = sizeof(*sw_buf);
1211 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1212 ice_malloc(hw, buf_len);
1214 return ICE_ERR_NO_MEMORY;
1216 /* Prepare buffer to free for switch ID res.
1217 * The number of resource entries in buffer is passed as 1 since only a
1218 * single switch/VEB instance is freed, and hence a single sw_id
1221 sw_buf->num_elems = CPU_TO_LE16(1);
1222 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
1223 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
1225 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1226 ice_aqc_opc_free_res, NULL);
1229 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1231 /* Prepare buffer to free for VEB Counter resource */
1232 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1233 ice_malloc(hw, buf_len);
1235 ice_free(hw, sw_buf);
1236 return ICE_ERR_NO_MEMORY;
1239 /* The number of resource entries in buffer is passed as 1 since only a
1240 * single switch/VEB instance is freed, and hence a single VEB counter
1243 counter_buf->num_elems = CPU_TO_LE16(1);
1244 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
1245 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
1247 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1248 ice_aqc_opc_free_res, NULL);
1250 ice_debug(hw, ICE_DBG_SW,
1251 "VEB counter resource could not be freed\n");
1252 ret_status = status;
1255 ice_free(hw, counter_buf);
1256 ice_free(hw, sw_buf);
1262 * @hw: pointer to the HW struct
1263 * @vsi_ctx: pointer to a VSI context struct
1264 * @cd: pointer to command details structure or NULL
1266 * Add a VSI context to the hardware (0x0210)
1269 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1270 struct ice_sq_cd *cd)
1272 struct ice_aqc_add_update_free_vsi_resp *res;
1273 struct ice_aqc_add_get_update_free_vsi *cmd;
1274 struct ice_aq_desc desc;
1275 enum ice_status status;
1277 cmd = &desc.params.vsi_cmd;
1278 res = &desc.params.add_update_free_vsi_res;
1280 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
1282 if (!vsi_ctx->alloc_from_pool)
1283 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
1284 ICE_AQ_VSI_IS_VALID);
1286 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
1288 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1290 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1291 sizeof(vsi_ctx->info), cd);
1294 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1295 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
1296 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
1304 * @hw: pointer to the HW struct
1305 * @vsi_ctx: pointer to a VSI context struct
1306 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1307 * @cd: pointer to command details structure or NULL
1309 * Free VSI context info from hardware (0x0213)
1312 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1313 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1315 struct ice_aqc_add_update_free_vsi_resp *resp;
1316 struct ice_aqc_add_get_update_free_vsi *cmd;
1317 struct ice_aq_desc desc;
1318 enum ice_status status;
1320 cmd = &desc.params.vsi_cmd;
1321 resp = &desc.params.add_update_free_vsi_res;
1323 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1325 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1327 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
1329 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1331 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1332 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1340 * @hw: pointer to the HW struct
1341 * @vsi_ctx: pointer to a VSI context struct
1342 * @cd: pointer to command details structure or NULL
1344 * Update VSI context in the hardware (0x0211)
1347 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1348 struct ice_sq_cd *cd)
1350 struct ice_aqc_add_update_free_vsi_resp *resp;
1351 struct ice_aqc_add_get_update_free_vsi *cmd;
1352 struct ice_aq_desc desc;
1353 enum ice_status status;
1355 cmd = &desc.params.vsi_cmd;
1356 resp = &desc.params.add_update_free_vsi_res;
1358 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1360 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1362 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1364 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1365 sizeof(vsi_ctx->info), cd);
1368 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1369 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1376 * ice_is_vsi_valid - check whether the VSI is valid or not
1377 * @hw: pointer to the HW struct
1378 * @vsi_handle: VSI handle
1380 * check whether the VSI is valid or not
1382 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1384 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1388 * ice_get_hw_vsi_num - return the HW VSI number
1389 * @hw: pointer to the HW struct
1390 * @vsi_handle: VSI handle
1392 * return the HW VSI number
1393 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1395 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1397 return hw->vsi_ctx[vsi_handle]->vsi_num;
1401 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1402 * @hw: pointer to the HW struct
1403 * @vsi_handle: VSI handle
1405 * return the VSI context entry for a given VSI handle
1407 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1409 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1413 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1414 * @hw: pointer to the HW struct
1415 * @vsi_handle: VSI handle
1416 * @vsi: VSI context pointer
1418 * save the VSI context entry for a given VSI handle
1421 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1423 hw->vsi_ctx[vsi_handle] = vsi;
1427 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1428 * @hw: pointer to the HW struct
1429 * @vsi_handle: VSI handle
1431 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1433 struct ice_vsi_ctx *vsi;
1436 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1439 ice_for_each_traffic_class(i) {
1440 if (vsi->lan_q_ctx[i]) {
1441 ice_free(hw, vsi->lan_q_ctx[i]);
1442 vsi->lan_q_ctx[i] = NULL;
1448 * ice_clear_vsi_ctx - clear the VSI context entry
1449 * @hw: pointer to the HW struct
1450 * @vsi_handle: VSI handle
1452 * clear the VSI context entry
1454 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1456 struct ice_vsi_ctx *vsi;
1458 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1460 ice_clear_vsi_q_ctx(hw, vsi_handle);
1462 hw->vsi_ctx[vsi_handle] = NULL;
1467 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1468 * @hw: pointer to the HW struct
1470 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1474 for (i = 0; i < ICE_MAX_VSI; i++)
1475 ice_clear_vsi_ctx(hw, i);
1479 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1480 * @hw: pointer to the HW struct
1481 * @vsi_handle: unique VSI handle provided by drivers
1482 * @vsi_ctx: pointer to a VSI context struct
1483 * @cd: pointer to command details structure or NULL
1485 * Add a VSI context to the hardware also add it into the VSI handle list.
1486 * If this function gets called after reset for existing VSIs then update
1487 * with the new HW VSI number in the corresponding VSI handle list entry.
1490 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1491 struct ice_sq_cd *cd)
1493 struct ice_vsi_ctx *tmp_vsi_ctx;
1494 enum ice_status status;
1496 if (vsi_handle >= ICE_MAX_VSI)
1497 return ICE_ERR_PARAM;
1498 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1501 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1503 /* Create a new VSI context */
1504 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1505 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
1507 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1508 return ICE_ERR_NO_MEMORY;
1510 *tmp_vsi_ctx = *vsi_ctx;
1512 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1514 /* update with new HW VSI num */
1515 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1522 * ice_free_vsi- free VSI context from hardware and VSI handle list
1523 * @hw: pointer to the HW struct
1524 * @vsi_handle: unique VSI handle
1525 * @vsi_ctx: pointer to a VSI context struct
1526 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1527 * @cd: pointer to command details structure or NULL
1529 * Free VSI context info from hardware as well as from VSI handle list
1532 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1533 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1535 enum ice_status status;
1537 if (!ice_is_vsi_valid(hw, vsi_handle))
1538 return ICE_ERR_PARAM;
1539 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1540 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1542 ice_clear_vsi_ctx(hw, vsi_handle);
1548 * @hw: pointer to the HW struct
1549 * @vsi_handle: unique VSI handle
1550 * @vsi_ctx: pointer to a VSI context struct
1551 * @cd: pointer to command details structure or NULL
1553 * Update VSI context in the hardware
1556 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1557 struct ice_sq_cd *cd)
1559 if (!ice_is_vsi_valid(hw, vsi_handle))
1560 return ICE_ERR_PARAM;
1561 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1562 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1566 * ice_aq_get_vsi_params
1567 * @hw: pointer to the HW struct
1568 * @vsi_ctx: pointer to a VSI context struct
1569 * @cd: pointer to command details structure or NULL
1571 * Get VSI context info from hardware (0x0212)
1574 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1575 struct ice_sq_cd *cd)
1577 struct ice_aqc_add_get_update_free_vsi *cmd;
1578 struct ice_aqc_get_vsi_resp *resp;
1579 struct ice_aq_desc desc;
1580 enum ice_status status;
1582 cmd = &desc.params.vsi_cmd;
1583 resp = &desc.params.get_vsi_resp;
1585 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1587 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1589 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1590 sizeof(vsi_ctx->info), cd);
1592 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1594 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1595 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1602 * ice_aq_add_update_mir_rule - add/update a mirror rule
1603 * @hw: pointer to the HW struct
1604 * @rule_type: Rule Type
1605 * @dest_vsi: VSI number to which packets will be mirrored
1606 * @count: length of the list
1607 * @mr_buf: buffer for list of mirrored VSI numbers
1608 * @cd: pointer to command details structure or NULL
1611 * Add/Update Mirror Rule (0x260).
1614 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1615 u16 count, struct ice_mir_rule_buf *mr_buf,
1616 struct ice_sq_cd *cd, u16 *rule_id)
1618 struct ice_aqc_add_update_mir_rule *cmd;
1619 struct ice_aq_desc desc;
1620 enum ice_status status;
1621 __le16 *mr_list = NULL;
1624 switch (rule_type) {
1625 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1626 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1627 /* Make sure count and mr_buf are set for these rule_types */
1628 if (!(count && mr_buf))
1629 return ICE_ERR_PARAM;
1631 buf_size = count * sizeof(__le16);
1632 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1634 return ICE_ERR_NO_MEMORY;
1636 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1637 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1638 /* Make sure count and mr_buf are not set for these
1641 if (count || mr_buf)
1642 return ICE_ERR_PARAM;
1645 ice_debug(hw, ICE_DBG_SW,
1646 "Error due to unsupported rule_type %u\n", rule_type);
1647 return ICE_ERR_OUT_OF_RANGE;
1650 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1652 /* Pre-process 'mr_buf' items for add/update of virtual port
1653 * ingress/egress mirroring (but not physical port ingress/egress
1659 for (i = 0; i < count; i++) {
1662 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1664 /* Validate specified VSI number, make sure it is less
1665 * than ICE_MAX_VSI, if not return with error.
1667 if (id >= ICE_MAX_VSI) {
1668 ice_debug(hw, ICE_DBG_SW,
1669 "Error VSI index (%u) out-of-range\n",
1671 ice_free(hw, mr_list);
1672 return ICE_ERR_OUT_OF_RANGE;
1675 /* add VSI to mirror rule */
1678 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1679 else /* remove VSI from mirror rule */
1680 mr_list[i] = CPU_TO_LE16(id);
1684 cmd = &desc.params.add_update_rule;
1685 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1686 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1687 ICE_AQC_RULE_ID_VALID_M);
1688 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1689 cmd->num_entries = CPU_TO_LE16(count);
1690 cmd->dest = CPU_TO_LE16(dest_vsi);
1692 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
1694 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1696 ice_free(hw, mr_list);
1702 * ice_aq_delete_mir_rule - delete a mirror rule
1703 * @hw: pointer to the HW struct
1704 * @rule_id: Mirror rule ID (to be deleted)
1705 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1706 * otherwise it is returned to the shared pool
1707 * @cd: pointer to command details structure or NULL
1709 * Delete Mirror Rule (0x261).
1712 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1713 struct ice_sq_cd *cd)
1715 struct ice_aqc_delete_mir_rule *cmd;
1716 struct ice_aq_desc desc;
1718 /* rule_id should be in the range 0...63 */
1719 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1720 return ICE_ERR_OUT_OF_RANGE;
1722 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1724 cmd = &desc.params.del_rule;
1725 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1726 cmd->rule_id = CPU_TO_LE16(rule_id);
1729 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1731 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1735 * ice_aq_alloc_free_vsi_list
1736 * @hw: pointer to the HW struct
1737 * @vsi_list_id: VSI list ID returned or used for lookup
1738 * @lkup_type: switch rule filter lookup type
1739 * @opc: switch rules population command type - pass in the command opcode
1741 * allocates or free a VSI list resource
1743 static enum ice_status
1744 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1745 enum ice_sw_lkup_type lkup_type,
1746 enum ice_adminq_opc opc)
1748 struct ice_aqc_alloc_free_res_elem *sw_buf;
1749 struct ice_aqc_res_elem *vsi_ele;
1750 enum ice_status status;
1753 buf_len = sizeof(*sw_buf);
1754 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1755 ice_malloc(hw, buf_len);
1757 return ICE_ERR_NO_MEMORY;
1758 sw_buf->num_elems = CPU_TO_LE16(1);
1760 if (lkup_type == ICE_SW_LKUP_MAC ||
1761 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1762 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1763 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1764 lkup_type == ICE_SW_LKUP_PROMISC ||
1765 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1766 lkup_type == ICE_SW_LKUP_LAST) {
1767 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1768 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1770 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1772 status = ICE_ERR_PARAM;
1773 goto ice_aq_alloc_free_vsi_list_exit;
1776 if (opc == ice_aqc_opc_free_res)
1777 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1779 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1781 goto ice_aq_alloc_free_vsi_list_exit;
1783 if (opc == ice_aqc_opc_alloc_res) {
1784 vsi_ele = &sw_buf->elem[0];
1785 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1788 ice_aq_alloc_free_vsi_list_exit:
1789 ice_free(hw, sw_buf);
1794 * ice_aq_set_storm_ctrl - Sets storm control configuration
1795 * @hw: pointer to the HW struct
1796 * @bcast_thresh: represents the upper threshold for broadcast storm control
1797 * @mcast_thresh: represents the upper threshold for multicast storm control
1798 * @ctl_bitmask: storm control control knobs
1800 * Sets the storm control configuration (0x0280)
1803 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1806 struct ice_aqc_storm_cfg *cmd;
1807 struct ice_aq_desc desc;
1809 cmd = &desc.params.storm_conf;
1811 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
1813 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1814 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1815 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1817 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1821 * ice_aq_get_storm_ctrl - gets storm control configuration
1822 * @hw: pointer to the HW struct
1823 * @bcast_thresh: represents the upper threshold for broadcast storm control
1824 * @mcast_thresh: represents the upper threshold for multicast storm control
1825 * @ctl_bitmask: storm control control knobs
1827 * Gets the storm control configuration (0x0281)
1830 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
1833 enum ice_status status;
1834 struct ice_aq_desc desc;
1836 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1838 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1840 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
1843 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1846 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1849 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1856 * ice_aq_sw_rules - add/update/remove switch rules
1857 * @hw: pointer to the HW struct
1858 * @rule_list: pointer to switch rule population list
1859 * @rule_list_sz: total size of the rule list in bytes
1860 * @num_rules: number of switch rules in the rule_list
1861 * @opc: switch rules population command type - pass in the command opcode
1862 * @cd: pointer to command details structure or NULL
1864 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1866 static enum ice_status
1867 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1868 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1870 struct ice_aq_desc desc;
1871 enum ice_status status;
1873 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1875 if (opc != ice_aqc_opc_add_sw_rules &&
1876 opc != ice_aqc_opc_update_sw_rules &&
1877 opc != ice_aqc_opc_remove_sw_rules)
1878 return ICE_ERR_PARAM;
1880 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1882 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1883 desc.params.sw_rules.num_rules_fltr_entry_index =
1884 CPU_TO_LE16(num_rules);
1885 status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1886 if (opc != ice_aqc_opc_add_sw_rules &&
1887 hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
1888 status = ICE_ERR_DOES_NOT_EXIST;
1894 * ice_aq_add_recipe - add switch recipe
1895 * @hw: pointer to the HW struct
1896 * @s_recipe_list: pointer to switch rule population list
1897 * @num_recipes: number of switch recipes in the list
1898 * @cd: pointer to command details structure or NULL
1903 ice_aq_add_recipe(struct ice_hw *hw,
1904 struct ice_aqc_recipe_data_elem *s_recipe_list,
1905 u16 num_recipes, struct ice_sq_cd *cd)
1907 struct ice_aqc_add_get_recipe *cmd;
1908 struct ice_aq_desc desc;
1911 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1912 cmd = &desc.params.add_get_recipe;
1913 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1915 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
1916 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1918 buf_size = num_recipes * sizeof(*s_recipe_list);
1920 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1924 * ice_aq_get_recipe - get switch recipe
1925 * @hw: pointer to the HW struct
1926 * @s_recipe_list: pointer to switch rule population list
1927 * @num_recipes: pointer to the number of recipes (input and output)
1928 * @recipe_root: root recipe number of recipe(s) to retrieve
1929 * @cd: pointer to command details structure or NULL
1933 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1934 * On output, *num_recipes will equal the number of entries returned in
1937 * The caller must supply enough space in s_recipe_list to hold all possible
1938 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1941 ice_aq_get_recipe(struct ice_hw *hw,
1942 struct ice_aqc_recipe_data_elem *s_recipe_list,
1943 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1945 struct ice_aqc_add_get_recipe *cmd;
1946 struct ice_aq_desc desc;
1947 enum ice_status status;
1950 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1951 return ICE_ERR_PARAM;
1953 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1954 cmd = &desc.params.add_get_recipe;
1955 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1957 cmd->return_index = CPU_TO_LE16(recipe_root);
1958 cmd->num_sub_recipes = 0;
1960 buf_size = *num_recipes * sizeof(*s_recipe_list);
1962 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1963 /* cppcheck-suppress constArgument */
1964 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1970 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1971 * @hw: pointer to the HW struct
1972 * @profile_id: package profile ID to associate the recipe with
1973 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1974 * @cd: pointer to command details structure or NULL
1975 * Recipe to profile association (0x0291)
1978 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1979 struct ice_sq_cd *cd)
1981 struct ice_aqc_recipe_to_profile *cmd;
1982 struct ice_aq_desc desc;
1984 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1985 cmd = &desc.params.recipe_to_profile;
1986 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1987 cmd->profile_id = CPU_TO_LE16(profile_id);
1988 /* Set the recipe ID bit in the bitmask to let the device know which
1989 * profile we are associating the recipe to
1991 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1992 ICE_NONDMA_TO_NONDMA);
1994 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1998 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1999 * @hw: pointer to the HW struct
2000 * @profile_id: package profile ID to associate the recipe with
2001 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2002 * @cd: pointer to command details structure or NULL
2003 * Associate profile ID with given recipe (0x0293)
2006 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2007 struct ice_sq_cd *cd)
2009 struct ice_aqc_recipe_to_profile *cmd;
2010 struct ice_aq_desc desc;
2011 enum ice_status status;
2013 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2014 cmd = &desc.params.recipe_to_profile;
2015 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
2016 cmd->profile_id = CPU_TO_LE16(profile_id);
2018 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2020 ice_memcpy(r_bitmap, cmd->recipe_assoc,
2021 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
2027 * ice_alloc_recipe - add recipe resource
2028 * @hw: pointer to the hardware structure
2029 * @rid: recipe ID returned as response to AQ call
2031 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
2033 struct ice_aqc_alloc_free_res_elem *sw_buf;
2034 enum ice_status status;
2037 buf_len = sizeof(*sw_buf);
2038 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2040 return ICE_ERR_NO_MEMORY;
2042 sw_buf->num_elems = CPU_TO_LE16(1);
2043 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
2044 ICE_AQC_RES_TYPE_S) |
2045 ICE_AQC_RES_TYPE_FLAG_SHARED);
2046 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2047 ice_aqc_opc_alloc_res, NULL);
2049 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2050 ice_free(hw, sw_buf);
2055 /* ice_init_port_info - Initialize port_info with switch configuration data
2056 * @pi: pointer to port_info
2057 * @vsi_port_num: VSI number or port number
2058 * @type: Type of switch element (port or VSI)
2059 * @swid: switch ID of the switch the element is attached to
2060 * @pf_vf_num: PF or VF number
2061 * @is_vf: true if the element is a VF, false otherwise
2064 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2065 u16 swid, u16 pf_vf_num, bool is_vf)
2068 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2069 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2071 pi->pf_vf_num = pf_vf_num;
2073 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
2074 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
2077 ice_debug(pi->hw, ICE_DBG_SW,
2078 "incorrect VSI/port type received\n");
2083 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
2084 * @hw: pointer to the hardware structure
2086 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
2088 struct ice_aqc_get_sw_cfg_resp *rbuf;
2089 enum ice_status status;
2096 num_total_ports = 1;
2098 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
2099 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
2102 return ICE_ERR_NO_MEMORY;
2104 /* Multiple calls to ice_aq_get_sw_cfg may be required
2105 * to get all the switch configuration information. The need
2106 * for additional calls is indicated by ice_aq_get_sw_cfg
2107 * writing a non-zero value in req_desc
2110 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
2111 &req_desc, &num_elems, NULL);
2116 for (i = 0; i < num_elems; i++) {
2117 struct ice_aqc_get_sw_cfg_resp_elem *ele;
2118 u16 pf_vf_num, swid, vsi_port_num;
2122 ele = rbuf[i].elements;
2123 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
2124 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
2126 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
2127 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
2129 swid = LE16_TO_CPU(ele->swid);
2131 if (LE16_TO_CPU(ele->pf_vf_num) &
2132 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
2135 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
2136 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
2139 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2140 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
2141 if (j == num_total_ports) {
2142 ice_debug(hw, ICE_DBG_SW,
2143 "more ports than expected\n");
2144 status = ICE_ERR_CFG;
2147 ice_init_port_info(hw->port_info,
2148 vsi_port_num, res_type, swid,
2156 } while (req_desc && !status);
2159 ice_free(hw, (void *)rbuf);
2164 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2165 * @hw: pointer to the hardware structure
2166 * @fi: filter info structure to fill/update
2168 * This helper function populates the lb_en and lan_en elements of the provided
2169 * ice_fltr_info struct using the switch's type and characteristics of the
2170 * switch rule being configured.
2172 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
2174 if ((fi->flag & ICE_FLTR_RX) &&
2175 (fi->fltr_act == ICE_FWD_TO_VSI ||
2176 fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
2177 fi->lkup_type == ICE_SW_LKUP_LAST)
2181 if ((fi->flag & ICE_FLTR_TX) &&
2182 (fi->fltr_act == ICE_FWD_TO_VSI ||
2183 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2184 fi->fltr_act == ICE_FWD_TO_Q ||
2185 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2186 /* Setting LB for prune actions will result in replicated
2187 * packets to the internal switch that will be dropped.
2189 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2192 /* Set lan_en to TRUE if
2193 * 1. The switch is a VEB AND
2195 * 2.1 The lookup is a directional lookup like ethertype,
2196 * promiscuous, ethertype-MAC, promiscuous-VLAN
2197 * and default-port OR
2198 * 2.2 The lookup is VLAN, OR
2199 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2200 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
2204 * The switch is a VEPA.
2206 * In all other cases, the LAN enable has to be set to false.
2209 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2210 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
2211 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2212 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2213 fi->lkup_type == ICE_SW_LKUP_DFLT ||
2214 fi->lkup_type == ICE_SW_LKUP_VLAN ||
2215 (fi->lkup_type == ICE_SW_LKUP_MAC &&
2216 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
2217 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
2218 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
2227 * ice_fill_sw_rule - Helper function to fill switch rule structure
2228 * @hw: pointer to the hardware structure
2229 * @f_info: entry containing packet forwarding information
2230 * @s_rule: switch rule structure to be filled in based on mac_entry
2231 * @opc: switch rules population command type - pass in the command opcode
2234 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
2235 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
2237 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
2245 if (opc == ice_aqc_opc_remove_sw_rules) {
2246 s_rule->pdata.lkup_tx_rx.act = 0;
2247 s_rule->pdata.lkup_tx_rx.index =
2248 CPU_TO_LE16(f_info->fltr_rule_id);
2249 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
2253 eth_hdr_sz = sizeof(dummy_eth_header);
2254 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
2256 /* initialize the ether header with a dummy header */
2257 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
2258 ice_fill_sw_info(hw, f_info);
2260 switch (f_info->fltr_act) {
2261 case ICE_FWD_TO_VSI:
2262 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
2263 ICE_SINGLE_ACT_VSI_ID_M;
2264 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2265 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2266 ICE_SINGLE_ACT_VALID_BIT;
2268 case ICE_FWD_TO_VSI_LIST:
2269 act |= ICE_SINGLE_ACT_VSI_LIST;
2270 act |= (f_info->fwd_id.vsi_list_id <<
2271 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
2272 ICE_SINGLE_ACT_VSI_LIST_ID_M;
2273 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2274 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2275 ICE_SINGLE_ACT_VALID_BIT;
2278 act |= ICE_SINGLE_ACT_TO_Q;
2279 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2280 ICE_SINGLE_ACT_Q_INDEX_M;
2282 case ICE_DROP_PACKET:
2283 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
2284 ICE_SINGLE_ACT_VALID_BIT;
2286 case ICE_FWD_TO_QGRP:
2287 q_rgn = f_info->qgrp_size > 0 ?
2288 (u8)ice_ilog2(f_info->qgrp_size) : 0;
2289 act |= ICE_SINGLE_ACT_TO_Q;
2290 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2291 ICE_SINGLE_ACT_Q_INDEX_M;
2292 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
2293 ICE_SINGLE_ACT_Q_REGION_M;
2300 act |= ICE_SINGLE_ACT_LB_ENABLE;
2302 act |= ICE_SINGLE_ACT_LAN_ENABLE;
2304 switch (f_info->lkup_type) {
2305 case ICE_SW_LKUP_MAC:
2306 daddr = f_info->l_data.mac.mac_addr;
2308 case ICE_SW_LKUP_VLAN:
2309 vlan_id = f_info->l_data.vlan.vlan_id;
2310 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
2311 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
2312 act |= ICE_SINGLE_ACT_PRUNE;
2313 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
2316 case ICE_SW_LKUP_ETHERTYPE_MAC:
2317 daddr = f_info->l_data.ethertype_mac.mac_addr;
2319 case ICE_SW_LKUP_ETHERTYPE:
2320 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2321 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
2323 case ICE_SW_LKUP_MAC_VLAN:
2324 daddr = f_info->l_data.mac_vlan.mac_addr;
2325 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2327 case ICE_SW_LKUP_PROMISC_VLAN:
2328 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2330 case ICE_SW_LKUP_PROMISC:
2331 daddr = f_info->l_data.mac_vlan.mac_addr;
2337 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
2338 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
2339 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
2341 /* Recipe set depending on lookup type */
2342 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
2343 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
2344 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2347 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
2348 ICE_NONDMA_TO_NONDMA);
2350 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
2351 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
2352 *off = CPU_TO_BE16(vlan_id);
2355 /* Create the switch rule with the final dummy Ethernet header */
2356 if (opc != ice_aqc_opc_update_sw_rules)
2357 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
2361 * ice_add_marker_act
2362 * @hw: pointer to the hardware structure
2363 * @m_ent: the management entry for which sw marker needs to be added
2364 * @sw_marker: sw marker to tag the Rx descriptor with
2365 * @l_id: large action resource ID
2367 * Create a large action to hold software marker and update the switch rule
2368 * entry pointed by m_ent with newly created large action
2370 static enum ice_status
2371 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2372 u16 sw_marker, u16 l_id)
2374 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
2375 /* For software marker we need 3 large actions
2376 * 1. FWD action: FWD TO VSI or VSI LIST
2377 * 2. GENERIC VALUE action to hold the profile ID
2378 * 3. GENERIC VALUE action to hold the software marker ID
2380 const u16 num_lg_acts = 3;
2381 enum ice_status status;
/* Software markers are only attached to MAC lookup rules */
2387 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2388 return ICE_ERR_PARAM;
2390 /* Create two back-to-back switch rules and submit them to the HW using
2391 * one memory buffer:
/* Buffer holds the large-action rule followed by the lookup (rx_tx) rule */
2395 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2396 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2397 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2399 return ICE_ERR_NO_MEMORY;
/* Second rule element starts right after the large-action element */
2401 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2403 /* Fill in the first switch rule i.e. large action */
2404 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2405 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2406 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2408 /* First action VSI forwarding or VSI list forwarding depending on how
/* With more than one subscribed VSI the fwd_id holds a VSI list ID,
 * otherwise it holds the single HW VSI number
 */
2411 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2412 m_ent->fltr_info.fwd_id.hw_vsi_id;
2414 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2415 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2416 ICE_LG_ACT_VSI_LIST_ID_M;
2417 if (m_ent->vsi_count > 1)
2418 act |= ICE_LG_ACT_VSI_LIST;
2419 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2421 /* Second action descriptor type */
2422 act = ICE_LG_ACT_GENERIC;
2424 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2425 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2427 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2428 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2430 /* Third action Marker value */
2431 act |= ICE_LG_ACT_GENERIC;
2432 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2433 ICE_LG_ACT_GENERIC_VALUE_M;
2435 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
2437 /* call the fill switch rule to fill the lookup Tx Rx structure */
2438 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2439 ice_aqc_opc_update_sw_rules);
2441 /* Update the action to point to the large action ID */
2442 rx_tx->pdata.lkup_tx_rx.act =
2443 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
2444 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2445 ICE_SINGLE_ACT_PTR_VAL_M));
2447 /* Use the filter rule ID of the previously created rule with single
2448 * act. Once the update happens, hardware will treat this as large
2451 rx_tx->pdata.lkup_tx_rx.index =
2452 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
/* Submit both rules (large action + lookup update) in one AQ call */
2454 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2455 ice_aqc_opc_update_sw_rules, NULL);
/* Record the large action bookkeeping in the management entry */
2457 m_ent->lg_act_idx = l_id;
2458 m_ent->sw_marker_id = sw_marker;
2461 ice_free(hw, lg_act);
2466 * ice_add_counter_act - add/update filter rule with counter action
2467 * @hw: pointer to the hardware structure
2468 * @m_ent: the management entry for which counter needs to be added
2469 * @counter_id: VLAN counter ID returned as part of allocate resource
2470 * @l_id: large action resource ID
2472 static enum ice_status
2473 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2474 u16 counter_id, u16 l_id)
2476 struct ice_aqc_sw_rules_elem *lg_act;
2477 struct ice_aqc_sw_rules_elem *rx_tx;
2478 enum ice_status status;
2479 /* 2 actions will be added while adding a large action counter */
2480 const int num_acts = 2;
/* Counter large actions are only attached to MAC lookup rules */
2487 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2488 return ICE_ERR_PARAM;
2490 /* Create two back-to-back switch rules and submit them to the HW using
2491 * one memory buffer:
/* Buffer holds the large-action rule followed by the lookup (rx_tx) rule */
2495 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2496 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2497 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
2500 return ICE_ERR_NO_MEMORY;
2502 rx_tx = (struct ice_aqc_sw_rules_elem *)
2503 ((u8 *)lg_act + lg_act_size);
2505 /* Fill in the first switch rule i.e. large action */
2506 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2507 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2508 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2510 /* First action VSI forwarding or VSI list forwarding depending on how
/* VSI list ID when multiple VSIs subscribe, single HW VSI otherwise */
2513 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2514 m_ent->fltr_info.fwd_id.hw_vsi_id;
2516 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2517 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2518 ICE_LG_ACT_VSI_LIST_ID_M;
2519 if (m_ent->vsi_count > 1)
2520 act |= ICE_LG_ACT_VSI_LIST;
2521 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2523 /* Second action counter ID */
2524 act = ICE_LG_ACT_STAT_COUNT;
2525 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2526 ICE_LG_ACT_STAT_COUNT_M;
2527 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2529 /* call the fill switch rule to fill the lookup Tx Rx structure */
2530 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2531 ice_aqc_opc_update_sw_rules);
/* Redirect the lookup rule's action to point at the large action */
2533 act = ICE_SINGLE_ACT_PTR;
2534 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2535 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2537 /* Use the filter rule ID of the previously created rule with single
2538 * act. Once the update happens, hardware will treat this as large
2541 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2542 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
/* Submit both rules in a single AQ call */
2544 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2545 ice_aqc_opc_update_sw_rules, NULL);
/* Record counter bookkeeping in the management entry */
2547 m_ent->lg_act_idx = l_id;
2548 m_ent->counter_index = counter_id;
2551 ice_free(hw, lg_act);
2556 * ice_create_vsi_list_map
2557 * @hw: pointer to the hardware structure
2558 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2559 * @num_vsi: number of VSI handles in the array
2560 * @vsi_list_id: VSI list ID generated as part of allocate resource
2562 * Helper function to create a new entry of VSI list ID to VSI mapping
2563 * using the given VSI list ID
2565 static struct ice_vsi_list_map_info *
2566 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2569 struct ice_switch_info *sw = hw->switch_info;
2570 struct ice_vsi_list_map_info *v_map;
2573 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2578 v_map->vsi_list_id = vsi_list_id;
/* Mark each subscribed VSI handle in the map's bitmap */
2580 for (i = 0; i < num_vsi; i++)
2581 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
/* Track the new mapping on the switch-wide list */
2583 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2588 * ice_update_vsi_list_rule
2589 * @hw: pointer to the hardware structure
2590 * @vsi_handle_arr: array of VSI handles to form a VSI list
2591 * @num_vsi: number of VSI handles in the array
2592 * @vsi_list_id: VSI list ID generated as part of allocate resource
2593 * @remove: Boolean value to indicate if this is a remove action
2594 * @opc: switch rules population command type - pass in the command opcode
2595 * @lkup_type: lookup type of the filter
2597 * Call AQ command to add a new switch rule or update existing switch rule
2598 * using the given VSI list ID
2600 static enum ice_status
2601 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2602 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2603 enum ice_sw_lkup_type lkup_type)
2605 struct ice_aqc_sw_rules_elem *s_rule;
2606 enum ice_status status;
2612 return ICE_ERR_PARAM;
/* Choose rule type: plain VSI list for most lookups, prune list for
 * VLAN lookups; set vs. clear depends on the remove flag
 */
2614 if (lkup_type == ICE_SW_LKUP_MAC ||
2615 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2616 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2617 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2618 lkup_type == ICE_SW_LKUP_PROMISC ||
2619 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2620 lkup_type == ICE_SW_LKUP_LAST)
2621 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2622 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2623 else if (lkup_type == ICE_SW_LKUP_VLAN)
2624 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2625 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2627 return ICE_ERR_PARAM;
2629 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2630 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2632 return ICE_ERR_NO_MEMORY;
2633 for (i = 0; i < num_vsi; i++) {
2634 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2635 status = ICE_ERR_PARAM;
2638 /* AQ call requires hw_vsi_id(s) */
2639 s_rule->pdata.vsi_list.vsi[i] =
2640 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2643 s_rule->type = CPU_TO_LE16(rule_type);
2644 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2645 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2647 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2650 ice_free(hw, s_rule);
2655 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2656 * @hw: pointer to the HW struct
2657 * @vsi_handle_arr: array of VSI handles to form a VSI list
2658 * @num_vsi: number of VSI handles in the array
2659 * @vsi_list_id: stores the ID of the VSI list to be created
2660 * @lkup_type: switch rule filter's lookup type
2662 static enum ice_status
2663 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2664 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2666 enum ice_status status;
/* Allocate a VSI list resource; the new list ID is written to
 * *vsi_list_id on success
 */
2668 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2669 ice_aqc_opc_alloc_res);
2673 /* Update the newly created VSI list to include the specified VSIs */
2674 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2675 *vsi_list_id, false,
2676 ice_aqc_opc_add_sw_rules, lkup_type);
2680 * ice_create_pkt_fwd_rule
2681 * @hw: pointer to the hardware structure
2682 * @recp_list: corresponding filter management list
2683 * @f_entry: entry containing packet forwarding information
2685 * Create switch rule with given filter information and add an entry
2686 * to the corresponding filter management list to track this switch rule
2689 static enum ice_status
2690 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2691 struct ice_fltr_list_entry *f_entry)
2693 struct ice_fltr_mgmt_list_entry *fm_entry;
2694 struct ice_aqc_sw_rules_elem *s_rule;
2695 enum ice_status status;
2697 s_rule = (struct ice_aqc_sw_rules_elem *)
2698 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2700 return ICE_ERR_NO_MEMORY;
2701 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2702 ice_malloc(hw, sizeof(*fm_entry));
2704 status = ICE_ERR_NO_MEMORY;
2705 goto ice_create_pkt_fwd_rule_exit;
2708 fm_entry->fltr_info = f_entry->fltr_info;
2710 /* Initialize all the fields for the management entry */
2711 fm_entry->vsi_count = 1;
2712 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2713 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2714 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2716 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2717 ice_aqc_opc_add_sw_rules);
2719 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2720 ice_aqc_opc_add_sw_rules, NULL);
/* On AQ failure the management entry is not tracked; free it here */
2722 ice_free(hw, fm_entry);
2723 goto ice_create_pkt_fwd_rule_exit;
/* Propagate the HW-assigned rule ID to both caller entry and the
 * tracked management entry
 */
2726 f_entry->fltr_info.fltr_rule_id =
2727 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2728 fm_entry->fltr_info.fltr_rule_id =
2729 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2731 /* The book keeping entries will get removed when base driver
2732 * calls remove filter AQ command
2734 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
2736 ice_create_pkt_fwd_rule_exit:
2737 ice_free(hw, s_rule);
2742 * ice_update_pkt_fwd_rule
2743 * @hw: pointer to the hardware structure
2744 * @f_info: filter information for switch rule
2746 * Call AQ command to update a previously created switch rule with a
2749 static enum ice_status
2750 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2752 struct ice_aqc_sw_rules_elem *s_rule;
2753 enum ice_status status;
2755 s_rule = (struct ice_aqc_sw_rules_elem *)
2756 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2758 return ICE_ERR_NO_MEMORY;
2760 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
/* Index selects the existing HW rule to be updated in place */
2762 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2764 /* Update switch rule with new rule set to forward VSI list */
2765 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2766 ice_aqc_opc_update_sw_rules, NULL);
2768 ice_free(hw, s_rule);
2773 * ice_update_sw_rule_bridge_mode
2774 * @hw: pointer to the HW struct
2776 * Updates unicast switch filter rules based on VEB/VEPA mode
2778 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2780 struct ice_switch_info *sw = hw->switch_info;
2781 struct ice_fltr_mgmt_list_entry *fm_entry;
2782 enum ice_status status = ICE_SUCCESS;
2783 struct LIST_HEAD_TYPE *rule_head;
2784 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
/* Only MAC-lookup rules are affected by a bridge mode change */
2786 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2787 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2789 ice_acquire_lock(rule_lock);
2790 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2792 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2793 u8 *addr = fi->l_data.mac.mac_addr;
2795 /* Update unicast Tx rules to reflect the selected
/* Only Tx-flagged unicast forwarding rules are rewritten */
2798 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2799 (fi->fltr_act == ICE_FWD_TO_VSI ||
2800 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2801 fi->fltr_act == ICE_FWD_TO_Q ||
2802 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2803 status = ice_update_pkt_fwd_rule(hw, fi);
2809 ice_release_lock(rule_lock);
2815 * ice_add_update_vsi_list
2816 * @hw: pointer to the hardware structure
2817 * @m_entry: pointer to current filter management list entry
2818 * @cur_fltr: filter information from the book keeping entry
2819 * @new_fltr: filter information with the new VSI to be added
2821 * Call AQ command to add or update previously created VSI list with new VSI.
2823 * Helper function to do book keeping associated with adding filter information
2824 * The algorithm to do the book keeping is described below :
2825 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2826 * if only one VSI has been added till now
2827 * Allocate a new VSI list and add two VSIs
2828 * to this list using switch rule command
2829 * Update the previously created switch rule with the
2830 * newly created VSI list ID
2831 * if a VSI list was previously created
2832 * Add the new VSI to the previously created VSI list set
2833 * using the update switch rule command
2835 static enum ice_status
2836 ice_add_update_vsi_list(struct ice_hw *hw,
2837 struct ice_fltr_mgmt_list_entry *m_entry,
2838 struct ice_fltr_info *cur_fltr,
2839 struct ice_fltr_info *new_fltr)
2841 enum ice_status status = ICE_SUCCESS;
2842 u16 vsi_list_id = 0;
/* Queue/queue-group actions cannot be combined into a VSI list */
2844 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2845 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2846 return ICE_ERR_NOT_IMPL;
2848 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2849 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2850 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2851 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2852 return ICE_ERR_NOT_IMPL;
2854 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2855 /* Only one entry existed in the mapping and it was not already
2856 * a part of a VSI list. So, create a VSI list with the old and
2859 struct ice_fltr_info tmp_fltr;
2860 u16 vsi_handle_arr[2];
2862 /* A rule already exists with the new VSI being added */
2863 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2864 return ICE_ERR_ALREADY_EXISTS;
2866 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2867 vsi_handle_arr[1] = new_fltr->vsi_handle;
2868 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2870 new_fltr->lkup_type);
2874 tmp_fltr = *new_fltr;
2875 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2876 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2877 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2878 /* Update the previous switch rule of "MAC forward to VSI" to
2879 * "MAC fwd to VSI list"
2881 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Keep the book keeping entry in sync with the HW rule */
2885 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2886 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2887 m_entry->vsi_list_info =
2888 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2891 /* If this entry was large action then the large action needs
2892 * to be updated to point to FWD to VSI list
2894 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2896 ice_add_marker_act(hw, m_entry,
2897 m_entry->sw_marker_id,
2898 m_entry->lg_act_idx);
2900 u16 vsi_handle = new_fltr->vsi_handle;
2901 enum ice_adminq_opc opcode;
2903 if (!m_entry->vsi_list_info)
2906 /* A rule already exists with the new VSI being added */
2907 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
2910 /* Update the previously created VSI list set with
2911 * the new VSI ID passed in
2913 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2914 opcode = ice_aqc_opc_update_sw_rules;
2916 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2917 vsi_list_id, false, opcode,
2918 new_fltr->lkup_type);
2919 /* update VSI list mapping info with new VSI ID */
2921 ice_set_bit(vsi_handle,
2922 m_entry->vsi_list_info->vsi_map);
2925 m_entry->vsi_count++;
2930 * ice_find_rule_entry - Search a rule entry
2931 * @list_head: head of rule list
2932 * @f_info: rule information
2934 * Helper function to search for a given rule entry
2935 * Returns pointer to entry storing the rule if found
2937 static struct ice_fltr_mgmt_list_entry *
2938 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
2939 struct ice_fltr_info *f_info)
2941 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2943 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
/* Match on lookup data and Rx/Tx flag; other fields are ignored */
2945 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2946 sizeof(f_info->l_data)) &&
2947 f_info->flag == list_itr->fltr_info.flag) {
2956 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2957 * @recp_list: VSI lists needs to be searched
2958 * @vsi_handle: VSI handle to be found in VSI list
2959 * @vsi_list_id: VSI list ID found containing vsi_handle
2961 * Helper function to search a VSI list with single entry containing given VSI
2962 * handle element. This can be extended further to search VSI list with more
2963 * than 1 vsi_count. Returns pointer to VSI list entry if found.
2965 static struct ice_vsi_list_map_info *
2966 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
2969 struct ice_vsi_list_map_info *map_info = NULL;
2970 struct LIST_HEAD_TYPE *list_head;
2972 list_head = &recp_list->filt_rules;
/* Advanced-rule recipes store a different list entry type, so the
 * two branches walk the same head with different element types
 */
2973 if (recp_list->adv_rule) {
2974 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2976 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2977 ice_adv_fltr_mgmt_list_entry,
2979 if (list_itr->vsi_list_info) {
2980 map_info = list_itr->vsi_list_info;
2981 if (ice_is_bit_set(map_info->vsi_map,
2983 *vsi_list_id = map_info->vsi_list_id;
2989 struct ice_fltr_mgmt_list_entry *list_itr;
2991 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2992 ice_fltr_mgmt_list_entry,
/* Non-advanced path additionally requires a single-VSI entry */
2994 if (list_itr->vsi_count == 1 &&
2995 list_itr->vsi_list_info) {
2996 map_info = list_itr->vsi_list_info;
2997 if (ice_is_bit_set(map_info->vsi_map,
2999 *vsi_list_id = map_info->vsi_list_id;
3009 * ice_add_rule_internal - add rule for a given lookup type
3010 * @hw: pointer to the hardware structure
3011 * @recp_list: recipe list for which rule has to be added
3012 * @lport: logic port number on which function add rule
3013 * @f_entry: structure containing MAC forwarding information
3015 * Adds or updates the rule lists for a given recipe
3017 static enum ice_status
3018 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3019 u8 lport, struct ice_fltr_list_entry *f_entry)
3021 struct ice_fltr_info *new_fltr, *cur_fltr;
3022 struct ice_fltr_mgmt_list_entry *m_entry;
3023 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3024 enum ice_status status = ICE_SUCCESS;
3026 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3027 return ICE_ERR_PARAM;
3029 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
3030 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
3031 f_entry->fltr_info.fwd_id.hw_vsi_id =
3032 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3034 rule_lock = &recp_list->filt_rule_lock;
3036 ice_acquire_lock(rule_lock);
3037 new_fltr = &f_entry->fltr_info;
/* Rule source is the port for Rx rules, the HW VSI for Tx rules */
3038 if (new_fltr->flag & ICE_FLTR_RX)
3039 new_fltr->src = lport;
3040 else if (new_fltr->flag & ICE_FLTR_TX)
3042 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
/* Create a new rule if none matches; otherwise fold the new VSI
 * into the existing rule's VSI list
 */
3044 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
3046 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3047 goto exit_add_rule_internal;
3050 cur_fltr = &m_entry->fltr_info;
3051 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
3053 exit_add_rule_internal:
3054 ice_release_lock(rule_lock);
3059 * ice_remove_vsi_list_rule
3060 * @hw: pointer to the hardware structure
3061 * @vsi_list_id: VSI list ID generated as part of allocate resource
3062 * @lkup_type: switch rule filter lookup type
3064 * The VSI list should be emptied before this function is called to remove the
3067 static enum ice_status
3068 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
3069 enum ice_sw_lkup_type lkup_type)
3071 struct ice_aqc_sw_rules_elem *s_rule;
3072 enum ice_status status;
/* Zero-VSI rule element: only the list index is conveyed */
3075 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
3076 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3078 return ICE_ERR_NO_MEMORY;
3080 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
3081 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
3083 /* Free the vsi_list resource that we allocated. It is assumed that the
3084 * list is empty at this point.
3086 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
3087 ice_aqc_opc_free_res);
3089 ice_free(hw, s_rule);
3094 * ice_rem_update_vsi_list
3095 * @hw: pointer to the hardware structure
3096 * @vsi_handle: VSI handle of the VSI to remove
3097 * @fm_list: filter management entry for which the VSI list management needs to
3100 static enum ice_status
3101 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
3102 struct ice_fltr_mgmt_list_entry *fm_list)
3104 enum ice_sw_lkup_type lkup_type;
3105 enum ice_status status = ICE_SUCCESS;
/* Only VSI-list forwarding entries with at least one VSI qualify */
3108 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
3109 fm_list->vsi_count == 0)
3110 return ICE_ERR_PARAM;
3112 /* A rule with the VSI being removed does not exist */
3113 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
3114 return ICE_ERR_DOES_NOT_EXIST;
3116 lkup_type = fm_list->fltr_info.lkup_type;
3117 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
/* Ask HW to drop the VSI from the list, then mirror in SW state */
3118 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
3119 ice_aqc_opc_update_sw_rules,
3124 fm_list->vsi_count--;
3125 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
/* When exactly one VSI remains (non-VLAN lookups), convert the rule
 * back to a direct fwd-to-VSI rule so the list can be retired
 */
3127 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
3128 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
3129 struct ice_vsi_list_map_info *vsi_list_info =
3130 fm_list->vsi_list_info;
3133 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
3135 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
3136 return ICE_ERR_OUT_OF_RANGE;
3138 /* Make sure VSI list is empty before removing it below */
3139 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
3141 ice_aqc_opc_update_sw_rules,
3146 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
3147 tmp_fltr_info.fwd_id.hw_vsi_id =
3148 ice_get_hw_vsi_num(hw, rem_vsi_handle);
3149 tmp_fltr_info.vsi_handle = rem_vsi_handle;
3150 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
3152 ice_debug(hw, ICE_DBG_SW,
3153 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
3154 tmp_fltr_info.fwd_id.hw_vsi_id, status);
3158 fm_list->fltr_info = tmp_fltr_info;
/* VLAN lists are kept until fully empty; others are removed once a
 * single VSI remains (it was converted to direct fwd above)
 */
3161 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
3162 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
3163 struct ice_vsi_list_map_info *vsi_list_info =
3164 fm_list->vsi_list_info;
3166 /* Remove the VSI list since it is no longer used */
3167 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
3169 ice_debug(hw, ICE_DBG_SW,
3170 "Failed to remove VSI list %d, error %d\n",
3171 vsi_list_id, status);
3175 LIST_DEL(&vsi_list_info->list_entry);
3176 ice_free(hw, vsi_list_info);
3177 fm_list->vsi_list_info = NULL;
3184 * ice_remove_rule_internal - Remove a filter rule of a given type
3186 * @hw: pointer to the hardware structure
3187 * @recp_list: recipe list for which the rule needs to removed
3188 * @f_entry: rule entry containing filter information
3190 static enum ice_status
3191 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3192 struct ice_fltr_list_entry *f_entry)
3194 struct ice_fltr_mgmt_list_entry *list_elem;
3195 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3196 enum ice_status status = ICE_SUCCESS;
3197 bool remove_rule = false;
3200 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3201 return ICE_ERR_PARAM;
3202 f_entry->fltr_info.fwd_id.hw_vsi_id =
3203 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3205 rule_lock = &recp_list->filt_rule_lock;
3206 ice_acquire_lock(rule_lock);
3207 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
3208 &f_entry->fltr_info);
3210 status = ICE_ERR_DOES_NOT_EXIST;
/* Decide whether the HW lookup rule itself must be removed, or only
 * the VSI list membership updated
 */
3214 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
3216 } else if (!list_elem->vsi_list_info) {
3217 status = ICE_ERR_DOES_NOT_EXIST;
3219 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
3220 /* a ref_cnt > 1 indicates that the vsi_list is being
3221 * shared by multiple rules. Decrement the ref_cnt and
3222 * remove this rule, but do not modify the list, as it
3223 * is in-use by other rules.
3225 list_elem->vsi_list_info->ref_cnt--;
3228 /* a ref_cnt of 1 indicates the vsi_list is only used
3229 * by one rule. However, the original removal request is only
3230 * for a single VSI. Update the vsi_list first, and only
3231 * remove the rule if there are no further VSIs in this list.
3233 vsi_handle = f_entry->fltr_info.vsi_handle;
3234 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
3237 /* if VSI count goes to zero after updating the VSI list */
3238 if (list_elem->vsi_count == 0)
3243 /* Remove the lookup rule */
3244 struct ice_aqc_sw_rules_elem *s_rule;
3246 s_rule = (struct ice_aqc_sw_rules_elem *)
3247 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
3249 status = ICE_ERR_NO_MEMORY;
3253 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3254 ice_aqc_opc_remove_sw_rules);
3256 status = ice_aq_sw_rules(hw, s_rule,
3257 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
3258 ice_aqc_opc_remove_sw_rules, NULL);
3260 /* Remove a book keeping from the list */
3261 ice_free(hw, s_rule);
3266 LIST_DEL(&list_elem->list_entry);
3267 ice_free(hw, list_elem);
3270 ice_release_lock(rule_lock);
3275 * ice_aq_get_res_alloc - get allocated resources
3276 * @hw: pointer to the HW struct
3277 * @num_entries: pointer to u16 to store the number of resource entries returned
3278 * @buf: pointer to user-supplied buffer
3279 * @buf_size: size of buff
3280 * @cd: pointer to command details structure or NULL
3282 * The user-supplied buffer must be large enough to store the resource
3283 * information for all resource types. Each resource type is an
3284 * ice_aqc_get_res_resp_data_elem structure.
3287 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
3288 u16 buf_size, struct ice_sq_cd *cd)
3290 struct ice_aqc_get_res_alloc *resp;
3291 enum ice_status status;
3292 struct ice_aq_desc desc;
3295 return ICE_ERR_BAD_PTR;
3297 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
3298 return ICE_ERR_INVAL_SIZE;
3300 resp = &desc.params.get_res;
3302 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
3303 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* num_entries is optional; fill it only on success when provided */
3305 if (!status && num_entries)
3306 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
3312 * ice_aq_get_res_descs - get allocated resource descriptors
3313 * @hw: pointer to the hardware structure
3314 * @num_entries: number of resource entries in buffer
3315 * @buf: Indirect buffer to hold data parameters and response
3316 * @buf_size: size of buffer for indirect commands
3317 * @res_type: resource type
3318 * @res_shared: is resource shared
3319 * @desc_id: input - first desc ID to start; output - next desc ID
3320 * @cd: pointer to command details structure or NULL
3323 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
3324 struct ice_aqc_get_allocd_res_desc_resp *buf,
3325 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
3326 struct ice_sq_cd *cd)
3328 struct ice_aqc_get_allocd_res_desc *cmd;
3329 struct ice_aq_desc desc;
3330 enum ice_status status;
3332 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3334 cmd = &desc.params.get_res_desc;
3337 return ICE_ERR_PARAM;
/* Caller's buffer must hold exactly num_entries response elements */
3339 if (buf_size != (num_entries * sizeof(*buf)))
3340 return ICE_ERR_PARAM;
3342 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
/* Encode resource type and the shared flag into one field */
3344 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
3345 ICE_AQC_RES_TYPE_M) | (res_shared ?
3346 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
3347 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
3349 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* Return the continuation cursor for the next query */
3351 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
3357 * ice_add_mac_rule - Add a MAC address based filter rule
3358 * @hw: pointer to the hardware structure
3359 * @m_list: list of MAC addresses and forwarding information
3360 * @sw: pointer to switch info struct for which function add rule
3361 * @lport: logic port number on which function add rule
3363 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
3364 * multiple unicast addresses, the function assumes that all the
3365 * addresses are unique in a given add_mac call. It doesn't
3366 * check for duplicates in this case, removing duplicates from a given
3367 * list should be taken care of in the caller of this function.
3369 static enum ice_status
3370 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
3371 struct ice_switch_info *sw, u8 lport)
3373 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
3374 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3375 struct ice_fltr_list_entry *m_list_itr;
3376 struct LIST_HEAD_TYPE *rule_head;
3377 u16 total_elem_left, s_rule_size;
3378 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3379 enum ice_status status = ICE_SUCCESS;
3380 u16 num_unicast = 0;
3384 rule_lock = &recp_list->filt_rule_lock;
3385 rule_head = &recp_list->filt_rules;
3387 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3389 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3393 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3394 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3395 if (!ice_is_vsi_valid(hw, vsi_handle))
3396 return ICE_ERR_PARAM;
3397 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3398 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3399 /* update the src in case it is VSI num */
3400 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3401 return ICE_ERR_PARAM;
3402 m_list_itr->fltr_info.src = hw_vsi_id;
3403 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3404 IS_ZERO_ETHER_ADDR(add))
3405 return ICE_ERR_PARAM;
3406 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3407 /* Don't overwrite the unicast address */
3408 ice_acquire_lock(rule_lock);
3409 if (ice_find_rule_entry(rule_head,
3410 &m_list_itr->fltr_info)) {
3411 ice_release_lock(rule_lock);
3412 return ICE_ERR_ALREADY_EXISTS;
3414 ice_release_lock(rule_lock);
3416 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3417 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3418 m_list_itr->status =
3419 ice_add_rule_internal(hw, recp_list, lport,
3421 if (m_list_itr->status)
3422 return m_list_itr->status;
3426 ice_acquire_lock(rule_lock);
3427 /* Exit if no suitable entries were found for adding bulk switch rule */
3429 status = ICE_SUCCESS;
3430 goto ice_add_mac_exit;
3433 /* Allocate switch rule buffer for the bulk update for unicast */
3434 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3435 s_rule = (struct ice_aqc_sw_rules_elem *)
3436 ice_calloc(hw, num_unicast, s_rule_size);
3438 status = ICE_ERR_NO_MEMORY;
3439 goto ice_add_mac_exit;
3443 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3445 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3446 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3448 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3449 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3450 ice_aqc_opc_add_sw_rules);
3451 r_iter = (struct ice_aqc_sw_rules_elem *)
3452 ((u8 *)r_iter + s_rule_size);
3456 /* Call AQ bulk switch rule update for all unicast addresses */
3458 /* Call AQ switch rule in AQ_MAX chunk */
3459 for (total_elem_left = num_unicast; total_elem_left > 0;
3460 total_elem_left -= elem_sent) {
3461 struct ice_aqc_sw_rules_elem *entry = r_iter;
3463 elem_sent = MIN_T(u8, total_elem_left,
3464 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
3465 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3466 elem_sent, ice_aqc_opc_add_sw_rules,
3469 goto ice_add_mac_exit;
3470 r_iter = (struct ice_aqc_sw_rules_elem *)
3471 ((u8 *)r_iter + (elem_sent * s_rule_size));
3474 /* Fill up rule ID based on the value returned from FW */
3476 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3478 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3479 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3480 struct ice_fltr_mgmt_list_entry *fm_entry;
3482 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3483 f_info->fltr_rule_id =
3484 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3485 f_info->fltr_act = ICE_FWD_TO_VSI;
3486 /* Create an entry to track this MAC address */
3487 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3488 ice_malloc(hw, sizeof(*fm_entry));
3490 status = ICE_ERR_NO_MEMORY;
3491 goto ice_add_mac_exit;
3493 fm_entry->fltr_info = *f_info;
3494 fm_entry->vsi_count = 1;
3495 /* The book keeping entries will get removed when
3496 * base driver calls remove filter AQ command
3499 LIST_ADD(&fm_entry->list_entry, rule_head);
3500 r_iter = (struct ice_aqc_sw_rules_elem *)
3501 ((u8 *)r_iter + s_rule_size);
3506 ice_release_lock(rule_lock);
3508 ice_free(hw, s_rule);
3513 * ice_add_mac - Add a MAC address based filter rule
3514 * @hw: pointer to the hardware structure
3515 * @m_list: list of MAC addresses and forwarding information
3517 * Function add MAC rule for logical port from HW struct
3519 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3522 return ICE_ERR_PARAM;
3524 return ice_add_mac_rule(hw, m_list, hw->switch_info,
3525 hw->port_info->lport);
3529 * ice_add_vlan_internal - Add one VLAN based filter rule
3530 * @hw: pointer to the hardware structure
3531 * @recp_list: recipe list for which rule has to be added
3532 * @f_entry: filter entry containing one VLAN information
3534 static enum ice_status
3535 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3536 struct ice_fltr_list_entry *f_entry)
3538 struct ice_fltr_mgmt_list_entry *v_list_itr;
3539 struct ice_fltr_info *new_fltr, *cur_fltr;
3540 enum ice_sw_lkup_type lkup_type;
3541 u16 vsi_list_id = 0, vsi_handle;
3542 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3543 enum ice_status status = ICE_SUCCESS;
3545 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3546 return ICE_ERR_PARAM;
3548 f_entry->fltr_info.fwd_id.hw_vsi_id =
3549 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3550 new_fltr = &f_entry->fltr_info;
3552 /* VLAN ID should only be 12 bits */
3553 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3554 return ICE_ERR_PARAM;
3556 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3557 return ICE_ERR_PARAM;
3559 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3560 lkup_type = new_fltr->lkup_type;
3561 vsi_handle = new_fltr->vsi_handle;
3562 rule_lock = &recp_list->filt_rule_lock;
3563 ice_acquire_lock(rule_lock);
3564 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
3566 struct ice_vsi_list_map_info *map_info = NULL;
3568 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3569 /* All VLAN pruning rules use a VSI list. Check if
3570 * there is already a VSI list containing VSI that we
3571 * want to add. If found, use the same vsi_list_id for
3572 * this new VLAN rule or else create a new list.
3574 map_info = ice_find_vsi_list_entry(recp_list,
3578 status = ice_create_vsi_list_rule(hw,
3586 /* Convert the action to forwarding to a VSI list. */
3587 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3588 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3591 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3593 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
3596 status = ICE_ERR_DOES_NOT_EXIST;
3599 /* reuse VSI list for new rule and increment ref_cnt */
3601 v_list_itr->vsi_list_info = map_info;
3602 map_info->ref_cnt++;
3604 v_list_itr->vsi_list_info =
3605 ice_create_vsi_list_map(hw, &vsi_handle,
3609 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3610 /* Update existing VSI list to add new VSI ID only if it used
3613 cur_fltr = &v_list_itr->fltr_info;
3614 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3617 /* If VLAN rule exists and VSI list being used by this rule is
3618 * referenced by more than 1 VLAN rule. Then create a new VSI
3619 * list appending previous VSI with new VSI and update existing
3620 * VLAN rule to point to new VSI list ID
3622 struct ice_fltr_info tmp_fltr;
3623 u16 vsi_handle_arr[2];
3626 /* Current implementation only supports reusing VSI list with
3627 * one VSI count. We should never hit below condition
3629 if (v_list_itr->vsi_count > 1 &&
3630 v_list_itr->vsi_list_info->ref_cnt > 1) {
3631 ice_debug(hw, ICE_DBG_SW,
3632 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3633 status = ICE_ERR_CFG;
3638 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3641 /* A rule already exists with the new VSI being added */
3642 if (cur_handle == vsi_handle) {
3643 status = ICE_ERR_ALREADY_EXISTS;
3647 vsi_handle_arr[0] = cur_handle;
3648 vsi_handle_arr[1] = vsi_handle;
3649 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3650 &vsi_list_id, lkup_type);
3654 tmp_fltr = v_list_itr->fltr_info;
3655 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3656 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3657 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3658 /* Update the previous switch rule to a new VSI list which
3659 * includes current VSI that is requested
3661 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3665 /* before overriding VSI list map info. decrement ref_cnt of
3668 v_list_itr->vsi_list_info->ref_cnt--;
3670 /* now update to newly created list */
3671 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3672 v_list_itr->vsi_list_info =
3673 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3675 v_list_itr->vsi_count++;
3679 ice_release_lock(rule_lock);
3684 * ice_add_vlan_rule - Add VLAN based filter rule
3685 * @hw: pointer to the hardware structure
3686 * @v_list: list of VLAN entries and forwarding information
3687 * @sw: pointer to switch info struct for which function add rule
3689 static enum ice_status
3690 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
3691 struct ice_switch_info *sw)
3693 struct ice_fltr_list_entry *v_list_itr;
3694 struct ice_sw_recipe *recp_list;
3696 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
3697 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
3699 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3700 return ICE_ERR_PARAM;
3701 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3702 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
3704 if (v_list_itr->status)
3705 return v_list_itr->status;
3711 * ice_add_vlan - Add a VLAN based filter rule
3712 * @hw: pointer to the hardware structure
3713 * @v_list: list of VLAN and forwarding information
3715 * Function add VLAN rule for logical port from HW struct
3717 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3720 return ICE_ERR_PARAM;
3722 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
3726 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3727 * @hw: pointer to the hardware structure
3728 * @mv_list: list of MAC and VLAN filters
3729 * @sw: pointer to switch info struct for which function add rule
3730 * @lport: logic port number on which function add rule
3732 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3733 * pruning bits enabled, then it is the responsibility of the caller to make
3734 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3735 * VLAN won't be received on that VSI otherwise.
3737 static enum ice_status
3738 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
3739 struct ice_switch_info *sw, u8 lport)
3741 struct ice_fltr_list_entry *mv_list_itr;
3742 struct ice_sw_recipe *recp_list;
3744 if (!mv_list || !hw)
3745 return ICE_ERR_PARAM;
3747 recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
3748 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3750 enum ice_sw_lkup_type l_type =
3751 mv_list_itr->fltr_info.lkup_type;
3753 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3754 return ICE_ERR_PARAM;
3755 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3756 mv_list_itr->status =
3757 ice_add_rule_internal(hw, recp_list, lport,
3759 if (mv_list_itr->status)
3760 return mv_list_itr->status;
3766 * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
3767 * @hw: pointer to the hardware structure
3768 * @mv_list: list of MAC VLAN addresses and forwarding information
3770 * Function add MAC VLAN rule for logical port from HW struct
3773 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3775 if (!mv_list || !hw)
3776 return ICE_ERR_PARAM;
3778 return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
3779 hw->port_info->lport);
3783 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
3784 * @hw: pointer to the hardware structure
3785 * @em_list: list of ether type MAC filter, MAC is optional
3786 * @sw: pointer to switch info struct for which function add rule
3787 * @lport: logic port number on which function add rule
3789 * This function requires the caller to populate the entries in
3790 * the filter list with the necessary fields (including flags to
3791 * indicate Tx or Rx rules).
3793 static enum ice_status
3794 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
3795 struct ice_switch_info *sw, u8 lport)
3797 struct ice_fltr_list_entry *em_list_itr;
3799 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3801 struct ice_sw_recipe *recp_list;
3802 enum ice_sw_lkup_type l_type;
3804 l_type = em_list_itr->fltr_info.lkup_type;
3805 recp_list = &sw->recp_list[l_type];
3807 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3808 l_type != ICE_SW_LKUP_ETHERTYPE)
3809 return ICE_ERR_PARAM;
3811 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
3814 if (em_list_itr->status)
3815 return em_list_itr->status;
3821 * ice_add_eth_mac - Add a ethertype based filter rule
3822 * @hw: pointer to the hardware structure
3823 * @em_list: list of ethertype and forwarding information
3825 * Function add ethertype rule for logical port from HW struct
3828 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3830 if (!em_list || !hw)
3831 return ICE_ERR_PARAM;
3833 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
3834 hw->port_info->lport);
3838 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
3839 * @hw: pointer to the hardware structure
3840 * @em_list: list of ethertype or ethertype MAC entries
3841 * @sw: pointer to switch info struct for which function add rule
3843 static enum ice_status
3844 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
3845 struct ice_switch_info *sw)
3847 struct ice_fltr_list_entry *em_list_itr, *tmp;
3849 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3851 struct ice_sw_recipe *recp_list;
3852 enum ice_sw_lkup_type l_type;
3854 l_type = em_list_itr->fltr_info.lkup_type;
3856 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3857 l_type != ICE_SW_LKUP_ETHERTYPE)
3858 return ICE_ERR_PARAM;
3860 recp_list = &sw->recp_list[l_type];
3861 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
3863 if (em_list_itr->status)
3864 return em_list_itr->status;
3870 * ice_remove_eth_mac - remove a ethertype based filter rule
3871 * @hw: pointer to the hardware structure
3872 * @em_list: list of ethertype and forwarding information
3876 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3878 if (!em_list || !hw)
3879 return ICE_ERR_PARAM;
3881 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
3885 * ice_rem_sw_rule_info
3886 * @hw: pointer to the hardware structure
3887 * @rule_head: pointer to the switch list structure that we want to delete
3890 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3892 if (!LIST_EMPTY(rule_head)) {
3893 struct ice_fltr_mgmt_list_entry *entry;
3894 struct ice_fltr_mgmt_list_entry *tmp;
3896 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3897 ice_fltr_mgmt_list_entry, list_entry) {
3898 LIST_DEL(&entry->list_entry);
3899 ice_free(hw, entry);
3905 * ice_rem_adv_rule_info
3906 * @hw: pointer to the hardware structure
3907 * @rule_head: pointer to the switch list structure that we want to delete
3910 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3912 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3913 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3915 if (LIST_EMPTY(rule_head))
3918 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3919 ice_adv_fltr_mgmt_list_entry, list_entry) {
3920 LIST_DEL(&lst_itr->list_entry);
3921 ice_free(hw, lst_itr->lkups);
3922 ice_free(hw, lst_itr);
3927 * ice_rem_all_sw_rules_info
3928 * @hw: pointer to the hardware structure
3930 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3932 struct ice_switch_info *sw = hw->switch_info;
3935 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3936 struct LIST_HEAD_TYPE *rule_head;
3938 rule_head = &sw->recp_list[i].filt_rules;
3939 if (!sw->recp_list[i].adv_rule)
3940 ice_rem_sw_rule_info(hw, rule_head);
3942 ice_rem_adv_rule_info(hw, rule_head);
3947 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3948 * @pi: pointer to the port_info structure
3949 * @vsi_handle: VSI handle to set as default
3950 * @set: true to add the above mentioned switch rule, false to remove it
3951 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3953 * add filter rule to set/unset given VSI as default VSI for the switch
3954 * (represented by swid)
3957 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3960 struct ice_aqc_sw_rules_elem *s_rule;
3961 struct ice_fltr_info f_info;
3962 struct ice_hw *hw = pi->hw;
3963 enum ice_adminq_opc opcode;
3964 enum ice_status status;
3968 if (!ice_is_vsi_valid(hw, vsi_handle))
3969 return ICE_ERR_PARAM;
3970 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3972 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3973 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3974 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3976 return ICE_ERR_NO_MEMORY;
3978 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3980 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3981 f_info.flag = direction;
3982 f_info.fltr_act = ICE_FWD_TO_VSI;
3983 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3985 if (f_info.flag & ICE_FLTR_RX) {
3986 f_info.src = pi->lport;
3987 f_info.src_id = ICE_SRC_ID_LPORT;
3989 f_info.fltr_rule_id =
3990 pi->dflt_rx_vsi_rule_id;
3991 } else if (f_info.flag & ICE_FLTR_TX) {
3992 f_info.src_id = ICE_SRC_ID_VSI;
3993 f_info.src = hw_vsi_id;
3995 f_info.fltr_rule_id =
3996 pi->dflt_tx_vsi_rule_id;
4000 opcode = ice_aqc_opc_add_sw_rules;
4002 opcode = ice_aqc_opc_remove_sw_rules;
4004 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
4006 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
4007 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
4010 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4012 if (f_info.flag & ICE_FLTR_TX) {
4013 pi->dflt_tx_vsi_num = hw_vsi_id;
4014 pi->dflt_tx_vsi_rule_id = index;
4015 } else if (f_info.flag & ICE_FLTR_RX) {
4016 pi->dflt_rx_vsi_num = hw_vsi_id;
4017 pi->dflt_rx_vsi_rule_id = index;
4020 if (f_info.flag & ICE_FLTR_TX) {
4021 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
4022 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
4023 } else if (f_info.flag & ICE_FLTR_RX) {
4024 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
4025 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
4030 ice_free(hw, s_rule);
4035 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
4036 * @list_head: head of rule list
4037 * @f_info: rule information
4039 * Helper function to search for a unicast rule entry - this is to be used
4040 * to remove unicast MAC filter that is not shared with other VSIs on the
4043 * Returns pointer to entry storing the rule if found
4045 static struct ice_fltr_mgmt_list_entry *
4046 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
4047 struct ice_fltr_info *f_info)
4049 struct ice_fltr_mgmt_list_entry *list_itr;
4051 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4053 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4054 sizeof(f_info->l_data)) &&
4055 f_info->fwd_id.hw_vsi_id ==
4056 list_itr->fltr_info.fwd_id.hw_vsi_id &&
4057 f_info->flag == list_itr->fltr_info.flag)
4064 * ice_remove_mac_rule - remove a MAC based filter rule
4065 * @hw: pointer to the hardware structure
4066 * @m_list: list of MAC addresses and forwarding information
4067 * @recp_list: list from which function remove MAC address
4069 * This function removes either a MAC filter rule or a specific VSI from a
4070 * VSI list for a multicast MAC address.
4072 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
4073 * ice_add_mac. Caller should be aware that this call will only work if all
4074 * the entries passed into m_list were added previously. It will not attempt to
4075 * do a partial remove of entries that were found.
4077 static enum ice_status
4078 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4079 struct ice_sw_recipe *recp_list)
4081 struct ice_fltr_list_entry *list_itr, *tmp;
4082 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4085 return ICE_ERR_PARAM;
4087 rule_lock = &recp_list->filt_rule_lock;
4088 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
4090 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
4091 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
4094 if (l_type != ICE_SW_LKUP_MAC)
4095 return ICE_ERR_PARAM;
4097 vsi_handle = list_itr->fltr_info.vsi_handle;
4098 if (!ice_is_vsi_valid(hw, vsi_handle))
4099 return ICE_ERR_PARAM;
4101 list_itr->fltr_info.fwd_id.hw_vsi_id =
4102 ice_get_hw_vsi_num(hw, vsi_handle);
4103 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4104 /* Don't remove the unicast address that belongs to
4105 * another VSI on the switch, since it is not being
4108 ice_acquire_lock(rule_lock);
4109 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
4110 &list_itr->fltr_info)) {
4111 ice_release_lock(rule_lock);
4112 return ICE_ERR_DOES_NOT_EXIST;
4114 ice_release_lock(rule_lock);
4116 list_itr->status = ice_remove_rule_internal(hw, recp_list,
4118 if (list_itr->status)
4119 return list_itr->status;
4125 * ice_remove_mac - remove a MAC address based filter rule
4126 * @hw: pointer to the hardware structure
4127 * @m_list: list of MAC addresses and forwarding information
4130 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4132 struct ice_sw_recipe *recp_list;
4134 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4135 return ice_remove_mac_rule(hw, m_list, recp_list);
4139 * ice_remove_vlan_rule - Remove VLAN based filter rule
4140 * @hw: pointer to the hardware structure
4141 * @v_list: list of VLAN entries and forwarding information
4142 * @recp_list: list from which function remove VLAN
4144 static enum ice_status
4145 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4146 struct ice_sw_recipe *recp_list)
4148 struct ice_fltr_list_entry *v_list_itr, *tmp;
4150 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4152 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4154 if (l_type != ICE_SW_LKUP_VLAN)
4155 return ICE_ERR_PARAM;
4156 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4158 if (v_list_itr->status)
4159 return v_list_itr->status;
4165 * ice_remove_vlan - remove a VLAN address based filter rule
4166 * @hw: pointer to the hardware structure
4167 * @v_list: list of VLAN and forwarding information
4171 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4173 struct ice_sw_recipe *recp_list;
4176 return ICE_ERR_PARAM;
4178 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
4179 return ice_remove_vlan_rule(hw, v_list, recp_list);
4183 * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
4184 * @hw: pointer to the hardware structure
4185 * @v_list: list of MAC VLAN entries and forwarding information
4186 * @recp_list: list from which function remove MAC VLAN
4188 static enum ice_status
4189 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4190 struct ice_sw_recipe *recp_list)
4192 struct ice_fltr_list_entry *v_list_itr, *tmp;
4194 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4195 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4197 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4199 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4200 return ICE_ERR_PARAM;
4201 v_list_itr->status =
4202 ice_remove_rule_internal(hw, recp_list,
4204 if (v_list_itr->status)
4205 return v_list_itr->status;
4211 * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
4212 * @hw: pointer to the hardware structure
4213 * @mv_list: list of MAC VLAN and forwarding information
4216 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4218 struct ice_sw_recipe *recp_list;
4220 if (!mv_list || !hw)
4221 return ICE_ERR_PARAM;
4223 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4224 return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
4228 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
4229 * @fm_entry: filter entry to inspect
4230 * @vsi_handle: VSI handle to compare with filter info
4233 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
4235 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
4236 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
4237 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
4238 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
4243 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
4244 * @hw: pointer to the hardware structure
4245 * @vsi_handle: VSI handle to remove filters from
4246 * @vsi_list_head: pointer to the list to add entry to
4247 * @fi: pointer to fltr_info of filter entry to copy & add
4249 * Helper function, used when creating a list of filters to remove from
4250 * a specific VSI. The entry added to vsi_list_head is a COPY of the
4251 * original filter entry, with the exception of fltr_info.fltr_act and
4252 * fltr_info.fwd_id fields. These are set such that later logic can
4253 * extract which VSI to remove the fltr from, and pass on that information.
4255 static enum ice_status
4256 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4257 struct LIST_HEAD_TYPE *vsi_list_head,
4258 struct ice_fltr_info *fi)
4260 struct ice_fltr_list_entry *tmp;
4262 /* this memory is freed up in the caller function
4263 * once filters for this VSI are removed
4265 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
4267 return ICE_ERR_NO_MEMORY;
4269 tmp->fltr_info = *fi;
4271 /* Overwrite these fields to indicate which VSI to remove filter from,
4272 * so find and remove logic can extract the information from the
4273 * list entries. Note that original entries will still have proper
4276 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
4277 tmp->fltr_info.vsi_handle = vsi_handle;
4278 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4280 LIST_ADD(&tmp->list_entry, vsi_list_head);
4286 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4287 * @hw: pointer to the hardware structure
4288 * @vsi_handle: VSI handle to remove filters from
4289 * @lkup_list_head: pointer to the list that has certain lookup type filters
4290 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4292 * Locates all filters in lkup_list_head that are used by the given VSI,
4293 * and adds COPIES of those entries to vsi_list_head (intended to be used
4294 * to remove the listed filters).
4295 * Note that this means all entries in vsi_list_head must be explicitly
4296 * deallocated by the caller when done with list.
4298 static enum ice_status
4299 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4300 struct LIST_HEAD_TYPE *lkup_list_head,
4301 struct LIST_HEAD_TYPE *vsi_list_head)
4303 struct ice_fltr_mgmt_list_entry *fm_entry;
4304 enum ice_status status = ICE_SUCCESS;
4306 /* check to make sure VSI ID is valid and within boundary */
4307 if (!ice_is_vsi_valid(hw, vsi_handle))
4308 return ICE_ERR_PARAM;
4310 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
4311 ice_fltr_mgmt_list_entry, list_entry) {
4312 struct ice_fltr_info *fi;
4314 fi = &fm_entry->fltr_info;
4315 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
4318 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4327 * ice_determine_promisc_mask
4328 * @fi: filter info to parse
4330 * Helper function to determine which ICE_PROMISC_ mask corresponds
4331 * to given filter into.
4333 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4335 u16 vid = fi->l_data.mac_vlan.vlan_id;
4336 u8 *macaddr = fi->l_data.mac.mac_addr;
4337 bool is_tx_fltr = false;
4338 u8 promisc_mask = 0;
4340 if (fi->flag == ICE_FLTR_TX)
4343 if (IS_BROADCAST_ETHER_ADDR(macaddr))
4344 promisc_mask |= is_tx_fltr ?
4345 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4346 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
4347 promisc_mask |= is_tx_fltr ?
4348 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4349 else if (IS_UNICAST_ETHER_ADDR(macaddr))
4350 promisc_mask |= is_tx_fltr ?
4351 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
4353 promisc_mask |= is_tx_fltr ?
4354 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4356 return promisc_mask;
4360 * _ice_get_vsi_promisc - get promiscuous mode of given VSI
4361 * @hw: pointer to the hardware structure
4362 * @vsi_handle: VSI handle to retrieve info from
4363 * @promisc_mask: pointer to mask to be filled in
4364 * @vid: VLAN ID of promisc VLAN VSI
4365 * @sw: pointer to switch info struct for which function add rule
4367 static enum ice_status
4368 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4369 u16 *vid, struct ice_switch_info *sw)
4371 struct ice_fltr_mgmt_list_entry *itr;
4372 struct LIST_HEAD_TYPE *rule_head;
4373 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4375 if (!ice_is_vsi_valid(hw, vsi_handle))
4376 return ICE_ERR_PARAM;
4380 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
4381 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
4383 ice_acquire_lock(rule_lock);
4384 LIST_FOR_EACH_ENTRY(itr, rule_head,
4385 ice_fltr_mgmt_list_entry, list_entry) {
4386 /* Continue if this filter doesn't apply to this VSI or the
4387 * VSI ID is not in the VSI map for this filter
4389 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4392 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4394 ice_release_lock(rule_lock);
4400 * ice_get_vsi_promisc - get promiscuous mode of given VSI
4401 * @hw: pointer to the hardware structure
4402 * @vsi_handle: VSI handle to retrieve info from
4403 * @promisc_mask: pointer to mask to be filled in
4404 * @vid: VLAN ID of promisc VLAN VSI
4407 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4410 return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
4411 vid, hw->switch_info);
4415 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4416 * @hw: pointer to the hardware structure
4417 * @vsi_handle: VSI handle to retrieve info from
4418 * @promisc_mask: pointer to mask to be filled in
4419 * @vid: VLAN ID of promisc VLAN VSI
4420 * @sw: pointer to switch info struct for which function add rule
4422 static enum ice_status
4423 _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4424 u16 *vid, struct ice_switch_info *sw)
4426 struct ice_fltr_mgmt_list_entry *itr;
4427 struct LIST_HEAD_TYPE *rule_head;
4428 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4430 if (!ice_is_vsi_valid(hw, vsi_handle))
4431 return ICE_ERR_PARAM;
4435 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
4436 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
4438 ice_acquire_lock(rule_lock);
4439 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
4441 /* Continue if this filter doesn't apply to this VSI or the
4442 * VSI ID is not in the VSI map for this filter
4444 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4447 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4449 ice_release_lock(rule_lock);
4455 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4456 * @hw: pointer to the hardware structure
4457 * @vsi_handle: VSI handle to retrieve info from
4458 * @promisc_mask: pointer to mask to be filled in
4459 * @vid: VLAN ID of promisc VLAN VSI
4462 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4465 return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
4466 vid, hw->switch_info);
4470 * ice_remove_promisc - Remove promisc based filter rules
4471 * @hw: pointer to the hardware structure
4472 * @recp_id: recipe ID for which the rule needs to removed
4473 * @v_list: list of promisc entries
4475 static enum ice_status
4476 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
4477 struct LIST_HEAD_TYPE *v_list)
4479 struct ice_fltr_list_entry *v_list_itr, *tmp;
4480 struct ice_sw_recipe *recp_list;
4482 recp_list = &hw->switch_info->recp_list[recp_id];
4483 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4485 v_list_itr->status =
4486 ice_remove_rule_internal(hw, recp_list, v_list_itr);
4487 if (v_list_itr->status)
4488 return v_list_itr->status;
4494 * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
4495 * @hw: pointer to the hardware structure
4496 * @vsi_handle: VSI handle to clear mode
4497 * @promisc_mask: mask of promiscuous config bits to clear
4498 * @vid: VLAN ID to clear VLAN promiscuous
4499 * @sw: pointer to switch info struct for which function add rule
4501 static enum ice_status
4502 _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4503 u16 vid, struct ice_switch_info *sw)
4505 struct ice_fltr_list_entry *fm_entry, *tmp;
4506 struct LIST_HEAD_TYPE remove_list_head;
4507 struct ice_fltr_mgmt_list_entry *itr;
4508 struct LIST_HEAD_TYPE *rule_head;
4509 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4510 enum ice_status status = ICE_SUCCESS;
4513 if (!ice_is_vsi_valid(hw, vsi_handle))
4514 return ICE_ERR_PARAM;
4516 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
4517 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4519 recipe_id = ICE_SW_LKUP_PROMISC;
4521 rule_head = &sw->recp_list[recipe_id].filt_rules;
4522 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4524 INIT_LIST_HEAD(&remove_list_head);
4526 ice_acquire_lock(rule_lock);
4527 LIST_FOR_EACH_ENTRY(itr, rule_head,
4528 ice_fltr_mgmt_list_entry, list_entry) {
4529 struct ice_fltr_info *fltr_info;
4530 u8 fltr_promisc_mask = 0;
4532 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4534 fltr_info = &itr->fltr_info;
4536 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4537 vid != fltr_info->l_data.mac_vlan.vlan_id)
4540 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4542 /* Skip if filter is not completely specified by given mask */
4543 if (fltr_promisc_mask & ~promisc_mask)
4546 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4550 ice_release_lock(rule_lock);
4551 goto free_fltr_list;
4554 ice_release_lock(rule_lock);
4556 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
4559 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4560 ice_fltr_list_entry, list_entry) {
4561 LIST_DEL(&fm_entry->list_entry);
4562 ice_free(hw, fm_entry);
4569 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
4570 * @hw: pointer to the hardware structure
4571 * @vsi_handle: VSI handle to clear mode
4572 * @promisc_mask: mask of promiscuous config bits to clear
4573 * @vid: VLAN ID to clear VLAN promiscuous
4576 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
4577 u8 promisc_mask, u16 vid)
4579 return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
4580 vid, hw->switch_info);
4584 * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4585 * @hw: pointer to the hardware structure
4586 * @vsi_handle: VSI handle to configure
4587 * @promisc_mask: mask of promiscuous config bits
4588 * @vid: VLAN ID to set VLAN promiscuous
4589 * @lport: logical port number to configure promisc mode
4590 * @sw: pointer to switch info struct for which function add rule
4592 static enum ice_status
4593 _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4594 u16 vid, u8 lport, struct ice_switch_info *sw)
4596 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4597 struct ice_fltr_list_entry f_list_entry;
4598 struct ice_fltr_info new_fltr;
4599 enum ice_status status = ICE_SUCCESS;
4605 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4607 if (!ice_is_vsi_valid(hw, vsi_handle))
4608 return ICE_ERR_PARAM;
4609 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4611 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
4613 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4614 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4615 new_fltr.l_data.mac_vlan.vlan_id = vid;
4616 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4618 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4619 recipe_id = ICE_SW_LKUP_PROMISC;
4622 /* Separate filters must be set for each direction/packet type
4623 * combination, so we will loop over the mask value, store the
4624 * individual type, and clear it out in the input mask as it
4627 while (promisc_mask) {
4628 struct ice_sw_recipe *recp_list;
4634 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4635 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4636 pkt_type = UCAST_FLTR;
4637 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4638 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4639 pkt_type = UCAST_FLTR;
4641 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4642 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4643 pkt_type = MCAST_FLTR;
4644 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4645 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4646 pkt_type = MCAST_FLTR;
4648 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4649 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4650 pkt_type = BCAST_FLTR;
4651 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4652 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4653 pkt_type = BCAST_FLTR;
4657 /* Check for VLAN promiscuous flag */
4658 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4659 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4660 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4661 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4665 /* Set filter DA based on packet type */
4666 mac_addr = new_fltr.l_data.mac.mac_addr;
4667 if (pkt_type == BCAST_FLTR) {
4668 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
4669 } else if (pkt_type == MCAST_FLTR ||
4670 pkt_type == UCAST_FLTR) {
4671 /* Use the dummy ether header DA */
4672 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
4673 ICE_NONDMA_TO_NONDMA);
4674 if (pkt_type == MCAST_FLTR)
4675 mac_addr[0] |= 0x1; /* Set multicast bit */
4678 /* Need to reset this to zero for all iterations */
4681 new_fltr.flag |= ICE_FLTR_TX;
4682 new_fltr.src = hw_vsi_id;
4684 new_fltr.flag |= ICE_FLTR_RX;
4685 new_fltr.src = lport;
4688 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4689 new_fltr.vsi_handle = vsi_handle;
4690 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4691 f_list_entry.fltr_info = new_fltr;
4692 recp_list = &sw->recp_list[recipe_id];
4694 status = ice_add_rule_internal(hw, recp_list, lport,
4696 if (status != ICE_SUCCESS)
4697 goto set_promisc_exit;
4705 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4706 * @hw: pointer to the hardware structure
4707 * @vsi_handle: VSI handle to configure
4708 * @promisc_mask: mask of promiscuous config bits
4709 * @vid: VLAN ID to set VLAN promiscuous
4712 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4715 return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
4716 hw->port_info->lport,
4721 * _ice_set_vlan_vsi_promisc
4722 * @hw: pointer to the hardware structure
4723 * @vsi_handle: VSI handle to configure
4724 * @promisc_mask: mask of promiscuous config bits
4725 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4726 * @lport: logical port number to configure promisc mode
4727 * @sw: pointer to switch info struct for which function add rule
4729 * Configure VSI with all associated VLANs to given promiscuous mode(s)
4731 static enum ice_status
4732 _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4733 bool rm_vlan_promisc, u8 lport,
4734 struct ice_switch_info *sw)
4736 struct ice_fltr_list_entry *list_itr, *tmp;
4737 struct LIST_HEAD_TYPE vsi_list_head;
4738 struct LIST_HEAD_TYPE *vlan_head;
4739 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
4740 enum ice_status status;
4743 INIT_LIST_HEAD(&vsi_list_head);
4744 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4745 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4746 ice_acquire_lock(vlan_lock);
4747 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4749 ice_release_lock(vlan_lock);
4751 goto free_fltr_list;
4753 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
4755 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4756 if (rm_vlan_promisc)
4757 status = _ice_clear_vsi_promisc(hw, vsi_handle,
4761 status = _ice_set_vsi_promisc(hw, vsi_handle,
4762 promisc_mask, vlan_id,
4769 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
4770 ice_fltr_list_entry, list_entry) {
4771 LIST_DEL(&list_itr->list_entry);
4772 ice_free(hw, list_itr);
4778 * ice_set_vlan_vsi_promisc
4779 * @hw: pointer to the hardware structure
4780 * @vsi_handle: VSI handle to configure
4781 * @promisc_mask: mask of promiscuous config bits
4782 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4784 * Configure VSI with all associated VLANs to given promiscuous mode(s)
4787 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4788 bool rm_vlan_promisc)
4790 return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
4791 rm_vlan_promisc, hw->port_info->lport,
4796 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4797 * @hw: pointer to the hardware structure
4798 * @vsi_handle: VSI handle to remove filters from
4799 * @recp_list: recipe list from which function remove fltr
4800 * @lkup: switch rule filter lookup type
4803 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4804 struct ice_sw_recipe *recp_list,
4805 enum ice_sw_lkup_type lkup)
4807 struct ice_fltr_list_entry *fm_entry;
4808 struct LIST_HEAD_TYPE remove_list_head;
4809 struct LIST_HEAD_TYPE *rule_head;
4810 struct ice_fltr_list_entry *tmp;
4811 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4812 enum ice_status status;
4814 INIT_LIST_HEAD(&remove_list_head);
4815 rule_lock = &recp_list[lkup].filt_rule_lock;
4816 rule_head = &recp_list[lkup].filt_rules;
4817 ice_acquire_lock(rule_lock);
4818 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4820 ice_release_lock(rule_lock);
4825 case ICE_SW_LKUP_MAC:
4826 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
4828 case ICE_SW_LKUP_VLAN:
4829 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
4831 case ICE_SW_LKUP_PROMISC:
4832 case ICE_SW_LKUP_PROMISC_VLAN:
4833 ice_remove_promisc(hw, lkup, &remove_list_head);
4835 case ICE_SW_LKUP_MAC_VLAN:
4836 ice_remove_mac_vlan(hw, &remove_list_head);
4838 case ICE_SW_LKUP_ETHERTYPE:
4839 case ICE_SW_LKUP_ETHERTYPE_MAC:
4840 ice_remove_eth_mac(hw, &remove_list_head);
4842 case ICE_SW_LKUP_DFLT:
4843 ice_debug(hw, ICE_DBG_SW,
4844 "Remove filters for this lookup type hasn't been implemented yet\n");
4846 case ICE_SW_LKUP_LAST:
4847 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
4851 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4852 ice_fltr_list_entry, list_entry) {
4853 LIST_DEL(&fm_entry->list_entry);
4854 ice_free(hw, fm_entry);
4859 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
4860 * @hw: pointer to the hardware structure
4861 * @vsi_handle: VSI handle to remove filters from
4862 * @sw: pointer to switch info struct
4865 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
4866 struct ice_switch_info *sw)
4868 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4870 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4871 sw->recp_list, ICE_SW_LKUP_MAC);
4872 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4873 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
4874 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4875 sw->recp_list, ICE_SW_LKUP_PROMISC);
4876 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4877 sw->recp_list, ICE_SW_LKUP_VLAN);
4878 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4879 sw->recp_list, ICE_SW_LKUP_DFLT);
4880 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4881 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
4882 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4883 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
4884 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4885 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
4889 * ice_remove_vsi_fltr - Remove all filters for a VSI
4890 * @hw: pointer to the hardware structure
4891 * @vsi_handle: VSI handle to remove filters from
4893 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4895 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
4899 * ice_alloc_res_cntr - allocating resource counter
4900 * @hw: pointer to the hardware structure
4901 * @type: type of resource
4902 * @alloc_shared: if set it is shared else dedicated
4903 * @num_items: number of entries requested for FD resource type
4904 * @counter_id: counter index returned by AQ call
4907 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4910 struct ice_aqc_alloc_free_res_elem *buf;
4911 enum ice_status status;
4914 /* Allocate resource */
4915 buf_len = sizeof(*buf);
4916 buf = (struct ice_aqc_alloc_free_res_elem *)
4917 ice_malloc(hw, buf_len);
4919 return ICE_ERR_NO_MEMORY;
4921 buf->num_elems = CPU_TO_LE16(num_items);
4922 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4923 ICE_AQC_RES_TYPE_M) | alloc_shared);
4925 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4926 ice_aqc_opc_alloc_res, NULL);
4930 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
4938 * ice_free_res_cntr - free resource counter
4939 * @hw: pointer to the hardware structure
4940 * @type: type of resource
4941 * @alloc_shared: if set it is shared else dedicated
4942 * @num_items: number of entries to be freed for FD resource type
4943 * @counter_id: counter ID resource which needs to be freed
4946 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4949 struct ice_aqc_alloc_free_res_elem *buf;
4950 enum ice_status status;
4954 buf_len = sizeof(*buf);
4955 buf = (struct ice_aqc_alloc_free_res_elem *)
4956 ice_malloc(hw, buf_len);
4958 return ICE_ERR_NO_MEMORY;
4960 buf->num_elems = CPU_TO_LE16(num_items);
4961 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4962 ICE_AQC_RES_TYPE_M) | alloc_shared);
4963 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
4965 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4966 ice_aqc_opc_free_res, NULL);
4968 ice_debug(hw, ICE_DBG_SW,
4969 "counter resource could not be freed\n");
4976 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
4977 * @hw: pointer to the hardware structure
4978 * @counter_id: returns counter index
4980 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
4982 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4983 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4988 * ice_free_vlan_res_counter - Free counter resource for VLAN type
4989 * @hw: pointer to the hardware structure
4990 * @counter_id: counter index to be freed
4992 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
4994 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4995 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
5000 * ice_alloc_res_lg_act - add large action resource
5001 * @hw: pointer to the hardware structure
5002 * @l_id: large action ID to fill it in
5003 * @num_acts: number of actions to hold with a large action entry
5005 static enum ice_status
5006 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
5008 struct ice_aqc_alloc_free_res_elem *sw_buf;
5009 enum ice_status status;
5012 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
5013 return ICE_ERR_PARAM;
5015 /* Allocate resource for large action */
5016 buf_len = sizeof(*sw_buf);
5017 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
5018 ice_malloc(hw, buf_len);
5020 return ICE_ERR_NO_MEMORY;
5022 sw_buf->num_elems = CPU_TO_LE16(1);
5024 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
5025 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
5026 * If num_acts is greater than 2, then use
5027 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
5028 * The num_acts cannot exceed 4. This was ensured at the
5029 * beginning of the function.
5032 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
5033 else if (num_acts == 2)
5034 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
5036 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
5038 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
5039 ice_aqc_opc_alloc_res, NULL);
5041 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
5043 ice_free(hw, sw_buf);
5048 * ice_add_mac_with_sw_marker - add filter with sw marker
5049 * @hw: pointer to the hardware structure
5050 * @f_info: filter info structure containing the MAC filter information
5051 * @sw_marker: sw marker to tag the Rx descriptor with
5054 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
5057 struct ice_fltr_mgmt_list_entry *m_entry;
5058 struct ice_fltr_list_entry fl_info;
5059 struct ice_sw_recipe *recp_list;
5060 struct LIST_HEAD_TYPE l_head;
5061 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5062 enum ice_status ret;
5066 if (f_info->fltr_act != ICE_FWD_TO_VSI)
5067 return ICE_ERR_PARAM;
5069 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5070 return ICE_ERR_PARAM;
5072 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
5073 return ICE_ERR_PARAM;
5075 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5076 return ICE_ERR_PARAM;
5077 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5079 /* Add filter if it doesn't exist so then the adding of large
5080 * action always results in update
5083 INIT_LIST_HEAD(&l_head);
5084 fl_info.fltr_info = *f_info;
5085 LIST_ADD(&fl_info.list_entry, &l_head);
5087 entry_exists = false;
5088 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5089 hw->port_info->lport);
5090 if (ret == ICE_ERR_ALREADY_EXISTS)
5091 entry_exists = true;
5095 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5096 rule_lock = &recp_list->filt_rule_lock;
5097 ice_acquire_lock(rule_lock);
5098 /* Get the book keeping entry for the filter */
5099 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5103 /* If counter action was enabled for this rule then don't enable
5104 * sw marker large action
5106 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5107 ret = ICE_ERR_PARAM;
5111 /* if same marker was added before */
5112 if (m_entry->sw_marker_id == sw_marker) {
5113 ret = ICE_ERR_ALREADY_EXISTS;
5117 /* Allocate a hardware table entry to hold large act. Three actions
5118 * for marker based large action
5120 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
5124 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5127 /* Update the switch rule to add the marker action */
5128 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
5130 ice_release_lock(rule_lock);
5135 ice_release_lock(rule_lock);
5136 /* only remove entry if it did not exist previously */
5138 ret = ice_remove_mac(hw, &l_head);
5144 * ice_add_mac_with_counter - add filter with counter enabled
5145 * @hw: pointer to the hardware structure
5146 * @f_info: pointer to filter info structure containing the MAC filter
5150 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
5152 struct ice_fltr_mgmt_list_entry *m_entry;
5153 struct ice_fltr_list_entry fl_info;
5154 struct ice_sw_recipe *recp_list;
5155 struct LIST_HEAD_TYPE l_head;
5156 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5157 enum ice_status ret;
5162 if (f_info->fltr_act != ICE_FWD_TO_VSI)
5163 return ICE_ERR_PARAM;
5165 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5166 return ICE_ERR_PARAM;
5168 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5169 return ICE_ERR_PARAM;
5170 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5171 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5173 entry_exist = false;
5175 rule_lock = &recp_list->filt_rule_lock;
5177 /* Add filter if it doesn't exist so then the adding of large
5178 * action always results in update
5180 INIT_LIST_HEAD(&l_head);
5182 fl_info.fltr_info = *f_info;
5183 LIST_ADD(&fl_info.list_entry, &l_head);
5185 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5186 hw->port_info->lport);
5187 if (ret == ICE_ERR_ALREADY_EXISTS)
5192 ice_acquire_lock(rule_lock);
5193 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5195 ret = ICE_ERR_BAD_PTR;
5199 /* Don't enable counter for a filter for which sw marker was enabled */
5200 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
5201 ret = ICE_ERR_PARAM;
5205 /* If a counter was already enabled then don't need to add again */
5206 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5207 ret = ICE_ERR_ALREADY_EXISTS;
5211 /* Allocate a hardware table entry to VLAN counter */
5212 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
5216 /* Allocate a hardware table entry to hold large act. Two actions for
5217 * counter based large action
5219 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
5223 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5226 /* Update the switch rule to add the counter action */
5227 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
5229 ice_release_lock(rule_lock);
5234 ice_release_lock(rule_lock);
5235 /* only remove entry if it did not exist previously */
5237 ret = ice_remove_mac(hw, &l_head);
5242 /* This is mapping table entry that maps every word within a given protocol
5243 * structure to the real byte offset as per the specification of that
5245 * for example dst address is 3 words in ethertype header and corresponding
5246 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
5247 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
5248 * matching entry describing its field. This needs to be updated if new
5249 * structure is added to that union.
5251 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
5252 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
5253 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
5254 { ICE_ETYPE_OL, { 0 } },
5255 { ICE_VLAN_OFOS, { 0, 2 } },
5256 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
5257 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
5258 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
5259 26, 28, 30, 32, 34, 36, 38 } },
5260 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
5261 26, 28, 30, 32, 34, 36, 38 } },
5262 { ICE_TCP_IL, { 0, 2 } },
5263 { ICE_UDP_OF, { 0, 2 } },
5264 { ICE_UDP_ILOS, { 0, 2 } },
5265 { ICE_SCTP_IL, { 0, 2 } },
5266 { ICE_VXLAN, { 8, 10, 12, 14 } },
5267 { ICE_GENEVE, { 8, 10, 12, 14 } },
5268 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
5269 { ICE_NVGRE, { 0, 2, 4, 6 } },
5270 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
5271 { ICE_PPPOE, { 0, 2, 4, 6 } },
5272 { ICE_PFCP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
5273 { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
5274 { ICE_ESP, { 0, 2, 4, 6 } },
5275 { ICE_AH, { 0, 2, 4, 6, 8, 10 } },
5276 { ICE_NAT_T, { 8, 10, 12, 14 } },
5279 /* The following table describes preferred grouping of recipes.
5280 * If a recipe that needs to be programmed is a superset or matches one of the
5281 * following combinations, then the recipe needs to be chained as per the
5285 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
5286 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
5287 { ICE_MAC_IL, ICE_MAC_IL_HW },
5288 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
5289 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
5290 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
5291 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
5292 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
5293 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
5294 { ICE_TCP_IL, ICE_TCP_IL_HW },
5295 { ICE_UDP_OF, ICE_UDP_OF_HW },
5296 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
5297 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
5298 { ICE_VXLAN, ICE_UDP_OF_HW },
5299 { ICE_GENEVE, ICE_UDP_OF_HW },
5300 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
5301 { ICE_NVGRE, ICE_GRE_OF_HW },
5302 { ICE_GTP, ICE_UDP_OF_HW },
5303 { ICE_PPPOE, ICE_PPPOE_HW },
5304 { ICE_PFCP, ICE_UDP_ILOS_HW },
5305 { ICE_L2TPV3, ICE_L2TPV3_HW },
5306 { ICE_ESP, ICE_ESP_HW },
5307 { ICE_AH, ICE_AH_HW },
5308 { ICE_NAT_T, ICE_UDP_ILOS_HW },
5312 * ice_find_recp - find a recipe
5313 * @hw: pointer to the hardware structure
5314 * @lkup_exts: extension sequence to match
5316 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
5318 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
5319 enum ice_sw_tunnel_type tun_type)
5321 bool refresh_required = true;
5322 struct ice_sw_recipe *recp;
5325 /* Walk through existing recipes to find a match */
5326 recp = hw->switch_info->recp_list;
5327 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5328 /* If recipe was not created for this ID, in SW bookkeeping,
5329 * check if FW has an entry for this recipe. If the FW has an
5330 * entry update it in our SW bookkeeping and continue with the
5333 if (!recp[i].recp_created)
5334 if (ice_get_recp_frm_fw(hw,
5335 hw->switch_info->recp_list, i,
5339 /* Skip inverse action recipes */
5340 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
5341 ICE_AQ_RECIPE_ACT_INV_ACT)
5344 /* if number of words we are looking for match */
5345 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
5346 struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
5347 struct ice_fv_word *be = lkup_exts->fv_words;
5348 u16 *cr = recp[i].lkup_exts.field_mask;
5349 u16 *de = lkup_exts->field_mask;
5353 /* ar, cr, and qr are related to the recipe words, while
5354 * be, de, and pe are related to the lookup words
5356 for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
5357 for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
5359 if (ar[qr].off == be[pe].off &&
5360 ar[qr].prot_id == be[pe].prot_id &&
5362 /* Found the "pe"th word in the
5367 /* After walking through all the words in the
5368 * "i"th recipe if "p"th word was not found then
5369 * this recipe is not what we are looking for.
5370 * So break out from this loop and try the next
5373 if (qr >= recp[i].lkup_exts.n_val_words) {
5378 /* If for "i"th recipe the found was never set to false
5379 * then it means we found our match
5381 if ((tun_type == recp[i].tun_type ||
5382 tun_type == ICE_SW_TUN_AND_NON_TUN) && found)
5383 return i; /* Return the recipe ID */
5386 return ICE_MAX_NUM_RECIPES;
5390 * ice_prot_type_to_id - get protocol ID from protocol type
5391 * @type: protocol type
5392 * @id: pointer to variable that will receive the ID
5394 * Returns true if found, false otherwise
5396 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
5400 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
5401 if (ice_prot_id_tbl[i].type == type) {
5402 *id = ice_prot_id_tbl[i].protocol_id;
5409 * ice_find_valid_words - count valid words
5410 * @rule: advanced rule with lookup information
5411 * @lkup_exts: byte offset extractions of the words that are valid
5413 * calculate valid words in a lookup rule using mask value
5416 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
5417 struct ice_prot_lkup_ext *lkup_exts)
5419 u8 j, word, prot_id, ret_val;
5421 if (!ice_prot_type_to_id(rule->type, &prot_id))
5424 word = lkup_exts->n_val_words;
5426 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
5427 if (((u16 *)&rule->m_u)[j] &&
5428 rule->type < ARRAY_SIZE(ice_prot_ext)) {
5429 /* No more space to accommodate */
5430 if (word >= ICE_MAX_CHAIN_WORDS)
5432 lkup_exts->fv_words[word].off =
5433 ice_prot_ext[rule->type].offs[j];
5434 lkup_exts->fv_words[word].prot_id =
5435 ice_prot_id_tbl[rule->type].protocol_id;
5436 lkup_exts->field_mask[word] =
5437 BE16_TO_CPU(((__be16 *)&rule->m_u)[j]);
5441 ret_val = word - lkup_exts->n_val_words;
5442 lkup_exts->n_val_words = word;
5448 * ice_create_first_fit_recp_def - Create a recipe grouping
5449 * @hw: pointer to the hardware structure
5450 * @lkup_exts: an array of protocol header extractions
5451 * @rg_list: pointer to a list that stores new recipe groups
5452 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
5454 * Using first fit algorithm, take all the words that are still not done
5455 * and start grouping them in 4-word groups. Each group makes up one
5458 static enum ice_status
5459 ice_create_first_fit_recp_def(struct ice_hw *hw,
5460 struct ice_prot_lkup_ext *lkup_exts,
5461 struct LIST_HEAD_TYPE *rg_list,
5464 struct ice_pref_recipe_group *grp = NULL;
5469 if (!lkup_exts->n_val_words) {
5470 struct ice_recp_grp_entry *entry;
5472 entry = (struct ice_recp_grp_entry *)
5473 ice_malloc(hw, sizeof(*entry));
5475 return ICE_ERR_NO_MEMORY;
5476 LIST_ADD(&entry->l_entry, rg_list);
5477 grp = &entry->r_group;
5479 grp->n_val_pairs = 0;
5482 /* Walk through every word in the rule to check if it is not done. If so
5483 * then this word needs to be part of a new recipe.
5485 for (j = 0; j < lkup_exts->n_val_words; j++)
5486 if (!ice_is_bit_set(lkup_exts->done, j)) {
5488 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
5489 struct ice_recp_grp_entry *entry;
5491 entry = (struct ice_recp_grp_entry *)
5492 ice_malloc(hw, sizeof(*entry));
5494 return ICE_ERR_NO_MEMORY;
5495 LIST_ADD(&entry->l_entry, rg_list);
5496 grp = &entry->r_group;
5500 grp->pairs[grp->n_val_pairs].prot_id =
5501 lkup_exts->fv_words[j].prot_id;
5502 grp->pairs[grp->n_val_pairs].off =
5503 lkup_exts->fv_words[j].off;
5504 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
5512 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
5513 * @hw: pointer to the hardware structure
5514 * @fv_list: field vector with the extraction sequence information
5515 * @rg_list: recipe groupings with protocol-offset pairs
5517 * Helper function to fill in the field vector indices for protocol-offset
5518 * pairs. These indexes are then ultimately programmed into a recipe.
5520 static enum ice_status
5521 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
5522 struct LIST_HEAD_TYPE *rg_list)
5524 struct ice_sw_fv_list_entry *fv;
5525 struct ice_recp_grp_entry *rg;
5526 struct ice_fv_word *fv_ext;
5528 if (LIST_EMPTY(fv_list))
5531 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
5532 fv_ext = fv->fv_ptr->ew;
5534 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
5537 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
5538 struct ice_fv_word *pr;
5543 pr = &rg->r_group.pairs[i];
5544 mask = rg->r_group.mask[i];
5546 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
5547 if (fv_ext[j].prot_id == pr->prot_id &&
5548 fv_ext[j].off == pr->off) {
5551 /* Store index of field vector */
5553 rg->fv_mask[i] = mask;
5557 /* Protocol/offset could not be found, caller gave an
5561 return ICE_ERR_PARAM;
5569 * ice_find_free_recp_res_idx - find free result indexes for recipe
5570 * @hw: pointer to hardware structure
5571 * @profiles: bitmap of profiles that will be associated with the new recipe
5572 * @free_idx: pointer to variable to receive the free index bitmap
5574 * The algorithm used here is:
5575 * 1. When creating a new recipe, create a set P which contains all
5576 * Profiles that will be associated with our new recipe
5578 * 2. For each Profile p in set P:
5579 * a. Add all recipes associated with Profile p into set R
5580 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
5581 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
5582 * i. Or just assume they all have the same possible indexes:
5584 * i.e., PossibleIndexes = 0x0000F00000000000
5586 * 3. For each Recipe r in set R:
5587 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
5588 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
5590 * FreeIndexes will contain the bits indicating the indexes free for use,
5591 * then the code needs to update the recipe[r].used_result_idx_bits to
5592 * indicate which indexes were selected for use by this recipe.
5595 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
5596 ice_bitmap_t *free_idx)
5598 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5599 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5600 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
5604 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5605 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5606 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
5607 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
/* Seed possible_idx with every result index; each profile's prof_res_bm
 * narrows it down via the AND in the loop below.
 */
5609 for (count = 0; count < ICE_MAX_FV_WORDS; count++)
5610 ice_set_bit(count, possible_idx);
5612 /* For each profile we are going to associate the recipe with, add the
5613 * recipes that are associated with that profile. This will give us
5614 * the set of recipes that our recipe may collide with. Also, determine
5615 * what possible result indexes are usable given this set of profiles.
5618 while (ICE_MAX_NUM_PROFILES >
5619 (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
5620 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
5621 ICE_MAX_NUM_RECIPES);
5622 ice_and_bitmap(possible_idx, possible_idx,
5623 hw->switch_info->prof_res_bm[bit],
5628 /* For each recipe that our new recipe may collide with, determine
5629 * which indexes have been used.
5631 for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
5632 if (ice_is_bit_set(recipes, bit)) {
5633 ice_or_bitmap(used_idx, used_idx,
5634 hw->switch_info->recp_list[bit].res_idxs,
/* NOTE(review): XOR yields the free set only when used_idx is a subset of
 * possible_idx; that appears guaranteed by the AND/OR steps above - confirm.
 */
5638 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
5640 /* return number of free indexes */
5643 while (ICE_MAX_FV_WORDS >
5644 (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
5653 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
5654 * @hw: pointer to hardware structure
5655 * @rm: recipe management list entry
5656 * @match_tun_mask: tunnel mask that needs to be programmed
5657 * @profiles: bitmap of profiles that will be associated.
5659 static enum ice_status
5660 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
5661 u16 match_tun_mask, ice_bitmap_t *profiles)
5663 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5664 struct ice_aqc_recipe_data_elem *tmp;
5665 struct ice_aqc_recipe_data_elem *buf;
5666 struct ice_recp_grp_entry *entry;
5667 enum ice_status status;
5673 /* When more than one recipe are required, another recipe is needed to
5674 * chain them together. Matching a tunnel metadata ID takes up one of
5675 * the match fields in the chaining recipe reducing the number of
5676 * chained recipes by one.
5678 /* check number of free result indices */
5679 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5680 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
5682 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
5683 free_res_idx, rm->n_grp_count);
5685 if (rm->n_grp_count > 1) {
5686 if (rm->n_grp_count > free_res_idx)
5687 return ICE_ERR_MAX_LIMIT;
5692 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
5693 return ICE_ERR_MAX_LIMIT;
5695 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
5696 ICE_MAX_NUM_RECIPES,
5699 return ICE_ERR_NO_MEMORY;
5701 buf = (struct ice_aqc_recipe_data_elem *)
5702 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
5704 status = ICE_ERR_NO_MEMORY;
5708 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
5709 recipe_count = ICE_MAX_NUM_RECIPES;
/* Read an existing (MAC lookup) recipe into tmp; tmp[0] is used below as a
 * template that each new sub-recipe's AQ element is cloned from.
 */
5710 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5712 if (status || recipe_count == 0)
5715 /* Allocate the recipe resources, and configure them according to the
5716 * match fields from protocol headers and extracted field vectors.
/* chain_idx walks the free result-index bitmap; each chained sub-recipe
 * consumes one index (cleared from the bitmap further down).
 */
5718 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
5719 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5722 status = ice_alloc_recipe(hw, &entry->rid);
5726 /* Clear the result index of the located recipe, as this will be
5727 * updated, if needed, later in the recipe creation process.
5729 tmp[0].content.result_indx = 0;
5731 buf[recps] = tmp[0];
5732 buf[recps].recipe_indx = (u8)entry->rid;
5733 /* if the recipe is a non-root recipe RID should be programmed
5734 * as 0 for the rules to be applied correctly.
5736 buf[recps].content.rid = 0;
5737 ice_memset(&buf[recps].content.lkup_indx, 0,
5738 sizeof(buf[recps].content.lkup_indx),
5741 /* All recipes use look-up index 0 to match switch ID. */
5742 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5743 buf[recps].content.mask[0] =
5744 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5745 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5748 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5749 buf[recps].content.lkup_indx[i] = 0x80;
5750 buf[recps].content.mask[i] = 0;
/* Overwrite the ignore defaults with this group's real FV index/mask pairs
 * (lookup slot 0 is reserved for the switch ID, hence the i + 1 offset).
 */
5753 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5754 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5755 buf[recps].content.mask[i + 1] =
5756 CPU_TO_LE16(entry->fv_mask[i]);
5759 if (rm->n_grp_count > 1) {
5760 /* Checks to see if there really is a valid result index
5763 if (chain_idx >= ICE_MAX_FV_WORDS) {
5764 ice_debug(hw, ICE_DBG_SW,
5765 "No chain index available\n");
5766 status = ICE_ERR_MAX_LIMIT;
5770 entry->chain_idx = chain_idx;
5771 buf[recps].content.result_indx =
5772 ICE_AQ_RECIPE_RESULT_EN |
5773 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5774 ICE_AQ_RECIPE_RESULT_DATA_M);
5775 ice_clear_bit(chain_idx, result_idx_bm);
5776 chain_idx = ice_find_first_bit(result_idx_bm,
5780 /* fill recipe dependencies */
5781 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
5782 ICE_MAX_NUM_RECIPES);
5783 ice_set_bit(buf[recps].recipe_indx,
5784 (ice_bitmap_t *)buf[recps].recipe_bitmap);
5785 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
/* Single-recipe case: the one recipe is itself the root. */
5789 if (rm->n_grp_count == 1) {
5790 rm->root_rid = buf[0].recipe_indx;
5791 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
5792 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5793 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5794 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5795 sizeof(buf[0].recipe_bitmap),
5796 ICE_NONDMA_TO_NONDMA);
5798 status = ICE_ERR_BAD_PTR;
5801 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
5802 * the recipe which is getting created if specified
5803 * by user. Usually any advanced switch filter, which results
5804 * into new extraction sequence, ended up creating a new recipe
5805 * of type ROOT and usually recipes are associated with profiles
5806 * Switch rule referring newly created recipe, needs to have
5807 * either/or 'fwd' or 'join' priority, otherwise switch rule
5808 * evaluation will not happen correctly. In other words, if
5809 * switch rule to be evaluated on priority basis, then recipe
5810 * needs to have priority, otherwise it will be evaluated last.
5812 buf[0].content.act_ctrl_fwd_priority = rm->priority;
5814 struct ice_recp_grp_entry *last_chain_entry;
5817 /* Allocate the last recipe that will chain the outcomes of the
5818 * other recipes together
5820 status = ice_alloc_recipe(hw, &rid);
5824 buf[recps].recipe_indx = (u8)rid;
5825 buf[recps].content.rid = (u8)rid;
5826 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5827 /* the new entry created should also be part of rg_list to
5828 * make sure we have complete recipe
5830 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
5831 sizeof(*last_chain_entry));
5832 if (!last_chain_entry) {
5833 status = ICE_ERR_NO_MEMORY;
5836 last_chain_entry->rid = rid;
5837 ice_memset(&buf[recps].content.lkup_indx, 0,
5838 sizeof(buf[recps].content.lkup_indx),
5840 /* All recipes use look-up index 0 to match switch ID. */
5841 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5842 buf[recps].content.mask[0] =
5843 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5844 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5845 buf[recps].content.lkup_indx[i] =
5846 ICE_AQ_RECIPE_LKUP_IGNORE;
5847 buf[recps].content.mask[i] = 0;
5851 /* update r_bitmap with the recp that is used for chaining */
5852 ice_set_bit(rid, rm->r_bitmap);
5853 /* this is the recipe that chains all the other recipes so it
5854 * should not have a chaining ID to indicate the same
5856 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
/* The chaining recipe matches on each sub-recipe's result index
 * (entry->chain_idx) so a hit requires all sub-recipes to have hit.
 */
5857 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
5859 last_chain_entry->fv_idx[i] = entry->chain_idx;
5860 buf[recps].content.lkup_indx[i] = entry->chain_idx;
5861 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
5862 ice_set_bit(entry->rid, rm->r_bitmap);
5864 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
5865 if (sizeof(buf[recps].recipe_bitmap) >=
5866 sizeof(rm->r_bitmap)) {
5867 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5868 sizeof(buf[recps].recipe_bitmap),
5869 ICE_NONDMA_TO_NONDMA);
5871 status = ICE_ERR_BAD_PTR;
5874 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5876 /* To differentiate among different UDP tunnels, a meta data ID
5879 if (match_tun_mask) {
5880 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
5881 buf[recps].content.mask[i] =
5882 CPU_TO_LE16(match_tun_mask);
5886 rm->root_rid = (u8)rid;
5888 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5892 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5893 ice_release_change_lock(hw);
5897 /* Every recipe that just got created add it to the recipe
5900 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5901 struct ice_switch_info *sw = hw->switch_info;
5902 bool is_root, idx_found = false;
5903 struct ice_sw_recipe *recp;
5904 u16 idx, buf_idx = 0;
5906 /* find buffer index for copying some data */
5907 for (idx = 0; idx < rm->n_grp_count; idx++)
5908 if (buf[idx].recipe_indx == entry->rid) {
5914 status = ICE_ERR_OUT_OF_RANGE;
5918 recp = &sw->recp_list[entry->rid];
5919 is_root = (rm->root_rid == entry->rid);
5920 recp->is_root = is_root;
5922 recp->root_rid = entry->rid;
5923 recp->big_recp = (is_root && rm->n_grp_count > 1);
5925 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
5926 entry->r_group.n_val_pairs *
5927 sizeof(struct ice_fv_word),
5928 ICE_NONDMA_TO_NONDMA);
5930 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5931 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
5933 /* Copy non-result fv index values and masks to recipe. This
5934 * call will also update the result recipe bitmask.
5936 ice_collect_result_idx(&buf[buf_idx], recp);
5938 /* for non-root recipes, also copy to the root, this allows
5939 * easier matching of a complete chained recipe
5942 ice_collect_result_idx(&buf[buf_idx],
5943 &sw->recp_list[rm->root_rid]);
5945 recp->n_ext_words = entry->r_group.n_val_pairs;
5946 recp->chain_idx = entry->chain_idx;
5947 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5948 recp->n_grp_count = rm->n_grp_count;
5949 recp->tun_type = rm->tun_type;
5950 recp->recp_created = true;
5965 * ice_create_recipe_group - creates recipe group
5966 * @hw: pointer to hardware structure
5967 * @rm: recipe management list entry
5968 * @lkup_exts: lookup elements
5970 static enum ice_status
5971 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5972 struct ice_prot_lkup_ext *lkup_exts)
5974 enum ice_status status;
5977 rm->n_grp_count = 0;
5979 /* Create recipes for words that are marked not done by packing them
5982 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5983 &rm->rg_list, &recp_count);
/* Cache the lookup words and masks on the recipe so later stages
 * (recipe programming / lookup) do not need lkup_exts again.
 */
5985 rm->n_grp_count += recp_count;
5986 rm->n_ext_words = lkup_exts->n_val_words;
5987 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
5988 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
5989 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
5990 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
5997 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
5998 * @hw: pointer to hardware structure
5999 * @lkups: lookup elements or match criteria for the advanced recipe, one
6000 * structure per protocol header
6001 * @lkups_cnt: number of protocols
6002 * @bm: bitmap of field vectors to consider
6003 * @fv_list: pointer to a list that holds the returned field vectors
6005 static enum ice_status
6006 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6007 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
6009 enum ice_status status;
6016 prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
6018 return ICE_ERR_NO_MEMORY;
/* Translate each software lookup type into its HW protocol ID; an
 * unmappable type makes the whole request invalid (ICE_ERR_CFG).
 */
6020 for (i = 0; i < lkups_cnt; i++)
6021 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
6022 status = ICE_ERR_CFG;
6026 /* Find field vectors that include all specified protocol types */
6027 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
6030 ice_free(hw, prot_ids);
6035 * ice_tun_type_match_word - determine if tun type needs a match mask
6036 * @tun_type: tunnel type
6037 * @mask: mask to be used for the tunnel
6039 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
/* Plain UDP/GRE tunnels match the full tunnel-flag metadata mask. */
6042 case ICE_SW_TUN_VXLAN_GPE:
6043 case ICE_SW_TUN_GENEVE:
6044 case ICE_SW_TUN_VXLAN:
6045 case ICE_SW_TUN_NVGRE:
6046 case ICE_SW_TUN_UDP:
6047 case ICE_ALL_TUNNELS:
6048 *mask = ICE_TUN_FLAG_MASK;
/* VLAN-tagged tunnel variants exclude the VLAN bit from the flag mask. */
6051 case ICE_SW_TUN_GENEVE_VLAN:
6052 case ICE_SW_TUN_VXLAN_VLAN:
6053 *mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
6063 * ice_add_special_words - Add words that are not protocols, such as metadata
6064 * @rinfo: other information regarding the rule e.g. priority and action info
6065 * @lkup_exts: lookup word structure
6067 static enum ice_status
6068 ice_add_special_words(struct ice_adv_rule_info *rinfo,
6069 struct ice_prot_lkup_ext *lkup_exts)
6073 /* If this is a tunneled packet, then add recipe index to match the
6074 * tunnel bit in the packet metadata flags.
6076 if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
6077 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
/* Append one metadata word (HW metadata protocol ID at the
 * tunnel-flag MDID offset) using the tunnel-specific mask.
 */
6078 u8 word = lkup_exts->n_val_words++;
6080 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
6081 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
6082 lkup_exts->field_mask[word] = mask;
/* No room left for another lookup word. */
6084 return ICE_ERR_MAX_LIMIT;
6091 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
6092 * @hw: pointer to hardware structure
6093 * @rinfo: other information regarding the rule e.g. priority and action info
6094 * @bm: pointer to memory for returning the bitmap of field vectors
6097 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
6100 enum ice_prof_type prof_type;
6102 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
/* Two kinds of cases below: tunnel classes select a profile *type* (resolved
 * by ice_get_sw_fv_bitmap at the end), while the PROFID-style cases set a
 * single, explicit profile bit directly in @bm.
 */
6104 switch (rinfo->tun_type) {
6106 prof_type = ICE_PROF_NON_TUN;
6108 case ICE_ALL_TUNNELS:
6109 prof_type = ICE_PROF_TUN_ALL;
6111 case ICE_SW_TUN_VXLAN_GPE:
6112 case ICE_SW_TUN_GENEVE:
6113 case ICE_SW_TUN_GENEVE_VLAN:
6114 case ICE_SW_TUN_VXLAN:
6115 case ICE_SW_TUN_VXLAN_VLAN:
6116 case ICE_SW_TUN_UDP:
6117 case ICE_SW_TUN_GTP:
6118 prof_type = ICE_PROF_TUN_UDP;
6120 case ICE_SW_TUN_NVGRE:
6121 prof_type = ICE_PROF_TUN_GRE;
6123 case ICE_SW_TUN_PPPOE:
6124 prof_type = ICE_PROF_TUN_PPPOE;
6126 case ICE_SW_TUN_PROFID_IPV6_ESP:
6127 case ICE_SW_TUN_IPV6_ESP:
6128 ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
6130 case ICE_SW_TUN_PROFID_IPV6_AH:
6131 case ICE_SW_TUN_IPV6_AH:
6132 ice_set_bit(ICE_PROFID_IPV6_AH, bm);
6134 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6135 case ICE_SW_TUN_IPV6_L2TPV3:
6136 ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
6138 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6139 case ICE_SW_TUN_IPV6_NAT_T:
6140 ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
6142 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6143 ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
6145 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6146 ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
6148 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6149 ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
6151 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6152 ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
6154 case ICE_SW_TUN_IPV4_NAT_T:
6155 ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
6157 case ICE_SW_TUN_IPV4_L2TPV3:
6158 ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
6160 case ICE_SW_TUN_IPV4_ESP:
6161 ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
6163 case ICE_SW_TUN_IPV4_AH:
6164 ice_set_bit(ICE_PROFID_IPV4_AH, bm);
6166 case ICE_SW_TUN_AND_NON_TUN:
6168 prof_type = ICE_PROF_ALL;
/* Expand the selected profile type into the caller's bitmap. */
6172 ice_get_sw_fv_bitmap(hw, prof_type, bm);
6176 * ice_is_prof_rule - determine if rule type is a profile rule
6177 * @type: the rule type
6179 * if the rule type is a profile rule, that means that there no field value
6180 * match required, in this case just a profile hit is required.
6182 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
/* All PROFID-style tunnel types are profile-hit-only rules. */
6185 case ICE_SW_TUN_PROFID_IPV6_ESP:
6186 case ICE_SW_TUN_PROFID_IPV6_AH:
6187 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6188 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6189 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6190 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6191 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6192 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6202 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
6203 * @hw: pointer to hardware structure
6204 * @lkups: lookup elements or match criteria for the advanced recipe, one
6205 * structure per protocol header
6206 * @lkups_cnt: number of protocols
6207 * @rinfo: other information regarding the rule e.g. priority and action info
6208 * @rid: return the recipe ID of the recipe created
6210 static enum ice_status
6211 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6212 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
6214 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
6215 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
6216 struct ice_prot_lkup_ext *lkup_exts;
6217 struct ice_recp_grp_entry *r_entry;
6218 struct ice_sw_fv_list_entry *fvit;
6219 struct ice_recp_grp_entry *r_tmp;
6220 struct ice_sw_fv_list_entry *tmp;
6221 enum ice_status status = ICE_SUCCESS;
6222 struct ice_sw_recipe *rm;
6223 u16 match_tun_mask = 0;
/* A non-profile rule with no lookups has nothing to match on. */
6227 if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
6228 return ICE_ERR_PARAM;
6230 lkup_exts = (struct ice_prot_lkup_ext *)
6231 ice_malloc(hw, sizeof(*lkup_exts));
6233 return ICE_ERR_NO_MEMORY;
6235 /* Determine the number of words to be matched and if it exceeds a
6236 * recipe's restrictions
6238 for (i = 0; i < lkups_cnt; i++) {
6241 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
6242 status = ICE_ERR_CFG;
6243 goto err_free_lkup_exts;
6246 count = ice_fill_valid_words(&lkups[i], lkup_exts);
6248 status = ICE_ERR_CFG;
6249 goto err_free_lkup_exts;
6253 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
6255 status = ICE_ERR_NO_MEMORY;
6256 goto err_free_lkup_exts;
6259 /* Get field vectors that contain fields extracted from all the protocol
6260 * headers being programmed.
6262 INIT_LIST_HEAD(&rm->fv_list);
6263 INIT_LIST_HEAD(&rm->rg_list);
6265 /* Get bitmap of field vectors (profiles) that are compatible with the
6266 * rule request; only these will be searched in the subsequent call to
6269 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
6271 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
6275 /* Group match words into recipes using preferred recipe grouping
6278 status = ice_create_recipe_group(hw, rm, lkup_exts);
6282 /* For certain tunnel types it is necessary to use a metadata ID flag to
6283 * differentiate different tunnel types. A separate recipe needs to be
6284 * used for the metadata.
6286 if (ice_tun_type_match_word(rinfo->tun_type, &mask) &&
6287 rm->n_grp_count > 1)
6288 match_tun_mask = mask;
6290 /* set the recipe priority if specified */
6291 rm->priority = (u8)rinfo->priority;
6293 /* Find offsets from the field vector. Pick the first one for all the
6296 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
6300 /* An empty FV list means to use all the profiles returned in the
6303 if (LIST_EMPTY(&rm->fv_list)) {
6306 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++)
6307 if (ice_is_bit_set(fv_bitmap, j)) {
6308 struct ice_sw_fv_list_entry *fvl;
6310 fvl = (struct ice_sw_fv_list_entry *)
6311 ice_malloc(hw, sizeof(*fvl));
6315 fvl->profile_id = j;
6316 LIST_ADD(&fvl->list_entry, &rm->fv_list);
6320 /* get bitmap of all profiles the recipe will be associated with */
6321 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
6322 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
6324 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
6325 ice_set_bit((u16)fvit->profile_id, profiles);
6328 /* Create any special protocol/offset pairs, such as looking at tunnel
6329 * bits by extracting metadata
6331 status = ice_add_special_words(rinfo, lkup_exts);
6333 goto err_free_lkup_exts;
6335 /* Look for a recipe which matches our requested fv / mask list */
6336 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
6337 if (*rid < ICE_MAX_NUM_RECIPES)
6338 /* Success if found a recipe that match the existing criteria */
6341 rm->tun_type = rinfo->tun_type;
6342 /* Recipe we need does not exist, add a recipe */
6343 status = ice_add_sw_recipe(hw, rm, match_tun_mask, profiles);
6347 /* Associate all the recipes created with all the profiles in the
6348 * common field vector.
6350 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
6352 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Read-modify-write: merge the new recipe bits into the
 * profile's existing recipe association under the change lock.
 */
6355 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
6356 (u8 *)r_bitmap, NULL);
6360 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
6361 ICE_MAX_NUM_RECIPES);
6362 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
6366 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
6369 ice_release_change_lock(hw);
6374 /* Update profile to recipe bitmap array */
6375 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
6376 ICE_MAX_NUM_RECIPES);
6378 /* Update recipe to profile bitmap array */
6379 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
6380 if (ice_is_bit_set(r_bitmap, j))
6381 ice_set_bit((u16)fvit->profile_id,
6382 recipe_to_profile[j]);
6385 *rid = rm->root_rid;
6386 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
6387 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
/* Cleanup: release the recipe-group entries and FV list built above. */
6389 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
6390 ice_recp_grp_entry, l_entry) {
6391 LIST_DEL(&r_entry->l_entry);
6392 ice_free(hw, r_entry);
6395 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
6397 LIST_DEL(&fvit->list_entry);
6402 ice_free(hw, rm->root_buf);
6407 ice_free(hw, lkup_exts);
6413 * ice_find_dummy_packet - find dummy packet by tunnel type
6415 * @lkups: lookup elements or match criteria for the advanced recipe, one
6416 * structure per protocol header
6417 * @lkups_cnt: number of protocols
6418 * @tun_type: tunnel type from the match criteria
6419 * @pkt: dummy packet to fill according to filter match criteria
6420 * @pkt_len: packet length of dummy packet
6421 * @offsets: pointer to receive the pointer to the offsets for the packet
6424 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6425 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
6427 const struct ice_dummy_pkt_offsets **offsets)
6429 bool tcp = false, udp = false, ipv6 = false, vlan = false;
/* First pass: inspect the lookups to learn which header flavors (UDP/TCP,
 * IPv6, VLAN, NVGRE-over-IPv4, PPPoE-carrying-IPv6) the rule matches on.
 */
6433 for (i = 0; i < lkups_cnt; i++) {
6434 if (lkups[i].type == ICE_UDP_ILOS)
6436 else if (lkups[i].type == ICE_TCP_IL)
6438 else if (lkups[i].type == ICE_IPV6_OFOS)
6440 else if (lkups[i].type == ICE_VLAN_OFOS)
6442 else if (lkups[i].type == ICE_IPV4_OFOS &&
6443 lkups[i].h_u.ipv4_hdr.protocol ==
6444 ICE_IPV4_NVGRE_PROTO_ID &&
6445 lkups[i].m_u.ipv4_hdr.protocol ==
6448 else if (lkups[i].type == ICE_PPPOE &&
6449 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
6450 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
6451 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
6454 else if (lkups[i].type == ICE_ETYPE_OL &&
6455 lkups[i].h_u.ethertype.ethtype_id ==
6456 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
6457 lkups[i].m_u.ethertype.ethtype_id ==
/* Second pass: select the template packet + offset table. Specific tunnel
 * types are handled first; the generic TCP/UDP/IPv6/VLAN fallbacks follow.
 */
6462 if (tun_type == ICE_SW_TUN_IPV4_ESP) {
6463 *pkt = dummy_ipv4_esp_pkt;
6464 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
6465 *offsets = dummy_ipv4_esp_packet_offsets;
6469 if (tun_type == ICE_SW_TUN_IPV6_ESP) {
6470 *pkt = dummy_ipv6_esp_pkt;
6471 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
6472 *offsets = dummy_ipv6_esp_packet_offsets;
6476 if (tun_type == ICE_SW_TUN_IPV4_AH) {
6477 *pkt = dummy_ipv4_ah_pkt;
6478 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
6479 *offsets = dummy_ipv4_ah_packet_offsets;
6483 if (tun_type == ICE_SW_TUN_IPV6_AH) {
6484 *pkt = dummy_ipv6_ah_pkt;
6485 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
6486 *offsets = dummy_ipv6_ah_packet_offsets;
6490 if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
6491 *pkt = dummy_ipv4_nat_pkt;
6492 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
6493 *offsets = dummy_ipv4_nat_packet_offsets;
6497 if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
6498 *pkt = dummy_ipv6_nat_pkt;
6499 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
6500 *offsets = dummy_ipv6_nat_packet_offsets;
6504 if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
6505 *pkt = dummy_ipv4_l2tpv3_pkt;
6506 *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
6507 *offsets = dummy_ipv4_l2tpv3_packet_offsets;
6511 if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
6512 *pkt = dummy_ipv6_l2tpv3_pkt;
6513 *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
6514 *offsets = dummy_ipv6_l2tpv3_packet_offsets;
6518 if (tun_type == ICE_SW_TUN_GTP) {
6519 *pkt = dummy_udp_gtp_packet;
6520 *pkt_len = sizeof(dummy_udp_gtp_packet);
6521 *offsets = dummy_udp_gtp_packet_offsets;
6525 if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
6526 *pkt = dummy_pppoe_ipv6_packet;
6527 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
6528 *offsets = dummy_pppoe_packet_offsets;
6530 } else if (tun_type == ICE_SW_TUN_PPPOE) {
6531 *pkt = dummy_pppoe_ipv4_packet;
6532 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
6533 *offsets = dummy_pppoe_packet_offsets;
6537 if (tun_type == ICE_ALL_TUNNELS) {
6538 *pkt = dummy_gre_udp_packet;
6539 *pkt_len = sizeof(dummy_gre_udp_packet);
6540 *offsets = dummy_gre_udp_packet_offsets;
6544 if (tun_type == ICE_SW_TUN_NVGRE || gre) {
6546 *pkt = dummy_gre_tcp_packet;
6547 *pkt_len = sizeof(dummy_gre_tcp_packet);
6548 *offsets = dummy_gre_tcp_packet_offsets;
6552 *pkt = dummy_gre_udp_packet;
6553 *pkt_len = sizeof(dummy_gre_udp_packet);
6554 *offsets = dummy_gre_udp_packet_offsets;
6558 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
6559 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
6560 tun_type == ICE_SW_TUN_GENEVE_VLAN ||
6561 tun_type == ICE_SW_TUN_VXLAN_VLAN) {
6563 *pkt = dummy_udp_tun_tcp_packet;
6564 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
6565 *offsets = dummy_udp_tun_tcp_packet_offsets;
6569 *pkt = dummy_udp_tun_udp_packet;
6570 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
6571 *offsets = dummy_udp_tun_udp_packet_offsets;
6577 *pkt = dummy_vlan_udp_packet;
6578 *pkt_len = sizeof(dummy_vlan_udp_packet);
6579 *offsets = dummy_vlan_udp_packet_offsets;
6582 *pkt = dummy_udp_packet;
6583 *pkt_len = sizeof(dummy_udp_packet);
6584 *offsets = dummy_udp_packet_offsets;
6586 } else if (udp && ipv6) {
6588 *pkt = dummy_vlan_udp_ipv6_packet;
6589 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
6590 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
6593 *pkt = dummy_udp_ipv6_packet;
6594 *pkt_len = sizeof(dummy_udp_ipv6_packet);
6595 *offsets = dummy_udp_ipv6_packet_offsets;
/* NOTE(review): "(tcp && ipv6) || ipv6" reduces to just "ipv6" - the tcp
 * term is redundant here since non-ipv6 tcp falls through to the final arm.
 */
6597 } else if ((tcp && ipv6) || ipv6) {
6599 *pkt = dummy_vlan_tcp_ipv6_packet;
6600 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
6601 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
6604 *pkt = dummy_tcp_ipv6_packet;
6605 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
6606 *offsets = dummy_tcp_ipv6_packet_offsets;
6611 *pkt = dummy_vlan_tcp_packet;
6612 *pkt_len = sizeof(dummy_vlan_tcp_packet);
6613 *offsets = dummy_vlan_tcp_packet_offsets;
6615 *pkt = dummy_tcp_packet;
6616 *pkt_len = sizeof(dummy_tcp_packet);
6617 *offsets = dummy_tcp_packet_offsets;
6622 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
6624 * @lkups: lookup elements or match criteria for the advanced recipe, one
6625 * structure per protocol header
6626 * @lkups_cnt: number of protocols
6627 * @s_rule: stores rule information from the match criteria
6628 * @dummy_pkt: dummy packet to fill according to filter match criteria
6629 * @pkt_len: packet length of dummy packet
6630 * @offsets: offset info for the dummy packet
6632 static enum ice_status
6633 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6634 struct ice_aqc_sw_rules_elem *s_rule,
6635 const u8 *dummy_pkt, u16 pkt_len,
6636 const struct ice_dummy_pkt_offsets *offsets)
6641 /* Start with a packet with a pre-defined/dummy content. Then, fill
6642 * in the header values to be looked up or matched.
6644 pkt = s_rule->pdata.lkup_tx_rx.hdr;
6646 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
6648 for (i = 0; i < lkups_cnt; i++) {
6649 enum ice_protocol_type type;
6650 u16 offset = 0, len = 0, j;
6653 /* find the start of this layer; it should be found since this
6654 * was already checked when search for the dummy packet
6656 type = lkups[i].type;
6657 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
6658 if (type == offsets[j].type) {
6659 offset = offsets[j].offset;
6664 /* this should never happen in a correct calling sequence */
6666 return ICE_ERR_PARAM;
/* Map the lookup type to the byte length of its header struct so we
 * know how much of the dummy packet this lookup may overwrite.
 */
6668 switch (lkups[i].type) {
6671 len = sizeof(struct ice_ether_hdr);
6674 len = sizeof(struct ice_ethtype_hdr);
6677 len = sizeof(struct ice_vlan_hdr);
6681 len = sizeof(struct ice_ipv4_hdr);
6685 len = sizeof(struct ice_ipv6_hdr);
6690 len = sizeof(struct ice_l4_hdr);
6693 len = sizeof(struct ice_sctp_hdr);
6696 len = sizeof(struct ice_nvgre);
6701 len = sizeof(struct ice_udp_tnl_hdr);
6705 len = sizeof(struct ice_udp_gtp_hdr);
6708 len = sizeof(struct ice_pppoe_hdr);
6711 len = sizeof(struct ice_esp_hdr);
6714 len = sizeof(struct ice_nat_t_hdr);
6717 len = sizeof(struct ice_ah_hdr);
6720 len = sizeof(struct ice_l2tpv3_sess_hdr);
6723 return ICE_ERR_PARAM;
6726 /* the length should be a word multiple */
6727 if (len % ICE_BYTES_PER_WORD)
6730 /* We have the offset to the header start, the length, the
6731 * caller's header values and mask. Use this information to
6732 * copy the data into the dummy packet appropriately based on
6733 * the mask. Note that we need to only write the bits as
6734 * indicated by the mask to make sure we don't improperly write
6735 * over any significant packet data.
/* NOTE(review): the u16-granularity masked merge casts both the lookup
 * union and pkt+offset to u16* - presumably offsets are always 2-byte
 * aligned in the dummy packets; confirm against the offset tables.
 */
6737 for (j = 0; j < len / sizeof(u16); j++)
6738 if (((u16 *)&lkups[i].m_u)[j])
6739 ((u16 *)(pkt + offset))[j] =
6740 (((u16 *)(pkt + offset))[j] &
6741 ~((u16 *)&lkups[i].m_u)[j]) |
6742 (((u16 *)&lkups[i].h_u)[j] &
6743 ((u16 *)&lkups[i].m_u)[j]);
6746 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
6752 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
6753 * @hw: pointer to the hardware structure
6754 * @tun_type: tunnel type
6755 * @pkt: dummy packet to fill in
6756 * @offsets: offset info for the dummy packet
6758 static enum ice_status
6759 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
6760 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
/* Resolve the currently-open UDP tunnel port for the tunnel family. */
6765 case ICE_SW_TUN_AND_NON_TUN:
6766 case ICE_SW_TUN_VXLAN_GPE:
6767 case ICE_SW_TUN_VXLAN:
6768 case ICE_SW_TUN_VXLAN_VLAN:
6769 case ICE_SW_TUN_UDP:
6770 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
6774 case ICE_SW_TUN_GENEVE:
6775 case ICE_SW_TUN_GENEVE_VLAN:
6776 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
6781 /* Nothing needs to be done for this tunnel type */
6785 /* Find the outer UDP protocol header and insert the port number */
6786 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
6787 if (offsets[i].type == ICE_UDP_OF) {
6788 struct ice_l4_hdr *hdr;
6791 offset = offsets[i].offset;
6792 hdr = (struct ice_l4_hdr *)&pkt[offset];
6793 hdr->dst_port = CPU_TO_BE16(open_port);
6803 * ice_find_adv_rule_entry - Search a rule entry
6804 * @hw: pointer to the hardware structure
6805 * @lkups: lookup elements or match criteria for the advanced recipe, one
6806 * structure per protocol header
6807 * @lkups_cnt: number of protocols
6808 * @recp_id: recipe ID for which we are finding the rule
6809 * @rinfo: other information regarding the rule e.g. priority and action info
6811 * Helper function to search for a given advance rule entry
6812 * Returns pointer to entry storing the rule if found
6814 static struct ice_adv_fltr_mgmt_list_entry *
6815 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6816 u16 lkups_cnt, u16 recp_id,
6817 struct ice_adv_rule_info *rinfo)
6819 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6820 struct ice_switch_info *sw = hw->switch_info;
/* Linear scan of the recipe's filter-rule list; a candidate must match
 * on lookup count, each lookup element (memcmp), and the rule info
 * fields compared below.
 */
6823 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
6824 ice_adv_fltr_mgmt_list_entry, list_entry) {
6825 bool lkups_matched = true;
6827 if (lkups_cnt != list_itr->lkups_cnt)
6829 for (i = 0; i < list_itr->lkups_cnt; i++)
6830 if (memcmp(&list_itr->lkups[i], &lkups[i],
6832 lkups_matched = false;
/* NOTE(review): memcmp over the whole lkup element compares any union
 * padding too - assumes both sides were zero-initialized; confirm.
 */
6835 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
6836 rinfo->tun_type == list_itr->rule_info.tun_type &&
6844 * ice_adv_add_update_vsi_list
6845 * @hw: pointer to the hardware structure
6846 * @m_entry: pointer to current adv filter management list entry
6847 * @cur_fltr: filter information from the book keeping entry
6848 * @new_fltr: filter information with the new VSI to be added
6850 * Call AQ command to add or update previously created VSI list with new VSI.
6852 * Helper function to do book keeping associated with adding filter information
6853 * The algorithm to do the bookkeeping is described below:
6854 * When a VSI needs to subscribe to a given advanced filter
6855 * if only one VSI has been added till now
6856 * Allocate a new VSI list and add two VSIs
6857 * to this list using switch rule command
6858 * Update the previously created switch rule with the
6859 * newly created VSI list ID
6860 * if a VSI list was previously created
6861 * Add the new VSI to the previously created VSI list set
6862 * using the update switch rule command
/* ice_adv_add_update_vsi_list - subscribe an additional VSI to an adv filter
 * @hw: pointer to the hardware structure
 * @m_entry: bookkeeping entry for the existing advanced filter
 * @cur_fltr: rule info currently programmed (updated in place on success)
 * @new_fltr: rule info carrying the VSI to be added
 *
 * First-time sharing (single VSI, no list yet) creates a two-entry VSI list
 * and repoints the existing switch rule at it; otherwise the new VSI is
 * added to the already-existing VSI list.
 */
6864 static enum ice_status
6865 ice_adv_add_update_vsi_list(struct ice_hw *hw,
6866 struct ice_adv_fltr_mgmt_list_entry *m_entry,
6867 struct ice_adv_rule_info *cur_fltr,
6868 struct ice_adv_rule_info *new_fltr)
6870 enum ice_status status;
6871 u16 vsi_list_id = 0;
/* queue/queue-group/drop actions cannot be folded into a VSI list */
6873 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6874 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6875 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
6876 return ICE_ERR_NOT_IMPL;
/* mixing a queue action with an existing VSI forward is unsupported */
6878 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6879 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
6880 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6881 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
6882 return ICE_ERR_NOT_IMPL;
6884 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
6885 /* Only one entry existed in the mapping and it was not already
6886 * a part of a VSI list. So, create a VSI list with the old and
6889 struct ice_fltr_info tmp_fltr;
6890 u16 vsi_handle_arr[2];
6892 /* A rule already exists with the new VSI being added */
6893 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
6894 new_fltr->sw_act.fwd_id.hw_vsi_id)
6895 return ICE_ERR_ALREADY_EXISTS;
6897 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
6898 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
6899 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
6905 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
6906 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
6907 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
6908 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
6909 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
6910 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
6912 /* Update the previous switch rule of "forward to VSI" to
6915 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* record the new forwarding target in the bookkeeping copy too */
6919 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
6920 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
6921 m_entry->vsi_list_info =
6922 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
6925 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
6927 if (!m_entry->vsi_list_info)
6930 /* A rule already exists with the new VSI being added */
6931 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
6934 /* Update the previously created VSI list set with
6935 * the new VSI ID passed in
6937 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
6939 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
6941 ice_aqc_opc_update_sw_rules,
6943 /* update VSI list mapping info with new VSI ID */
6945 ice_set_bit(vsi_handle,
6946 m_entry->vsi_list_info->vsi_map);
/* track subscriber count so removal knows when to dissolve the list */
6949 m_entry->vsi_count++;
6954 * ice_add_adv_rule - helper function to create an advanced switch rule
6955 * @hw: pointer to the hardware structure
6956 * @lkups: information on the words that needs to be looked up. All words
6957 * together makes one recipe
6958 * @lkups_cnt: num of entries in the lkups array
6959 * @rinfo: other information related to the rule that needs to be programmed
6960 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
6961 * ignored in case of error.
6963 * This function can program only 1 rule at a time. The lkups is used to
6964 * describe the all the words that forms the "lookup" portion of the recipe.
6965 * These words can span multiple protocols. Callers to this function need to
6966 * pass in a list of protocol headers with lookup information along and mask
6967 * that determines which words are valid from the given protocol header.
6968 * rinfo describes other information related to this rule such as forwarding
6969 * IDs, priority of this rule, etc.
/* ice_add_adv_rule - program one advanced switch rule
 * @hw: pointer to the hardware structure
 * @lkups: protocol headers with lookup values and masks (one recipe's words)
 * @lkups_cnt: number of entries in @lkups
 * @rinfo: rule action/priority/tunnel information
 * @added_entry: on success, receives recipe ID, rule ID and VSI handle
 *
 * Validates the request, finds/creates the recipe, builds the dummy packet,
 * issues the add-switch-rules AQ command and records the rule in the
 * bookkeeping list. If an identical rule already exists, the VSI is instead
 * added to that rule's VSI list.
 */
6972 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6973 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
6974 struct ice_rule_query_data *added_entry)
6976 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
6977 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
6978 const struct ice_dummy_pkt_offsets *pkt_offsets;
6979 struct ice_aqc_sw_rules_elem *s_rule = NULL;
6980 struct LIST_HEAD_TYPE *rule_head;
6981 struct ice_switch_info *sw;
6982 enum ice_status status;
6983 const u8 *pkt = NULL;
6989 /* Initialize profile to result index bitmap */
6990 if (!hw->switch_info->prof_res_bm_init) {
6991 hw->switch_info->prof_res_bm_init = 1;
6992 ice_init_prof_result_bm(hw);
/* profile rules need no lookup words; everything else requires at least one */
6995 prof_rule = ice_is_prof_rule(rinfo->tun_type);
6996 if (!prof_rule && !lkups_cnt)
6997 return ICE_ERR_PARAM;
6999 /* get # of words we need to match */
7001 for (i = 0; i < lkups_cnt; i++) {
7004 ptr = (u16 *)&lkups[i].m_u;
7005 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
7011 if (word_cnt > ICE_MAX_CHAIN_WORDS)
7012 return ICE_ERR_PARAM;
7014 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
7015 return ICE_ERR_PARAM;
7018 /* make sure that we can locate a dummy packet */
7019 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
7022 status = ICE_ERR_PARAM;
7023 goto err_ice_add_adv_rule;
/* only these four actions are supported for advanced rules */
7026 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
7027 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
7028 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
7029 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
7032 vsi_handle = rinfo->sw_act.vsi_handle;
7033 if (!ice_is_vsi_valid(hw, vsi_handle))
7034 return ICE_ERR_PARAM;
7036 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
7037 rinfo->sw_act.fwd_id.hw_vsi_id =
7038 ice_get_hw_vsi_num(hw, vsi_handle);
7039 if (rinfo->sw_act.flag & ICE_FLTR_TX)
7040 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
7042 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
7045 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
7047 /* we have to add VSI to VSI_LIST and increment vsi_count.
7048 * Also Update VSI list so that we can change forwarding rule
7049 * if the rule already exists, we will check if it exists with
7050 * same vsi_id, if not then add it to the VSI list if it already
7051 * exists if not then create a VSI list and add the existing VSI
7052 * ID and the new VSI ID to the list
7053 * We will add that VSI to the list
7055 status = ice_adv_add_update_vsi_list(hw, m_entry,
7056 &m_entry->rule_info,
7059 added_entry->rid = rid;
7060 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
7061 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* new rule: allocate buffer sized for header + dummy packet */
7065 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
7066 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
7068 return ICE_ERR_NO_MEMORY;
7069 act |= ICE_SINGLE_ACT_LAN_ENABLE;
7070 switch (rinfo->sw_act.fltr_act) {
7071 case ICE_FWD_TO_VSI:
7072 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
7073 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
7074 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
7077 act |= ICE_SINGLE_ACT_TO_Q;
7078 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
7079 ICE_SINGLE_ACT_Q_INDEX_M;
7081 case ICE_FWD_TO_QGRP:
/* queue region size is encoded as log2 of the group size */
7082 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
7083 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
7084 act |= ICE_SINGLE_ACT_TO_Q;
7085 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
7086 ICE_SINGLE_ACT_Q_INDEX_M;
7087 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
7088 ICE_SINGLE_ACT_Q_REGION_M;
7090 case ICE_DROP_PACKET:
7091 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
7092 ICE_SINGLE_ACT_VALID_BIT;
7095 status = ICE_ERR_CFG;
7096 goto err_ice_add_adv_rule;
7099 /* set the rule LOOKUP type based on caller specified 'RX'
7100 * instead of hardcoding it to be either LOOKUP_TX/RX
7102 * for 'RX' set the source to be the port number
7103 * for 'TX' set the source to be the source HW VSI number (determined
7107 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
7108 s_rule->pdata.lkup_tx_rx.src =
7109 CPU_TO_LE16(hw->port_info->lport);
7111 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
7112 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
7115 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
7116 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
7118 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
7119 pkt_len, pkt_offsets);
7121 goto err_ice_add_adv_rule;
/* tunnel rules additionally need the open tunnel port patched in */
7123 if (rinfo->tun_type != ICE_NON_TUN &&
7124 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
7125 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
7126 s_rule->pdata.lkup_tx_rx.hdr,
7129 goto err_ice_add_adv_rule;
7132 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
7133 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
7136 goto err_ice_add_adv_rule;
7137 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
7138 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
7140 status = ICE_ERR_NO_MEMORY;
7141 goto err_ice_add_adv_rule;
/* keep a private copy of the lookups for later match/removal/replay */
7144 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
7145 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
7146 ICE_NONDMA_TO_NONDMA);
7147 if (!adv_fltr->lkups && !prof_rule) {
7148 status = ICE_ERR_NO_MEMORY;
7149 goto err_ice_add_adv_rule;
7152 adv_fltr->lkups_cnt = lkups_cnt;
7153 adv_fltr->rule_info = *rinfo;
/* rule ID is returned by firmware in the AQ response buffer */
7154 adv_fltr->rule_info.fltr_rule_id =
7155 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
7156 sw = hw->switch_info;
7157 sw->recp_list[rid].adv_rule = true;
7158 rule_head = &sw->recp_list[rid].filt_rules;
7160 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
7161 adv_fltr->vsi_count = 1;
7163 /* Add rule entry to book keeping list */
7164 LIST_ADD(&adv_fltr->list_entry, rule_head);
7166 added_entry->rid = rid;
7167 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
7168 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* common cleanup: free partial bookkeeping state on failure, always
 * free the temporary AQ rule buffer
 */
7170 err_ice_add_adv_rule:
7171 if (status && adv_fltr) {
7172 ice_free(hw, adv_fltr->lkups);
7173 ice_free(hw, adv_fltr);
7176 ice_free(hw, s_rule);
7182 * ice_adv_rem_update_vsi_list
7183 * @hw: pointer to the hardware structure
7184 * @vsi_handle: VSI handle of the VSI to remove
7185 * @fm_list: filter management entry for which the VSI list management needs to
/* ice_adv_rem_update_vsi_list - unsubscribe a VSI from an adv filter's list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry whose VSI list is being shrunk
 *
 * Removes @vsi_handle from the rule's VSI list. When exactly one subscriber
 * remains, the rule is converted back to a plain forward-to-VSI rule and the
 * now-unneeded VSI list is destroyed.
 */
7188 static enum ice_status
7189 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
7190 struct ice_adv_fltr_mgmt_list_entry *fm_list)
7192 struct ice_vsi_list_map_info *vsi_list_info;
7193 enum ice_sw_lkup_type lkup_type;
7194 enum ice_status status;
/* only VSI-list rules with at least one subscriber can be shrunk */
7197 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
7198 fm_list->vsi_count == 0)
7199 return ICE_ERR_PARAM;
7201 /* A rule with the VSI being removed does not exist */
7202 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
7203 return ICE_ERR_DOES_NOT_EXIST;
7205 lkup_type = ICE_SW_LKUP_LAST;
7206 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
7207 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
7208 ice_aqc_opc_update_sw_rules,
7213 fm_list->vsi_count--;
7214 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
7215 vsi_list_info = fm_list->vsi_list_info;
7216 if (fm_list->vsi_count == 1) {
7217 struct ice_fltr_info tmp_fltr;
/* find the sole remaining subscriber in the bitmap */
7220 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
7222 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
7223 return ICE_ERR_OUT_OF_RANGE;
7225 /* Make sure VSI list is empty before removing it below */
7226 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
7228 ice_aqc_opc_update_sw_rules,
7233 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
7234 tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
7235 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
7236 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
7237 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
7238 tmp_fltr.fwd_id.hw_vsi_id =
7239 ice_get_hw_vsi_num(hw, rem_vsi_handle);
7240 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
7241 ice_get_hw_vsi_num(hw, rem_vsi_handle);
7243 /* Convert the previous switch rule of "fwd to VSI list" back to
7244 * "forward to VSI" for the last remaining subscriber
7246 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
7248 ice_debug(hw, ICE_DBG_SW,
7249 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
7250 tmp_fltr.fwd_id.hw_vsi_id, status);
7254 /* Remove the VSI list since it is no longer used */
7255 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
7257 ice_debug(hw, ICE_DBG_SW,
7258 "Failed to remove VSI list %d, error %d\n",
7259 vsi_list_id, status);
/* drop the bookkeeping mapping for the destroyed list */
7263 LIST_DEL(&vsi_list_info->list_entry);
7264 ice_free(hw, vsi_list_info);
7265 fm_list->vsi_list_info = NULL;
7272 * ice_rem_adv_rule - removes existing advanced switch rule
7273 * @hw: pointer to the hardware structure
7274 * @lkups: information on the words that needs to be looked up. All words
7275 * together makes one recipe
7276 * @lkups_cnt: num of entries in the lkups array
7277 * @rinfo: pointer to the rule information for the rule
7279 * This function can be used to remove 1 rule at a time. The lkups is
7280 * used to describe all the words that forms the "lookup" portion of the
7281 * rule. These words can span multiple protocols. Callers to this function
7282 * need to pass in a list of protocol headers with lookup information along
7283 * and mask that determines which words are valid from the given protocol
7284 * header. rinfo describes other information related to this rule such as
7285 * forwarding IDs, priority of this rule, etc.
/* ice_rem_adv_rule - remove one advanced switch rule
 * @hw: pointer to the hardware structure
 * @lkups: lookup words describing the rule to remove
 * @lkups_cnt: number of entries in @lkups
 * @rinfo: rule information (action, VSI, tunnel type)
 *
 * Re-derives the recipe from the lookups, finds the matching bookkeeping
 * entry, shrinks its VSI list if shared, and only issues the
 * remove-switch-rules AQ command once no VSI references the rule.
 */
7288 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7289 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
7291 struct ice_adv_fltr_mgmt_list_entry *list_elem;
7292 struct ice_prot_lkup_ext lkup_exts;
7293 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
7294 enum ice_status status = ICE_SUCCESS;
7295 bool remove_rule = false;
7296 u16 i, rid, vsi_handle;
7298 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
7299 for (i = 0; i < lkups_cnt; i++) {
7302 if (lkups[i].type >= ICE_PROTOCOL_LAST)
7305 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
7310 /* Create any special protocol/offset pairs, such as looking at tunnel
7311 * bits by extracting metadata
7313 status = ice_add_special_words(rinfo, &lkup_exts);
7317 rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
7318 /* If did not find a recipe that match the existing criteria */
7319 if (rid == ICE_MAX_NUM_RECIPES)
7320 return ICE_ERR_PARAM;
7322 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
7323 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
7324 /* the rule is already removed */
7327 ice_acquire_lock(rule_lock);
7328 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
/* shared rule: drop this VSI's subscription instead of the rule */
7330 } else if (list_elem->vsi_count > 1) {
7331 list_elem->vsi_list_info->ref_cnt--;
7332 remove_rule = false;
7333 vsi_handle = rinfo->sw_act.vsi_handle;
7334 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
7336 vsi_handle = rinfo->sw_act.vsi_handle;
7337 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
7339 ice_release_lock(rule_lock);
7342 if (list_elem->vsi_count == 0)
7345 ice_release_lock(rule_lock);
7347 struct ice_aqc_sw_rules_elem *s_rule;
/* no packet header needed to remove a rule, only its index */
7350 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
7352 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
7355 return ICE_ERR_NO_MEMORY;
7356 s_rule->pdata.lkup_tx_rx.act = 0;
7357 s_rule->pdata.lkup_tx_rx.index =
7358 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
7359 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
7360 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
7362 ice_aqc_opc_remove_sw_rules, NULL);
/* DOES_NOT_EXIST means FW already forgot the rule; still clean up */
7363 if (status == ICE_SUCCESS || status == ICE_ERR_DOES_NOT_EXIST) {
7364 ice_acquire_lock(rule_lock);
7365 LIST_DEL(&list_elem->list_entry);
7366 ice_free(hw, list_elem->lkups);
7367 ice_free(hw, list_elem);
7368 ice_release_lock(rule_lock);
7370 ice_free(hw, s_rule);
7376 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
7377 * @hw: pointer to the hardware structure
7378 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
7380 * This function is used to remove 1 rule at a time. The removal is based on
7381 * the remove_entry parameter. This function will remove rule for a given
7382 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
/* Looks up the rule by its firmware rule ID within the given recipe's
 * bookkeeping list and delegates the actual removal to ice_rem_adv_rule()
 * with the stored lookups. Returns ICE_ERR_PARAM if the recipe was never
 * created or no rule with that ID is found.
 */
7385 ice_rem_adv_rule_by_id(struct ice_hw *hw,
7386 struct ice_rule_query_data *remove_entry)
7388 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7389 struct LIST_HEAD_TYPE *list_head;
7390 struct ice_adv_rule_info rinfo;
7391 struct ice_switch_info *sw;
7393 sw = hw->switch_info;
7394 if (!sw->recp_list[remove_entry->rid].recp_created)
7395 return ICE_ERR_PARAM;
7396 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
7397 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
7399 if (list_itr->rule_info.fltr_rule_id ==
7400 remove_entry->rule_id) {
/* use a copy of the stored rule info, but target the caller's VSI */
7401 rinfo = list_itr->rule_info;
7402 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
7403 return ice_rem_adv_rule(hw, list_itr->lkups,
7404 list_itr->lkups_cnt, &rinfo);
7407 return ICE_ERR_PARAM;
7411 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
7413 * @hw: pointer to the hardware structure
7414 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
7416 * This function is used to remove all the rules for a given VSI and as soon
7417 * as removing a rule fails, it will return immediately with the error code,
7418 * else it will return ICE_SUCCESS
/* Walks every created recipe that carries advanced rules and removes each
 * rule associated with @vsi_handle. Stops and returns on the first removal
 * failure; returns ICE_SUCCESS when all removals succeed.
 */
7420 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
7422 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7423 struct ice_vsi_list_map_info *map_info;
7424 struct LIST_HEAD_TYPE *list_head;
7425 struct ice_adv_rule_info rinfo;
7426 struct ice_switch_info *sw;
7427 enum ice_status status;
7428 u16 vsi_list_id = 0;
7431 sw = hw->switch_info;
7432 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
/* skip recipes that are not created or hold only non-advanced rules */
7433 if (!sw->recp_list[rid].recp_created)
7435 if (!sw->recp_list[rid].adv_rule)
7437 list_head = &sw->recp_list[rid].filt_rules;
7439 LIST_FOR_EACH_ENTRY(list_itr, list_head,
7440 ice_adv_fltr_mgmt_list_entry, list_entry) {
7441 map_info = ice_find_vsi_list_entry(&sw->recp_list[rid],
/* remove with the stored rule info, retargeted to this VSI */
7446 rinfo = list_itr->rule_info;
7447 rinfo.sw_act.vsi_handle = vsi_handle;
7448 status = ice_rem_adv_rule(hw, list_itr->lkups,
7449 list_itr->lkups_cnt, &rinfo);
7459 * ice_replay_fltr - Replay all the filters stored by a specific list head
7460 * @hw: pointer to the hardware structure
7461 * @list_head: list for which filters needs to be replayed
7462 * @recp_id: Recipe ID for which rules need to be replayed
/* ice_replay_fltr - re-program all filters held in one bookkeeping list
 * @hw: pointer to the hardware structure
 * @recp_id: recipe ID the rules belong to
 * @list_head: list of filters to replay
 *
 * The list is detached onto a temporary head so that re-adding the same
 * filters does not trip the "already exists" check; multi-VSI filters are
 * replayed one VSI at a time.
 */
7464 static enum ice_status
7465 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
7467 struct ice_fltr_mgmt_list_entry *itr;
7468 enum ice_status status = ICE_SUCCESS;
7469 struct ice_sw_recipe *recp_list;
7470 u8 lport = hw->port_info->lport;
7471 struct LIST_HEAD_TYPE l_head;
7473 if (LIST_EMPTY(list_head))
7476 recp_list = &hw->switch_info->recp_list[recp_id];
7477 /* Move entries from the given list_head to a temporary l_head so that
7478 * they can be replayed. Otherwise when trying to re-add the same
7479 * filter, the function will return already exists
7481 LIST_REPLACE_INIT(list_head, &l_head);
7483 /* Mark the given list_head empty by reinitializing it so filters
7484 * could be added again by *handler
7486 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
7488 struct ice_fltr_list_entry f_entry;
7490 f_entry.fltr_info = itr->fltr_info;
/* single-VSI non-VLAN filters can be re-added directly */
7491 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
7492 status = ice_add_rule_internal(hw, recp_list, lport,
7494 if (status != ICE_SUCCESS)
7499 /* Add a filter per VSI separately */
7504 ice_find_first_bit(itr->vsi_list_info->vsi_map,
7506 if (!ice_is_vsi_valid(hw, vsi_handle))
/* consume this VSI from the map so the walk makes progress */
7509 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
7510 f_entry.fltr_info.vsi_handle = vsi_handle;
7511 f_entry.fltr_info.fwd_id.hw_vsi_id =
7512 ice_get_hw_vsi_num(hw, vsi_handle);
7513 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
7514 if (recp_id == ICE_SW_LKUP_VLAN)
7515 status = ice_add_vlan_internal(hw, recp_list,
7518 status = ice_add_rule_internal(hw, recp_list,
7521 if (status != ICE_SUCCESS)
7526 /* Clear the filter management list */
7527 ice_rem_sw_rule_info(hw, &l_head);
7532 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
7533 * @hw: pointer to the hardware structure
7535 * NOTE: This function does not clean up partially added filters on error.
7536 * It is up to caller of the function to issue a reset or fail early.
/* Replays the filter rules of every recipe. Stops at the first failure;
 * partially-replayed filters are NOT cleaned up here (caller must reset).
 */
7538 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
7540 struct ice_switch_info *sw = hw->switch_info;
7541 enum ice_status status = ICE_SUCCESS;
7544 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7545 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
7547 status = ice_replay_fltr(hw, i, head);
7548 if (status != ICE_SUCCESS)
7555 * ice_replay_vsi_fltr - Replay filters for requested VSI
7556 * @hw: pointer to the hardware structure
7557 * @pi: pointer to port information structure
7558 * @sw: pointer to switch info struct for which function replays filters
7559 * @vsi_handle: driver VSI handle
7560 * @recp_id: Recipe ID for which rules need to be replayed
7561 * @list_head: list for which filters need to be replayed
7563 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
7564 * It is required to pass valid VSI handle.
/* ice_replay_vsi_fltr - replay one recipe's filters for a single VSI
 * @hw: pointer to the hardware structure
 * @pi: pointer to port information structure
 * @sw: switch info struct whose filters are replayed
 * @vsi_handle: driver VSI handle (must be valid)
 * @recp_id: recipe ID whose rules are replayed
 * @list_head: list of filters to replay
 *
 * Only entries referencing @vsi_handle (directly or via their VSI-list
 * bitmap) are re-added; VSI-list membership is re-established by clearing
 * the bit and letting the add path set it again.
 */
7566 static enum ice_status
7567 ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
7568 struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
7569 struct LIST_HEAD_TYPE *list_head)
7571 struct ice_fltr_mgmt_list_entry *itr;
7572 enum ice_status status = ICE_SUCCESS;
7573 struct ice_sw_recipe *recp_list;
7576 if (LIST_EMPTY(list_head))
7578 recp_list = &sw->recp_list[recp_id];
7579 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
7581 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
7583 struct ice_fltr_list_entry f_entry;
7585 f_entry.fltr_info = itr->fltr_info;
/* single-VSI non-VLAN filter owned by this VSI: re-add directly */
7586 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
7587 itr->fltr_info.vsi_handle == vsi_handle) {
7588 /* update the src in case it is VSI num */
7589 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
7590 f_entry.fltr_info.src = hw_vsi_id;
7591 status = ice_add_rule_internal(hw, recp_list,
7594 if (status != ICE_SUCCESS)
/* skip entries whose VSI list does not include this VSI */
7598 if (!itr->vsi_list_info ||
7599 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
7601 /* Clearing it so that the logic can add it back */
7602 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
7603 f_entry.fltr_info.vsi_handle = vsi_handle;
7604 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
7605 /* update the src in case it is VSI num */
7606 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
7607 f_entry.fltr_info.src = hw_vsi_id;
7608 if (recp_id == ICE_SW_LKUP_VLAN)
7609 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
7611 status = ice_add_rule_internal(hw, recp_list,
7614 if (status != ICE_SUCCESS)
7622 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
7623 * @hw: pointer to the hardware structure
7624 * @vsi_handle: driver VSI handle
7625 * @list_head: list for which filters need to be replayed
7627 * Replay the advanced rule for the given VSI.
/* Replays every advanced rule in @list_head that targets @vsi_handle by
 * re-issuing ice_add_adv_rule() with the stored lookups and rule info.
 */
7629 static enum ice_status
7630 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
7631 struct LIST_HEAD_TYPE *list_head)
7633 struct ice_rule_query_data added_entry = { 0 };
7634 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
7635 enum ice_status status = ICE_SUCCESS;
7637 if (LIST_EMPTY(list_head))
7639 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
7641 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
7642 u16 lk_cnt = adv_fltr->lkups_cnt;
/* only replay rules that belong to the requested VSI */
7644 if (vsi_handle != rinfo->sw_act.vsi_handle)
7646 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
7655 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
7656 * @hw: pointer to the hardware structure
7657 * @pi: pointer to port information structure
7658 * @vsi_handle: driver VSI handle
7660 * Replays filters for requested VSI via vsi_handle.
/* Replays all replay-list filters for @vsi_handle across every recipe,
 * dispatching to the regular or advanced replay helper depending on
 * whether the recipe holds advanced rules. Stops on first failure.
 */
7663 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
7666 struct ice_switch_info *sw = hw->switch_info;
7667 enum ice_status status;
7670 /* Update the recipes that were created */
7671 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7672 struct LIST_HEAD_TYPE *head;
7674 head = &sw->recp_list[i].filt_replay_rules;
7675 if (!sw->recp_list[i].adv_rule)
7676 status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
7679 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
7680 if (status != ICE_SUCCESS)
7688 * ice_rm_sw_replay_rule_info - helper function to delete filter replay rules
7689 * @hw: pointer to the HW struct
7690 * @sw: pointer to switch info struct for which function removes filters
7692 * Deletes the filter replay rules for given switch
/* Frees the filter replay lists of every recipe in @sw, using the regular
 * or advanced cleanup helper depending on the recipe's rule kind.
 */
7694 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
7701 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7702 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
7703 struct LIST_HEAD_TYPE *l_head;
7705 l_head = &sw->recp_list[i].filt_replay_rules;
7706 if (!sw->recp_list[i].adv_rule)
7707 ice_rem_sw_rule_info(hw, l_head);
7709 ice_rem_adv_rule_info(hw, l_head);
7715 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
7716 * @hw: pointer to the HW struct
7718 * Deletes the filter replay rules.
/* Convenience wrapper: delete replay rules for the HW's own switch info. */
7720 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
7722 ice_rm_sw_replay_rule_info(hw, hw->switch_info);