1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020 Intel Corporation
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
12 #define ICE_MAX_VLAN_ID 0xFFF
13 #define ICE_IPV4_NVGRE_PROTO_ID 0x002F
14 #define ICE_PPP_IPV6_PROTO_ID 0x0057
15 #define ICE_IPV6_ETHER_ID 0x86DD
17 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
18 * struct to configure any switch filter rules.
19 * {DA (6 bytes), SA(6 bytes),
20 * Ether type (2 bytes for header without VLAN tag) OR
21 * VLAN tag (4 bytes for header with VLAN tag) }
23 * Word on Hardcoded values
24 * byte 0 = 0x2: to identify it as locally administered DA MAC
25 * byte 6 = 0x2: to identify it as locally administered SA MAC
26 * byte 12 = 0x81 & byte 13 = 0x00:
27 * In case of VLAN filter first two bytes defines ether type (0x8100)
28 * and remaining two bytes are placeholder for programming a given VLAN ID
29 * In case of Ether type filter it is treated as header without VLAN tag
30 * and byte 12 and 13 is used to program a given Ether type instead
32 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* Describes one protocol header inside a dummy packet template: the
 * protocol type and the byte offset at which that header begins.  An
 * array of these entries is terminated by a type of ICE_PROTOCOL_LAST.
 */
36 struct ice_dummy_pkt_offsets {
37 enum ice_protocol_type type;
38 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
41 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
44 { ICE_IPV4_OFOS, 14 },
49 { ICE_PROTOCOL_LAST, 0 },
52 static const u8 dummy_gre_tcp_packet[] = {
53 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
54 0x00, 0x00, 0x00, 0x00,
55 0x00, 0x00, 0x00, 0x00,
57 0x08, 0x00, /* ICE_ETYPE_OL 12 */
59 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
60 0x00, 0x00, 0x00, 0x00,
61 0x00, 0x2F, 0x00, 0x00,
62 0x00, 0x00, 0x00, 0x00,
63 0x00, 0x00, 0x00, 0x00,
65 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
66 0x00, 0x00, 0x00, 0x00,
68 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
69 0x00, 0x00, 0x00, 0x00,
70 0x00, 0x00, 0x00, 0x00,
73 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
74 0x00, 0x00, 0x00, 0x00,
75 0x00, 0x06, 0x00, 0x00,
76 0x00, 0x00, 0x00, 0x00,
77 0x00, 0x00, 0x00, 0x00,
79 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
80 0x00, 0x00, 0x00, 0x00,
81 0x00, 0x00, 0x00, 0x00,
82 0x50, 0x02, 0x20, 0x00,
83 0x00, 0x00, 0x00, 0x00
86 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
89 { ICE_IPV4_OFOS, 14 },
94 { ICE_PROTOCOL_LAST, 0 },
97 static const u8 dummy_gre_udp_packet[] = {
98 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
99 0x00, 0x00, 0x00, 0x00,
100 0x00, 0x00, 0x00, 0x00,
102 0x08, 0x00, /* ICE_ETYPE_OL 12 */
104 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
105 0x00, 0x00, 0x00, 0x00,
106 0x00, 0x2F, 0x00, 0x00,
107 0x00, 0x00, 0x00, 0x00,
108 0x00, 0x00, 0x00, 0x00,
110 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
111 0x00, 0x00, 0x00, 0x00,
113 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
114 0x00, 0x00, 0x00, 0x00,
115 0x00, 0x00, 0x00, 0x00,
118 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
119 0x00, 0x00, 0x00, 0x00,
120 0x00, 0x11, 0x00, 0x00,
121 0x00, 0x00, 0x00, 0x00,
122 0x00, 0x00, 0x00, 0x00,
124 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
125 0x00, 0x08, 0x00, 0x00,
128 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
130 { ICE_ETYPE_OL, 12 },
131 { ICE_IPV4_OFOS, 14 },
135 { ICE_VXLAN_GPE, 42 },
139 { ICE_PROTOCOL_LAST, 0 },
142 static const u8 dummy_udp_tun_tcp_packet[] = {
143 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
144 0x00, 0x00, 0x00, 0x00,
145 0x00, 0x00, 0x00, 0x00,
147 0x08, 0x00, /* ICE_ETYPE_OL 12 */
149 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
150 0x00, 0x01, 0x00, 0x00,
151 0x40, 0x11, 0x00, 0x00,
152 0x00, 0x00, 0x00, 0x00,
153 0x00, 0x00, 0x00, 0x00,
155 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
156 0x00, 0x46, 0x00, 0x00,
158 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
159 0x00, 0x00, 0x00, 0x00,
161 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
162 0x00, 0x00, 0x00, 0x00,
163 0x00, 0x00, 0x00, 0x00,
166 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
167 0x00, 0x01, 0x00, 0x00,
168 0x40, 0x06, 0x00, 0x00,
169 0x00, 0x00, 0x00, 0x00,
170 0x00, 0x00, 0x00, 0x00,
172 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
173 0x00, 0x00, 0x00, 0x00,
174 0x00, 0x00, 0x00, 0x00,
175 0x50, 0x02, 0x20, 0x00,
176 0x00, 0x00, 0x00, 0x00
179 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
181 { ICE_ETYPE_OL, 12 },
182 { ICE_IPV4_OFOS, 14 },
186 { ICE_VXLAN_GPE, 42 },
189 { ICE_UDP_ILOS, 84 },
190 { ICE_PROTOCOL_LAST, 0 },
193 static const u8 dummy_udp_tun_udp_packet[] = {
194 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
195 0x00, 0x00, 0x00, 0x00,
196 0x00, 0x00, 0x00, 0x00,
198 0x08, 0x00, /* ICE_ETYPE_OL 12 */
200 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
201 0x00, 0x01, 0x00, 0x00,
202 0x00, 0x11, 0x00, 0x00,
203 0x00, 0x00, 0x00, 0x00,
204 0x00, 0x00, 0x00, 0x00,
206 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
207 0x00, 0x3a, 0x00, 0x00,
209 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
210 0x00, 0x00, 0x00, 0x00,
212 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
213 0x00, 0x00, 0x00, 0x00,
214 0x00, 0x00, 0x00, 0x00,
217 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
218 0x00, 0x01, 0x00, 0x00,
219 0x00, 0x11, 0x00, 0x00,
220 0x00, 0x00, 0x00, 0x00,
221 0x00, 0x00, 0x00, 0x00,
223 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
224 0x00, 0x08, 0x00, 0x00,
227 /* offset info for MAC + IPv4 + UDP dummy packet */
228 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
230 { ICE_ETYPE_OL, 12 },
231 { ICE_IPV4_OFOS, 14 },
232 { ICE_UDP_ILOS, 34 },
233 { ICE_PROTOCOL_LAST, 0 },
236 /* Dummy packet for MAC + IPv4 + UDP */
237 static const u8 dummy_udp_packet[] = {
238 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
239 0x00, 0x00, 0x00, 0x00,
240 0x00, 0x00, 0x00, 0x00,
242 0x08, 0x00, /* ICE_ETYPE_OL 12 */
244 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
245 0x00, 0x01, 0x00, 0x00,
246 0x00, 0x11, 0x00, 0x00,
247 0x00, 0x00, 0x00, 0x00,
248 0x00, 0x00, 0x00, 0x00,
250 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
251 0x00, 0x08, 0x00, 0x00,
253 0x00, 0x00, /* 2 bytes for 4 byte alignment */
256 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
257 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
259 { ICE_ETYPE_OL, 12 },
260 { ICE_VLAN_OFOS, 14 },
261 { ICE_IPV4_OFOS, 18 },
262 { ICE_UDP_ILOS, 38 },
263 { ICE_PROTOCOL_LAST, 0 },
266 /* C-tag (802.1Q), IPv4:UDP dummy packet */
267 static const u8 dummy_vlan_udp_packet[] = {
268 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
269 0x00, 0x00, 0x00, 0x00,
270 0x00, 0x00, 0x00, 0x00,
272 0x81, 0x00, /* ICE_ETYPE_OL 12 */
274 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
276 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
277 0x00, 0x01, 0x00, 0x00,
278 0x00, 0x11, 0x00, 0x00,
279 0x00, 0x00, 0x00, 0x00,
280 0x00, 0x00, 0x00, 0x00,
282 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
283 0x00, 0x08, 0x00, 0x00,
285 0x00, 0x00, /* 2 bytes for 4 byte alignment */
288 /* offset info for MAC + IPv4 + TCP dummy packet */
289 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
291 { ICE_ETYPE_OL, 12 },
292 { ICE_IPV4_OFOS, 14 },
294 { ICE_PROTOCOL_LAST, 0 },
297 /* Dummy packet for MAC + IPv4 + TCP */
298 static const u8 dummy_tcp_packet[] = {
299 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
300 0x00, 0x00, 0x00, 0x00,
301 0x00, 0x00, 0x00, 0x00,
303 0x08, 0x00, /* ICE_ETYPE_OL 12 */
305 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
306 0x00, 0x01, 0x00, 0x00,
307 0x00, 0x06, 0x00, 0x00,
308 0x00, 0x00, 0x00, 0x00,
309 0x00, 0x00, 0x00, 0x00,
311 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
312 0x00, 0x00, 0x00, 0x00,
313 0x00, 0x00, 0x00, 0x00,
314 0x50, 0x00, 0x00, 0x00,
315 0x00, 0x00, 0x00, 0x00,
317 0x00, 0x00, /* 2 bytes for 4 byte alignment */
320 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
321 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
323 { ICE_ETYPE_OL, 12 },
324 { ICE_VLAN_OFOS, 14 },
325 { ICE_IPV4_OFOS, 18 },
327 { ICE_PROTOCOL_LAST, 0 },
330 /* C-tag (802.1Q), IPv4:TCP dummy packet */
331 static const u8 dummy_vlan_tcp_packet[] = {
332 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
333 0x00, 0x00, 0x00, 0x00,
334 0x00, 0x00, 0x00, 0x00,
336 0x81, 0x00, /* ICE_ETYPE_OL 12 */
338 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
340 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
341 0x00, 0x01, 0x00, 0x00,
342 0x00, 0x06, 0x00, 0x00,
343 0x00, 0x00, 0x00, 0x00,
344 0x00, 0x00, 0x00, 0x00,
346 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
347 0x00, 0x00, 0x00, 0x00,
348 0x00, 0x00, 0x00, 0x00,
349 0x50, 0x00, 0x00, 0x00,
350 0x00, 0x00, 0x00, 0x00,
352 0x00, 0x00, /* 2 bytes for 4 byte alignment */
355 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
357 { ICE_ETYPE_OL, 12 },
358 { ICE_IPV6_OFOS, 14 },
360 { ICE_PROTOCOL_LAST, 0 },
363 static const u8 dummy_tcp_ipv6_packet[] = {
364 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
365 0x00, 0x00, 0x00, 0x00,
366 0x00, 0x00, 0x00, 0x00,
368 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
370 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
371 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
372 0x00, 0x00, 0x00, 0x00,
373 0x00, 0x00, 0x00, 0x00,
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00,
379 0x00, 0x00, 0x00, 0x00,
381 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
382 0x00, 0x00, 0x00, 0x00,
383 0x00, 0x00, 0x00, 0x00,
384 0x50, 0x00, 0x00, 0x00,
385 0x00, 0x00, 0x00, 0x00,
387 0x00, 0x00, /* 2 bytes for 4 byte alignment */
390 /* C-tag (802.1Q): IPv6 + TCP */
391 static const struct ice_dummy_pkt_offsets
392 dummy_vlan_tcp_ipv6_packet_offsets[] = {
394 { ICE_ETYPE_OL, 12 },
395 { ICE_VLAN_OFOS, 14 },
396 { ICE_IPV6_OFOS, 18 },
398 { ICE_PROTOCOL_LAST, 0 },
401 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
402 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
403 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
404 0x00, 0x00, 0x00, 0x00,
405 0x00, 0x00, 0x00, 0x00,
407 0x81, 0x00, /* ICE_ETYPE_OL 12 */
409 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
411 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
412 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
413 0x00, 0x00, 0x00, 0x00,
414 0x00, 0x00, 0x00, 0x00,
415 0x00, 0x00, 0x00, 0x00,
416 0x00, 0x00, 0x00, 0x00,
417 0x00, 0x00, 0x00, 0x00,
418 0x00, 0x00, 0x00, 0x00,
419 0x00, 0x00, 0x00, 0x00,
420 0x00, 0x00, 0x00, 0x00,
422 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
423 0x00, 0x00, 0x00, 0x00,
424 0x00, 0x00, 0x00, 0x00,
425 0x50, 0x00, 0x00, 0x00,
426 0x00, 0x00, 0x00, 0x00,
428 0x00, 0x00, /* 2 bytes for 4 byte alignment */
432 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
434 { ICE_ETYPE_OL, 12 },
435 { ICE_IPV6_OFOS, 14 },
436 { ICE_UDP_ILOS, 54 },
437 { ICE_PROTOCOL_LAST, 0 },
440 /* IPv6 + UDP dummy packet */
441 static const u8 dummy_udp_ipv6_packet[] = {
442 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
443 0x00, 0x00, 0x00, 0x00,
444 0x00, 0x00, 0x00, 0x00,
446 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
448 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
449 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
450 0x00, 0x00, 0x00, 0x00,
451 0x00, 0x00, 0x00, 0x00,
452 0x00, 0x00, 0x00, 0x00,
453 0x00, 0x00, 0x00, 0x00,
454 0x00, 0x00, 0x00, 0x00,
455 0x00, 0x00, 0x00, 0x00,
456 0x00, 0x00, 0x00, 0x00,
457 0x00, 0x00, 0x00, 0x00,
459 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
460 0x00, 0x10, 0x00, 0x00,
462 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
463 0x00, 0x00, 0x00, 0x00,
465 0x00, 0x00, /* 2 bytes for 4 byte alignment */
468 /* C-tag (802.1Q): IPv6 + UDP */
469 static const struct ice_dummy_pkt_offsets
470 dummy_vlan_udp_ipv6_packet_offsets[] = {
472 { ICE_ETYPE_OL, 12 },
473 { ICE_VLAN_OFOS, 14 },
474 { ICE_IPV6_OFOS, 18 },
475 { ICE_UDP_ILOS, 58 },
476 { ICE_PROTOCOL_LAST, 0 },
479 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
480 static const u8 dummy_vlan_udp_ipv6_packet[] = {
481 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
482 0x00, 0x00, 0x00, 0x00,
483 0x00, 0x00, 0x00, 0x00,
485 0x81, 0x00, /* ICE_ETYPE_OL 12 */
487 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
489 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
490 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
491 0x00, 0x00, 0x00, 0x00,
492 0x00, 0x00, 0x00, 0x00,
493 0x00, 0x00, 0x00, 0x00,
494 0x00, 0x00, 0x00, 0x00,
495 0x00, 0x00, 0x00, 0x00,
496 0x00, 0x00, 0x00, 0x00,
497 0x00, 0x00, 0x00, 0x00,
498 0x00, 0x00, 0x00, 0x00,
500 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
501 0x00, 0x08, 0x00, 0x00,
503 0x00, 0x00, /* 2 bytes for 4 byte alignment */
506 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
508 { ICE_IPV4_OFOS, 14 },
511 { ICE_PROTOCOL_LAST, 0 },
514 static const u8 dummy_udp_gtp_packet[] = {
515 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
516 0x00, 0x00, 0x00, 0x00,
517 0x00, 0x00, 0x00, 0x00,
520 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
521 0x00, 0x00, 0x00, 0x00,
522 0x00, 0x11, 0x00, 0x00,
523 0x00, 0x00, 0x00, 0x00,
524 0x00, 0x00, 0x00, 0x00,
526 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
527 0x00, 0x1c, 0x00, 0x00,
529 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
530 0x00, 0x00, 0x00, 0x00,
531 0x00, 0x00, 0x00, 0x85,
533 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
534 0x00, 0x00, 0x00, 0x00,
537 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
539 { ICE_ETYPE_OL, 12 },
540 { ICE_VLAN_OFOS, 14},
542 { ICE_PROTOCOL_LAST, 0 },
545 static const u8 dummy_pppoe_ipv4_packet[] = {
546 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
547 0x00, 0x00, 0x00, 0x00,
548 0x00, 0x00, 0x00, 0x00,
550 0x81, 0x00, /* ICE_ETYPE_OL 12 */
552 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
554 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
557 0x00, 0x21, /* PPP Link Layer 24 */
559 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
560 0x00, 0x00, 0x00, 0x00,
561 0x00, 0x00, 0x00, 0x00,
562 0x00, 0x00, 0x00, 0x00,
563 0x00, 0x00, 0x00, 0x00,
565 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
568 static const u8 dummy_pppoe_ipv6_packet[] = {
569 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
570 0x00, 0x00, 0x00, 0x00,
571 0x00, 0x00, 0x00, 0x00,
573 0x81, 0x00, /* ICE_ETYPE_OL 12 */
575 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
577 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
580 0x00, 0x57, /* PPP Link Layer 24 */
582 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
583 0x00, 0x00, 0x3b, 0x00,
584 0x00, 0x00, 0x00, 0x00,
585 0x00, 0x00, 0x00, 0x00,
586 0x00, 0x00, 0x00, 0x00,
587 0x00, 0x00, 0x00, 0x00,
588 0x00, 0x00, 0x00, 0x00,
589 0x00, 0x00, 0x00, 0x00,
590 0x00, 0x00, 0x00, 0x00,
591 0x00, 0x00, 0x00, 0x00,
593 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
596 static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
598 { ICE_IPV4_OFOS, 14 },
600 { ICE_PROTOCOL_LAST, 0 },
603 static const u8 dummy_ipv4_esp_pkt[] = {
604 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
605 0x00, 0x00, 0x00, 0x00,
606 0x00, 0x00, 0x00, 0x00,
609 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 14 */
610 0x00, 0x00, 0x40, 0x00,
611 0x40, 0x32, 0x00, 0x00,
612 0x00, 0x00, 0x00, 0x00,
613 0x00, 0x00, 0x00, 0x00,
615 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
616 0x00, 0x00, 0x00, 0x00,
617 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
620 static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
622 { ICE_IPV6_OFOS, 14 },
624 { ICE_PROTOCOL_LAST, 0 },
627 static const u8 dummy_ipv6_esp_pkt[] = {
628 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
629 0x00, 0x00, 0x00, 0x00,
630 0x00, 0x00, 0x00, 0x00,
633 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
634 0x00, 0x08, 0x32, 0x00, /* Next header ESP */
635 0x00, 0x00, 0x00, 0x00,
636 0x00, 0x00, 0x00, 0x00,
637 0x00, 0x00, 0x00, 0x00,
638 0x00, 0x00, 0x00, 0x00,
639 0x00, 0x00, 0x00, 0x00,
640 0x00, 0x00, 0x00, 0x00,
641 0x00, 0x00, 0x00, 0x00,
642 0x00, 0x00, 0x00, 0x00,
644 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
645 0x00, 0x00, 0x00, 0x00,
646 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
649 static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
651 { ICE_IPV4_OFOS, 14 },
653 { ICE_PROTOCOL_LAST, 0 },
656 static const u8 dummy_ipv4_ah_pkt[] = {
657 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
658 0x00, 0x00, 0x00, 0x00,
659 0x00, 0x00, 0x00, 0x00,
662 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
663 0x00, 0x00, 0x40, 0x00,
664 0x40, 0x33, 0x00, 0x00,
665 0x00, 0x00, 0x00, 0x00,
666 0x00, 0x00, 0x00, 0x00,
668 0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
669 0x00, 0x00, 0x00, 0x00,
670 0x00, 0x00, 0x00, 0x00,
671 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
674 static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
676 { ICE_IPV6_OFOS, 14 },
678 { ICE_PROTOCOL_LAST, 0 },
681 static const u8 dummy_ipv6_ah_pkt[] = {
682 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
683 0x00, 0x00, 0x00, 0x00,
684 0x00, 0x00, 0x00, 0x00,
687 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
688 0x00, 0x0c, 0x33, 0x00, /* Next header AH */
689 0x00, 0x00, 0x00, 0x00,
690 0x00, 0x00, 0x00, 0x00,
691 0x00, 0x00, 0x00, 0x00,
692 0x00, 0x00, 0x00, 0x00,
693 0x00, 0x00, 0x00, 0x00,
694 0x00, 0x00, 0x00, 0x00,
695 0x00, 0x00, 0x00, 0x00,
696 0x00, 0x00, 0x00, 0x00,
698 0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
699 0x00, 0x00, 0x00, 0x00,
700 0x00, 0x00, 0x00, 0x00,
701 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
704 static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
706 { ICE_IPV4_OFOS, 14 },
707 { ICE_UDP_ILOS, 34 },
709 { ICE_PROTOCOL_LAST, 0 },
712 static const u8 dummy_ipv4_nat_pkt[] = {
713 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
714 0x00, 0x00, 0x00, 0x00,
715 0x00, 0x00, 0x00, 0x00,
718 0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_IL 14 */
719 0x00, 0x00, 0x40, 0x00,
720 0x40, 0x11, 0x00, 0x00,
721 0x00, 0x00, 0x00, 0x00,
722 0x00, 0x00, 0x00, 0x00,
724 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34 */
725 0x00, 0x00, 0x00, 0x00,
727 0x00, 0x00, 0x00, 0x00,
728 0x00, 0x00, 0x00, 0x00,
729 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
732 static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
734 { ICE_IPV6_OFOS, 14 },
735 { ICE_UDP_ILOS, 54 },
737 { ICE_PROTOCOL_LAST, 0 },
740 static const u8 dummy_ipv6_nat_pkt[] = {
741 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
742 0x00, 0x00, 0x00, 0x00,
743 0x00, 0x00, 0x00, 0x00,
746 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
747 0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */
748 0x00, 0x00, 0x00, 0x00,
749 0x00, 0x00, 0x00, 0x00,
750 0x00, 0x00, 0x00, 0x00,
751 0x00, 0x00, 0x00, 0x00,
752 0x00, 0x00, 0x00, 0x00,
753 0x00, 0x00, 0x00, 0x00,
754 0x00, 0x00, 0x00, 0x00,
755 0x00, 0x00, 0x00, 0x00,
757 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54 */
758 0x00, 0x00, 0x00, 0x00,
760 0x00, 0x00, 0x00, 0x00,
761 0x00, 0x00, 0x00, 0x00,
762 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
766 static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
768 { ICE_IPV4_OFOS, 14 },
770 { ICE_PROTOCOL_LAST, 0 },
773 static const u8 dummy_ipv4_l2tpv3_pkt[] = {
774 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
775 0x00, 0x00, 0x00, 0x00,
776 0x00, 0x00, 0x00, 0x00,
779 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
780 0x00, 0x00, 0x40, 0x00,
781 0x40, 0x73, 0x00, 0x00,
782 0x00, 0x00, 0x00, 0x00,
783 0x00, 0x00, 0x00, 0x00,
785 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
786 0x00, 0x00, 0x00, 0x00,
787 0x00, 0x00, 0x00, 0x00,
788 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
791 static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
793 { ICE_IPV6_OFOS, 14 },
795 { ICE_PROTOCOL_LAST, 0 },
798 static const u8 dummy_ipv6_l2tpv3_pkt[] = {
799 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
800 0x00, 0x00, 0x00, 0x00,
801 0x00, 0x00, 0x00, 0x00,
804 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
805 0x00, 0x0c, 0x73, 0x40,
806 0x00, 0x00, 0x00, 0x00,
807 0x00, 0x00, 0x00, 0x00,
808 0x00, 0x00, 0x00, 0x00,
809 0x00, 0x00, 0x00, 0x00,
810 0x00, 0x00, 0x00, 0x00,
811 0x00, 0x00, 0x00, 0x00,
812 0x00, 0x00, 0x00, 0x00,
813 0x00, 0x00, 0x00, 0x00,
815 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
816 0x00, 0x00, 0x00, 0x00,
817 0x00, 0x00, 0x00, 0x00,
818 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
821 /* this is a recipe to profile association bitmap */
822 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
823 ICE_MAX_NUM_PROFILES);
825 /* this is a profile to recipe association bitmap */
826 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
827 ICE_MAX_NUM_RECIPES);
/* forward declaration: defined below, but needed by ice_get_recp_frm_fw()
 * to refresh the two association bitmaps above from firmware on demand
 */
829 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
832 * ice_collect_result_idx - copy result index values
833 * @buf: buffer that contains the result index
834 * @recp: the recipe struct to copy data into
836 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
837 struct ice_sw_recipe *recp)
/* The result index field is only meaningful when the RESULT_EN flag is
 * set; strip the enable flag before recording the raw index in the
 * recipe's result-index bitmap.
 */
839 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
840 ice_set_bit(buf->content.result_indx &
841 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs)
845 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
846 * @hw: pointer to hardware structure
847 * @recps: struct that we need to populate
848 * @rid: recipe ID that we are populating
849 * @refresh_required: true if we should get recipe to profile mapping from FW
851 * This function is used to populate all the necessary entries into our
852 * bookkeeping so that we have a current list of all the recipes that are
853 * programmed in the firmware.
855 static enum ice_status
856 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
857 bool *refresh_required)
859 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
860 struct ice_aqc_recipe_data_elem *tmp;
861 u16 num_recps = ICE_MAX_NUM_RECIPES;
862 struct ice_prot_lkup_ext *lkup_exts;
863 enum ice_status status;
867 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
869 /* we need a buffer big enough to accommodate all the recipes */
870 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
871 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
873 return ICE_ERR_NO_MEMORY;
/* ask FW for the chain of sub-recipes rooted at rid; num_recps is
 * updated by the call to the number of entries actually returned
 */
875 tmp[0].recipe_indx = rid;
876 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
877 /* non-zero status meaning recipe doesn't exist */
881 /* Get recipe to profile map so that we can get the fv from lkups that
882 * we read for a recipe from FW. Since we want to minimize the number of
883 * times we make this FW call, just make one call and cache the copy
884 * until a new recipe is added. This operation is only required the
885 * first time to get the changes from FW. Then to search existing
886 * entries we don't need to update the cache again until another recipe
889 if (*refresh_required) {
890 ice_get_recp_to_prof_map(hw);
891 *refresh_required = false;
894 /* Start populating all the entries for recps[rid] based on lkups from
895 * firmware. Note that we are only creating the root recipe in our
898 lkup_exts = &recps[rid].lkup_exts;
/* walk every sub-recipe returned by FW; each iteration builds one
 * rg_list entry and updates the per-index recipe database slot
 */
900 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
901 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
902 struct ice_recp_grp_entry *rg_entry;
903 u8 i, prof, idx, prot = 0;
907 rg_entry = (struct ice_recp_grp_entry *)
908 ice_malloc(hw, sizeof(*rg_entry));
910 status = ICE_ERR_NO_MEMORY;
914 idx = root_bufs.recipe_indx;
915 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
917 /* Mark all result indices in this chain */
918 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
919 ice_set_bit(root_bufs.content.result_indx &
920 ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
922 /* get the first profile that is associated with rid */
923 prof = ice_find_first_bit(recipe_to_profile[idx],
924 ICE_MAX_NUM_PROFILES);
/* lkup_indx[0]/mask[0] are skipped (i + 1 below) -- presumably
 * reserved for recipe-internal use; TODO confirm against the AQ
 * recipe descriptor layout
 */
925 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
926 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
928 rg_entry->fv_idx[i] = lkup_indx;
929 rg_entry->fv_mask[i] =
930 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
932 /* If the recipe is a chained recipe then all its
933 * child recipe's result will have a result index.
934 * To fill fv_words we should not use those result
935 * index, we only need the protocol ids and offsets.
936 * We will skip all the fv_idx which stores result
937 * index in them. We also need to skip any fv_idx which
938 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
939 * valid offset value.
941 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
942 rg_entry->fv_idx[i]) ||
943 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
944 rg_entry->fv_idx[i] == 0)
/* translate the field-vector index into a protocol ID and
 * offset using the profile, and record it as a valid word
 */
947 ice_find_prot_off(hw, ICE_BLK_SW, prof,
948 rg_entry->fv_idx[i], &prot, &off);
949 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
950 lkup_exts->fv_words[fv_word_idx].off = off;
951 lkup_exts->field_mask[fv_word_idx] =
952 rg_entry->fv_mask[i];
955 /* populate rg_list with the data from the child entry of this
958 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
960 /* Propagate some data to the recipe database */
961 recps[idx].is_root = !!is_root;
962 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
963 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
964 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
965 recps[idx].chain_idx = root_bufs.content.result_indx &
966 ~ICE_AQ_RECIPE_RESULT_EN;
967 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
969 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
975 /* Only do the following for root recipes entries */
976 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
977 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
978 recps[idx].root_rid = root_bufs.content.rid &
979 ~ICE_AQ_RECIPE_ID_IS_ROOT;
980 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
983 /* Complete initialization of the root recipe entry */
984 lkup_exts->n_val_words = fv_word_idx;
985 recps[rid].big_recp = (num_recps > 1);
986 recps[rid].n_grp_count = (u8)num_recps;
/* keep a private copy of the raw FW recipe data for later replay */
987 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
988 ice_memdup(hw, tmp, recps[rid].n_grp_count *
989 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
990 if (!recps[rid].root_buf)
993 /* Copy result indexes */
994 ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
995 recps[rid].recp_created = true;
1003 * ice_get_recp_to_prof_map - updates recipe to profile mapping
1004 * @hw: pointer to hardware structure
1006 * This function is used to populate recipe_to_profile matrix where index to
1007 * this array is the recipe ID and the element is the mapping of which profiles
1008 * is this recipe mapped to.
1010 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
1012 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* query FW once per profile; a failed query leaves that profile's
 * bitmap zeroed and skips it
 */
1015 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
1018 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
1019 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
1020 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
1022 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
1023 ICE_MAX_NUM_RECIPES);
/* mirror the association into the inverse (recipe -> profile) map */
1024 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
1025 if (ice_is_bit_set(r_bitmap, j))
1026 ice_set_bit(i, recipe_to_profile[j]);
1031 * ice_init_def_sw_recp - initialize the recipe book keeping tables
1032 * @hw: pointer to the HW struct
1033 * @recp_list: pointer to sw recipe list
1035 * Allocate memory for the entire recipe table and initialize the structures/
1036 * entries corresponding to basic recipes.
1039 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
1041 struct ice_sw_recipe *recps;
1044 recps = (struct ice_sw_recipe *)
1045 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
1047 return ICE_ERR_NO_MEMORY;
/* each slot starts as its own root recipe with empty rule/group lists
 * and its filter-rule lock initialized
 */
1049 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
1050 recps[i].root_rid = i;
1051 INIT_LIST_HEAD(&recps[i].filt_rules);
1052 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
1053 INIT_LIST_HEAD(&recps[i].rg_list);
1054 ice_init_lock(&recps[i].filt_rule_lock);
1063 * ice_aq_get_sw_cfg - get switch configuration
1064 * @hw: pointer to the hardware structure
1065 * @buf: pointer to the result buffer
1066 * @buf_size: length of the buffer available for response
1067 * @req_desc: pointer to requested descriptor
1068 * @num_elems: pointer to number of elements
1069 * @cd: pointer to command details structure or NULL
1071 * Get switch configuration (0x0200) to be placed in 'buff'.
1072 * This admin command returns information such as initial VSI/port number
1073 * and switch ID it belongs to.
1075 * NOTE: *req_desc is both an input/output parameter.
1076 * The caller of this function first calls this function with *request_desc set
1077 * to 0. If the response from f/w has *req_desc set to 0, all the switch
1078 * configuration information has been returned; if non-zero (meaning not all
1079 * the information was returned), the caller should call this function again
1080 * with *req_desc set to the previous value returned by f/w to get the
1081 * next block of switch configuration information.
1083 * *num_elems is output only parameter. This reflects the number of elements
1084 * in response buffer. The caller of this function to use *num_elems while
1085 * parsing the response buffer.
1087 static enum ice_status
1088 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
1089 u16 buf_size, u16 *req_desc, u16 *num_elems,
1090 struct ice_sq_cd *cd)
1092 struct ice_aqc_get_sw_cfg *cmd;
1093 enum ice_status status;
1094 struct ice_aq_desc desc;
/* pass the caller's continuation cookie in the command element field */
1096 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
1097 cmd = &desc.params.get_sw_conf;
1098 cmd->element = CPU_TO_LE16(*req_desc);
1100 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* the response updates the descriptor in place: read back the next
 * continuation cookie and the number of returned elements
 */
1102 *req_desc = LE16_TO_CPU(cmd->element);
1103 *num_elems = LE16_TO_CPU(cmd->num_elems);
1110 * ice_alloc_sw - allocate resources specific to switch
1111 * @hw: pointer to the HW struct
1112 * @ena_stats: true to turn on VEB stats
1113 * @shared_res: true for shared resource, false for dedicated resource
1114 * @sw_id: switch ID returned
1115 * @counter_id: VEB counter ID returned
1117 * allocates switch resources (SWID and VEB counter) (0x0208)
1120 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
1123 struct ice_aqc_alloc_free_res_elem *sw_buf;
1124 struct ice_aqc_res_elem *sw_ele;
1125 enum ice_status status;
1128 buf_len = sizeof(*sw_buf);
1129 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1130 ice_malloc(hw, buf_len);
1132 return ICE_ERR_NO_MEMORY;
1134 /* Prepare buffer for switch ID.
1135 * The number of resource entries in buffer is passed as 1 since only a
1136 * single switch/VEB instance is allocated, and hence a single sw_id
1139 sw_buf->num_elems = CPU_TO_LE16(1);
1141 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
1142 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
1143 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
1145 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1146 ice_aqc_opc_alloc_res, NULL);
1149 goto ice_alloc_sw_exit;
/* FW returns the allocated switch ID in the first response element */
1151 sw_ele = &sw_buf->elem[0];
1152 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
1155 /* Prepare buffer for VEB Counter */
1156 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
1157 struct ice_aqc_alloc_free_res_elem *counter_buf;
1158 struct ice_aqc_res_elem *counter_ele;
1160 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1161 ice_malloc(hw, buf_len);
1163 status = ICE_ERR_NO_MEMORY;
1164 goto ice_alloc_sw_exit;
1167 /* The number of resource entries in buffer is passed as 1 since
1168 * only a single switch/VEB instance is allocated, and hence a
1169 * single VEB counter is requested.
1171 counter_buf->num_elems = CPU_TO_LE16(1);
1172 counter_buf->res_type =
1173 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
1174 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
1175 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
/* counter_buf is a temporary request buffer; free it on both the
 * failure and success paths once the counter ID has been read out
 */
1179 ice_free(hw, counter_buf);
1180 goto ice_alloc_sw_exit;
1182 counter_ele = &counter_buf->elem[0];
1183 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
1184 ice_free(hw, counter_buf);
1188 ice_free(hw, sw_buf);
1193 * ice_free_sw - free resources specific to switch
1194 * @hw: pointer to the HW struct
1195 * @sw_id: switch ID returned
1196 * @counter_id: VEB counter ID returned
1198 * free switch resources (SWID and VEB counter) (0x0209)
1200 * NOTE: This function frees multiple resources. It continues
1201 * releasing other resources even after it encounters error.
1202 * The error code returned is the last error it encountered.
1204 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
1206 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
1207 enum ice_status status, ret_status;
1210 buf_len = sizeof(*sw_buf);
1211 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1212 ice_malloc(hw, buf_len);
1214 return ICE_ERR_NO_MEMORY;
1216 /* Prepare buffer to free for switch ID res.
1217 * The number of resource entries in buffer is passed as 1 since only a
1218 * single switch/VEB instance is freed, and hence a single sw_id
1221 sw_buf->num_elems = CPU_TO_LE16(1);
1222 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
1223 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
1225 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1226 ice_aqc_opc_free_res, NULL);
1229 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1231 /* Prepare buffer to free for VEB Counter resource */
1232 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1233 ice_malloc(hw, buf_len);
1235 ice_free(hw, sw_buf);
1236 return ICE_ERR_NO_MEMORY;
1239 /* The number of resource entries in buffer is passed as 1 since only a
1240 * single switch/VEB instance is freed, and hence a single VEB counter
1243 counter_buf->num_elems = CPU_TO_LE16(1);
1244 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
1245 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
1247 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1248 ice_aqc_opc_free_res, NULL);
1250 ice_debug(hw, ICE_DBG_SW,
1251 "VEB counter resource could not be freed\n");
1252 ret_status = status;
1255 ice_free(hw, counter_buf);
1256 ice_free(hw, sw_buf);
1262 * @hw: pointer to the HW struct
1263 * @vsi_ctx: pointer to a VSI context struct
1264 * @cd: pointer to command details structure or NULL
1266 * Add a VSI context to the hardware (0x0210)
1269 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1270 struct ice_sq_cd *cd)
1272 struct ice_aqc_add_update_free_vsi_resp *res;
1273 struct ice_aqc_add_get_update_free_vsi *cmd;
1274 struct ice_aq_desc desc;
1275 enum ice_status status;
1277 cmd = &desc.params.vsi_cmd;
1278 res = &desc.params.add_update_free_vsi_res;
1280 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
1282 if (!vsi_ctx->alloc_from_pool)
1283 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
1284 ICE_AQ_VSI_IS_VALID);
1286 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
1288 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1290 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1291 sizeof(vsi_ctx->info), cd);
1294 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1295 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
1296 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
1304 * @hw: pointer to the HW struct
1305 * @vsi_ctx: pointer to a VSI context struct
1306 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1307 * @cd: pointer to command details structure or NULL
1309 * Free VSI context info from hardware (0x0213)
1312 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1313 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1315 struct ice_aqc_add_update_free_vsi_resp *resp;
1316 struct ice_aqc_add_get_update_free_vsi *cmd;
1317 struct ice_aq_desc desc;
1318 enum ice_status status;
1320 cmd = &desc.params.vsi_cmd;
1321 resp = &desc.params.add_update_free_vsi_res;
1323 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1325 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1327 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
1329 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1331 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1332 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1340 * @hw: pointer to the HW struct
1341 * @vsi_ctx: pointer to a VSI context struct
1342 * @cd: pointer to command details structure or NULL
1344 * Update VSI context in the hardware (0x0211)
1347 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1348 struct ice_sq_cd *cd)
1350 struct ice_aqc_add_update_free_vsi_resp *resp;
1351 struct ice_aqc_add_get_update_free_vsi *cmd;
1352 struct ice_aq_desc desc;
1353 enum ice_status status;
1355 cmd = &desc.params.vsi_cmd;
1356 resp = &desc.params.add_update_free_vsi_res;
1358 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1360 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1362 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1364 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1365 sizeof(vsi_ctx->info), cd);
1368 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1369 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1376 * ice_is_vsi_valid - check whether the VSI is valid or not
1377 * @hw: pointer to the HW struct
1378 * @vsi_handle: VSI handle
1380 * check whether the VSI is valid or not
1382 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1384 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1388 * ice_get_hw_vsi_num - return the HW VSI number
1389 * @hw: pointer to the HW struct
1390 * @vsi_handle: VSI handle
1392 * return the HW VSI number
1393 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1395 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1397 return hw->vsi_ctx[vsi_handle]->vsi_num;
1401 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1402 * @hw: pointer to the HW struct
1403 * @vsi_handle: VSI handle
1405 * return the VSI context entry for a given VSI handle
1407 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1409 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1413 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1414 * @hw: pointer to the HW struct
1415 * @vsi_handle: VSI handle
1416 * @vsi: VSI context pointer
1418 * save the VSI context entry for a given VSI handle
1421 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1423 hw->vsi_ctx[vsi_handle] = vsi;
1427 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1428 * @hw: pointer to the HW struct
1429 * @vsi_handle: VSI handle
1431 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1433 struct ice_vsi_ctx *vsi;
1436 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1439 ice_for_each_traffic_class(i) {
1440 if (vsi->lan_q_ctx[i]) {
1441 ice_free(hw, vsi->lan_q_ctx[i]);
1442 vsi->lan_q_ctx[i] = NULL;
1448 * ice_clear_vsi_ctx - clear the VSI context entry
1449 * @hw: pointer to the HW struct
1450 * @vsi_handle: VSI handle
1452 * clear the VSI context entry
1454 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1456 struct ice_vsi_ctx *vsi;
1458 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1460 ice_clear_vsi_q_ctx(hw, vsi_handle);
1462 hw->vsi_ctx[vsi_handle] = NULL;
1467 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1468 * @hw: pointer to the HW struct
1470 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1474 for (i = 0; i < ICE_MAX_VSI; i++)
1475 ice_clear_vsi_ctx(hw, i);
1479 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1480 * @hw: pointer to the HW struct
1481 * @vsi_handle: unique VSI handle provided by drivers
1482 * @vsi_ctx: pointer to a VSI context struct
1483 * @cd: pointer to command details structure or NULL
1485 * Add a VSI context to the hardware also add it into the VSI handle list.
1486 * If this function gets called after reset for existing VSIs then update
1487 * with the new HW VSI number in the corresponding VSI handle list entry.
1490 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1491 struct ice_sq_cd *cd)
1493 struct ice_vsi_ctx *tmp_vsi_ctx;
1494 enum ice_status status;
1496 if (vsi_handle >= ICE_MAX_VSI)
1497 return ICE_ERR_PARAM;
1498 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1501 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1503 /* Create a new VSI context */
1504 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1505 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
1507 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1508 return ICE_ERR_NO_MEMORY;
1510 *tmp_vsi_ctx = *vsi_ctx;
1512 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1514 /* update with new HW VSI num */
1515 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1522 * ice_free_vsi- free VSI context from hardware and VSI handle list
1523 * @hw: pointer to the HW struct
1524 * @vsi_handle: unique VSI handle
1525 * @vsi_ctx: pointer to a VSI context struct
1526 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1527 * @cd: pointer to command details structure or NULL
1529 * Free VSI context info from hardware as well as from VSI handle list
1532 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1533 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1535 enum ice_status status;
1537 if (!ice_is_vsi_valid(hw, vsi_handle))
1538 return ICE_ERR_PARAM;
1539 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1540 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1542 ice_clear_vsi_ctx(hw, vsi_handle);
1548 * @hw: pointer to the HW struct
1549 * @vsi_handle: unique VSI handle
1550 * @vsi_ctx: pointer to a VSI context struct
1551 * @cd: pointer to command details structure or NULL
1553 * Update VSI context in the hardware
1556 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1557 struct ice_sq_cd *cd)
1559 if (!ice_is_vsi_valid(hw, vsi_handle))
1560 return ICE_ERR_PARAM;
1561 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1562 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1566 * ice_aq_get_vsi_params
1567 * @hw: pointer to the HW struct
1568 * @vsi_ctx: pointer to a VSI context struct
1569 * @cd: pointer to command details structure or NULL
1571 * Get VSI context info from hardware (0x0212)
1574 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1575 struct ice_sq_cd *cd)
1577 struct ice_aqc_add_get_update_free_vsi *cmd;
1578 struct ice_aqc_get_vsi_resp *resp;
1579 struct ice_aq_desc desc;
1580 enum ice_status status;
1582 cmd = &desc.params.vsi_cmd;
1583 resp = &desc.params.get_vsi_resp;
1585 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1587 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1589 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1590 sizeof(vsi_ctx->info), cd);
1592 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1594 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1595 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1602 * ice_aq_add_update_mir_rule - add/update a mirror rule
1603 * @hw: pointer to the HW struct
1604 * @rule_type: Rule Type
1605 * @dest_vsi: VSI number to which packets will be mirrored
1606 * @count: length of the list
1607 * @mr_buf: buffer for list of mirrored VSI numbers
1608 * @cd: pointer to command details structure or NULL
1611 * Add/Update Mirror Rule (0x260).
1614 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1615 u16 count, struct ice_mir_rule_buf *mr_buf,
1616 struct ice_sq_cd *cd, u16 *rule_id)
1618 struct ice_aqc_add_update_mir_rule *cmd;
1619 struct ice_aq_desc desc;
1620 enum ice_status status;
1621 __le16 *mr_list = NULL;
1624 switch (rule_type) {
1625 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1626 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1627 /* Make sure count and mr_buf are set for these rule_types */
1628 if (!(count && mr_buf))
1629 return ICE_ERR_PARAM;
1631 buf_size = count * sizeof(__le16);
1632 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1634 return ICE_ERR_NO_MEMORY;
1636 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1637 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1638 /* Make sure count and mr_buf are not set for these
1641 if (count || mr_buf)
1642 return ICE_ERR_PARAM;
1645 ice_debug(hw, ICE_DBG_SW,
1646 "Error due to unsupported rule_type %u\n", rule_type);
1647 return ICE_ERR_OUT_OF_RANGE;
1650 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1652 /* Pre-process 'mr_buf' items for add/update of virtual port
1653 * ingress/egress mirroring (but not physical port ingress/egress
1659 for (i = 0; i < count; i++) {
1662 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1664 /* Validate specified VSI number, make sure it is less
1665 * than ICE_MAX_VSI, if not return with error.
1667 if (id >= ICE_MAX_VSI) {
1668 ice_debug(hw, ICE_DBG_SW,
1669 "Error VSI index (%u) out-of-range\n",
1671 ice_free(hw, mr_list);
1672 return ICE_ERR_OUT_OF_RANGE;
1675 /* add VSI to mirror rule */
1678 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1679 else /* remove VSI from mirror rule */
1680 mr_list[i] = CPU_TO_LE16(id);
1684 cmd = &desc.params.add_update_rule;
1685 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1686 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1687 ICE_AQC_RULE_ID_VALID_M);
1688 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1689 cmd->num_entries = CPU_TO_LE16(count);
1690 cmd->dest = CPU_TO_LE16(dest_vsi);
1692 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
1694 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1696 ice_free(hw, mr_list);
1702 * ice_aq_delete_mir_rule - delete a mirror rule
1703 * @hw: pointer to the HW struct
1704 * @rule_id: Mirror rule ID (to be deleted)
1705 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1706 * otherwise it is returned to the shared pool
1707 * @cd: pointer to command details structure or NULL
1709 * Delete Mirror Rule (0x261).
1712 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1713 struct ice_sq_cd *cd)
1715 struct ice_aqc_delete_mir_rule *cmd;
1716 struct ice_aq_desc desc;
1718 /* rule_id should be in the range 0...63 */
1719 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1720 return ICE_ERR_OUT_OF_RANGE;
1722 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1724 cmd = &desc.params.del_rule;
1725 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1726 cmd->rule_id = CPU_TO_LE16(rule_id);
1729 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1731 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1735 * ice_aq_alloc_free_vsi_list
1736 * @hw: pointer to the HW struct
1737 * @vsi_list_id: VSI list ID returned or used for lookup
1738 * @lkup_type: switch rule filter lookup type
1739 * @opc: switch rules population command type - pass in the command opcode
1741 * allocates or free a VSI list resource
1743 static enum ice_status
1744 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1745 enum ice_sw_lkup_type lkup_type,
1746 enum ice_adminq_opc opc)
1748 struct ice_aqc_alloc_free_res_elem *sw_buf;
1749 struct ice_aqc_res_elem *vsi_ele;
1750 enum ice_status status;
1753 buf_len = sizeof(*sw_buf);
1754 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1755 ice_malloc(hw, buf_len);
1757 return ICE_ERR_NO_MEMORY;
1758 sw_buf->num_elems = CPU_TO_LE16(1);
1760 if (lkup_type == ICE_SW_LKUP_MAC ||
1761 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1762 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1763 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1764 lkup_type == ICE_SW_LKUP_PROMISC ||
1765 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1766 lkup_type == ICE_SW_LKUP_LAST) {
1767 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1768 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1770 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1772 status = ICE_ERR_PARAM;
1773 goto ice_aq_alloc_free_vsi_list_exit;
1776 if (opc == ice_aqc_opc_free_res)
1777 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1779 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1781 goto ice_aq_alloc_free_vsi_list_exit;
1783 if (opc == ice_aqc_opc_alloc_res) {
1784 vsi_ele = &sw_buf->elem[0];
1785 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1788 ice_aq_alloc_free_vsi_list_exit:
1789 ice_free(hw, sw_buf);
1794 * ice_aq_set_storm_ctrl - Sets storm control configuration
1795 * @hw: pointer to the HW struct
1796 * @bcast_thresh: represents the upper threshold for broadcast storm control
1797 * @mcast_thresh: represents the upper threshold for multicast storm control
1798 * @ctl_bitmask: storm control control knobs
1800 * Sets the storm control configuration (0x0280)
1803 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1806 struct ice_aqc_storm_cfg *cmd;
1807 struct ice_aq_desc desc;
1809 cmd = &desc.params.storm_conf;
1811 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
1813 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1814 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1815 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1817 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1821 * ice_aq_get_storm_ctrl - gets storm control configuration
1822 * @hw: pointer to the HW struct
1823 * @bcast_thresh: represents the upper threshold for broadcast storm control
1824 * @mcast_thresh: represents the upper threshold for multicast storm control
1825 * @ctl_bitmask: storm control control knobs
1827 * Gets the storm control configuration (0x0281)
1830 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
1833 enum ice_status status;
1834 struct ice_aq_desc desc;
1836 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1838 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1840 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
1843 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1846 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1849 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1856 * ice_aq_sw_rules - add/update/remove switch rules
1857 * @hw: pointer to the HW struct
1858 * @rule_list: pointer to switch rule population list
1859 * @rule_list_sz: total size of the rule list in bytes
1860 * @num_rules: number of switch rules in the rule_list
1861 * @opc: switch rules population command type - pass in the command opcode
1862 * @cd: pointer to command details structure or NULL
1864 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1866 static enum ice_status
1867 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1868 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1870 struct ice_aq_desc desc;
1872 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1874 if (opc != ice_aqc_opc_add_sw_rules &&
1875 opc != ice_aqc_opc_update_sw_rules &&
1876 opc != ice_aqc_opc_remove_sw_rules)
1877 return ICE_ERR_PARAM;
1879 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1881 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1882 desc.params.sw_rules.num_rules_fltr_entry_index =
1883 CPU_TO_LE16(num_rules);
1884 return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1888 * ice_aq_add_recipe - add switch recipe
1889 * @hw: pointer to the HW struct
1890 * @s_recipe_list: pointer to switch rule population list
1891 * @num_recipes: number of switch recipes in the list
1892 * @cd: pointer to command details structure or NULL
1897 ice_aq_add_recipe(struct ice_hw *hw,
1898 struct ice_aqc_recipe_data_elem *s_recipe_list,
1899 u16 num_recipes, struct ice_sq_cd *cd)
1901 struct ice_aqc_add_get_recipe *cmd;
1902 struct ice_aq_desc desc;
1905 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1906 cmd = &desc.params.add_get_recipe;
1907 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1909 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
1910 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1912 buf_size = num_recipes * sizeof(*s_recipe_list);
1914 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1918 * ice_aq_get_recipe - get switch recipe
1919 * @hw: pointer to the HW struct
1920 * @s_recipe_list: pointer to switch rule population list
1921 * @num_recipes: pointer to the number of recipes (input and output)
1922 * @recipe_root: root recipe number of recipe(s) to retrieve
1923 * @cd: pointer to command details structure or NULL
1927 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1928 * On output, *num_recipes will equal the number of entries returned in
1931 * The caller must supply enough space in s_recipe_list to hold all possible
1932 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1935 ice_aq_get_recipe(struct ice_hw *hw,
1936 struct ice_aqc_recipe_data_elem *s_recipe_list,
1937 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1939 struct ice_aqc_add_get_recipe *cmd;
1940 struct ice_aq_desc desc;
1941 enum ice_status status;
1944 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1945 return ICE_ERR_PARAM;
1947 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1948 cmd = &desc.params.add_get_recipe;
1949 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1951 cmd->return_index = CPU_TO_LE16(recipe_root);
1952 cmd->num_sub_recipes = 0;
1954 buf_size = *num_recipes * sizeof(*s_recipe_list);
1956 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1957 /* cppcheck-suppress constArgument */
1958 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1964 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1965 * @hw: pointer to the HW struct
1966 * @profile_id: package profile ID to associate the recipe with
1967 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1968 * @cd: pointer to command details structure or NULL
1969 * Recipe to profile association (0x0291)
1972 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1973 struct ice_sq_cd *cd)
1975 struct ice_aqc_recipe_to_profile *cmd;
1976 struct ice_aq_desc desc;
1978 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1979 cmd = &desc.params.recipe_to_profile;
1980 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1981 cmd->profile_id = CPU_TO_LE16(profile_id);
1982 /* Set the recipe ID bit in the bitmask to let the device know which
1983 * profile we are associating the recipe to
1985 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1986 ICE_NONDMA_TO_NONDMA);
1988 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1992 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1993 * @hw: pointer to the HW struct
1994 * @profile_id: package profile ID to associate the recipe with
1995 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1996 * @cd: pointer to command details structure or NULL
1997 * Associate profile ID with given recipe (0x0293)
2000 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2001 struct ice_sq_cd *cd)
2003 struct ice_aqc_recipe_to_profile *cmd;
2004 struct ice_aq_desc desc;
2005 enum ice_status status;
2007 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2008 cmd = &desc.params.recipe_to_profile;
2009 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
2010 cmd->profile_id = CPU_TO_LE16(profile_id);
2012 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2014 ice_memcpy(r_bitmap, cmd->recipe_assoc,
2015 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
2021 * ice_alloc_recipe - add recipe resource
2022 * @hw: pointer to the hardware structure
2023 * @rid: recipe ID returned as response to AQ call
2025 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
2027 struct ice_aqc_alloc_free_res_elem *sw_buf;
2028 enum ice_status status;
2031 buf_len = sizeof(*sw_buf);
2032 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2034 return ICE_ERR_NO_MEMORY;
2036 sw_buf->num_elems = CPU_TO_LE16(1);
2037 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
2038 ICE_AQC_RES_TYPE_S) |
2039 ICE_AQC_RES_TYPE_FLAG_SHARED);
2040 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2041 ice_aqc_opc_alloc_res, NULL);
2043 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2044 ice_free(hw, sw_buf);
2049 /* ice_init_port_info - Initialize port_info with switch configuration data
2050 * @pi: pointer to port_info
2051 * @vsi_port_num: VSI number or port number
2052 * @type: Type of switch element (port or VSI)
2053 * @swid: switch ID of the switch the element is attached to
2054 * @pf_vf_num: PF or VF number
2055 * @is_vf: true if the element is a VF, false otherwise
2058 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2059 u16 swid, u16 pf_vf_num, bool is_vf)
2062 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2063 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2065 pi->pf_vf_num = pf_vf_num;
2067 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
2068 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
2071 ice_debug(pi->hw, ICE_DBG_SW,
2072 "incorrect VSI/port type received\n");
2077 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
2078 * @hw: pointer to the hardware structure
2080 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
2082 struct ice_aqc_get_sw_cfg_resp *rbuf;
2083 enum ice_status status;
2090 num_total_ports = 1;
2092 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
2093 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
2096 return ICE_ERR_NO_MEMORY;
2098 /* Multiple calls to ice_aq_get_sw_cfg may be required
2099 * to get all the switch configuration information. The need
2100 * for additional calls is indicated by ice_aq_get_sw_cfg
2101 * writing a non-zero value in req_desc
2104 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
2105 &req_desc, &num_elems, NULL);
2110 for (i = 0; i < num_elems; i++) {
2111 struct ice_aqc_get_sw_cfg_resp_elem *ele;
2112 u16 pf_vf_num, swid, vsi_port_num;
2116 ele = rbuf[i].elements;
2117 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
2118 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
2120 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
2121 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
2123 swid = LE16_TO_CPU(ele->swid);
2125 if (LE16_TO_CPU(ele->pf_vf_num) &
2126 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
2129 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
2130 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
2133 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2134 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
2135 if (j == num_total_ports) {
2136 ice_debug(hw, ICE_DBG_SW,
2137 "more ports than expected\n");
2138 status = ICE_ERR_CFG;
2141 ice_init_port_info(hw->port_info,
2142 vsi_port_num, res_type, swid,
2150 } while (req_desc && !status);
2153 ice_free(hw, (void *)rbuf);
2158 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2159 * @hw: pointer to the hardware structure
2160 * @fi: filter info structure to fill/update
2162 * This helper function populates the lb_en and lan_en elements of the provided
2163 * ice_fltr_info struct using the switch's type and characteristics of the
2164 * switch rule being configured.
2166 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
2168 if ((fi->flag & ICE_FLTR_RX) &&
2169 (fi->fltr_act == ICE_FWD_TO_VSI ||
2170 fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
2171 fi->lkup_type == ICE_SW_LKUP_LAST)
2175 if ((fi->flag & ICE_FLTR_TX) &&
2176 (fi->fltr_act == ICE_FWD_TO_VSI ||
2177 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2178 fi->fltr_act == ICE_FWD_TO_Q ||
2179 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2180 /* Setting LB for prune actions will result in replicated
2181 * packets to the internal switch that will be dropped.
2183 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2186 /* Set lan_en to TRUE if
2187 * 1. The switch is a VEB AND
2189 * 2.1 The lookup is a directional lookup like ethertype,
2190 * promiscuous, ethertype-MAC, promiscuous-VLAN
2191 * and default-port OR
2192 * 2.2 The lookup is VLAN, OR
2193 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2194 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
2198 * The switch is a VEPA.
2200 * In all other cases, the LAN enable has to be set to false.
2203 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2204 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
2205 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2206 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2207 fi->lkup_type == ICE_SW_LKUP_DFLT ||
2208 fi->lkup_type == ICE_SW_LKUP_VLAN ||
2209 (fi->lkup_type == ICE_SW_LKUP_MAC &&
2210 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
2211 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
2212 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
2221 * ice_fill_sw_rule - Helper function to fill switch rule structure
2222 * @hw: pointer to the hardware structure
2223 * @f_info: entry containing packet forwarding information
2224 * @s_rule: switch rule structure to be filled in based on mac_entry
2225 * @opc: switch rules population command type - pass in the command opcode
2228 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
2229 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
2231 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
2239 if (opc == ice_aqc_opc_remove_sw_rules) {
2240 s_rule->pdata.lkup_tx_rx.act = 0;
2241 s_rule->pdata.lkup_tx_rx.index =
2242 CPU_TO_LE16(f_info->fltr_rule_id);
2243 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
2247 eth_hdr_sz = sizeof(dummy_eth_header);
2248 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
2250 /* initialize the ether header with a dummy header */
2251 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
2252 ice_fill_sw_info(hw, f_info);
2254 switch (f_info->fltr_act) {
2255 case ICE_FWD_TO_VSI:
2256 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
2257 ICE_SINGLE_ACT_VSI_ID_M;
2258 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2259 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2260 ICE_SINGLE_ACT_VALID_BIT;
2262 case ICE_FWD_TO_VSI_LIST:
2263 act |= ICE_SINGLE_ACT_VSI_LIST;
2264 act |= (f_info->fwd_id.vsi_list_id <<
2265 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
2266 ICE_SINGLE_ACT_VSI_LIST_ID_M;
2267 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2268 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2269 ICE_SINGLE_ACT_VALID_BIT;
2272 act |= ICE_SINGLE_ACT_TO_Q;
2273 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2274 ICE_SINGLE_ACT_Q_INDEX_M;
2276 case ICE_DROP_PACKET:
2277 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
2278 ICE_SINGLE_ACT_VALID_BIT;
2280 case ICE_FWD_TO_QGRP:
2281 q_rgn = f_info->qgrp_size > 0 ?
2282 (u8)ice_ilog2(f_info->qgrp_size) : 0;
2283 act |= ICE_SINGLE_ACT_TO_Q;
2284 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2285 ICE_SINGLE_ACT_Q_INDEX_M;
2286 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
2287 ICE_SINGLE_ACT_Q_REGION_M;
2294 act |= ICE_SINGLE_ACT_LB_ENABLE;
2296 act |= ICE_SINGLE_ACT_LAN_ENABLE;
2298 switch (f_info->lkup_type) {
2299 case ICE_SW_LKUP_MAC:
2300 daddr = f_info->l_data.mac.mac_addr;
2302 case ICE_SW_LKUP_VLAN:
2303 vlan_id = f_info->l_data.vlan.vlan_id;
2304 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
2305 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
2306 act |= ICE_SINGLE_ACT_PRUNE;
2307 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
2310 case ICE_SW_LKUP_ETHERTYPE_MAC:
2311 daddr = f_info->l_data.ethertype_mac.mac_addr;
2313 case ICE_SW_LKUP_ETHERTYPE:
2314 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2315 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
2317 case ICE_SW_LKUP_MAC_VLAN:
2318 daddr = f_info->l_data.mac_vlan.mac_addr;
2319 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2321 case ICE_SW_LKUP_PROMISC_VLAN:
2322 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2324 case ICE_SW_LKUP_PROMISC:
2325 daddr = f_info->l_data.mac_vlan.mac_addr;
2331 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
2332 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
2333 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
2335 /* Recipe set depending on lookup type */
2336 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
2337 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
2338 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2341 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
2342 ICE_NONDMA_TO_NONDMA);
2344 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
2345 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
2346 *off = CPU_TO_BE16(vlan_id);
2349 /* Create the switch rule with the final dummy Ethernet header */
2350 if (opc != ice_aqc_opc_update_sw_rules)
2351 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
2355 * ice_add_marker_act
2356 * @hw: pointer to the hardware structure
2357 * @m_ent: the management entry for which sw marker needs to be added
2358 * @sw_marker: sw marker to tag the Rx descriptor with
2359 * @l_id: large action resource ID
2361 * Create a large action to hold software marker and update the switch rule
2362 * entry pointed by m_ent with newly created large action
 *
 * Only ICE_SW_LKUP_MAC entries may carry a software marker; other lookup
 * types fail with ICE_ERR_PARAM. On success, m_ent->lg_act_idx and
 * m_ent->sw_marker_id record the new large action.
2364 static enum ice_status
2365 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2366 u16 sw_marker, u16 l_id)
2368 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
2369 /* For software marker we need 3 large actions
2370 * 1. FWD action: FWD TO VSI or VSI LIST
2371 * 2. GENERIC VALUE action to hold the profile ID
2372 * 3. GENERIC VALUE action to hold the software marker ID
2374 const u16 num_lg_acts = 3;
2375 enum ice_status status;
2381 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2382 return ICE_ERR_PARAM;
2384 /* Create two back-to-back switch rules and submit them to the HW using
2385 * one memory buffer:
 * (large action first, then the lookup rx/tx rule in the same buffer)
2389 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2390 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2391 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2393 return ICE_ERR_NO_MEMORY;
 /* Second rule sits immediately after the large action in the buffer */
2395 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2397 /* Fill in the first switch rule i.e. large action */
2398 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2399 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2400 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2402 /* First action VSI forwarding or VSI list forwarding depending on how
 * many VSIs are subscribed to this filter (vsi_count > 1 => VSI list)
2405 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2406 m_ent->fltr_info.fwd_id.hw_vsi_id;
2408 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2409 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2410 ICE_LG_ACT_VSI_LIST_ID_M;
2411 if (m_ent->vsi_count > 1)
2412 act |= ICE_LG_ACT_VSI_LIST;
2413 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2415 /* Second action descriptor type */
2416 act = ICE_LG_ACT_GENERIC;
2418 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2419 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2421 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2422 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2424 /* Third action Marker value */
2425 act |= ICE_LG_ACT_GENERIC;
2426 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2427 ICE_LG_ACT_GENERIC_VALUE_M;
2429 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
2431 /* call the fill switch rule to fill the lookup Tx Rx structure */
2432 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2433 ice_aqc_opc_update_sw_rules);
2435 /* Update the action to point to the large action ID */
2436 rx_tx->pdata.lkup_tx_rx.act =
2437 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
2438 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2439 ICE_SINGLE_ACT_PTR_VAL_M));
2441 /* Use the filter rule ID of the previously created rule with single
2442 * act. Once the update happens, hardware will treat this as large
 * action rule pointing at the large action index.
2445 rx_tx->pdata.lkup_tx_rx.index =
2446 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
 /* Submit both rules (num_rules = 2) in one AQ update call */
2448 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2449 ice_aqc_opc_update_sw_rules, NULL);
 /* NOTE(review): bookkeeping updates appear to be on the success path;
  * confirm the elided status check guards them.
  */
2451 m_ent->lg_act_idx = l_id;
2452 m_ent->sw_marker_id = sw_marker;
2455 ice_free(hw, lg_act);
2460 * ice_add_counter_act - add/update filter rule with counter action
2461 * @hw: pointer to the hardware structure
2462 * @m_ent: the management entry for which counter needs to be added
2463 * @counter_id: VLAN counter ID returned as part of allocate resource
2464 * @l_id: large action resource ID
 *
 * Attach a statistics-counter large action to an existing MAC rule.
 * Non-MAC lookups fail with ICE_ERR_PARAM. On success,
 * m_ent->lg_act_idx and m_ent->counter_index are updated.
2466 static enum ice_status
2467 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2468 u16 counter_id, u16 l_id)
2470 struct ice_aqc_sw_rules_elem *lg_act;
2471 struct ice_aqc_sw_rules_elem *rx_tx;
2472 enum ice_status status;
2473 /* 2 actions will be added while adding a large action counter */
2474 const int num_acts = 2;
2481 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2482 return ICE_ERR_PARAM;
2484 /* Create two back-to-back switch rules and submit them to the HW using
2485 * one memory buffer:
 * (large action first, then the lookup rx/tx rule in the same buffer)
2489 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2490 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2491 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
2494 return ICE_ERR_NO_MEMORY;
2496 rx_tx = (struct ice_aqc_sw_rules_elem *)
2497 ((u8 *)lg_act + lg_act_size);
2499 /* Fill in the first switch rule i.e. large action */
2500 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2501 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2502 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2504 /* First action VSI forwarding or VSI list forwarding depending on how
 * many VSIs are subscribed to this filter (vsi_count > 1 => VSI list)
2507 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2508 m_ent->fltr_info.fwd_id.hw_vsi_id;
2510 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2511 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2512 ICE_LG_ACT_VSI_LIST_ID_M;
2513 if (m_ent->vsi_count > 1)
2514 act |= ICE_LG_ACT_VSI_LIST;
2515 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2517 /* Second action counter ID */
2518 act = ICE_LG_ACT_STAT_COUNT;
2519 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2520 ICE_LG_ACT_STAT_COUNT_M;
2521 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2523 /* call the fill switch rule to fill the lookup Tx Rx structure */
2524 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2525 ice_aqc_opc_update_sw_rules);
 /* Point the existing rule's action at the large action by index */
2527 act = ICE_SINGLE_ACT_PTR;
2528 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2529 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2531 /* Use the filter rule ID of the previously created rule with single
2532 * act. Once the update happens, hardware will treat this as large
 * action rule pointing at the large action index.
2535 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2536 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
 /* Submit both rules (num_rules = 2) in one AQ update call */
2538 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2539 ice_aqc_opc_update_sw_rules, NULL);
 /* NOTE(review): bookkeeping updates appear to be on the success path;
  * confirm the elided status check guards them.
  */
2541 m_ent->lg_act_idx = l_id;
2542 m_ent->counter_index = counter_id;
2545 ice_free(hw, lg_act);
2550 * ice_create_vsi_list_map
2551 * @hw: pointer to the hardware structure
2552 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2553 * @num_vsi: number of VSI handles in the array
2554 * @vsi_list_id: VSI list ID generated as part of allocate resource
2556 * Helper function to create a new entry of VSI list ID to VSI mapping
2557 * using the given VSI list ID
 *
 * Returns the newly allocated map entry, which is also linked onto
 * sw->vsi_list_map_head. NOTE(review): the allocation-failure return
 * (presumably NULL) is not visible here — confirm.
2559 static struct ice_vsi_list_map_info *
2560 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2563 struct ice_switch_info *sw = hw->switch_info;
2564 struct ice_vsi_list_map_info *v_map;
2567 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2572 v_map->vsi_list_id = vsi_list_id;
 /* Record each member VSI in the bitmap keyed by VSI handle */
2574 for (i = 0; i < num_vsi; i++)
2575 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
2577 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2582 * ice_update_vsi_list_rule
2583 * @hw: pointer to the hardware structure
2584 * @vsi_handle_arr: array of VSI handles to form a VSI list
2585 * @num_vsi: number of VSI handles in the array
2586 * @vsi_list_id: VSI list ID generated as part of allocate resource
2587 * @remove: Boolean value to indicate if this is a remove action
2588 * @opc: switch rules population command type - pass in the command opcode
2589 * @lkup_type: lookup type of the filter
2591 * Call AQ command to add a new switch rule or update existing switch rule
2592 * using the given VSI list ID
 *
 * VLAN lookups use prune-list rule types; all other supported lookups
 * use VSI-list set/clear rule types. Unsupported lookups and invalid
 * VSI handles fail with ICE_ERR_PARAM.
2594 static enum ice_status
2595 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2596 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2597 enum ice_sw_lkup_type lkup_type)
2599 struct ice_aqc_sw_rules_elem *s_rule;
2600 enum ice_status status;
2606 return ICE_ERR_PARAM;
 /* Pick the rule type from the lookup type and add/remove direction */
2608 if (lkup_type == ICE_SW_LKUP_MAC ||
2609 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2610 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2611 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2612 lkup_type == ICE_SW_LKUP_PROMISC ||
2613 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2614 lkup_type == ICE_SW_LKUP_LAST)
2615 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2616 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2617 else if (lkup_type == ICE_SW_LKUP_VLAN)
2618 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2619 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2621 return ICE_ERR_PARAM;
2623 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2624 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2626 return ICE_ERR_NO_MEMORY;
2627 for (i = 0; i < num_vsi; i++) {
2628 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2629 status = ICE_ERR_PARAM;
 /* NOTE(review): error path presumably jumps to the free/exit
  * label elided below — confirm.
  */
2632 /* AQ call requires hw_vsi_id(s) */
2633 s_rule->pdata.vsi_list.vsi[i] =
2634 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2637 s_rule->type = CPU_TO_LE16(rule_type);
2638 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2639 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2641 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2644 ice_free(hw, s_rule);
2649 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2650 * @hw: pointer to the HW struct
2651 * @vsi_handle_arr: array of VSI handles to form a VSI list
2652 * @num_vsi: number of VSI handles in the array
2653 * @vsi_list_id: stores the ID of the VSI list to be created
2654 * @lkup_type: switch rule filter's lookup type
 *
 * Two-step helper: allocate a VSI list resource from firmware, then
 * program its membership via ice_update_vsi_list_rule(). The allocated
 * list ID is returned to the caller through @vsi_list_id.
2656 static enum ice_status
2657 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2658 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2660 enum ice_status status;
2662 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2663 ice_aqc_opc_alloc_res);
 /* NOTE(review): early return on allocation failure is elided here */
2667 /* Update the newly created VSI list to include the specified VSIs */
2668 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2669 *vsi_list_id, false,
2670 ice_aqc_opc_add_sw_rules, lkup_type);
2674 * ice_create_pkt_fwd_rule
2675 * @hw: pointer to the hardware structure
2676 * @recp_list: corresponding filter management list
2677 * @f_entry: entry containing packet forwarding information
2679 * Create switch rule with given filter information and add an entry
2680 * to the corresponding filter management list to track this switch rule
 *
 * On success the HW-assigned rule ID is written back into both the
 * caller's f_entry and the new management entry, and the management
 * entry is linked onto recp_list->filt_rules.
2683 static enum ice_status
2684 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2685 struct ice_fltr_list_entry *f_entry)
2687 struct ice_fltr_mgmt_list_entry *fm_entry;
2688 struct ice_aqc_sw_rules_elem *s_rule;
2689 enum ice_status status;
2691 s_rule = (struct ice_aqc_sw_rules_elem *)
2692 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2694 return ICE_ERR_NO_MEMORY;
2695 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2696 ice_malloc(hw, sizeof(*fm_entry));
2698 status = ICE_ERR_NO_MEMORY;
2699 goto ice_create_pkt_fwd_rule_exit;
2702 fm_entry->fltr_info = f_entry->fltr_info;
2704 /* Initialize all the fields for the management entry */
2705 fm_entry->vsi_count = 1;
2706 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2707 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2708 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2710 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2711 ice_aqc_opc_add_sw_rules);
2713 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2714 ice_aqc_opc_add_sw_rules, NULL);
 /* On AQ failure free the management entry; the s_rule buffer is
  * released at the common exit label below.
  */
2716 ice_free(hw, fm_entry);
2717 goto ice_create_pkt_fwd_rule_exit;
 /* Propagate the HW rule index so future update/remove calls can
  * address this rule.
  */
2720 f_entry->fltr_info.fltr_rule_id =
2721 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2722 fm_entry->fltr_info.fltr_rule_id =
2723 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2725 /* The book keeping entries will get removed when base driver
2726 * calls remove filter AQ command
2728 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
2730 ice_create_pkt_fwd_rule_exit:
2731 ice_free(hw, s_rule);
2736 * ice_update_pkt_fwd_rule
2737 * @hw: pointer to the hardware structure
2738 * @f_info: filter information for switch rule
2740 * Call AQ command to update a previously created switch rule with a
 * new forwarding action. The rule is addressed by f_info->fltr_rule_id.
2743 static enum ice_status
2744 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2746 struct ice_aqc_sw_rules_elem *s_rule;
2747 enum ice_status status;
2749 s_rule = (struct ice_aqc_sw_rules_elem *)
2750 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2752 return ICE_ERR_NO_MEMORY;
2754 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
 /* Address the existing HW rule by its previously returned index */
2756 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2758 /* Update switch rule with new rule set to forward VSI list */
2759 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2760 ice_aqc_opc_update_sw_rules, NULL);
2762 ice_free(hw, s_rule);
2767 * ice_update_sw_rule_bridge_mode
2768 * @hw: pointer to the HW struct
2770 * Updates unicast switch filter rules based on VEB/VEPA mode
 *
 * Walks the MAC-lookup rule list under its lock and re-issues an update
 * for every unicast Tx forwarding rule so that it reflects the current
 * bridge mode. Returns the first failing status, if any.
2772 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2774 struct ice_switch_info *sw = hw->switch_info;
2775 struct ice_fltr_mgmt_list_entry *fm_entry;
2776 enum ice_status status = ICE_SUCCESS;
2777 struct LIST_HEAD_TYPE *rule_head;
2778 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2780 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2781 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2783 ice_acquire_lock(rule_lock);
2784 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2786 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2787 u8 *addr = fi->l_data.mac.mac_addr;
2789 /* Update unicast Tx rules to reflect the selected
 * VEB/VEPA mode; Rx and multicast rules are unaffected.
2792 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2793 (fi->fltr_act == ICE_FWD_TO_VSI ||
2794 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2795 fi->fltr_act == ICE_FWD_TO_Q ||
2796 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2797 status = ice_update_pkt_fwd_rule(hw, fi);
 /* NOTE(review): loop presumably breaks on failure —
  * the break is elided here; confirm.
  */
2803 ice_release_lock(rule_lock);
2809 * ice_add_update_vsi_list
2810 * @hw: pointer to the hardware structure
2811 * @m_entry: pointer to current filter management list entry
2812 * @cur_fltr: filter information from the book keeping entry
2813 * @new_fltr: filter information with the new VSI to be added
2815 * Call AQ command to add or update previously created VSI list with new VSI.
2817 * Helper function to do book keeping associated with adding filter information
2818 * The algorithm to do the book keeping is described below :
2819 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2820 * if only one VSI has been added till now
2821 * Allocate a new VSI list and add two VSIs
2822 * to this list using switch rule command
2823 * Update the previously created switch rule with the
2824 * newly created VSI list ID
2825 * if a VSI list was previously created
2826 * Add the new VSI to the previously created VSI list set
2827 * using the update switch rule command
 *
 * Queue/queue-group actions cannot be combined into a VSI list, so any
 * such mix returns ICE_ERR_NOT_IMPL; a duplicate VSI returns
 * ICE_ERR_ALREADY_EXISTS.
2829 static enum ice_status
2830 ice_add_update_vsi_list(struct ice_hw *hw,
2831 struct ice_fltr_mgmt_list_entry *m_entry,
2832 struct ice_fltr_info *cur_fltr,
2833 struct ice_fltr_info *new_fltr)
2835 enum ice_status status = ICE_SUCCESS;
2836 u16 vsi_list_id = 0;
2838 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2839 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2840 return ICE_ERR_NOT_IMPL;
2842 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2843 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2844 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2845 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2846 return ICE_ERR_NOT_IMPL;
2848 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2849 /* Only one entry existed in the mapping and it was not already
2850 * a part of a VSI list. So, create a VSI list with the old and
 * new VSIs as the initial members.
2853 struct ice_fltr_info tmp_fltr;
2854 u16 vsi_handle_arr[2];
2856 /* A rule already exists with the new VSI being added */
2857 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2858 return ICE_ERR_ALREADY_EXISTS;
2860 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2861 vsi_handle_arr[1] = new_fltr->vsi_handle;
2862 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2864 new_fltr->lkup_type);
 /* NOTE(review): early return on status failure is elided here */
2868 tmp_fltr = *new_fltr;
2869 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2870 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2871 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2872 /* Update the previous switch rule of "MAC forward to VSI" to
2873 * "MAC fwd to VSI list"
2875 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
 /* Keep the bookkeeping entry consistent with the HW rule */
2879 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2880 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2881 m_entry->vsi_list_info =
2882 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2885 /* If this entry was large action then the large action needs
2886 * to be updated to point to FWD to VSI list
2888 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2890 ice_add_marker_act(hw, m_entry,
2891 m_entry->sw_marker_id,
2892 m_entry->lg_act_idx);
 /* else branch: a VSI list already exists for this filter */
2894 u16 vsi_handle = new_fltr->vsi_handle;
2895 enum ice_adminq_opc opcode;
2897 if (!m_entry->vsi_list_info)
 /* NOTE(review): the error return for a missing list map is
  * elided here — confirm.
  */
2900 /* A rule already exists with the new VSI being added */
2901 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
 /* duplicate membership: return value elided — confirm */
2904 /* Update the previously created VSI list set with
2905 * the new VSI ID passed in
2907 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2908 opcode = ice_aqc_opc_update_sw_rules;
2910 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2911 vsi_list_id, false, opcode,
2912 new_fltr->lkup_type);
2913 /* update VSI list mapping info with new VSI ID */
2915 ice_set_bit(vsi_handle,
2916 m_entry->vsi_list_info->vsi_map);
 /* vsi_count tracks how many VSIs subscribe to this filter */
2919 m_entry->vsi_count++;
2924 * ice_find_rule_entry - Search a rule entry
2925 * @list_head: head of rule list
2926 * @f_info: rule information
2928 * Helper function to search for a given rule entry
2929 * Returns pointer to entry storing the rule if found
 * (NULL if no entry matches). Matching compares the raw lookup data
 * bytes and the Rx/Tx flag; other fields are ignored.
 *
 * NOTE(review): caller is expected to hold the recipe's filt_rule_lock
 * while the list is traversed — confirm at call sites.
2931 static struct ice_fltr_mgmt_list_entry *
2932 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
2933 struct ice_fltr_info *f_info)
2935 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2937 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
2939 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2940 sizeof(f_info->l_data)) &&
2941 f_info->flag == list_itr->fltr_info.flag) {
2950 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2951 * @recp_list: VSI lists needs to be searched
2952 * @vsi_handle: VSI handle to be found in VSI list
2953 * @vsi_list_id: VSI list ID found containing vsi_handle
2955 * Helper function to search a VSI list with single entry containing given VSI
2956 * handle element. This can be extended further to search VSI list with more
2957 * than 1 vsi_count. Returns pointer to VSI list entry if found.
 *
 * Advanced-rule recipes and legacy recipes store different entry types
 * on the same list head, hence the two traversal branches below.
2959 static struct ice_vsi_list_map_info *
2960 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
2963 struct ice_vsi_list_map_info *map_info = NULL;
2964 struct LIST_HEAD_TYPE *list_head;
2966 list_head = &recp_list->filt_rules;
2967 if (recp_list->adv_rule) {
2968 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2970 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2971 ice_adv_fltr_mgmt_list_entry,
2973 if (list_itr->vsi_list_info) {
2974 map_info = list_itr->vsi_list_info;
2975 if (ice_is_bit_set(map_info->vsi_map,
2977 *vsi_list_id = map_info->vsi_list_id;
 /* match found: return elided here — confirm */
2983 struct ice_fltr_mgmt_list_entry *list_itr;
2985 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2986 ice_fltr_mgmt_list_entry,
 /* legacy path only considers single-VSI lists, per the
  * kernel-doc note above
  */
2988 if (list_itr->vsi_count == 1 &&
2989 list_itr->vsi_list_info) {
2990 map_info = list_itr->vsi_list_info;
2991 if (ice_is_bit_set(map_info->vsi_map,
2993 *vsi_list_id = map_info->vsi_list_id;
3003 * ice_add_rule_internal - add rule for a given lookup type
3004 * @hw: pointer to the hardware structure
3005 * @recp_list: recipe list for which rule has to be added
3006 * @lport: logic port number on which function add rule
3007 * @f_entry: structure containing MAC forwarding information
3009 * Adds or updates the rule lists for a given recipe
 *
 * If no matching rule exists a new packet-forwarding rule is created;
 * otherwise the existing entry's VSI list is extended with the new VSI.
 * The recipe's rule lock is held across lookup and update.
3011 static enum ice_status
3012 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3013 u8 lport, struct ice_fltr_list_entry *f_entry)
3015 struct ice_fltr_info *new_fltr, *cur_fltr;
3016 struct ice_fltr_mgmt_list_entry *m_entry;
3017 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3018 enum ice_status status = ICE_SUCCESS;
3020 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3021 return ICE_ERR_PARAM;
3023 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
3024 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
3025 f_entry->fltr_info.fwd_id.hw_vsi_id =
3026 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3028 rule_lock = &recp_list->filt_rule_lock;
3030 ice_acquire_lock(rule_lock);
3031 new_fltr = &f_entry->fltr_info;
 /* Source is the port for Rx rules, the HW VSI number for Tx rules */
3032 if (new_fltr->flag & ICE_FLTR_RX)
3033 new_fltr->src = lport;
3034 else if (new_fltr->flag & ICE_FLTR_TX)
3036 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3038 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
 /* No existing rule: create a fresh forwarding rule and exit */
3040 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3041 goto exit_add_rule_internal;
3044 cur_fltr = &m_entry->fltr_info;
3045 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
3047 exit_add_rule_internal:
3048 ice_release_lock(rule_lock);
3053 * ice_remove_vsi_list_rule
3054 * @hw: pointer to the hardware structure
3055 * @vsi_list_id: VSI list ID generated as part of allocate resource
3056 * @lkup_type: switch rule filter lookup type
3058 * The VSI list should be emptied before this function is called to remove the
 * VSI list resource.
3061 static enum ice_status
3062 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
3063 enum ice_sw_lkup_type lkup_type)
3065 struct ice_aqc_sw_rules_elem *s_rule;
3066 enum ice_status status;
3069 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
3070 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3072 return ICE_ERR_NO_MEMORY;
3074 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
3075 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
3077 /* Free the vsi_list resource that we allocated. It is assumed that the
3078 * list is empty at this point.
 * NOTE(review): the populated s_rule does not appear to be submitted
 * in the visible code before the resource is freed — confirm against
 * the elided lines.
3080 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
3081 ice_aqc_opc_free_res);
3083 ice_free(hw, s_rule);
3088 * ice_rem_update_vsi_list
3089 * @hw: pointer to the hardware structure
3090 * @vsi_handle: VSI handle of the VSI to remove
3091 * @fm_list: filter management entry for which the VSI list management needs to
 * be done (must be an ICE_FWD_TO_VSI_LIST rule with vsi_count > 0)
 *
 * Removes @vsi_handle from the rule's VSI list. When only one VSI
 * remains (non-VLAN lookups) the rule is converted back to a plain
 * FWD_TO_VSI rule and the now-unneeded VSI list is torn down.
3094 static enum ice_status
3095 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
3096 struct ice_fltr_mgmt_list_entry *fm_list)
3098 enum ice_sw_lkup_type lkup_type;
3099 enum ice_status status = ICE_SUCCESS;
3102 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
3103 fm_list->vsi_count == 0)
3104 return ICE_ERR_PARAM;
3106 /* A rule with the VSI being removed does not exist */
3107 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
3108 return ICE_ERR_DOES_NOT_EXIST;
3110 lkup_type = fm_list->fltr_info.lkup_type;
3111 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
 /* remove=true drops this VSI from the HW VSI list */
3112 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
3113 ice_aqc_opc_update_sw_rules,
 /* Mirror the HW change in the local bookkeeping */
3118 fm_list->vsi_count--;
3119 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
3121 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
3122 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
3123 struct ice_vsi_list_map_info *vsi_list_info =
3124 fm_list->vsi_list_info;
 /* Find the single VSI still on the list */
3127 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
3129 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
3130 return ICE_ERR_OUT_OF_RANGE;
3132 /* Make sure VSI list is empty before removing it below */
3133 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
3135 ice_aqc_opc_update_sw_rules,
 /* Convert the rule back to direct VSI forwarding */
3140 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
3141 tmp_fltr_info.fwd_id.hw_vsi_id =
3142 ice_get_hw_vsi_num(hw, rem_vsi_handle);
3143 tmp_fltr_info.vsi_handle = rem_vsi_handle;
3144 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
3146 ice_debug(hw, ICE_DBG_SW,
3147 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
3148 tmp_fltr_info.fwd_id.hw_vsi_id, status);
3152 fm_list->fltr_info = tmp_fltr_info;
 /* Tear down the VSI list once it is no longer referenced:
  * non-VLAN lists at one remaining VSI, VLAN prune lists at zero.
  */
3155 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
3156 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
3157 struct ice_vsi_list_map_info *vsi_list_info =
3158 fm_list->vsi_list_info;
3160 /* Remove the VSI list since it is no longer used */
3161 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
3163 ice_debug(hw, ICE_DBG_SW,
3164 "Failed to remove VSI list %d, error %d\n",
3165 vsi_list_id, status);
3169 LIST_DEL(&vsi_list_info->list_entry);
3170 ice_free(hw, vsi_list_info);
3171 fm_list->vsi_list_info = NULL;
3178 * ice_remove_rule_internal - Remove a filter rule of a given type
3180 * @hw: pointer to the hardware structure
3181 * @recp_list: recipe list for which the rule needs to removed
3182 * @f_entry: rule entry containing filter information
 *
 * Unsubscribes the entry's VSI from the rule; the HW rule itself is
 * only deleted once no VSI references it. Returns
 * ICE_ERR_DOES_NOT_EXIST if no matching rule is tracked.
3184 static enum ice_status
3185 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3186 struct ice_fltr_list_entry *f_entry)
3188 struct ice_fltr_mgmt_list_entry *list_elem;
3189 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3190 enum ice_status status = ICE_SUCCESS;
3191 bool remove_rule = false;
3194 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3195 return ICE_ERR_PARAM;
3196 f_entry->fltr_info.fwd_id.hw_vsi_id =
3197 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3199 rule_lock = &recp_list->filt_rule_lock;
3200 ice_acquire_lock(rule_lock);
3201 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
3202 &f_entry->fltr_info);
3204 status = ICE_ERR_DOES_NOT_EXIST;
 /* Rule not using a VSI list can be removed outright */
3208 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
3210 } else if (!list_elem->vsi_list_info) {
3211 status = ICE_ERR_DOES_NOT_EXIST;
3213 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
3214 /* a ref_cnt > 1 indicates that the vsi_list is being
3215 * shared by multiple rules. Decrement the ref_cnt and
3216 * remove this rule, but do not modify the list, as it
3217 * is in-use by other rules.
3219 list_elem->vsi_list_info->ref_cnt--;
 /* else branch: sole owner of the vsi_list */
3222 /* a ref_cnt of 1 indicates the vsi_list is only used
3223 * by one rule. However, the original removal request is only
3224 * for a single VSI. Update the vsi_list first, and only
3225 * remove the rule if there are no further VSIs in this list.
3227 vsi_handle = f_entry->fltr_info.vsi_handle;
3228 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
3231 /* if VSI count goes to zero after updating the VSI list */
3232 if (list_elem->vsi_count == 0)
 /* remove_rule is presumably set true here (elided) — confirm */
3237 /* Remove the lookup rule */
3238 struct ice_aqc_sw_rules_elem *s_rule;
3240 s_rule = (struct ice_aqc_sw_rules_elem *)
3241 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
3243 status = ICE_ERR_NO_MEMORY;
 /* NOTE(review): error path presumably jumps to the unlock exit
  * label elided below — confirm.
  */
3247 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3248 ice_aqc_opc_remove_sw_rules);
3250 status = ice_aq_sw_rules(hw, s_rule,
3251 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
3252 ice_aqc_opc_remove_sw_rules, NULL);
3254 /* Remove a book keeping from the list */
3255 ice_free(hw, s_rule);
3260 LIST_DEL(&list_elem->list_entry);
3261 ice_free(hw, list_elem);
3264 ice_release_lock(rule_lock);
3269 * ice_aq_get_res_alloc - get allocated resources
3270 * @hw: pointer to the HW struct
3271 * @num_entries: pointer to u16 to store the number of resource entries returned
3272 * @buf: pointer to user-supplied buffer
3273 * @buf_size: size of buff
3274 * @cd: pointer to command details structure or NULL
3276 * The user-supplied buffer must be large enough to store the resource
3277 * information for all resource types. Each resource type is an
3278 * ice_aqc_get_res_resp_data_elem structure.
 *
 * Returns ICE_ERR_BAD_PTR for a missing buffer, ICE_ERR_INVAL_SIZE for
 * an undersized one, otherwise the AQ command status. @num_entries is
 * optional and only written on success.
3281 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
3282 u16 buf_size, struct ice_sq_cd *cd)
3284 struct ice_aqc_get_res_alloc *resp;
3285 enum ice_status status;
3286 struct ice_aq_desc desc;
3289 return ICE_ERR_BAD_PTR;
3291 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
3292 return ICE_ERR_INVAL_SIZE;
3294 resp = &desc.params.get_res;
3296 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
3297 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3299 if (!status && num_entries)
3300 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
3306 * ice_aq_get_res_descs - get allocated resource descriptors
3307 * @hw: pointer to the hardware structure
3308 * @num_entries: number of resource entries in buffer
3309 * @buf: Indirect buffer to hold data parameters and response
3310 * @buf_size: size of buffer for indirect commands
3311 * @res_type: resource type
3312 * @res_shared: is resource shared
3313 * @desc_id: input - first desc ID to start; output - next desc ID
3314 * @cd: pointer to command details structure or NULL
 *
 * @buf_size must equal num_entries * sizeof(*buf) exactly, else
 * ICE_ERR_PARAM. On return, *desc_id holds the next descriptor ID
 * reported by firmware for iterative retrieval.
3317 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
3318 struct ice_aqc_get_allocd_res_desc_resp *buf,
3319 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
3320 struct ice_sq_cd *cd)
3322 struct ice_aqc_get_allocd_res_desc *cmd;
3323 struct ice_aq_desc desc;
3324 enum ice_status status;
3326 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3328 cmd = &desc.params.get_res_desc;
3331 return ICE_ERR_PARAM;
3333 if (buf_size != (num_entries * sizeof(*buf)))
3334 return ICE_ERR_PARAM;
3336 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
 /* Encode resource type and shared flag into the command word */
3338 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
3339 ICE_AQC_RES_TYPE_M) | (res_shared ?
3340 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
3341 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
3343 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
 /* NOTE(review): *desc_id update is presumably guarded by !status
  * in the elided line above — confirm.
  */
3345 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
3351 * ice_add_mac_rule - Add a MAC address based filter rule
3352 * @hw: pointer to the hardware structure
3353 * @m_list: list of MAC addresses and forwarding information
3354 * @sw: pointer to switch info struct for which function add rule
3355 * @lport: logic port number on which function add rule
3357 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
3358 * multiple unicast addresses, the function assumes that all the
3359 * addresses are unique in a given add_mac call. It doesn't
3360 * check for duplicates in this case, removing duplicates from a given
3361 * list should be taken care of in the caller of this function.
3363 static enum ice_status
3364 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
3365 struct ice_switch_info *sw, u8 lport)
3367 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
3368 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3369 struct ice_fltr_list_entry *m_list_itr;
3370 struct LIST_HEAD_TYPE *rule_head;
3371 u16 total_elem_left, s_rule_size;
3372 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3373 enum ice_status status = ICE_SUCCESS;
3374 u16 num_unicast = 0;
3378 rule_lock = &recp_list->filt_rule_lock;
3379 rule_head = &recp_list->filt_rules;
/* Pass 1: validate every entry. Multicast (and shared unicast)
 * filters are programmed one-by-one right here; exclusive unicast
 * filters are only duplicate-checked and counted, then bulk
 * programmed below.
 */
3381 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3383 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3387 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3388 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3389 if (!ice_is_vsi_valid(hw, vsi_handle))
3390 return ICE_ERR_PARAM;
3391 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3392 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3393 /* update the src in case it is VSI num */
3394 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3395 return ICE_ERR_PARAM;
3396 m_list_itr->fltr_info.src = hw_vsi_id;
3397 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3398 IS_ZERO_ETHER_ADDR(add))
3399 return ICE_ERR_PARAM;
3400 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3401 /* Don't overwrite the unicast address */
3402 ice_acquire_lock(rule_lock);
3403 if (ice_find_rule_entry(rule_head,
3404 &m_list_itr->fltr_info)) {
3405 ice_release_lock(rule_lock);
3406 return ICE_ERR_ALREADY_EXISTS;
3408 ice_release_lock(rule_lock);
3410 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3411 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3412 m_list_itr->status =
3413 ice_add_rule_internal(hw, recp_list, lport,
3415 if (m_list_itr->status)
3416 return m_list_itr->status;
3420 ice_acquire_lock(rule_lock);
3421 /* Exit if no suitable entries were found for adding bulk switch rule */
3423 status = ICE_SUCCESS;
3424 goto ice_add_mac_exit;
3427 /* Allocate switch rule buffer for the bulk update for unicast */
3428 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3429 s_rule = (struct ice_aqc_sw_rules_elem *)
3430 ice_calloc(hw, num_unicast, s_rule_size);
3432 status = ICE_ERR_NO_MEMORY;
3433 goto ice_add_mac_exit;
/* Pass 2: serialize each exclusive-unicast filter into the
 * contiguous rule buffer in add_sw_rules wire format.
 */
3437 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3439 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3440 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3442 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3443 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3444 ice_aqc_opc_add_sw_rules);
3445 r_iter = (struct ice_aqc_sw_rules_elem *)
3446 ((u8 *)r_iter + s_rule_size);
3450 /* Call AQ bulk switch rule update for all unicast addresses */
3452 /* Call AQ switch rule in AQ_MAX chunk */
3453 for (total_elem_left = num_unicast; total_elem_left > 0;
3454 total_elem_left -= elem_sent) {
3455 struct ice_aqc_sw_rules_elem *entry = r_iter;
/* Cap each submission so the chunk fits in ICE_AQ_MAX_BUF_LEN */
3457 elem_sent = MIN_T(u8, total_elem_left,
3458 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
3459 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3460 elem_sent, ice_aqc_opc_add_sw_rules,
3463 goto ice_add_mac_exit;
3464 r_iter = (struct ice_aqc_sw_rules_elem *)
3465 ((u8 *)r_iter + (elem_sent * s_rule_size));
3468 /* Fill up rule ID based on the value returned from FW */
/* Pass 3: walk the rule buffer again, in the same order, to record
 * the FW-assigned rule index and create a book-keeping list entry
 * for each unicast filter that was just programmed.
 */
3470 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3472 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3473 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3474 struct ice_fltr_mgmt_list_entry *fm_entry;
3476 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3477 f_info->fltr_rule_id =
3478 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3479 f_info->fltr_act = ICE_FWD_TO_VSI;
3480 /* Create an entry to track this MAC address */
3481 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3482 ice_malloc(hw, sizeof(*fm_entry));
3484 status = ICE_ERR_NO_MEMORY;
3485 goto ice_add_mac_exit;
3487 fm_entry->fltr_info = *f_info;
3488 fm_entry->vsi_count = 1;
3489 /* The book keeping entries will get removed when
3490 * base driver calls remove filter AQ command
3493 LIST_ADD(&fm_entry->list_entry, rule_head);
3494 r_iter = (struct ice_aqc_sw_rules_elem *)
3495 ((u8 *)r_iter + s_rule_size);
3500 ice_release_lock(rule_lock);
3502 ice_free(hw, s_rule);
3507 * ice_add_mac - Add a MAC address based filter rule
3508 * @hw: pointer to the hardware structure
3509 * @m_list: list of MAC addresses and forwarding information
3511 * Function add MAC rule for logical port from HW struct
3513 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3516 return ICE_ERR_PARAM;
3518 return ice_add_mac_rule(hw, m_list, hw->switch_info,
3519 hw->port_info->lport);
3523 * ice_add_vlan_internal - Add one VLAN based filter rule
3524 * @hw: pointer to the hardware structure
3525 * @recp_list: recipe list for which rule has to be added
3526 * @f_entry: filter entry containing one VLAN information
3528 static enum ice_status
3529 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3530 struct ice_fltr_list_entry *f_entry)
3532 struct ice_fltr_mgmt_list_entry *v_list_itr;
3533 struct ice_fltr_info *new_fltr, *cur_fltr;
3534 enum ice_sw_lkup_type lkup_type;
3535 u16 vsi_list_id = 0, vsi_handle;
3536 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3537 enum ice_status status = ICE_SUCCESS;
3539 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3540 return ICE_ERR_PARAM;
3542 f_entry->fltr_info.fwd_id.hw_vsi_id =
3543 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3544 new_fltr = &f_entry->fltr_info;
3546 /* VLAN ID should only be 12 bits */
3547 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3548 return ICE_ERR_PARAM;
3550 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3551 return ICE_ERR_PARAM;
3553 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3554 lkup_type = new_fltr->lkup_type;
3555 vsi_handle = new_fltr->vsi_handle;
3556 rule_lock = &recp_list->filt_rule_lock;
3557 ice_acquire_lock(rule_lock);
3558 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
/* Three cases below: (1) no rule for this VLAN yet - create one,
 * converting a FWD_TO_VSI action into a (possibly shared) VSI list;
 * (2) rule exists and its VSI list is uniquely referenced - grow that
 * list in place; (3) rule exists but its VSI list is shared - build a
 * new two-VSI list and repoint the rule at it.
 */
3560 struct ice_vsi_list_map_info *map_info = NULL;
3562 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3563 /* All VLAN pruning rules use a VSI list. Check if
3564 * there is already a VSI list containing VSI that we
3565 * want to add. If found, use the same vsi_list_id for
3566 * this new VLAN rule or else create a new list.
3568 map_info = ice_find_vsi_list_entry(recp_list,
3572 status = ice_create_vsi_list_rule(hw,
3580 /* Convert the action to forwarding to a VSI list. */
3581 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3582 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3585 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3587 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
3590 status = ICE_ERR_DOES_NOT_EXIST;
3593 /* reuse VSI list for new rule and increment ref_cnt */
3595 v_list_itr->vsi_list_info = map_info;
3596 map_info->ref_cnt++;
3598 v_list_itr->vsi_list_info =
3599 ice_create_vsi_list_map(hw, &vsi_handle,
3603 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3604 /* Update existing VSI list to add new VSI ID only if it used
3607 cur_fltr = &v_list_itr->fltr_info;
3608 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3611 /* If VLAN rule exists and VSI list being used by this rule is
3612 * referenced by more than 1 VLAN rule. Then create a new VSI
3613 * list appending previous VSI with new VSI and update existing
3614 * VLAN rule to point to new VSI list ID
3616 struct ice_fltr_info tmp_fltr;
3617 u16 vsi_handle_arr[2];
3620 /* Current implementation only supports reusing VSI list with
3621 * one VSI count. We should never hit below condition
3623 if (v_list_itr->vsi_count > 1 &&
3624 v_list_itr->vsi_list_info->ref_cnt > 1) {
3625 ice_debug(hw, ICE_DBG_SW,
3626 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3627 status = ICE_ERR_CFG;
/* The shared list holds exactly one VSI here; fish out its handle */
3632 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3635 /* A rule already exists with the new VSI being added */
3636 if (cur_handle == vsi_handle) {
3637 status = ICE_ERR_ALREADY_EXISTS;
3641 vsi_handle_arr[0] = cur_handle;
3642 vsi_handle_arr[1] = vsi_handle;
3643 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3644 &vsi_list_id, lkup_type);
3648 tmp_fltr = v_list_itr->fltr_info;
3649 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3650 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3651 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3652 /* Update the previous switch rule to a new VSI list which
3653 * includes current VSI that is requested
3655 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3659 /* before overriding VSI list map info. decrement ref_cnt of
3662 v_list_itr->vsi_list_info->ref_cnt--;
3664 /* now update to newly created list */
3665 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3666 v_list_itr->vsi_list_info =
3667 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3669 v_list_itr->vsi_count++;
3673 ice_release_lock(rule_lock);
3678 * ice_add_vlan_rule - Add VLAN based filter rule
3679 * @hw: pointer to the hardware structure
3680 * @v_list: list of VLAN entries and forwarding information
3681 * @sw: pointer to switch info struct for which function add rule
3683 static enum ice_status
3684 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
3685 struct ice_switch_info *sw)
3687 struct ice_fltr_list_entry *v_list_itr;
3688 struct ice_sw_recipe *recp_list;
3690 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
3691 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
3693 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3694 return ICE_ERR_PARAM;
3695 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3696 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
3698 if (v_list_itr->status)
3699 return v_list_itr->status;
3705 * ice_add_vlan - Add a VLAN based filter rule
3706 * @hw: pointer to the hardware structure
3707 * @v_list: list of VLAN and forwarding information
3709 * Function add VLAN rule for logical port from HW struct
3711 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3714 return ICE_ERR_PARAM;
3716 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
3720 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3721 * @hw: pointer to the hardware structure
3722 * @mv_list: list of MAC and VLAN filters
3723 * @sw: pointer to switch info struct for which function add rule
3724 * @lport: logic port number on which function add rule
3726 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3727 * pruning bits enabled, then it is the responsibility of the caller to make
3728 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3729 * VLAN won't be received on that VSI otherwise.
3731 static enum ice_status
3732 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
3733 struct ice_switch_info *sw, u8 lport)
3735 struct ice_fltr_list_entry *mv_list_itr;
3736 struct ice_sw_recipe *recp_list;
3738 if (!mv_list || !hw)
3739 return ICE_ERR_PARAM;
3741 recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
3742 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3744 enum ice_sw_lkup_type l_type =
3745 mv_list_itr->fltr_info.lkup_type;
3747 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3748 return ICE_ERR_PARAM;
3749 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3750 mv_list_itr->status =
3751 ice_add_rule_internal(hw, recp_list, lport,
3753 if (mv_list_itr->status)
3754 return mv_list_itr->status;
3760 * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
3761 * @hw: pointer to the hardware structure
3762 * @mv_list: list of MAC VLAN addresses and forwarding information
3764 * Function add MAC VLAN rule for logical port from HW struct
3767 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3769 if (!mv_list || !hw)
3770 return ICE_ERR_PARAM;
3772 return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
3773 hw->port_info->lport);
3777 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
3778 * @hw: pointer to the hardware structure
3779 * @em_list: list of ether type MAC filter, MAC is optional
3780 * @sw: pointer to switch info struct for which function add rule
3781 * @lport: logic port number on which function add rule
3783 * This function requires the caller to populate the entries in
3784 * the filter list with the necessary fields (including flags to
3785 * indicate Tx or Rx rules).
3787 static enum ice_status
3788 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
3789 struct ice_switch_info *sw, u8 lport)
3791 struct ice_fltr_list_entry *em_list_itr;
3793 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3795 struct ice_sw_recipe *recp_list;
3796 enum ice_sw_lkup_type l_type;
3798 l_type = em_list_itr->fltr_info.lkup_type;
3799 recp_list = &sw->recp_list[l_type];
3801 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3802 l_type != ICE_SW_LKUP_ETHERTYPE)
3803 return ICE_ERR_PARAM;
3805 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
3808 if (em_list_itr->status)
3809 return em_list_itr->status;
3815 * ice_add_eth_mac - Add a ethertype based filter rule
3816 * @hw: pointer to the hardware structure
3817 * @em_list: list of ethertype and forwarding information
3819 * Function add ethertype rule for logical port from HW struct
3822 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3824 if (!em_list || !hw)
3825 return ICE_ERR_PARAM;
3827 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
3828 hw->port_info->lport);
3832 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
3833 * @hw: pointer to the hardware structure
3834 * @em_list: list of ethertype or ethertype MAC entries
3835 * @sw: pointer to switch info struct for which function add rule
3837 static enum ice_status
3838 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
3839 struct ice_switch_info *sw)
3841 struct ice_fltr_list_entry *em_list_itr, *tmp;
3843 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3845 struct ice_sw_recipe *recp_list;
3846 enum ice_sw_lkup_type l_type;
3848 l_type = em_list_itr->fltr_info.lkup_type;
3850 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3851 l_type != ICE_SW_LKUP_ETHERTYPE)
3852 return ICE_ERR_PARAM;
3854 recp_list = &sw->recp_list[l_type];
3855 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
3857 if (em_list_itr->status)
3858 return em_list_itr->status;
3864 * ice_remove_eth_mac - remove a ethertype based filter rule
3865 * @hw: pointer to the hardware structure
3866 * @em_list: list of ethertype and forwarding information
3870 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3872 if (!em_list || !hw)
3873 return ICE_ERR_PARAM;
3875 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
3879 * ice_rem_sw_rule_info
3880 * @hw: pointer to the hardware structure
3881 * @rule_head: pointer to the switch list structure that we want to delete
3884 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3886 if (!LIST_EMPTY(rule_head)) {
3887 struct ice_fltr_mgmt_list_entry *entry;
3888 struct ice_fltr_mgmt_list_entry *tmp;
3890 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3891 ice_fltr_mgmt_list_entry, list_entry) {
3892 LIST_DEL(&entry->list_entry);
3893 ice_free(hw, entry);
3899 * ice_rem_adv_rule_info
3900 * @hw: pointer to the hardware structure
3901 * @rule_head: pointer to the switch list structure that we want to delete
3904 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3906 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3907 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3909 if (LIST_EMPTY(rule_head))
3912 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3913 ice_adv_fltr_mgmt_list_entry, list_entry) {
3914 LIST_DEL(&lst_itr->list_entry);
3915 ice_free(hw, lst_itr->lkups);
3916 ice_free(hw, lst_itr);
3921 * ice_rem_all_sw_rules_info
3922 * @hw: pointer to the hardware structure
3924 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3926 struct ice_switch_info *sw = hw->switch_info;
3929 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3930 struct LIST_HEAD_TYPE *rule_head;
3932 rule_head = &sw->recp_list[i].filt_rules;
3933 if (!sw->recp_list[i].adv_rule)
3934 ice_rem_sw_rule_info(hw, rule_head);
3936 ice_rem_adv_rule_info(hw, rule_head);
3941 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3942 * @pi: pointer to the port_info structure
3943 * @vsi_handle: VSI handle to set as default
3944 * @set: true to add the above mentioned switch rule, false to remove it
3945 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3947 * add filter rule to set/unset given VSI as default VSI for the switch
3948 * (represented by swid)
3951 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3954 struct ice_aqc_sw_rules_elem *s_rule;
3955 struct ice_fltr_info f_info;
3956 struct ice_hw *hw = pi->hw;
3957 enum ice_adminq_opc opcode;
3958 enum ice_status status;
3962 if (!ice_is_vsi_valid(hw, vsi_handle))
3963 return ICE_ERR_PARAM;
3964 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
/* Adding needs the full dummy-ether-header rule; removal only needs
 * the header-less rule element.
 */
3966 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3967 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3968 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3970 return ICE_ERR_NO_MEMORY;
3972 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3974 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3975 f_info.flag = direction;
3976 f_info.fltr_act = ICE_FWD_TO_VSI;
3977 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
/* Per-direction source info; on removal the previously cached rule ID
 * from the port_info is what identifies the rule to delete.
 */
3979 if (f_info.flag & ICE_FLTR_RX) {
3980 f_info.src = pi->lport;
3981 f_info.src_id = ICE_SRC_ID_LPORT;
3983 f_info.fltr_rule_id =
3984 pi->dflt_rx_vsi_rule_id;
3985 } else if (f_info.flag & ICE_FLTR_TX) {
3986 f_info.src_id = ICE_SRC_ID_VSI;
3987 f_info.src = hw_vsi_id;
3989 f_info.fltr_rule_id =
3990 pi->dflt_tx_vsi_rule_id;
3994 opcode = ice_aqc_opc_add_sw_rules;
3996 opcode = ice_aqc_opc_remove_sw_rules;
3998 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
4000 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
4001 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
/* On success, cache (set) or invalidate (clear) the default-VSI
 * number and rule ID in the port_info for the affected direction.
 */
4004 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4006 if (f_info.flag & ICE_FLTR_TX) {
4007 pi->dflt_tx_vsi_num = hw_vsi_id;
4008 pi->dflt_tx_vsi_rule_id = index;
4009 } else if (f_info.flag & ICE_FLTR_RX) {
4010 pi->dflt_rx_vsi_num = hw_vsi_id;
4011 pi->dflt_rx_vsi_rule_id = index;
4014 if (f_info.flag & ICE_FLTR_TX) {
4015 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
4016 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
4017 } else if (f_info.flag & ICE_FLTR_RX) {
4018 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
4019 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
4024 ice_free(hw, s_rule);
4029 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
4030 * @list_head: head of rule list
4031 * @f_info: rule information
4033 * Helper function to search for a unicast rule entry - this is to be used
4034 * to remove unicast MAC filter that is not shared with other VSIs on the
4037 * Returns pointer to entry storing the rule if found
4039 static struct ice_fltr_mgmt_list_entry *
4040 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
4041 struct ice_fltr_info *f_info)
4043 struct ice_fltr_mgmt_list_entry *list_itr;
4045 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4047 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4048 sizeof(f_info->l_data)) &&
4049 f_info->fwd_id.hw_vsi_id ==
4050 list_itr->fltr_info.fwd_id.hw_vsi_id &&
4051 f_info->flag == list_itr->fltr_info.flag)
4058 * ice_remove_mac_rule - remove a MAC based filter rule
4059 * @hw: pointer to the hardware structure
4060 * @m_list: list of MAC addresses and forwarding information
4061 * @recp_list: list from which function remove MAC address
4063 * This function removes either a MAC filter rule or a specific VSI from a
4064 * VSI list for a multicast MAC address.
4066 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
4067 * ice_add_mac. Caller should be aware that this call will only work if all
4068 * the entries passed into m_list were added previously. It will not attempt to
4069 * do a partial remove of entries that were found.
4071 static enum ice_status
4072 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4073 struct ice_sw_recipe *recp_list)
4075 struct ice_fltr_list_entry *list_itr, *tmp;
4076 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4079 return ICE_ERR_PARAM;
4081 rule_lock = &recp_list->filt_rule_lock;
4082 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
4084 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
4085 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
4088 if (l_type != ICE_SW_LKUP_MAC)
4089 return ICE_ERR_PARAM;
4091 vsi_handle = list_itr->fltr_info.vsi_handle;
4092 if (!ice_is_vsi_valid(hw, vsi_handle))
4093 return ICE_ERR_PARAM;
4095 list_itr->fltr_info.fwd_id.hw_vsi_id =
4096 ice_get_hw_vsi_num(hw, vsi_handle);
/* Exclusive-unicast mode: only remove the filter if it was really
 * programmed for this exact (MAC, VSI, flag) tuple.
 */
4097 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4098 /* Don't remove the unicast address that belongs to
4099 * another VSI on the switch, since it is not being
4102 ice_acquire_lock(rule_lock);
4103 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
4104 &list_itr->fltr_info)) {
4105 ice_release_lock(rule_lock);
4106 return ICE_ERR_DOES_NOT_EXIST;
4108 ice_release_lock(rule_lock);
4110 list_itr->status = ice_remove_rule_internal(hw, recp_list,
4112 if (list_itr->status)
4113 return list_itr->status;
4119 * ice_remove_mac - remove a MAC address based filter rule
4120 * @hw: pointer to the hardware structure
4121 * @m_list: list of MAC addresses and forwarding information
4124 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4126 struct ice_sw_recipe *recp_list;
4128 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4129 return ice_remove_mac_rule(hw, m_list, recp_list);
4133 * ice_remove_vlan_rule - Remove VLAN based filter rule
4134 * @hw: pointer to the hardware structure
4135 * @v_list: list of VLAN entries and forwarding information
4136 * @recp_list: list from which function remove VLAN
4138 static enum ice_status
4139 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4140 struct ice_sw_recipe *recp_list)
4142 struct ice_fltr_list_entry *v_list_itr, *tmp;
4144 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4146 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4148 if (l_type != ICE_SW_LKUP_VLAN)
4149 return ICE_ERR_PARAM;
4150 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4152 if (v_list_itr->status)
4153 return v_list_itr->status;
4159 * ice_remove_vlan - remove a VLAN address based filter rule
4160 * @hw: pointer to the hardware structure
4161 * @v_list: list of VLAN and forwarding information
4165 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4167 struct ice_sw_recipe *recp_list;
4170 return ICE_ERR_PARAM;
4172 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
4173 return ice_remove_vlan_rule(hw, v_list, recp_list);
4177 * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
4178 * @hw: pointer to the hardware structure
4179 * @v_list: list of MAC VLAN entries and forwarding information
4180 * @recp_list: list from which function remove MAC VLAN
4182 static enum ice_status
4183 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4184 struct ice_sw_recipe *recp_list)
4186 struct ice_fltr_list_entry *v_list_itr, *tmp;
4188 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4189 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4191 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4193 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4194 return ICE_ERR_PARAM;
4195 v_list_itr->status =
4196 ice_remove_rule_internal(hw, recp_list,
4198 if (v_list_itr->status)
4199 return v_list_itr->status;
4205 * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
4206 * @hw: pointer to the hardware structure
4207 * @mv_list: list of MAC VLAN and forwarding information
4210 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4212 struct ice_sw_recipe *recp_list;
4214 if (!mv_list || !hw)
4215 return ICE_ERR_PARAM;
4217 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4218 return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
4222 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
4223 * @fm_entry: filter entry to inspect
4224 * @vsi_handle: VSI handle to compare with filter info
4227 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
4229 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
4230 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
4231 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
4232 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
4237 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
4238 * @hw: pointer to the hardware structure
4239 * @vsi_handle: VSI handle to remove filters from
4240 * @vsi_list_head: pointer to the list to add entry to
4241 * @fi: pointer to fltr_info of filter entry to copy & add
4243 * Helper function, used when creating a list of filters to remove from
4244 * a specific VSI. The entry added to vsi_list_head is a COPY of the
4245 * original filter entry, with the exception of fltr_info.fltr_act and
4246 * fltr_info.fwd_id fields. These are set such that later logic can
4247 * extract which VSI to remove the fltr from, and pass on that information.
4249 static enum ice_status
4250 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4251 struct LIST_HEAD_TYPE *vsi_list_head,
4252 struct ice_fltr_info *fi)
4254 struct ice_fltr_list_entry *tmp;
4256 /* this memory is freed up in the caller function
4257 * once filters for this VSI are removed
4259 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
4261 return ICE_ERR_NO_MEMORY;
4263 tmp->fltr_info = *fi;
4265 /* Overwrite these fields to indicate which VSI to remove filter from,
4266 * so find and remove logic can extract the information from the
4267 * list entries. Note that original entries will still have proper
4270 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
4271 tmp->fltr_info.vsi_handle = vsi_handle;
4272 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4274 LIST_ADD(&tmp->list_entry, vsi_list_head);
4280 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4281 * @hw: pointer to the hardware structure
4282 * @vsi_handle: VSI handle to remove filters from
4283 * @lkup_list_head: pointer to the list that has certain lookup type filters
4284 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4286 * Locates all filters in lkup_list_head that are used by the given VSI,
4287 * and adds COPIES of those entries to vsi_list_head (intended to be used
4288 * to remove the listed filters).
4289 * Note that this means all entries in vsi_list_head must be explicitly
4290 * deallocated by the caller when done with list.
4292 static enum ice_status
4293 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4294 struct LIST_HEAD_TYPE *lkup_list_head,
4295 struct LIST_HEAD_TYPE *vsi_list_head)
4297 struct ice_fltr_mgmt_list_entry *fm_entry;
4298 enum ice_status status = ICE_SUCCESS;
4300 /* check to make sure VSI ID is valid and within boundary */
4301 if (!ice_is_vsi_valid(hw, vsi_handle))
4302 return ICE_ERR_PARAM;
4304 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
4305 ice_fltr_mgmt_list_entry, list_entry) {
4306 struct ice_fltr_info *fi;
4308 fi = &fm_entry->fltr_info;
4309 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
4312 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4321 * ice_determine_promisc_mask
4322 * @fi: filter info to parse
4324 * Helper function to determine which ICE_PROMISC_ mask corresponds
4325 * to given filter into.
4327 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4329 u16 vid = fi->l_data.mac_vlan.vlan_id;
4330 u8 *macaddr = fi->l_data.mac.mac_addr;
4331 bool is_tx_fltr = false;
4332 u8 promisc_mask = 0;
4334 if (fi->flag == ICE_FLTR_TX)
4337 if (IS_BROADCAST_ETHER_ADDR(macaddr))
4338 promisc_mask |= is_tx_fltr ?
4339 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4340 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
4341 promisc_mask |= is_tx_fltr ?
4342 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4343 else if (IS_UNICAST_ETHER_ADDR(macaddr))
4344 promisc_mask |= is_tx_fltr ?
4345 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
4347 promisc_mask |= is_tx_fltr ?
4348 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4350 return promisc_mask;
4354 * _ice_get_vsi_promisc - get promiscuous mode of given VSI
4355 * @hw: pointer to the hardware structure
4356 * @vsi_handle: VSI handle to retrieve info from
4357 * @promisc_mask: pointer to mask to be filled in
4358 * @vid: VLAN ID of promisc VLAN VSI
4359 * @sw: pointer to switch info struct for which function add rule
4361 static enum ice_status
4362 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4363 u16 *vid, struct ice_switch_info *sw)
4365 struct ice_fltr_mgmt_list_entry *itr;
4366 struct LIST_HEAD_TYPE *rule_head;
4367 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4369 if (!ice_is_vsi_valid(hw, vsi_handle))
4370 return ICE_ERR_PARAM;
4374 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
4375 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
4377 ice_acquire_lock(rule_lock);
4378 LIST_FOR_EACH_ENTRY(itr, rule_head,
4379 ice_fltr_mgmt_list_entry, list_entry) {
4380 /* Continue if this filter doesn't apply to this VSI or the
4381 * VSI ID is not in the VSI map for this filter
4383 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4386 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4388 ice_release_lock(rule_lock);
4394 * ice_get_vsi_promisc - get promiscuous mode of given VSI
4395 * @hw: pointer to the hardware structure
4396 * @vsi_handle: VSI handle to retrieve info from
4397 * @promisc_mask: pointer to mask to be filled in
4398 * @vid: VLAN ID of promisc VLAN VSI
4401 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4404 return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
4405 vid, hw->switch_info);
4409 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4410 * @hw: pointer to the hardware structure
4411 * @vsi_handle: VSI handle to retrieve info from
4412 * @promisc_mask: pointer to mask to be filled in
4413 * @vid: VLAN ID of promisc VLAN VSI
4414 * @sw: pointer to switch info struct for which function add rule
4416 static enum ice_status
4417 _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4418 u16 *vid, struct ice_switch_info *sw)
4420 struct ice_fltr_mgmt_list_entry *itr;
4421 struct LIST_HEAD_TYPE *rule_head;
4422 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4424 if (!ice_is_vsi_valid(hw, vsi_handle))
4425 return ICE_ERR_PARAM;
4429 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
4430 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
4432 ice_acquire_lock(rule_lock);
4433 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
4435 /* Continue if this filter doesn't apply to this VSI or the
4436 * VSI ID is not in the VSI map for this filter
4438 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4441 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4443 ice_release_lock(rule_lock);
4449 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4450 * @hw: pointer to the hardware structure
4451 * @vsi_handle: VSI handle to retrieve info from
4452 * @promisc_mask: pointer to mask to be filled in
4453 * @vid: VLAN ID of promisc VLAN VSI
4456 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4459 return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
4460 vid, hw->switch_info);
4464 * ice_remove_promisc - Remove promisc based filter rules
4465 * @hw: pointer to the hardware structure
4466 * @recp_id: recipe ID for which the rule needs to removed
4467 * @v_list: list of promisc entries
4469 static enum ice_status
4470 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
4471 struct LIST_HEAD_TYPE *v_list)
4473 struct ice_fltr_list_entry *v_list_itr, *tmp;
4474 struct ice_sw_recipe *recp_list;
4476 recp_list = &hw->switch_info->recp_list[recp_id];
4477 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4479 v_list_itr->status =
4480 ice_remove_rule_internal(hw, recp_list, v_list_itr);
4481 if (v_list_itr->status)
4482 return v_list_itr->status;
4488 * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
4489 * @hw: pointer to the hardware structure
4490 * @vsi_handle: VSI handle to clear mode
4491 * @promisc_mask: mask of promiscuous config bits to clear
4492 * @vid: VLAN ID to clear VLAN promiscuous
4493 * @sw: pointer to switch info struct for which function add rule
4495 static enum ice_status
4496 _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4497 u16 vid, struct ice_switch_info *sw)
4499 struct ice_fltr_list_entry *fm_entry, *tmp;
4500 struct LIST_HEAD_TYPE remove_list_head;
4501 struct ice_fltr_mgmt_list_entry *itr;
4502 struct LIST_HEAD_TYPE *rule_head;
4503 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4504 enum ice_status status = ICE_SUCCESS;
4507 if (!ice_is_vsi_valid(hw, vsi_handle))
4508 return ICE_ERR_PARAM;
/* VLAN promisc bits live on the PROMISC_VLAN recipe, everything else
 * on the plain PROMISC recipe.
 */
4510 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
4511 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4513 recipe_id = ICE_SW_LKUP_PROMISC;
4515 rule_head = &sw->recp_list[recipe_id].filt_rules;
4516 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4518 INIT_LIST_HEAD(&remove_list_head);
/* Under the rule lock, collect COPIES of the matching filters into a
 * local list; the actual removal happens after the lock is dropped.
 */
4520 ice_acquire_lock(rule_lock);
4521 LIST_FOR_EACH_ENTRY(itr, rule_head,
4522 ice_fltr_mgmt_list_entry, list_entry) {
4523 struct ice_fltr_info *fltr_info;
4524 u8 fltr_promisc_mask = 0;
4526 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4528 fltr_info = &itr->fltr_info;
4530 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4531 vid != fltr_info->l_data.mac_vlan.vlan_id)
4534 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4536 /* Skip if filter is not completely specified by given mask */
4537 if (fltr_promisc_mask & ~promisc_mask)
4540 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4544 ice_release_lock(rule_lock);
4545 goto free_fltr_list;
4548 ice_release_lock(rule_lock);
4550 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
/* Free the temporary copies regardless of removal outcome */
4553 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4554 ice_fltr_list_entry, list_entry) {
4555 LIST_DEL(&fm_entry->list_entry);
4556 ice_free(hw, fm_entry);
4563 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
4564 * @hw: pointer to the hardware structure
4565 * @vsi_handle: VSI handle to clear mode
4566 * @promisc_mask: mask of promiscuous config bits to clear
4567 * @vid: VLAN ID to clear VLAN promiscuous
/* Thin public wrapper around _ice_clear_vsi_promisc() using the
 * primary switch info attached to @hw.
 */
4570 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
4571 u8 promisc_mask, u16 vid)
4573 return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
4574 vid, hw->switch_info);
4578 * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4579 * @hw: pointer to the hardware structure
4580 * @vsi_handle: VSI handle to configure
4581 * @promisc_mask: mask of promiscuous config bits
4582 * @vid: VLAN ID to set VLAN promiscuous
4583 * @lport: logical port number to configure promisc mode
4584 * @sw: pointer to switch info struct for which function add rule
4586 static enum ice_status
4587 _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4588 u16 vid, u8 lport, struct ice_switch_info *sw)
4590 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4591 struct ice_fltr_list_entry f_list_entry;
4592 struct ice_fltr_info new_fltr;
4593 enum ice_status status = ICE_SUCCESS;
4599 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4601 if (!ice_is_vsi_valid(hw, vsi_handle))
4602 return ICE_ERR_PARAM;
4603 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4605 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
/* Select recipe/lookup type: a VLAN bit in the mask means the rule also
 * matches the given VID via the PROMISC_VLAN recipe.
 */
4607 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4608 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4609 new_fltr.l_data.mac_vlan.vlan_id = vid;
4610 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4612 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4613 recipe_id = ICE_SW_LKUP_PROMISC;
4616 /* Separate filters must be set for each direction/packet type
4617 * combination, so we will loop over the mask value, store the
4618 * individual type, and clear it out in the input mask as it
4621 while (promisc_mask) {
4622 struct ice_sw_recipe *recp_list;
/* Consume exactly one direction/packet-type bit per iteration */
4628 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4629 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4630 pkt_type = UCAST_FLTR;
4631 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4632 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4633 pkt_type = UCAST_FLTR;
4635 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4636 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4637 pkt_type = MCAST_FLTR;
4638 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4639 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4640 pkt_type = MCAST_FLTR;
4642 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4643 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4644 pkt_type = BCAST_FLTR;
4645 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4646 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4647 pkt_type = BCAST_FLTR;
4651 /* Check for VLAN promiscuous flag */
4652 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4653 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4654 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4655 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4659 /* Set filter DA based on packet type */
4660 mac_addr = new_fltr.l_data.mac.mac_addr;
4661 if (pkt_type == BCAST_FLTR) {
4662 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
4663 } else if (pkt_type == MCAST_FLTR ||
4664 pkt_type == UCAST_FLTR) {
4665 /* Use the dummy ether header DA */
4666 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
4667 ICE_NONDMA_TO_NONDMA);
4668 if (pkt_type == MCAST_FLTR)
4669 mac_addr[0] |= 0x1; /* Set multicast bit */
4672 /* Need to reset this to zero for all iterations */
/* TX rules source from the VSI itself; RX rules source from the
 * logical port.
 */
4675 new_fltr.flag |= ICE_FLTR_TX;
4676 new_fltr.src = hw_vsi_id;
4678 new_fltr.flag |= ICE_FLTR_RX;
4679 new_fltr.src = lport;
4682 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4683 new_fltr.vsi_handle = vsi_handle;
4684 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4685 f_list_entry.fltr_info = new_fltr;
4686 recp_list = &sw->recp_list[recipe_id];
4688 status = ice_add_rule_internal(hw, recp_list, lport,
4690 if (status != ICE_SUCCESS)
4691 goto set_promisc_exit;
4699 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4700 * @hw: pointer to the hardware structure
4701 * @vsi_handle: VSI handle to configure
4702 * @promisc_mask: mask of promiscuous config bits
4703 * @vid: VLAN ID to set VLAN promiscuous
/* Thin public wrapper: uses the port's lport and the primary
 * switch info attached to @hw.
 */
4706 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4709 return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
4710 hw->port_info->lport,
4715 * _ice_set_vlan_vsi_promisc
4716 * @hw: pointer to the hardware structure
4717 * @vsi_handle: VSI handle to configure
4718 * @promisc_mask: mask of promiscuous config bits
4719 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4720 * @lport: logical port number to configure promisc mode
4721 * @sw: pointer to switch info struct for which function add rule
4723 * Configure VSI with all associated VLANs to given promiscuous mode(s)
4725 static enum ice_status
4726 _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4727 bool rm_vlan_promisc, u8 lport,
4728 struct ice_switch_info *sw)
4730 struct ice_fltr_list_entry *list_itr, *tmp;
4731 struct LIST_HEAD_TYPE vsi_list_head;
4732 struct LIST_HEAD_TYPE *vlan_head;
4733 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
4734 enum ice_status status;
4737 INIT_LIST_HEAD(&vsi_list_head);
/* Snapshot the VSI's VLAN filters under the VLAN rule lock, then
 * apply (or clear) promisc mode per VLAN without holding the lock.
 */
4738 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4739 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4740 ice_acquire_lock(vlan_lock);
4741 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4743 ice_release_lock(vlan_lock);
4745 goto free_fltr_list;
4747 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
4749 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4750 if (rm_vlan_promisc)
4751 status = _ice_clear_vsi_promisc(hw, vsi_handle,
4755 status = _ice_set_vsi_promisc(hw, vsi_handle,
4756 promisc_mask, vlan_id,
/* Free the temporary VLAN snapshot list */
4763 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
4764 ice_fltr_list_entry, list_entry) {
4765 LIST_DEL(&list_itr->list_entry);
4766 ice_free(hw, list_itr);
4772 * ice_set_vlan_vsi_promisc
4773 * @hw: pointer to the hardware structure
4774 * @vsi_handle: VSI handle to configure
4775 * @promisc_mask: mask of promiscuous config bits
4776 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4778 * Configure VSI with all associated VLANs to given promiscuous mode(s)
/* Thin public wrapper: uses the port's lport and the primary
 * switch info attached to @hw.
 */
4781 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4782 bool rm_vlan_promisc)
4784 return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
4785 rm_vlan_promisc, hw->port_info->lport,
4790 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4791 * @hw: pointer to the hardware structure
4792 * @vsi_handle: VSI handle to remove filters from
4793 * @recp_list: recipe list from which function remove fltr
4794 * @lkup: switch rule filter lookup type
4797 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4798 struct ice_sw_recipe *recp_list,
4799 enum ice_sw_lkup_type lkup)
4801 struct ice_fltr_list_entry *fm_entry;
4802 struct LIST_HEAD_TYPE remove_list_head;
4803 struct LIST_HEAD_TYPE *rule_head;
4804 struct ice_fltr_list_entry *tmp;
4805 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4806 enum ice_status status;
4808 INIT_LIST_HEAD(&remove_list_head);
/* @lkup indexes recp_list to pick the rule list/lock; collect this
 * VSI's filters under the lock, then dispatch removal per lookup type.
 */
4809 rule_lock = &recp_list[lkup].filt_rule_lock;
4810 rule_head = &recp_list[lkup].filt_rules;
4811 ice_acquire_lock(rule_lock);
4812 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4814 ice_release_lock(rule_lock);
4819 case ICE_SW_LKUP_MAC:
4820 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
4822 case ICE_SW_LKUP_VLAN:
4823 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
4825 case ICE_SW_LKUP_PROMISC:
4826 case ICE_SW_LKUP_PROMISC_VLAN:
/* lkup doubles as the recipe ID for promisc removal */
4827 ice_remove_promisc(hw, lkup, &remove_list_head);
4829 case ICE_SW_LKUP_MAC_VLAN:
4830 ice_remove_mac_vlan(hw, &remove_list_head);
4832 case ICE_SW_LKUP_ETHERTYPE:
4833 case ICE_SW_LKUP_ETHERTYPE_MAC:
4834 ice_remove_eth_mac(hw, &remove_list_head);
4836 case ICE_SW_LKUP_DFLT:
4837 ice_debug(hw, ICE_DBG_SW,
4838 "Remove filters for this lookup type hasn't been implemented yet\n");
4840 case ICE_SW_LKUP_LAST:
4841 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
/* Free the temporary list entries */
4845 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4846 ice_fltr_list_entry, list_entry) {
4847 LIST_DEL(&fm_entry->list_entry);
4848 ice_free(hw, fm_entry);
4853 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
4854 * @hw: pointer to the hardware structure
4855 * @vsi_handle: VSI handle to remove filters from
4856 * @sw: pointer to switch info struct
4859 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
4860 struct ice_switch_info *sw)
4862 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
/* Remove this VSI's filters for every supported lookup type, one
 * recipe at a time.
 */
4864 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4865 sw->recp_list, ICE_SW_LKUP_MAC);
4866 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4867 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
4868 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4869 sw->recp_list, ICE_SW_LKUP_PROMISC);
4870 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4871 sw->recp_list, ICE_SW_LKUP_VLAN);
4872 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4873 sw->recp_list, ICE_SW_LKUP_DFLT);
4874 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4875 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
4876 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4877 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
4878 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4879 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
4883 * ice_remove_vsi_fltr - Remove all filters for a VSI
4884 * @hw: pointer to the hardware structure
4885 * @vsi_handle: VSI handle to remove filters from
/* Thin public wrapper using the primary switch info attached to @hw */
4887 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4889 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
4893 * ice_alloc_res_cntr - allocating resource counter
4894 * @hw: pointer to the hardware structure
4895 * @type: type of resource
4896 * @alloc_shared: if set it is shared else dedicated
4897 * @num_items: number of entries requested for FD resource type
4898 * @counter_id: counter index returned by AQ call
4901 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4904 struct ice_aqc_alloc_free_res_elem *buf;
4905 enum ice_status status;
4908 /* Allocate resource */
4909 buf_len = sizeof(*buf);
4910 buf = (struct ice_aqc_alloc_free_res_elem *)
4911 ice_malloc(hw, buf_len);
4913 return ICE_ERR_NO_MEMORY;
4915 buf->num_elems = CPU_TO_LE16(num_items);
/* Encode resource type plus shared/dedicated flag for the AQ command */
4916 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4917 ICE_AQC_RES_TYPE_M) | alloc_shared);
4919 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4920 ice_aqc_opc_alloc_res, NULL);
/* On success, firmware returns the allocated counter index */
4924 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
4932 * ice_free_res_cntr - free resource counter
4933 * @hw: pointer to the hardware structure
4934 * @type: type of resource
4935 * @alloc_shared: if set it is shared else dedicated
4936 * @num_items: number of entries to be freed for FD resource type
4937 * @counter_id: counter ID resource which needs to be freed
4940 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4943 struct ice_aqc_alloc_free_res_elem *buf;
4944 enum ice_status status;
4948 buf_len = sizeof(*buf);
4949 buf = (struct ice_aqc_alloc_free_res_elem *)
4950 ice_malloc(hw, buf_len);
4952 return ICE_ERR_NO_MEMORY;
4954 buf->num_elems = CPU_TO_LE16(num_items);
/* Mirror of ice_alloc_res_cntr: same type/flag encoding, but the
 * counter to release is passed in via sw_resp and the free opcode.
 */
4955 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4956 ICE_AQC_RES_TYPE_M) | alloc_shared);
4957 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
4959 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4960 ice_aqc_opc_free_res, NULL);
4962 ice_debug(hw, ICE_DBG_SW,
4963 "counter resource could not be freed\n");
4970 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
4971 * @hw: pointer to the hardware structure
4972 * @counter_id: returns counter index
/* Convenience wrapper: one dedicated VLAN counter */
4974 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
4976 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4977 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4982 * ice_free_vlan_res_counter - Free counter resource for VLAN type
4983 * @hw: pointer to the hardware structure
4984 * @counter_id: counter index to be freed
/* Convenience wrapper: release one dedicated VLAN counter */
4986 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
4988 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4989 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4994 * ice_alloc_res_lg_act - add large action resource
4995 * @hw: pointer to the hardware structure
4996 * @l_id: large action ID to fill it in
4997 * @num_acts: number of actions to hold with a large action entry
4999 static enum ice_status
5000 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
5002 struct ice_aqc_alloc_free_res_elem *sw_buf;
5003 enum ice_status status;
5006 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
5007 return ICE_ERR_PARAM;
5009 /* Allocate resource for large action */
5010 buf_len = sizeof(*sw_buf);
5011 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
5012 ice_malloc(hw, buf_len);
5014 return ICE_ERR_NO_MEMORY;
5016 sw_buf->num_elems = CPU_TO_LE16(1);
5018 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
5019 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
5020 * If num_acts is greater than 2, then use
5021 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
5022 * The num_acts cannot exceed 4. This was ensured at the
5023 * beginning of the function.
5026 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
5027 else if (num_acts == 2)
5028 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
5030 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
5032 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
5033 ice_aqc_opc_alloc_res, NULL);
/* On success, firmware returns the wide-table (large action) index */
5035 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
5037 ice_free(hw, sw_buf);
5042 * ice_add_mac_with_sw_marker - add filter with sw marker
5043 * @hw: pointer to the hardware structure
5044 * @f_info: filter info structure containing the MAC filter information
5045 * @sw_marker: sw marker to tag the Rx descriptor with
5048 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
5051 struct ice_fltr_mgmt_list_entry *m_entry;
5052 struct ice_fltr_list_entry fl_info;
5053 struct ice_sw_recipe *recp_list;
5054 struct LIST_HEAD_TYPE l_head;
5055 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5056 enum ice_status ret;
/* Only forward-to-VSI MAC filters with a valid marker and VSI qualify */
5060 if (f_info->fltr_act != ICE_FWD_TO_VSI)
5061 return ICE_ERR_PARAM;
5063 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5064 return ICE_ERR_PARAM;
5066 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
5067 return ICE_ERR_PARAM;
5069 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5070 return ICE_ERR_PARAM;
5071 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5073 /* Add filter if it doesn't exist so then the adding of large
5074 * action always results in update
5077 INIT_LIST_HEAD(&l_head);
5078 fl_info.fltr_info = *f_info;
5079 LIST_ADD(&fl_info.list_entry, &l_head);
5081 entry_exists = false;
5082 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5083 hw->port_info->lport);
/* ALREADY_EXISTS is expected when the filter pre-dates this call;
 * remember it so we don't remove a pre-existing rule on failure.
 */
5084 if (ret == ICE_ERR_ALREADY_EXISTS)
5085 entry_exists = true;
5089 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5090 rule_lock = &recp_list->filt_rule_lock;
5091 ice_acquire_lock(rule_lock);
5092 /* Get the book keeping entry for the filter */
5093 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5097 /* If counter action was enabled for this rule then don't enable
5098 * sw marker large action
5100 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5101 ret = ICE_ERR_PARAM;
5105 /* if same marker was added before */
5106 if (m_entry->sw_marker_id == sw_marker) {
5107 ret = ICE_ERR_ALREADY_EXISTS;
5111 /* Allocate a hardware table entry to hold large act. Three actions
5112 * for marker based large action
5114 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
5118 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5121 /* Update the switch rule to add the marker action */
5122 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
5124 ice_release_lock(rule_lock);
5129 ice_release_lock(rule_lock);
5130 /* only remove entry if it did not exist previously */
5132 ret = ice_remove_mac(hw, &l_head);
5138 * ice_add_mac_with_counter - add filter with counter enabled
5139 * @hw: pointer to the hardware structure
5140 * @f_info: pointer to filter info structure containing the MAC filter
5144 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
5146 struct ice_fltr_mgmt_list_entry *m_entry;
5147 struct ice_fltr_list_entry fl_info;
5148 struct ice_sw_recipe *recp_list;
5149 struct LIST_HEAD_TYPE l_head;
5150 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5151 enum ice_status ret;
/* Only forward-to-VSI MAC filters on a valid VSI qualify */
5156 if (f_info->fltr_act != ICE_FWD_TO_VSI)
5157 return ICE_ERR_PARAM;
5159 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5160 return ICE_ERR_PARAM;
5162 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5163 return ICE_ERR_PARAM;
5164 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5165 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5167 entry_exist = false;
5169 rule_lock = &recp_list->filt_rule_lock;
5171 /* Add filter if it doesn't exist so then the adding of large
5172 * action always results in update
5174 INIT_LIST_HEAD(&l_head);
5176 fl_info.fltr_info = *f_info;
5177 LIST_ADD(&fl_info.list_entry, &l_head);
5179 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5180 hw->port_info->lport);
/* ALREADY_EXISTS means the filter pre-dates this call; remember so we
 * don't remove a pre-existing rule on failure.
 */
5181 if (ret == ICE_ERR_ALREADY_EXISTS)
5186 ice_acquire_lock(rule_lock);
5187 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5189 ret = ICE_ERR_BAD_PTR;
5193 /* Don't enable counter for a filter for which sw marker was enabled */
5194 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
5195 ret = ICE_ERR_PARAM;
5199 /* If a counter was already enabled then don't need to add again */
5200 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5201 ret = ICE_ERR_ALREADY_EXISTS;
5205 /* Allocate a hardware table entry to VLAN counter */
5206 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
5210 /* Allocate a hardware table entry to hold large act. Two actions for
5211 * counter based large action
5213 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
5217 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5220 /* Update the switch rule to add the counter action */
5221 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
5223 ice_release_lock(rule_lock);
5228 ice_release_lock(rule_lock);
5229 /* only remove entry if it did not exist previously */
5231 ret = ice_remove_mac(hw, &l_head);
5236 /* This is mapping table entry that maps every word within a given protocol
5237 * structure to the real byte offset as per the specification of that
5239 * for example dst address is 3 words in ethertype header and corresponding
5240 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
5241 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
5242 * matching entry describing its field. This needs to be updated if new
5243 * structure is added to that union.
/* Per-protocol list of 16-bit-word byte offsets within the header;
 * indexed by enum ice_protocol_type.
 */
5245 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
5246 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
5247 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
5248 { ICE_ETYPE_OL, { 0 } },
5249 { ICE_VLAN_OFOS, { 0, 2 } },
5250 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
5251 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
5252 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
5253 26, 28, 30, 32, 34, 36, 38 } },
5254 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
5255 26, 28, 30, 32, 34, 36, 38 } },
5256 { ICE_TCP_IL, { 0, 2 } },
5257 { ICE_UDP_OF, { 0, 2 } },
5258 { ICE_UDP_ILOS, { 0, 2 } },
5259 { ICE_SCTP_IL, { 0, 2 } },
5260 { ICE_VXLAN, { 8, 10, 12, 14 } },
5261 { ICE_GENEVE, { 8, 10, 12, 14 } },
5262 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
5263 { ICE_NVGRE, { 0, 2, 4, 6 } },
5264 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
5265 { ICE_PPPOE, { 0, 2, 4, 6 } },
5266 { ICE_PFCP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
5267 { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
5268 { ICE_ESP, { 0, 2, 4, 6 } },
5269 { ICE_AH, { 0, 2, 4, 6, 8, 10 } },
5270 { ICE_NAT_T, { 8, 10, 12, 14 } },
5273 /* The following table describes preferred grouping of recipes.
5274 * If a recipe that needs to be programmed is a superset or matches one of the
5275 * following combinations, then the recipe needs to be chained as per the
/* Maps software protocol types to the hardware protocol IDs used when
 * building recipes; several tunnel types share the same HW ID (e.g.
 * VXLAN/GENEVE/GTP all map to the outer-UDP HW ID).
 */
5279 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
5280 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
5281 { ICE_MAC_IL, ICE_MAC_IL_HW },
5282 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
5283 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
5284 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
5285 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
5286 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
5287 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
5288 { ICE_TCP_IL, ICE_TCP_IL_HW },
5289 { ICE_UDP_OF, ICE_UDP_OF_HW },
5290 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
5291 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
5292 { ICE_VXLAN, ICE_UDP_OF_HW },
5293 { ICE_GENEVE, ICE_UDP_OF_HW },
5294 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
5295 { ICE_NVGRE, ICE_GRE_OF_HW },
5296 { ICE_GTP, ICE_UDP_OF_HW },
5297 { ICE_PPPOE, ICE_PPPOE_HW },
5298 { ICE_PFCP, ICE_UDP_ILOS_HW },
5299 { ICE_L2TPV3, ICE_L2TPV3_HW },
5300 { ICE_ESP, ICE_ESP_HW },
5301 { ICE_AH, ICE_AH_HW },
5302 { ICE_NAT_T, ICE_UDP_ILOS_HW },
5306 * ice_find_recp - find a recipe
5307 * @hw: pointer to the hardware structure
5308 * @lkup_exts: extension sequence to match
5310 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
5312 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
5313 enum ice_sw_tunnel_type tun_type)
5315 bool refresh_required = true;
5316 struct ice_sw_recipe *recp;
5319 /* Walk through existing recipes to find a match */
5320 recp = hw->switch_info->recp_list;
5321 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5322 /* If recipe was not created for this ID, in SW bookkeeping,
5323 * check if FW has an entry for this recipe. If the FW has an
5324 * entry update it in our SW bookkeeping and continue with the
5327 if (!recp[i].recp_created)
5328 if (ice_get_recp_frm_fw(hw,
5329 hw->switch_info->recp_list, i,
5333 /* Skip inverse action recipes */
5334 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
5335 ICE_AQ_RECIPE_ACT_INV_ACT)
5338 /* if number of words we are looking for match */
5339 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
5340 struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
5341 struct ice_fv_word *be = lkup_exts->fv_words;
5342 u16 *cr = recp[i].lkup_exts.field_mask;
5343 u16 *de = lkup_exts->field_mask;
5347 /* ar, cr, and qr are related to the recipe words, while
5348 * be, de, and pe are related to the lookup words
/* O(n*m) word matching: every lookup word must appear (same
 * protocol ID and offset) among the recipe's words.
 */
5350 for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
5351 for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
5353 if (ar[qr].off == be[pe].off &&
5354 ar[qr].prot_id == be[pe].prot_id &&
5356 /* Found the "pe"th word in the
5361 /* After walking through all the words in the
5362 * "i"th recipe if "p"th word was not found then
5363 * this recipe is not what we are looking for.
5364 * So break out from this loop and try the next
5367 if (qr >= recp[i].lkup_exts.n_val_words) {
5372 /* If for "i"th recipe the found was never set to false
5373 * then it means we found our match
5375 if ((tun_type == recp[i].tun_type ||
5376 tun_type == ICE_SW_TUN_AND_NON_TUN) && found)
5377 return i; /* Return the recipe ID */
5380 return ICE_MAX_NUM_RECIPES;
5384 * ice_prot_type_to_id - get protocol ID from protocol type
5385 * @type: protocol type
5386 * @id: pointer to variable that will receive the ID
5388 * Returns true if found, false otherwise
/* Linear scan of ice_prot_id_tbl; @id is written only on a match */
5390 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
5394 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
5395 if (ice_prot_id_tbl[i].type == type) {
5396 *id = ice_prot_id_tbl[i].protocol_id;
5403 * ice_find_valid_words - count valid words
5404 * @rule: advanced rule with lookup information
5405 * @lkup_exts: byte offset extractions of the words that are valid
5407 * calculate valid words in a lookup rule using mask value
/* NOTE(review): kernel-doc name above says "ice_find_valid_words" but
 * the function is ice_fill_valid_words — the doc title looks stale.
 */
5410 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
5411 struct ice_prot_lkup_ext *lkup_exts)
5413 u8 j, word, prot_id, ret_val;
5415 if (!ice_prot_type_to_id(rule->type, &prot_id))
5418 word = lkup_exts->n_val_words;
/* Append one extraction word per non-zero 16-bit mask word, capped at
 * ICE_MAX_CHAIN_WORDS total.
 */
5420 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
5421 if (((u16 *)&rule->m_u)[j] &&
5422 rule->type < ARRAY_SIZE(ice_prot_ext)) {
5423 /* No more space to accommodate */
5424 if (word >= ICE_MAX_CHAIN_WORDS)
5426 lkup_exts->fv_words[word].off =
5427 ice_prot_ext[rule->type].offs[j];
5428 lkup_exts->fv_words[word].prot_id =
5429 ice_prot_id_tbl[rule->type].protocol_id;
5430 lkup_exts->field_mask[word] =
5431 BE16_TO_CPU(((__be16 *)&rule->m_u)[j]);
/* ret_val = number of words added by this rule */
5435 ret_val = word - lkup_exts->n_val_words;
5436 lkup_exts->n_val_words = word;
5442 * ice_create_first_fit_recp_def - Create a recipe grouping
5443 * @hw: pointer to the hardware structure
5444 * @lkup_exts: an array of protocol header extractions
5445 * @rg_list: pointer to a list that stores new recipe groups
5446 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
5448 * Using first fit algorithm, take all the words that are still not done
5449 * and start grouping them in 4-word groups. Each group makes up one
5452 static enum ice_status
5453 ice_create_first_fit_recp_def(struct ice_hw *hw,
5454 struct ice_prot_lkup_ext *lkup_exts,
5455 struct LIST_HEAD_TYPE *rg_list,
5458 struct ice_pref_recipe_group *grp = NULL;
/* Zero valid words: create a single empty group so a recipe still
 * gets programmed.
 */
5463 if (!lkup_exts->n_val_words) {
5464 struct ice_recp_grp_entry *entry;
5466 entry = (struct ice_recp_grp_entry *)
5467 ice_malloc(hw, sizeof(*entry));
5469 return ICE_ERR_NO_MEMORY;
5470 LIST_ADD(&entry->l_entry, rg_list);
5471 grp = &entry->r_group;
5473 grp->n_val_pairs = 0;
5476 /* Walk through every word in the rule to check if it is not done. If so
5477 * then this word needs to be part of a new recipe.
5479 for (j = 0; j < lkup_exts->n_val_words; j++)
5480 if (!ice_is_bit_set(lkup_exts->done, j)) {
/* Start a new group when the current one is full */
5482 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
5483 struct ice_recp_grp_entry *entry;
5485 entry = (struct ice_recp_grp_entry *)
5486 ice_malloc(hw, sizeof(*entry));
5488 return ICE_ERR_NO_MEMORY;
5489 LIST_ADD(&entry->l_entry, rg_list);
5490 grp = &entry->r_group;
5494 grp->pairs[grp->n_val_pairs].prot_id =
5495 lkup_exts->fv_words[j].prot_id;
5496 grp->pairs[grp->n_val_pairs].off =
5497 lkup_exts->fv_words[j].off;
5498 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
5506 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
5507 * @hw: pointer to the hardware structure
5508 * @fv_list: field vector with the extraction sequence information
5509 * @rg_list: recipe groupings with protocol-offset pairs
5511 * Helper function to fill in the field vector indices for protocol-offset
5512 * pairs. These indexes are then ultimately programmed into a recipe.
5514 static enum ice_status
5515 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
5516 struct LIST_HEAD_TYPE *rg_list)
5518 struct ice_sw_fv_list_entry *fv;
5519 struct ice_recp_grp_entry *rg;
5520 struct ice_fv_word *fv_ext;
5522 if (LIST_EMPTY(fv_list))
/* Only the first field vector in the list is consulted */
5525 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
5526 fv_ext = fv->fv_ptr->ew;
5528 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
5531 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
5532 struct ice_fv_word *pr;
5537 pr = &rg->r_group.pairs[i];
5538 mask = rg->r_group.mask[i];
/* Search the extraction sequence for a matching prot/offset */
5540 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
5541 if (fv_ext[j].prot_id == pr->prot_id &&
5542 fv_ext[j].off == pr->off) {
5545 /* Store index of field vector */
5547 rg->fv_mask[i] = mask;
5551 /* Protocol/offset could not be found, caller gave an
5555 return ICE_ERR_PARAM;
5563 * ice_find_free_recp_res_idx - find free result indexes for recipe
5564 * @hw: pointer to hardware structure
5565 * @profiles: bitmap of profiles that will be associated with the new recipe
5566 * @free_idx: pointer to variable to receive the free index bitmap
5568 * The algorithm used here is:
5569 * 1. When creating a new recipe, create a set P which contains all
5570 * Profiles that will be associated with our new recipe
5572 * 2. For each Profile p in set P:
5573 * a. Add all recipes associated with Profile p into set R
5574 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
5575 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
5576 * i. Or just assume they all have the same possible indexes:
5578 * i.e., PossibleIndexes = 0x0000F00000000000
5580 * 3. For each Recipe r in set R:
5581 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
5582 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
5584 * FreeIndexes will contain the bits indicating the indexes free for use,
5585 * then the code needs to update the recipe[r].used_result_idx_bits to
5586 * indicate which indexes were selected for use by this recipe.
5589 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
5590 ice_bitmap_t *free_idx)
5592 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5593 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5594 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
5598 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5599 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5600 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
5601 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
/* Start by assuming every field-vector word index is possible */
5603 for (count = 0; count < ICE_MAX_FV_WORDS; count++)
5604 ice_set_bit(count, possible_idx);
5606 /* For each profile we are going to associate the recipe with, add the
5607 * recipes that are associated with that profile. This will give us
5608 * the set of recipes that our recipe may collide with. Also, determine
5609 * what possible result indexes are usable given this set of profiles.
5612 while (ICE_MAX_NUM_PROFILES >
5613 (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
5614 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
5615 ICE_MAX_NUM_RECIPES);
5616 ice_and_bitmap(possible_idx, possible_idx,
5617 hw->switch_info->prof_res_bm[bit],
5622 /* For each recipe that our new recipe may collide with, determine
5623 * which indexes have been used.
5625 for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
5626 if (ice_is_bit_set(recipes, bit)) {
5627 ice_or_bitmap(used_idx, used_idx,
5628 hw->switch_info->recp_list[bit].res_idxs,
/* free = possible XOR used (used is a subset of possible here) */
5632 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
5634 /* return number of free indexes */
5637 while (ICE_MAX_FV_WORDS >
5638 (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
5647 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
5648 * @hw: pointer to hardware structure
5649 * @rm: recipe management list entry
5650 * @match_tun_mask: tunnel mask that needs to be programmed
5651 * @profiles: bitmap of profiles that will be associated.
5653 static enum ice_status
5654 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
5655 u16 match_tun_mask, ice_bitmap_t *profiles)
5657 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5658 struct ice_aqc_recipe_data_elem *tmp;
5659 struct ice_aqc_recipe_data_elem *buf;
5660 struct ice_recp_grp_entry *entry;
5661 enum ice_status status;
5667 /* When more than one recipe are required, another recipe is needed to
5668 * chain them together. Matching a tunnel metadata ID takes up one of
5669 * the match fields in the chaining recipe reducing the number of
5670 * chained recipes by one.
5672 /* check number of free result indices */
5673 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5674 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
5676 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
5677 free_res_idx, rm->n_grp_count);
5679 if (rm->n_grp_count > 1) {
5680 if (rm->n_grp_count > free_res_idx)
5681 return ICE_ERR_MAX_LIMIT;
5686 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
5687 return ICE_ERR_MAX_LIMIT;
5689 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
5690 ICE_MAX_NUM_RECIPES,
5693 return ICE_ERR_NO_MEMORY;
5695 buf = (struct ice_aqc_recipe_data_elem *)
5696 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
5698 status = ICE_ERR_NO_MEMORY;
5702 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
5703 recipe_count = ICE_MAX_NUM_RECIPES;
5704 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5706 if (status || recipe_count == 0)
5709 /* Allocate the recipe resources, and configure them according to the
5710 * match fields from protocol headers and extracted field vectors.
5712 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
5713 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5716 status = ice_alloc_recipe(hw, &entry->rid);
5720 /* Clear the result index of the located recipe, as this will be
5721 * updated, if needed, later in the recipe creation process.
5723 tmp[0].content.result_indx = 0;
5725 buf[recps] = tmp[0];
5726 buf[recps].recipe_indx = (u8)entry->rid;
5727 /* if the recipe is a non-root recipe RID should be programmed
5728 * as 0 for the rules to be applied correctly.
5730 buf[recps].content.rid = 0;
5731 ice_memset(&buf[recps].content.lkup_indx, 0,
5732 sizeof(buf[recps].content.lkup_indx),
5735 /* All recipes use look-up index 0 to match switch ID. */
5736 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5737 buf[recps].content.mask[0] =
5738 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5739 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5742 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5743 buf[recps].content.lkup_indx[i] = 0x80;
5744 buf[recps].content.mask[i] = 0;
5747 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5748 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5749 buf[recps].content.mask[i + 1] =
5750 CPU_TO_LE16(entry->fv_mask[i]);
5753 if (rm->n_grp_count > 1) {
5754 /* Checks to see if there really is a valid result index
5757 if (chain_idx >= ICE_MAX_FV_WORDS) {
5758 ice_debug(hw, ICE_DBG_SW,
5759 "No chain index available\n");
5760 status = ICE_ERR_MAX_LIMIT;
5764 entry->chain_idx = chain_idx;
5765 buf[recps].content.result_indx =
5766 ICE_AQ_RECIPE_RESULT_EN |
5767 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5768 ICE_AQ_RECIPE_RESULT_DATA_M);
5769 ice_clear_bit(chain_idx, result_idx_bm);
5770 chain_idx = ice_find_first_bit(result_idx_bm,
5774 /* fill recipe dependencies */
5775 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
5776 ICE_MAX_NUM_RECIPES);
5777 ice_set_bit(buf[recps].recipe_indx,
5778 (ice_bitmap_t *)buf[recps].recipe_bitmap);
5779 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5783 if (rm->n_grp_count == 1) {
5784 rm->root_rid = buf[0].recipe_indx;
5785 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
5786 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5787 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5788 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5789 sizeof(buf[0].recipe_bitmap),
5790 ICE_NONDMA_TO_NONDMA);
5792 status = ICE_ERR_BAD_PTR;
5795 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
5796 * the recipe which is getting created if specified
5797 * by user. Usually any advanced switch filter, which results
5798 * into new extraction sequence, ended up creating a new recipe
5799 * of type ROOT and usually recipes are associated with profiles
5800 * Switch rule referreing newly created recipe, needs to have
5801 * either/or 'fwd' or 'join' priority, otherwise switch rule
5802 * evaluation will not happen correctly. In other words, if
5803 * switch rule to be evaluated on priority basis, then recipe
5804 * needs to have priority, otherwise it will be evaluated last.
5806 buf[0].content.act_ctrl_fwd_priority = rm->priority;
5808 struct ice_recp_grp_entry *last_chain_entry;
5811 /* Allocate the last recipe that will chain the outcomes of the
5812 * other recipes together
5814 status = ice_alloc_recipe(hw, &rid);
5818 buf[recps].recipe_indx = (u8)rid;
5819 buf[recps].content.rid = (u8)rid;
5820 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5821 /* the new entry created should also be part of rg_list to
5822 * make sure we have complete recipe
5824 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
5825 sizeof(*last_chain_entry));
5826 if (!last_chain_entry) {
5827 status = ICE_ERR_NO_MEMORY;
5830 last_chain_entry->rid = rid;
5831 ice_memset(&buf[recps].content.lkup_indx, 0,
5832 sizeof(buf[recps].content.lkup_indx),
5834 /* All recipes use look-up index 0 to match switch ID. */
5835 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5836 buf[recps].content.mask[0] =
5837 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5838 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5839 buf[recps].content.lkup_indx[i] =
5840 ICE_AQ_RECIPE_LKUP_IGNORE;
5841 buf[recps].content.mask[i] = 0;
5845 /* update r_bitmap with the recp that is used for chaining */
5846 ice_set_bit(rid, rm->r_bitmap);
5847 /* this is the recipe that chains all the other recipes so it
5848 * should not have a chaining ID to indicate the same
5850 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
5851 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
5853 last_chain_entry->fv_idx[i] = entry->chain_idx;
5854 buf[recps].content.lkup_indx[i] = entry->chain_idx;
5855 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
5856 ice_set_bit(entry->rid, rm->r_bitmap);
5858 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
5859 if (sizeof(buf[recps].recipe_bitmap) >=
5860 sizeof(rm->r_bitmap)) {
5861 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5862 sizeof(buf[recps].recipe_bitmap),
5863 ICE_NONDMA_TO_NONDMA);
5865 status = ICE_ERR_BAD_PTR;
5868 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5870 /* To differentiate among different UDP tunnels, a meta data ID
5873 if (match_tun_mask) {
5874 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
5875 buf[recps].content.mask[i] =
5876 CPU_TO_LE16(match_tun_mask);
5880 rm->root_rid = (u8)rid;
5882 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5886 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5887 ice_release_change_lock(hw);
5891 /* Every recipe that just got created add it to the recipe
5894 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5895 struct ice_switch_info *sw = hw->switch_info;
5896 bool is_root, idx_found = false;
5897 struct ice_sw_recipe *recp;
5898 u16 idx, buf_idx = 0;
5900 /* find buffer index for copying some data */
5901 for (idx = 0; idx < rm->n_grp_count; idx++)
5902 if (buf[idx].recipe_indx == entry->rid) {
5908 status = ICE_ERR_OUT_OF_RANGE;
5912 recp = &sw->recp_list[entry->rid];
5913 is_root = (rm->root_rid == entry->rid);
5914 recp->is_root = is_root;
5916 recp->root_rid = entry->rid;
5917 recp->big_recp = (is_root && rm->n_grp_count > 1);
5919 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
5920 entry->r_group.n_val_pairs *
5921 sizeof(struct ice_fv_word),
5922 ICE_NONDMA_TO_NONDMA);
5924 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5925 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
5927 /* Copy non-result fv index values and masks to recipe. This
5928 * call will also update the result recipe bitmask.
5930 ice_collect_result_idx(&buf[buf_idx], recp);
5932 /* for non-root recipes, also copy to the root, this allows
5933 * easier matching of a complete chained recipe
5936 ice_collect_result_idx(&buf[buf_idx],
5937 &sw->recp_list[rm->root_rid]);
5939 recp->n_ext_words = entry->r_group.n_val_pairs;
5940 recp->chain_idx = entry->chain_idx;
5941 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5942 recp->n_grp_count = rm->n_grp_count;
5943 recp->tun_type = rm->tun_type;
5944 recp->recp_created = true;
5959 * ice_create_recipe_group - creates recipe group
5960 * @hw: pointer to hardware structure
5961 * @rm: recipe management list entry
5962 * @lkup_exts: lookup elements
5964 static enum ice_status
5965 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5966 struct ice_prot_lkup_ext *lkup_exts)
5968 enum ice_status status;
5971 rm->n_grp_count = 0;
5973 /* Create recipes for words that are marked not done by packing them
5976 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5977 &rm->rg_list, &recp_count);
5979 rm->n_grp_count += recp_count;
5980 rm->n_ext_words = lkup_exts->n_val_words;
5981 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
5982 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
5983 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
5984 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
5991 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
5992 * @hw: pointer to hardware structure
5993 * @lkups: lookup elements or match criteria for the advanced recipe, one
5994 * structure per protocol header
5995 * @lkups_cnt: number of protocols
5996 * @bm: bitmap of field vectors to consider
5997 * @fv_list: pointer to a list that holds the returned field vectors
5999 static enum ice_status
6000 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6001 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
6003 enum ice_status status;
6010 prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
6012 return ICE_ERR_NO_MEMORY;
6014 for (i = 0; i < lkups_cnt; i++)
6015 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
6016 status = ICE_ERR_CFG;
6020 /* Find field vectors that include all specified protocol types */
6021 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
6024 ice_free(hw, prot_ids);
6029 * ice_tun_type_match_mask - determine if tun type needs a match mask
6030 * @tun_type: tunnel type
6031 * @mask: mask to be used for the tunnel
6033 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
6036 case ICE_SW_TUN_VXLAN_GPE:
6037 case ICE_SW_TUN_GENEVE:
6038 case ICE_SW_TUN_VXLAN:
6039 case ICE_SW_TUN_NVGRE:
6040 case ICE_SW_TUN_UDP:
6041 case ICE_ALL_TUNNELS:
6042 *mask = ICE_TUN_FLAG_MASK;
6045 case ICE_SW_TUN_GENEVE_VLAN:
6046 case ICE_SW_TUN_VXLAN_VLAN:
6047 *mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
6057 * ice_add_special_words - Add words that are not protocols, such as metadata
6058 * @rinfo: other information regarding the rule e.g. priority and action info
6059 * @lkup_exts: lookup word structure
6061 static enum ice_status
6062 ice_add_special_words(struct ice_adv_rule_info *rinfo,
6063 struct ice_prot_lkup_ext *lkup_exts)
6067 /* If this is a tunneled packet, then add recipe index to match the
6068 * tunnel bit in the packet metadata flags.
6070 if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
6071 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
6072 u8 word = lkup_exts->n_val_words++;
6074 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
6075 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
6076 lkup_exts->field_mask[word] = mask;
6078 return ICE_ERR_MAX_LIMIT;
6085 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
6086 * @hw: pointer to hardware structure
6087 * @rinfo: other information regarding the rule e.g. priority and action info
6088 * @bm: pointer to memory for returning the bitmap of field vectors
6091 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
6094 enum ice_prof_type prof_type;
6096 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
6098 switch (rinfo->tun_type) {
6100 prof_type = ICE_PROF_NON_TUN;
6102 case ICE_ALL_TUNNELS:
6103 prof_type = ICE_PROF_TUN_ALL;
6105 case ICE_SW_TUN_VXLAN_GPE:
6106 case ICE_SW_TUN_GENEVE:
6107 case ICE_SW_TUN_GENEVE_VLAN:
6108 case ICE_SW_TUN_VXLAN:
6109 case ICE_SW_TUN_VXLAN_VLAN:
6110 case ICE_SW_TUN_UDP:
6111 case ICE_SW_TUN_GTP:
6112 prof_type = ICE_PROF_TUN_UDP;
6114 case ICE_SW_TUN_NVGRE:
6115 prof_type = ICE_PROF_TUN_GRE;
6117 case ICE_SW_TUN_PPPOE:
6118 prof_type = ICE_PROF_TUN_PPPOE;
6120 case ICE_SW_TUN_PROFID_IPV6_ESP:
6121 case ICE_SW_TUN_IPV6_ESP:
6122 ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
6124 case ICE_SW_TUN_PROFID_IPV6_AH:
6125 case ICE_SW_TUN_IPV6_AH:
6126 ice_set_bit(ICE_PROFID_IPV6_AH, bm);
6128 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6129 case ICE_SW_TUN_IPV6_L2TPV3:
6130 ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
6132 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6133 case ICE_SW_TUN_IPV6_NAT_T:
6134 ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
6136 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6137 ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
6139 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6140 ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
6142 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6143 ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
6145 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6146 ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
6148 case ICE_SW_TUN_IPV4_NAT_T:
6149 ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
6151 case ICE_SW_TUN_IPV4_L2TPV3:
6152 ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
6154 case ICE_SW_TUN_IPV4_ESP:
6155 ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
6157 case ICE_SW_TUN_IPV4_AH:
6158 ice_set_bit(ICE_PROFID_IPV4_AH, bm);
6160 case ICE_SW_TUN_AND_NON_TUN:
6162 prof_type = ICE_PROF_ALL;
6166 ice_get_sw_fv_bitmap(hw, prof_type, bm);
6170 * ice_is_prof_rule - determine if rule type is a profile rule
6171 * @type: the rule type
6173 * if the rule type is a profile rule, that means that there no field value
6174 * match required, in this case just a profile hit is required.
6176 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
6179 case ICE_SW_TUN_PROFID_IPV6_ESP:
6180 case ICE_SW_TUN_PROFID_IPV6_AH:
6181 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6182 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6183 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6184 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6185 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6186 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6196 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
6197 * @hw: pointer to hardware structure
6198 * @lkups: lookup elements or match criteria for the advanced recipe, one
6199 * structure per protocol header
6200 * @lkups_cnt: number of protocols
6201 * @rinfo: other information regarding the rule e.g. priority and action info
6202 * @rid: return the recipe ID of the recipe created
6204 static enum ice_status
6205 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6206 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
6208 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
6209 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
6210 struct ice_prot_lkup_ext *lkup_exts;
6211 struct ice_recp_grp_entry *r_entry;
6212 struct ice_sw_fv_list_entry *fvit;
6213 struct ice_recp_grp_entry *r_tmp;
6214 struct ice_sw_fv_list_entry *tmp;
6215 enum ice_status status = ICE_SUCCESS;
6216 struct ice_sw_recipe *rm;
6217 u16 match_tun_mask = 0;
6221 if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
6222 return ICE_ERR_PARAM;
6224 lkup_exts = (struct ice_prot_lkup_ext *)
6225 ice_malloc(hw, sizeof(*lkup_exts));
6227 return ICE_ERR_NO_MEMORY;
6229 /* Determine the number of words to be matched and if it exceeds a
6230 * recipe's restrictions
6232 for (i = 0; i < lkups_cnt; i++) {
6235 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
6236 status = ICE_ERR_CFG;
6237 goto err_free_lkup_exts;
6240 count = ice_fill_valid_words(&lkups[i], lkup_exts);
6242 status = ICE_ERR_CFG;
6243 goto err_free_lkup_exts;
6247 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
6249 status = ICE_ERR_NO_MEMORY;
6250 goto err_free_lkup_exts;
6253 /* Get field vectors that contain fields extracted from all the protocol
6254 * headers being programmed.
6256 INIT_LIST_HEAD(&rm->fv_list);
6257 INIT_LIST_HEAD(&rm->rg_list);
6259 /* Get bitmap of field vectors (profiles) that are compatible with the
6260 * rule request; only these will be searched in the subsequent call to
6263 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
6265 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
6269 /* Group match words into recipes using preferred recipe grouping
6272 status = ice_create_recipe_group(hw, rm, lkup_exts);
6276 /* For certain tunnel types it is necessary to use a metadata ID flag to
6277 * differentiate different tunnel types. A separate recipe needs to be
6278 * used for the metadata.
6280 if (ice_tun_type_match_word(rinfo->tun_type, &mask) &&
6281 rm->n_grp_count > 1)
6282 match_tun_mask = mask;
6284 /* set the recipe priority if specified */
6285 rm->priority = (u8)rinfo->priority;
6287 /* Find offsets from the field vector. Pick the first one for all the
6290 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
6294 /* An empty FV list means to use all the profiles returned in the
6297 if (LIST_EMPTY(&rm->fv_list)) {
6300 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++)
6301 if (ice_is_bit_set(fv_bitmap, j)) {
6302 struct ice_sw_fv_list_entry *fvl;
6304 fvl = (struct ice_sw_fv_list_entry *)
6305 ice_malloc(hw, sizeof(*fvl));
6309 fvl->profile_id = j;
6310 LIST_ADD(&fvl->list_entry, &rm->fv_list);
6314 /* get bitmap of all profiles the recipe will be associated with */
6315 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
6316 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
6318 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
6319 ice_set_bit((u16)fvit->profile_id, profiles);
6322 /* Create any special protocol/offset pairs, such as looking at tunnel
6323 * bits by extracting metadata
6325 status = ice_add_special_words(rinfo, lkup_exts);
6327 goto err_free_lkup_exts;
6329 /* Look for a recipe which matches our requested fv / mask list */
6330 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
6331 if (*rid < ICE_MAX_NUM_RECIPES)
6332 /* Success if found a recipe that match the existing criteria */
6335 rm->tun_type = rinfo->tun_type;
6336 /* Recipe we need does not exist, add a recipe */
6337 status = ice_add_sw_recipe(hw, rm, match_tun_mask, profiles);
6341 /* Associate all the recipes created with all the profiles in the
6342 * common field vector.
6344 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
6346 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
6349 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
6350 (u8 *)r_bitmap, NULL);
6354 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
6355 ICE_MAX_NUM_RECIPES);
6356 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
6360 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
6363 ice_release_change_lock(hw);
6368 /* Update profile to recipe bitmap array */
6369 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
6370 ICE_MAX_NUM_RECIPES);
6372 /* Update recipe to profile bitmap array */
6373 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
6374 if (ice_is_bit_set(r_bitmap, j))
6375 ice_set_bit((u16)fvit->profile_id,
6376 recipe_to_profile[j]);
6379 *rid = rm->root_rid;
6380 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
6381 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
6383 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
6384 ice_recp_grp_entry, l_entry) {
6385 LIST_DEL(&r_entry->l_entry);
6386 ice_free(hw, r_entry);
6389 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
6391 LIST_DEL(&fvit->list_entry);
6396 ice_free(hw, rm->root_buf);
6401 ice_free(hw, lkup_exts);
6407 * ice_find_dummy_packet - find dummy packet by tunnel type
6409 * @lkups: lookup elements or match criteria for the advanced recipe, one
6410 * structure per protocol header
6411 * @lkups_cnt: number of protocols
6412 * @tun_type: tunnel type from the match criteria
6413 * @pkt: dummy packet to fill according to filter match criteria
6414 * @pkt_len: packet length of dummy packet
6415 * @offsets: pointer to receive the pointer to the offsets for the packet
6418 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6419 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
6421 const struct ice_dummy_pkt_offsets **offsets)
6423 bool tcp = false, udp = false, ipv6 = false, vlan = false;
6427 for (i = 0; i < lkups_cnt; i++) {
6428 if (lkups[i].type == ICE_UDP_ILOS)
6430 else if (lkups[i].type == ICE_TCP_IL)
6432 else if (lkups[i].type == ICE_IPV6_OFOS)
6434 else if (lkups[i].type == ICE_VLAN_OFOS)
6436 else if (lkups[i].type == ICE_IPV4_OFOS &&
6437 lkups[i].h_u.ipv4_hdr.protocol ==
6438 ICE_IPV4_NVGRE_PROTO_ID &&
6439 lkups[i].m_u.ipv4_hdr.protocol ==
6442 else if (lkups[i].type == ICE_PPPOE &&
6443 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
6444 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
6445 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
6448 else if (lkups[i].type == ICE_ETYPE_OL &&
6449 lkups[i].h_u.ethertype.ethtype_id ==
6450 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
6451 lkups[i].m_u.ethertype.ethtype_id ==
6456 if (tun_type == ICE_SW_TUN_IPV4_ESP) {
6457 *pkt = dummy_ipv4_esp_pkt;
6458 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
6459 *offsets = dummy_ipv4_esp_packet_offsets;
6463 if (tun_type == ICE_SW_TUN_IPV6_ESP) {
6464 *pkt = dummy_ipv6_esp_pkt;
6465 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
6466 *offsets = dummy_ipv6_esp_packet_offsets;
6470 if (tun_type == ICE_SW_TUN_IPV4_AH) {
6471 *pkt = dummy_ipv4_ah_pkt;
6472 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
6473 *offsets = dummy_ipv4_ah_packet_offsets;
6477 if (tun_type == ICE_SW_TUN_IPV6_AH) {
6478 *pkt = dummy_ipv6_ah_pkt;
6479 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
6480 *offsets = dummy_ipv6_ah_packet_offsets;
6484 if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
6485 *pkt = dummy_ipv4_nat_pkt;
6486 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
6487 *offsets = dummy_ipv4_nat_packet_offsets;
6491 if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
6492 *pkt = dummy_ipv6_nat_pkt;
6493 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
6494 *offsets = dummy_ipv6_nat_packet_offsets;
6498 if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
6499 *pkt = dummy_ipv4_l2tpv3_pkt;
6500 *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
6501 *offsets = dummy_ipv4_l2tpv3_packet_offsets;
6505 if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
6506 *pkt = dummy_ipv6_l2tpv3_pkt;
6507 *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
6508 *offsets = dummy_ipv6_l2tpv3_packet_offsets;
6512 if (tun_type == ICE_SW_TUN_GTP) {
6513 *pkt = dummy_udp_gtp_packet;
6514 *pkt_len = sizeof(dummy_udp_gtp_packet);
6515 *offsets = dummy_udp_gtp_packet_offsets;
6519 if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
6520 *pkt = dummy_pppoe_ipv6_packet;
6521 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
6522 *offsets = dummy_pppoe_packet_offsets;
6524 } else if (tun_type == ICE_SW_TUN_PPPOE) {
6525 *pkt = dummy_pppoe_ipv4_packet;
6526 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
6527 *offsets = dummy_pppoe_packet_offsets;
6531 if (tun_type == ICE_ALL_TUNNELS) {
6532 *pkt = dummy_gre_udp_packet;
6533 *pkt_len = sizeof(dummy_gre_udp_packet);
6534 *offsets = dummy_gre_udp_packet_offsets;
6538 if (tun_type == ICE_SW_TUN_NVGRE || gre) {
6540 *pkt = dummy_gre_tcp_packet;
6541 *pkt_len = sizeof(dummy_gre_tcp_packet);
6542 *offsets = dummy_gre_tcp_packet_offsets;
6546 *pkt = dummy_gre_udp_packet;
6547 *pkt_len = sizeof(dummy_gre_udp_packet);
6548 *offsets = dummy_gre_udp_packet_offsets;
6552 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
6553 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
6554 tun_type == ICE_SW_TUN_GENEVE_VLAN ||
6555 tun_type == ICE_SW_TUN_VXLAN_VLAN) {
6557 *pkt = dummy_udp_tun_tcp_packet;
6558 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
6559 *offsets = dummy_udp_tun_tcp_packet_offsets;
6563 *pkt = dummy_udp_tun_udp_packet;
6564 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
6565 *offsets = dummy_udp_tun_udp_packet_offsets;
6571 *pkt = dummy_vlan_udp_packet;
6572 *pkt_len = sizeof(dummy_vlan_udp_packet);
6573 *offsets = dummy_vlan_udp_packet_offsets;
6576 *pkt = dummy_udp_packet;
6577 *pkt_len = sizeof(dummy_udp_packet);
6578 *offsets = dummy_udp_packet_offsets;
6580 } else if (udp && ipv6) {
6582 *pkt = dummy_vlan_udp_ipv6_packet;
6583 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
6584 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
6587 *pkt = dummy_udp_ipv6_packet;
6588 *pkt_len = sizeof(dummy_udp_ipv6_packet);
6589 *offsets = dummy_udp_ipv6_packet_offsets;
6591 } else if ((tcp && ipv6) || ipv6) {
6593 *pkt = dummy_vlan_tcp_ipv6_packet;
6594 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
6595 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
6598 *pkt = dummy_tcp_ipv6_packet;
6599 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
6600 *offsets = dummy_tcp_ipv6_packet_offsets;
6605 *pkt = dummy_vlan_tcp_packet;
6606 *pkt_len = sizeof(dummy_vlan_tcp_packet);
6607 *offsets = dummy_vlan_tcp_packet_offsets;
6609 *pkt = dummy_tcp_packet;
6610 *pkt_len = sizeof(dummy_tcp_packet);
6611 *offsets = dummy_tcp_packet_offsets;
6616 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
6618 * @lkups: lookup elements or match criteria for the advanced recipe, one
6619 * structure per protocol header
6620 * @lkups_cnt: number of protocols
6621 * @s_rule: stores rule information from the match criteria
6622 * @dummy_pkt: dummy packet to fill according to filter match criteria
6623 * @pkt_len: packet length of dummy packet
6624 * @offsets: offset info for the dummy packet
6626 static enum ice_status
6627 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6628 struct ice_aqc_sw_rules_elem *s_rule,
6629 const u8 *dummy_pkt, u16 pkt_len,
6630 const struct ice_dummy_pkt_offsets *offsets)
6635 /* Start with a packet with a pre-defined/dummy content. Then, fill
6636 * in the header values to be looked up or matched.
6638 pkt = s_rule->pdata.lkup_tx_rx.hdr;
6640 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
6642 for (i = 0; i < lkups_cnt; i++) {
6643 enum ice_protocol_type type;
6644 u16 offset = 0, len = 0, j;
6647 /* find the start of this layer; it should be found since this
6648 * was already checked when search for the dummy packet
6650 type = lkups[i].type;
6651 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
6652 if (type == offsets[j].type) {
6653 offset = offsets[j].offset;
6658 /* this should never happen in a correct calling sequence */
6660 return ICE_ERR_PARAM;
6662 switch (lkups[i].type) {
6665 len = sizeof(struct ice_ether_hdr);
6668 len = sizeof(struct ice_ethtype_hdr);
6671 len = sizeof(struct ice_vlan_hdr);
6675 len = sizeof(struct ice_ipv4_hdr);
6679 len = sizeof(struct ice_ipv6_hdr);
6684 len = sizeof(struct ice_l4_hdr);
6687 len = sizeof(struct ice_sctp_hdr);
6690 len = sizeof(struct ice_nvgre);
6695 len = sizeof(struct ice_udp_tnl_hdr);
6699 len = sizeof(struct ice_udp_gtp_hdr);
6702 len = sizeof(struct ice_pppoe_hdr);
6705 len = sizeof(struct ice_esp_hdr);
6708 len = sizeof(struct ice_nat_t_hdr);
6711 len = sizeof(struct ice_ah_hdr);
6714 len = sizeof(struct ice_l2tpv3_sess_hdr);
6717 return ICE_ERR_PARAM;
6720 /* the length should be a word multiple */
6721 if (len % ICE_BYTES_PER_WORD)
6724 /* We have the offset to the header start, the length, the
6725 * caller's header values and mask. Use this information to
6726 * copy the data into the dummy packet appropriately based on
6727 * the mask. Note that we need to only write the bits as
6728 * indicated by the mask to make sure we don't improperly write
6729 * over any significant packet data.
6731 for (j = 0; j < len / sizeof(u16); j++)
6732 if (((u16 *)&lkups[i].m_u)[j])
6733 ((u16 *)(pkt + offset))[j] =
6734 (((u16 *)(pkt + offset))[j] &
6735 ~((u16 *)&lkups[i].m_u)[j]) |
6736 (((u16 *)&lkups[i].h_u)[j] &
6737 ((u16 *)&lkups[i].m_u)[j]);
6740 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
6746 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
6747 * @hw: pointer to the hardware structure
6748 * @tun_type: tunnel type
6749 * @pkt: dummy packet to fill in
6750 * @offsets: offset info for the dummy packet
6752 static enum ice_status
6753 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
6754 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
6759 case ICE_SW_TUN_AND_NON_TUN:
6760 case ICE_SW_TUN_VXLAN_GPE:
6761 case ICE_SW_TUN_VXLAN:
6762 case ICE_SW_TUN_VXLAN_VLAN:
6763 case ICE_SW_TUN_UDP:
6764 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
6768 case ICE_SW_TUN_GENEVE:
6769 case ICE_SW_TUN_GENEVE_VLAN:
6770 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
6775 /* Nothing needs to be done for this tunnel type */
6779 /* Find the outer UDP protocol header and insert the port number */
6780 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
6781 if (offsets[i].type == ICE_UDP_OF) {
6782 struct ice_l4_hdr *hdr;
6785 offset = offsets[i].offset;
6786 hdr = (struct ice_l4_hdr *)&pkt[offset];
6787 hdr->dst_port = CPU_TO_BE16(open_port);
6797 * ice_find_adv_rule_entry - Search a rule entry
6798 * @hw: pointer to the hardware structure
6799 * @lkups: lookup elements or match criteria for the advanced recipe, one
6800 * structure per protocol header
6801 * @lkups_cnt: number of protocols
6802 * @recp_id: recipe ID for which we are finding the rule
6803 * @rinfo: other information regarding the rule e.g. priority and action info
6805 * Helper function to search for a given advance rule entry
6806 * Returns pointer to entry storing the rule if found
6808 static struct ice_adv_fltr_mgmt_list_entry *
6809 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6810 u16 lkups_cnt, u16 recp_id,
6811 struct ice_adv_rule_info *rinfo)
6813 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6814 struct ice_switch_info *sw = hw->switch_info;
6817 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
6818 ice_adv_fltr_mgmt_list_entry, list_entry) {
6819 bool lkups_matched = true;
6821 if (lkups_cnt != list_itr->lkups_cnt)
6823 for (i = 0; i < list_itr->lkups_cnt; i++)
6824 if (memcmp(&list_itr->lkups[i], &lkups[i],
6826 lkups_matched = false;
6829 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
6830 rinfo->tun_type == list_itr->rule_info.tun_type &&
/* NOTE(review): elided listing — several error-check lines (status checks
 * after AQ calls, some closing braces, the final return) are not shown. */
6838 * ice_adv_add_update_vsi_list
6839 * @hw: pointer to the hardware structure
6840 * @m_entry: pointer to current adv filter management list entry
6841 * @cur_fltr: filter information from the book keeping entry
6842 * @new_fltr: filter information with the new VSI to be added
6844 * Call AQ command to add or update previously created VSI list with new VSI.
6846 * Helper function to do book keeping associated with adding filter information
6847 * The algorithm to do the booking keeping is described below :
6848 * When a VSI needs to subscribe to a given advanced filter
6849 * if only one VSI has been added till now
6850 * Allocate a new VSI list and add two VSIs
6851 * to this list using switch rule command
6852 * Update the previously created switch rule with the
6853 * newly created VSI list ID
6854 * if a VSI list was previously created
6855 * Add the new VSI to the previously created VSI list set
6856 * using the update switch rule command
6858 static enum ice_status
6859 ice_adv_add_update_vsi_list(struct ice_hw *hw,
6860 struct ice_adv_fltr_mgmt_list_entry *m_entry,
6861 struct ice_adv_rule_info *cur_fltr,
6862 struct ice_adv_rule_info *new_fltr)
6864 enum ice_status status;
6865 u16 vsi_list_id = 0;
/* VSI lists only make sense for VSI-forwarding actions; queue/queue-group
 * forwarding and drop actions cannot be aggregated into a list. */
6867 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6868 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6869 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
6870 return ICE_ERR_NOT_IMPL;
/* Likewise, mixing a queue-directed new action with an existing
 * VSI-directed rule is not supported. */
6872 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6873 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
6874 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6875 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
6876 return ICE_ERR_NOT_IMPL;
6878 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
6879 /* Only one entry existed in the mapping and it was not already
6880 * a part of a VSI list. So, create a VSI list with the old and
6883 struct ice_fltr_info tmp_fltr;
6884 u16 vsi_handle_arr[2];
6886 /* A rule already exists with the new VSI being added */
6887 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
6888 new_fltr->sw_act.fwd_id.hw_vsi_id)
6889 return ICE_ERR_ALREADY_EXISTS;
/* Build a two-entry VSI list from the existing and the new handle. */
6891 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
6892 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
6893 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
6899 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
6900 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
6901 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
6902 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
6903 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
6904 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
6906 /* Update the previous switch rule of "forward to VSI" to
6909 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Record the new action/list id in the book-keeping entry. */
6913 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
6914 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
6915 m_entry->vsi_list_info =
6916 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
6919 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
6921 if (!m_entry->vsi_list_info)
6924 /* A rule already exists with the new VSI being added */
6925 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
6928 /* Update the previously created VSI list set with
6929 * the new VSI ID passed in
6931 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
6933 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
6935 ice_aqc_opc_update_sw_rules,
6937 /* update VSI list mapping info with new VSI ID */
6939 ice_set_bit(vsi_handle,
6940 m_entry->vsi_list_info->vsi_map);
/* One more VSI now subscribes to this filter (on success path). */
6943 m_entry->vsi_count++;
/* NOTE(review): elided listing — word-count accumulation, several NULL/
 * status checks, `break` statements inside the switch, the RX/TX branch
 * condition and the final return are on lines not shown here. */
6948 * ice_add_adv_rule - helper function to create an advanced switch rule
6949 * @hw: pointer to the hardware structure
6950 * @lkups: information on the words that needs to be looked up. All words
6951 * together makes one recipe
6952 * @lkups_cnt: num of entries in the lkups array
6953 * @rinfo: other information related to the rule that needs to be programmed
6954 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
6955 * ignored is case of error.
6957 * This function can program only 1 rule at a time. The lkups is used to
6958 * describe the all the words that forms the "lookup" portion of the recipe.
6959 * These words can span multiple protocols. Callers to this function need to
6960 * pass in a list of protocol headers with lookup information along and mask
6961 * that determines which words are valid from the given protocol header.
6962 * rinfo describes other information related to this rule such as forwarding
6963 * IDs, priority of this rule, etc.
6966 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6967 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
6968 struct ice_rule_query_data *added_entry)
6970 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
6971 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
6972 const struct ice_dummy_pkt_offsets *pkt_offsets;
6973 struct ice_aqc_sw_rules_elem *s_rule = NULL;
6974 struct LIST_HEAD_TYPE *rule_head;
6975 struct ice_switch_info *sw;
6976 enum ice_status status;
6977 const u8 *pkt = NULL;
6983 /* Initialize profile to result index bitmap */
6984 if (!hw->switch_info->prof_res_bm_init) {
6985 hw->switch_info->prof_res_bm_init = 1;
6986 ice_init_prof_result_bm(hw);
/* Profile rules carry no lookup words; anything else needs at least one. */
6989 prof_rule = ice_is_prof_rule(rinfo->tun_type);
6990 if (!prof_rule && !lkups_cnt)
6991 return ICE_ERR_PARAM;
6993 /* get # of words we need to match */
6995 for (i = 0; i < lkups_cnt; i++) {
6998 ptr = (u16 *)&lkups[i].m_u;
6999 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
/* Recipes are limited in how many 16-bit match words they can chain. */
7005 if (word_cnt > ICE_MAX_CHAIN_WORDS)
7006 return ICE_ERR_PARAM;
7008 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
7009 return ICE_ERR_PARAM;
7012 /* make sure that we can locate a dummy packet */
7013 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
7016 status = ICE_ERR_PARAM;
7017 goto err_ice_add_adv_rule;
/* Only these four forwarding actions are supported for advanced rules. */
7020 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
7021 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
7022 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
7023 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
7026 vsi_handle = rinfo->sw_act.vsi_handle;
7027 if (!ice_is_vsi_valid(hw, vsi_handle))
7028 return ICE_ERR_PARAM;
/* Resolve the driver VSI handle into the HW VSI number where needed. */
7030 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
7031 rinfo->sw_act.fwd_id.hw_vsi_id =
7032 ice_get_hw_vsi_num(hw, vsi_handle);
7033 if (rinfo->sw_act.flag & ICE_FLTR_TX)
7034 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
7036 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
/* If an identical rule already exists, just fold the new VSI into it
 * instead of programming a duplicate switch rule. */
7039 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
7041 /* we have to add VSI to VSI_LIST and increment vsi_count.
7042 * Also Update VSI list so that we can change forwarding rule
7043 * if the rule already exists, we will check if it exists with
7044 * same vsi_id, if not then add it to the VSI list if it already
7045 * exists if not then create a VSI list and add the existing VSI
7046 * ID and the new VSI ID to the list
7047 * We will add that VSI to the list
7049 status = ice_adv_add_update_vsi_list(hw, m_entry,
7050 &m_entry->rule_info,
7053 added_entry->rid = rid;
7054 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
7055 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* No existing rule: build a fresh switch-rule buffer (header + packet). */
7059 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
7060 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
7062 return ICE_ERR_NO_MEMORY;
/* Compose the 32-bit action word for the AQ rule. */
7063 act |= ICE_SINGLE_ACT_LAN_ENABLE;
7064 switch (rinfo->sw_act.fltr_act) {
7065 case ICE_FWD_TO_VSI:
7066 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
7067 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
7068 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
7071 act |= ICE_SINGLE_ACT_TO_Q;
7072 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
7073 ICE_SINGLE_ACT_Q_INDEX_M;
7075 case ICE_FWD_TO_QGRP:
/* Queue-group size is encoded as log2 of the group size. */
7076 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
7077 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
7078 act |= ICE_SINGLE_ACT_TO_Q;
7079 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
7080 ICE_SINGLE_ACT_Q_INDEX_M;
7081 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
7082 ICE_SINGLE_ACT_Q_REGION_M;
7084 case ICE_DROP_PACKET:
7085 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
7086 ICE_SINGLE_ACT_VALID_BIT;
/* default: unsupported action — fail the whole add. */
7089 status = ICE_ERR_CFG;
7090 goto err_ice_add_adv_rule;
7093 /* set the rule LOOKUP type based on caller specified 'RX'
7094 * instead of hardcoding it to be either LOOKUP_TX/RX
7096 * for 'RX' set the source to be the port number
7097 * for 'TX' set the source to be the source HW VSI number (determined
7101 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
7102 s_rule->pdata.lkup_tx_rx.src =
7103 CPU_TO_LE16(hw->port_info->lport);
7105 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
7106 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
7109 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
7110 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* Copy the dummy packet into the rule and overlay caller match values. */
7112 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
7113 pkt_len, pkt_offsets);
7115 goto err_ice_add_adv_rule;
/* For tunnel rules, patch the outer UDP destination port in the packet. */
7117 if (rinfo->tun_type != ICE_NON_TUN &&
7118 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
7119 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
7120 s_rule->pdata.lkup_tx_rx.hdr,
7123 goto err_ice_add_adv_rule;
/* Program the rule into HW via the admin queue. */
7126 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
7127 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
7130 goto err_ice_add_adv_rule;
/* Create the book-keeping entry mirroring what was programmed. */
7131 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
7132 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
7134 status = ICE_ERR_NO_MEMORY;
7135 goto err_ice_add_adv_rule;
7138 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
7139 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
7140 ICE_NONDMA_TO_NONDMA);
7141 if (!adv_fltr->lkups && !prof_rule) {
7142 status = ICE_ERR_NO_MEMORY;
7143 goto err_ice_add_adv_rule;
7146 adv_fltr->lkups_cnt = lkups_cnt;
7147 adv_fltr->rule_info = *rinfo;
/* HW-assigned rule index becomes the caller-visible rule id. */
7148 adv_fltr->rule_info.fltr_rule_id =
7149 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
7150 sw = hw->switch_info;
7151 sw->recp_list[rid].adv_rule = true;
7152 rule_head = &sw->recp_list[rid].filt_rules;
7154 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
7155 adv_fltr->vsi_count = 1;
7157 /* Add rule entry to book keeping list */
7158 LIST_ADD(&adv_fltr->list_entry, rule_head);
7160 added_entry->rid = rid;
7161 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
7162 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* Shared exit: on failure release the partially built book-keeping
 * entry; s_rule is freed on every path. */
7164 err_ice_add_adv_rule:
7165 if (status && adv_fltr) {
7166 ice_free(hw, adv_fltr->lkups);
7167 ice_free(hw, adv_fltr);
7170 ice_free(hw, s_rule);
/* NOTE(review): elided listing — status checks after the AQ update calls,
 * some closing braces and the final return are on lines not shown. */
7176 * ice_adv_rem_update_vsi_list
7177 * @hw: pointer to the hardware structure
7178 * @vsi_handle: VSI handle of the VSI to remove
7179 * @fm_list: filter management entry for which the VSI list management needs to
7182 static enum ice_status
7183 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
7184 struct ice_adv_fltr_mgmt_list_entry *fm_list)
7186 struct ice_vsi_list_map_info *vsi_list_info;
7187 enum ice_sw_lkup_type lkup_type;
7188 enum ice_status status;
/* Only meaningful for rules currently forwarding to a VSI list. */
7191 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
7192 fm_list->vsi_count == 0)
7193 return ICE_ERR_PARAM;
7195 /* A rule with the VSI being removed does not exist */
7196 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
7197 return ICE_ERR_DOES_NOT_EXIST;
7199 lkup_type = ICE_SW_LKUP_LAST;
7200 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
/* `true` removes the handle from the HW VSI list. */
7201 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
7202 ice_aqc_opc_update_sw_rules,
7207 fm_list->vsi_count--;
7208 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
7209 vsi_list_info = fm_list->vsi_list_info;
/* With a single subscriber left, collapse the list back to a plain
 * "forward to VSI" rule and free the now-unneeded VSI list. */
7210 if (fm_list->vsi_count == 1) {
7211 struct ice_fltr_info tmp_fltr;
7214 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
7216 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
7217 return ICE_ERR_OUT_OF_RANGE;
7219 /* Make sure VSI list is empty before removing it below */
7220 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
7222 ice_aqc_opc_update_sw_rules,
7227 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
7228 tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
7229 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
7230 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
7231 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
7232 tmp_fltr.fwd_id.hw_vsi_id =
7233 ice_get_hw_vsi_num(hw, rem_vsi_handle);
7234 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
7235 ice_get_hw_vsi_num(hw, rem_vsi_handle);
7237 /* Update the previous switch rule of "MAC forward to VSI" to
7238 * "MAC fwd to VSI list"
7240 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
7242 ice_debug(hw, ICE_DBG_SW,
7243 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
7244 tmp_fltr.fwd_id.hw_vsi_id, status);
7248 /* Remove the VSI list since it is no longer used */
7249 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
7251 ice_debug(hw, ICE_DBG_SW,
7252 "Failed to remove VSI list %d, error %d\n",
7253 vsi_list_id, status);
/* Drop the VSI-list map entry from book keeping. */
7257 LIST_DEL(&vsi_list_info->list_entry);
7258 ice_free(hw, vsi_list_info);
7259 fm_list->vsi_list_info = NULL;
/* NOTE(review): elided listing — `remove_rule = true` assignments, several
 * closing braces, a NULL check on list_elem, the s_rule type/hdr setup
 * line and the final return are on lines not shown. */
7266 * ice_rem_adv_rule - removes existing advanced switch rule
7267 * @hw: pointer to the hardware structure
7268 * @lkups: information on the words that needs to be looked up. All words
7269 * together makes one recipe
7270 * @lkups_cnt: num of entries in the lkups array
7271 * @rinfo: Its the pointer to the rule information for the rule
7273 * This function can be used to remove 1 rule at a time. The lkups is
7274 * used to describe all the words that forms the "lookup" portion of the
7275 * rule. These words can span multiple protocols. Callers to this function
7276 * need to pass in a list of protocol headers with lookup information along
7277 * and mask that determines which words are valid from the given protocol
7278 * header. rinfo describes other information related to this rule such as
7279 * forwarding IDs, priority of this rule, etc.
7282 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7283 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
7285 struct ice_adv_fltr_mgmt_list_entry *list_elem;
7286 struct ice_prot_lkup_ext lkup_exts;
7287 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
7288 enum ice_status status = ICE_SUCCESS;
7289 bool remove_rule = false;
7290 u16 i, rid, vsi_handle;
/* Rebuild the protocol/offset extraction words to locate the recipe. */
7292 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
7293 for (i = 0; i < lkups_cnt; i++) {
7296 if (lkups[i].type >= ICE_PROTOCOL_LAST)
7299 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
7304 /* Create any special protocol/offset pairs, such as looking at tunnel
7305 * bits by extracting metadata
7307 status = ice_add_special_words(rinfo, &lkup_exts);
7311 rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
7312 /* If did not find a recipe that match the existing criteria */
7313 if (rid == ICE_MAX_NUM_RECIPES)
7314 return ICE_ERR_PARAM;
7316 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
7317 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
7318 /* the rule is already removed */
/* Decide under the lock whether the HW rule itself must be deleted or
 * only this VSI's subscription in the shared VSI list. */
7321 ice_acquire_lock(rule_lock);
7322 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
7324 } else if (list_elem->vsi_count > 1) {
/* Other VSIs still use the list: just drop this VSI from it. */
7325 list_elem->vsi_list_info->ref_cnt--;
7326 remove_rule = false;
7327 vsi_handle = rinfo->sw_act.vsi_handle;
7328 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
7330 vsi_handle = rinfo->sw_act.vsi_handle;
7331 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
7333 ice_release_lock(rule_lock);
7336 if (list_elem->vsi_count == 0)
7339 ice_release_lock(rule_lock);
/* Remove the rule from HW via a remove_sw_rules AQ command. */
7341 struct ice_aqc_sw_rules_elem *s_rule;
7344 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
7346 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
7349 return ICE_ERR_NO_MEMORY;
7350 s_rule->pdata.lkup_tx_rx.act = 0;
7351 s_rule->pdata.lkup_tx_rx.index =
7352 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
7353 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
7354 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
7356 ice_aqc_opc_remove_sw_rules, NULL);
/* Only purge the book-keeping entry once HW removal succeeded. */
7357 if (status == ICE_SUCCESS) {
7358 ice_acquire_lock(rule_lock);
7359 LIST_DEL(&list_elem->list_entry);
7360 ice_free(hw, list_elem->lkups);
7361 ice_free(hw, list_elem);
7362 ice_release_lock(rule_lock);
7364 ice_free(hw, s_rule);
/* NOTE(review): elided listing — the loop's closing braces are not shown. */
7370 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
7371 * @hw: pointer to the hardware structure
7372 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
7374 * This function is used to remove 1 rule at a time. The removal is based on
7375 * the remove_entry parameter. This function will remove rule for a given
7376 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
7379 ice_rem_adv_rule_by_id(struct ice_hw *hw,
7380 struct ice_rule_query_data *remove_entry)
7382 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7383 struct LIST_HEAD_TYPE *list_head;
7384 struct ice_adv_rule_info rinfo;
7385 struct ice_switch_info *sw;
7387 sw = hw->switch_info;
/* Recipe must have been created before any of its rules can exist. */
7388 if (!sw->recp_list[remove_entry->rid].recp_created)
7389 return ICE_ERR_PARAM;
7390 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
7391 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
/* Match on the HW rule id, then delegate the actual removal to
 * ice_rem_adv_rule() using a copy of the stored rule info with the
 * caller's VSI handle substituted in. */
7393 if (list_itr->rule_info.fltr_rule_id ==
7394 remove_entry->rule_id) {
7395 rinfo = list_itr->rule_info;
7396 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
7397 return ice_rem_adv_rule(hw, list_itr->lkups,
7398 list_itr->lkups_cnt, &rinfo);
/* No rule with that id was found under the recipe. */
7401 return ICE_ERR_PARAM;
/* NOTE(review): elided listing — `continue` statements, the map_info check,
 * the early-exit on status and the final return are on lines not shown.
 * The kdoc name "ice_rem_adv_for_vsi" does not match the definition
 * "ice_rem_adv_rule_for_vsi" — presumably a stale comment; verify upstream. */
7405 * ice_rem_adv_for_vsi - removes existing advanced switch rules for a
7407 * @hw: pointer to the hardware structure
7408 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
7410 * This function is used to remove all the rules for a given VSI and as soon
7411 * as removing a rule fails, it will return immediately with the error code,
7412 * else it will return ICE_SUCCESS
7414 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
7416 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7417 struct ice_vsi_list_map_info *map_info;
7418 struct LIST_HEAD_TYPE *list_head;
7419 struct ice_adv_rule_info rinfo;
7420 struct ice_switch_info *sw;
7421 enum ice_status status;
7422 u16 vsi_list_id = 0;
7425 sw = hw->switch_info;
/* Walk every created recipe that holds advanced rules. */
7426 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
7427 if (!sw->recp_list[rid].recp_created)
7429 if (!sw->recp_list[rid].adv_rule)
7431 list_head = &sw->recp_list[rid].filt_rules;
7433 LIST_FOR_EACH_ENTRY(list_itr, list_head,
7434 ice_adv_fltr_mgmt_list_entry, list_entry) {
/* Find whether this VSI subscribes to the rule's VSI list. */
7435 map_info = ice_find_vsi_list_entry(&sw->recp_list[rid],
/* Remove via the standard path, substituting this VSI handle. */
7440 rinfo = list_itr->rule_info;
7441 rinfo.sw_act.vsi_handle = vsi_handle;
7442 status = ice_rem_adv_rule(hw, list_itr->lkups,
7443 list_itr->lkups_cnt, &rinfo);
/* NOTE(review): elided listing — the inner while/for construct around the
 * per-VSI replay, some status checks and the final return are not shown. */
7453 * ice_replay_fltr - Replay all the filters stored by a specific list head
7454 * @hw: pointer to the hardware structure
7455 * @list_head: list for which filters needs to be replayed
7456 * @recp_id: Recipe ID for which rules need to be replayed
7458 static enum ice_status
7459 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
7461 struct ice_fltr_mgmt_list_entry *itr;
7462 enum ice_status status = ICE_SUCCESS;
7463 struct ice_sw_recipe *recp_list;
7464 u8 lport = hw->port_info->lport;
7465 struct LIST_HEAD_TYPE l_head;
7467 if (LIST_EMPTY(list_head))
7470 recp_list = &hw->switch_info->recp_list[recp_id];
7471 /* Move entries from the given list_head to a temporary l_head so that
7472 * they can be replayed. Otherwise when trying to re-add the same
7473 * filter, the function will return already exists
7475 LIST_REPLACE_INIT(list_head, &l_head);
7477 /* Mark the given list_head empty by reinitializing it so filters
7478 * could be added again by *handler
7480 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
7482 struct ice_fltr_list_entry f_entry;
7484 f_entry.fltr_info = itr->fltr_info;
/* Simple case: a single-VSI, non-VLAN filter replays directly. */
7485 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
7486 status = ice_add_rule_internal(hw, recp_list, lport,
7488 if (status != ICE_SUCCESS)
7493 /* Add a filter per VSI separately */
/* Multi-VSI (or VLAN) case: peel handles off the VSI map one by
 * one and re-add a forward-to-VSI filter for each. */
7498 ice_find_first_bit(itr->vsi_list_info->vsi_map,
7500 if (!ice_is_vsi_valid(hw, vsi_handle))
7503 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
7504 f_entry.fltr_info.vsi_handle = vsi_handle;
7505 f_entry.fltr_info.fwd_id.hw_vsi_id =
7506 ice_get_hw_vsi_num(hw, vsi_handle);
7507 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
7508 if (recp_id == ICE_SW_LKUP_VLAN)
7509 status = ice_add_vlan_internal(hw, recp_list,
7512 status = ice_add_rule_internal(hw, recp_list,
7515 if (status != ICE_SUCCESS)
7520 /* Clear the filter management list */
7521 ice_rem_sw_rule_info(hw, &l_head);
/* NOTE(review): elided listing — the early-break on error and the final
 * return are on lines not shown. */
7526 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
7527 * @hw: pointer to the hardware structure
7529 * NOTE: This function does not clean up partially added filters on error.
7530 * It is up to caller of the function to issue a reset or fail early.
7532 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
7534 struct ice_switch_info *sw = hw->switch_info;
7535 enum ice_status status = ICE_SUCCESS;
/* Replay each recipe's stored filter list in recipe-id order. */
7538 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7539 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
7541 status = ice_replay_fltr(hw, i, head);
7542 if (status != ICE_SUCCESS)
/* NOTE(review): elided listing — `continue` statements, closing braces and
 * the final return are on lines not shown here. */
7549 * ice_replay_vsi_fltr - Replay filters for requested VSI
7550 * @hw: pointer to the hardware structure
7551 * @pi: pointer to port information structure
7552 * @sw: pointer to switch info struct for which function replays filters
7553 * @vsi_handle: driver VSI handle
7554 * @recp_id: Recipe ID for which rules need to be replayed
7555 * @list_head: list for which filters need to be replayed
7557 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
7558 * It is required to pass valid VSI handle.
7560 static enum ice_status
7561 ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
7562 struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
7563 struct LIST_HEAD_TYPE *list_head)
7565 struct ice_fltr_mgmt_list_entry *itr;
7566 enum ice_status status = ICE_SUCCESS;
7567 struct ice_sw_recipe *recp_list;
7570 if (LIST_EMPTY(list_head))
7572 recp_list = &sw->recp_list[recp_id];
7573 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
7575 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
7577 struct ice_fltr_list_entry f_entry;
7579 f_entry.fltr_info = itr->fltr_info;
/* Direct replay when the stored filter belongs to exactly this VSI
 * and is not a VLAN filter. */
7580 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
7581 itr->fltr_info.vsi_handle == vsi_handle) {
7582 /* update the src in case it is VSI num */
7583 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
7584 f_entry.fltr_info.src = hw_vsi_id;
7585 status = ice_add_rule_internal(hw, recp_list,
7588 if (status != ICE_SUCCESS)
/* Otherwise only replay if this VSI is in the filter's VSI map. */
7592 if (!itr->vsi_list_info ||
7593 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
7595 /* Clearing it so that the logic can add it back */
7596 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map)
7597 f_entry.fltr_info.vsi_handle = vsi_handle;
7598 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
7599 /* update the src in case it is VSI num */
7600 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
7601 f_entry.fltr_info.src = hw_vsi_id;
7602 if (recp_id == ICE_SW_LKUP_VLAN)
7603 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
7605 status = ice_add_rule_internal(hw, recp_list,
7608 if (status != ICE_SUCCESS)
/* NOTE(review): elided listing — the early-break on error, closing braces
 * and the final return are on lines not shown. */
7616 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
7617 * @hw: pointer to the hardware structure
7618 * @vsi_handle: driver VSI handle
7619 * @list_head: list for which filters need to be replayed
7621 * Replay the advanced rule for the given VSI.
7623 static enum ice_status
7624 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
7625 struct LIST_HEAD_TYPE *list_head)
7627 struct ice_rule_query_data added_entry = { 0 };
7628 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
7629 enum ice_status status = ICE_SUCCESS;
7631 if (LIST_EMPTY(list_head))
7633 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
7635 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
7636 u16 lk_cnt = adv_fltr->lkups_cnt;
/* Only replay rules that were bound to the requested VSI. */
7638 if (vsi_handle != rinfo->sw_act.vsi_handle)
/* Re-program through the normal add path; added_entry result is
 * collected but the stored book keeping drives the replay. */
7640 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
/* NOTE(review): elided listing — the function's opening line with its
 * return type, some braces and the final return are on lines not shown. */
7649 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
7650 * @hw: pointer to the hardware structure
7651 * @pi: pointer to port information structure
7652 * @vsi_handle: driver VSI handle
7654 * Replays filters for requested VSI via vsi_handle.
7657 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
7660 struct ice_switch_info *sw = hw->switch_info;
7661 enum ice_status status;
7664 /* Update the recipes that were created */
7665 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7666 struct LIST_HEAD_TYPE *head;
/* Replay rules come from the per-recipe filt_replay_rules list;
 * legacy recipes go through ice_replay_vsi_fltr(), recipes flagged
 * adv_rule go through the advanced-rule replay path. */
7668 head = &sw->recp_list[i].filt_replay_rules;
7669 if (!sw->recp_list[i].adv_rule)
7670 status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
7673 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
7674 if (status != ICE_SUCCESS)
/* NOTE(review): elided listing — the loop-variable declaration, an `else`
 * before the adv-rule branch and closing braces are on lines not shown.
 * The kdoc name "ice_rm_all_sw_replay_rule" does not match the definition
 * "ice_rm_sw_replay_rule_info" — presumably stale; verify upstream. */
7682 * ice_rm_all_sw_replay_rule - helper function to delete filter replay rules
7683 * @hw: pointer to the HW struct
7684 * @sw: pointer to switch info struct for which function removes filters
7686 * Deletes the filter replay rules for given switch
7688 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
/* Free every recipe's non-empty replay list, dispatching to the legacy
 * or advanced-rule cleanup helper based on the recipe's adv_rule flag. */
7695 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7696 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
7697 struct LIST_HEAD_TYPE *l_head;
7699 l_head = &sw->recp_list[i].filt_replay_rules;
7700 if (!sw->recp_list[i].adv_rule)
7701 ice_rem_sw_rule_info(hw, l_head);
7703 ice_rem_adv_rule_info(hw, l_head);
7709 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
7710 * @hw: pointer to the HW struct
7712 * Deletes the filter replay rules.
7714 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
7716 ice_rm_sw_replay_rule_info(hw, hw->switch_info);