1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
/* Byte offsets into the dummy ethernet header below, plus protocol IDs
 * used when building switch filter rules.
 */
#define ICE_ETH_DA_OFFSET 0		/* destination MAC address */
#define ICE_ETH_ETHTYPE_OFFSET 12	/* Ethertype field */
#define ICE_ETH_VLAN_TCI_OFFSET 14	/* VLAN TCI when an 802.1Q tag is present */
#define ICE_MAX_VLAN_ID 0xFFF		/* VLAN ID is a 12-bit field */
#define ICE_IPV4_NVGRE_PROTO_ID 0x002F	/* IPv4 protocol number 47 = GRE */
#define ICE_PPP_IPV6_PROTO_ID 0x0057	/* PPP protocol ID for IPv6 */
#define ICE_IPV6_ETHER_ID 0x86DD	/* Ethertype for IPv6 */
17 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
18 * struct to configure any switch filter rules.
19 * {DA (6 bytes), SA(6 bytes),
20 * Ether type (2 bytes for header without VLAN tag) OR
21 * VLAN tag (4 bytes for header with VLAN tag) }
23 * Word on Hardcoded values
24 * byte 0 = 0x2: to identify it as locally administered DA MAC
25 * byte 6 = 0x2: to identify it as locally administered SA MAC
26 * byte 12 = 0x81 & byte 13 = 0x00:
27 * In case of VLAN filter first two bytes defines ether type (0x8100)
28 * and remaining two bytes are placeholder for programming a given VLAN ID
29 * In case of Ether type filter it is treated as header without VLAN tag
30 * and byte 12 and 13 is used to program a given Ether type instead
32 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
36 struct ice_dummy_pkt_offsets {
37 enum ice_protocol_type type;
38 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
/* Offsets into dummy_gre_tcp_packet (bytes from start of outer MAC).
 * NOTE(review): entries for the other headers labeled in the template
 * (NVGRE 34, inner MAC 42, inner IPv4 56, TCP 76) appear elided from this
 * excerpt -- confirm against the full source.
 */
static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy packet: outer MAC + IPv4 + NVGRE + inner MAC + IPv4 + TCP */
static const u8 dummy_gre_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,	/* protocol 0x2F = GRE */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,	/* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 76 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,	/* data offset 5, SYN, window 0x2000 */
	0x00, 0x00, 0x00, 0x00
/* Offsets into dummy_gre_udp_packet (bytes from start of outer MAC).
 * NOTE(review): entries for the other labeled headers appear elided from
 * this excerpt -- confirm against the full source.
 */
static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy packet: outer MAC + IPv4 + NVGRE + inner MAC + IPv4 + UDP */
static const u8 dummy_gre_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,	/* protocol 0x2F = GRE */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 76 */
	0x00, 0x08, 0x00, 0x00,	/* UDP length 8 */
/* Offsets into dummy_udp_tun_tcp_packet (bytes from start of outer MAC) */
static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_VXLAN_GPE, 42 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy packet: MAC + IPv4 + UDP + VXLAN + inner MAC + IPv4 + TCP */
static const u8 dummy_udp_tun_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x5a,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x11, 0x00, 0x00,	/* TTL 64, protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5,	/* ICE_UDP_OF 34; dst port 4789 (VXLAN) */
	0x00, 0x46, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58,	/* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x28,	/* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x06, 0x00, 0x00,	/* TTL 64, protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 84 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,	/* data offset 5, SYN, window 0x2000 */
	0x00, 0x00, 0x00, 0x00
/* Offsets into dummy_udp_tun_udp_packet (bytes from start of outer MAC) */
static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_VXLAN_GPE, 42 },
	{ ICE_UDP_ILOS, 84 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy packet: MAC + IPv4 + UDP + VXLAN + inner MAC + IPv4 + UDP */
static const u8 dummy_udp_tun_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x4e,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5,	/* ICE_UDP_OF 34; dst port 4789 (VXLAN) */
	0x00, 0x3a, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58,	/* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x1c,	/* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 84 */
	0x00, 0x08, 0x00, 0x00,	/* UDP length 8 */
/* offset info for MAC + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_UDP_ILOS, 34 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + IPv4 + UDP */
static const u8 dummy_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x1c,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 34 */
	0x00, 0x08, 0x00, 0x00,	/* UDP length 8 */

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14 },
	{ ICE_IPV4_OFOS, 18 },
	{ ICE_UDP_ILOS, 38 },
	{ ICE_PROTOCOL_LAST, 0 },

/* C-tag (802.1Q), IPv4:UDP dummy packet */
static const u8 dummy_vlan_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00,		/* ICE_ETYPE_OL 12; TPID 0x8100 */

	0x00, 0x00, 0x08, 0x00,	/* ICE_VLAN_OFOS 14; TCI, then ethertype 0x0800 */

	0x45, 0x00, 0x00, 0x1c,	/* ICE_IPV4_OFOS 18 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 38 */
	0x00, 0x08, 0x00, 0x00,	/* UDP length 8 */

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + IPv4 + TCP */
static const u8 dummy_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x28,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,	/* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,	/* data offset 5, no flags */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14 },
	{ ICE_IPV4_OFOS, 18 },
	{ ICE_PROTOCOL_LAST, 0 },

/* C-tag (802.1Q), IPv4:TCP dummy packet */
static const u8 dummy_vlan_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00,		/* ICE_ETYPE_OL 12; TPID 0x8100 */

	0x00, 0x00, 0x08, 0x00,	/* ICE_VLAN_OFOS 14; TCI, then ethertype 0x0800 */

	0x45, 0x00, 0x00, 0x28,	/* ICE_IPV4_OFOS 18 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,	/* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 38 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,	/* data offset 5, no flags */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv6 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV6_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + IPv6 + TCP */
static const u8 dummy_tcp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 14 */
	0x00, 0x14, 0x06, 0x00,	/* payload len 0x14; Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,	/* data offset 5, no flags */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* C-tag (802.1Q): IPv6 + TCP */
static const struct ice_dummy_pkt_offsets
dummy_vlan_tcp_ipv6_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14 },
	{ ICE_IPV6_OFOS, 18 },
	{ ICE_PROTOCOL_LAST, 0 },

/* C-tag (802.1Q), IPv6 + TCP dummy packet */
static const u8 dummy_vlan_tcp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00,		/* ICE_ETYPE_OL 12; TPID 0x8100 */

	0x00, 0x00, 0x86, 0xDD,	/* ICE_VLAN_OFOS 14; TCI, then ethertype 0x86DD */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 18 */
	0x00, 0x14, 0x06, 0x00,	/* payload len 0x14; Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 58 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,	/* data offset 5, no flags */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv6 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV6_OFOS, 14 },
	{ ICE_UDP_ILOS, 54 },
	{ ICE_PROTOCOL_LAST, 0 },

/* IPv6 + UDP dummy packet */
static const u8 dummy_udp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 14 */
	0x00, 0x08, 0x11, 0x00,	/* payload len 8; Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 54 */
	0x00, 0x08, 0x00, 0x00,	/* UDP length 8 */

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* C-tag (802.1Q): IPv6 + UDP */
static const struct ice_dummy_pkt_offsets
dummy_vlan_udp_ipv6_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14 },
	{ ICE_IPV6_OFOS, 18 },
	{ ICE_UDP_ILOS, 58 },
	{ ICE_PROTOCOL_LAST, 0 },

/* C-tag (802.1Q), IPv6 + UDP dummy packet */
static const u8 dummy_vlan_udp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00,		/* ICE_ETYPE_OL 12; TPID 0x8100 */

	0x00, 0x00, 0x86, 0xDD,	/* ICE_VLAN_OFOS 14; TCI, then ethertype 0x86DD */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 18 */
	0x00, 0x08, 0x11, 0x00,	/* payload len 8; Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 58 */
	0x00, 0x08, 0x00, 0x00,	/* UDP length 8 */

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* Offsets into dummy_udp_gtp_packet (bytes from start of MAC).
 * NOTE(review): entries for the UDP and GTP headers labeled in the
 * template appear elided from this excerpt -- confirm against the full
 * source.
 */
static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy packet: MAC + IPv4 + UDP + GTP-U with PDU session extension */
static const u8 dummy_udp_gtp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x30,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68,	/* ICE_UDP_OF 34; dst port 2152 (GTP-U) */
	0x00, 0x1c, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x0c,	/* ICE_GTP 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,	/* next ext hdr 0x85 = PDU session container */

	0x02, 0x00, 0x00, 0x00,	/* PDU Session extension header */
	0x00, 0x00, 0x00, 0x00,
/* Offsets into the PPPoE dummy packets (bytes from start of MAC) */
static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14},
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy packet: MAC + VLAN + PPPoE session + PPP(IPv4) + IPv4 */
static const u8 dummy_pppoe_ipv4_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00,		/* ICE_ETYPE_OL 12; TPID 0x8100 */

	0x00, 0x00, 0x88, 0x64,	/* ICE_VLAN_OFOS 14; ethertype 0x8864 = PPPoE session */

	0x11, 0x00, 0x00, 0x00,	/* ICE_PPPOE 18 */

	0x00, 0x21,		/* PPP Link Layer 24; 0x0021 = IPv4 */

	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 26 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */

/* Dummy packet: MAC + VLAN + PPPoE session + PPP(IPv6) + IPv6 */
static const u8 dummy_pppoe_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00,		/* ICE_ETYPE_OL 12; TPID 0x8100 */

	0x00, 0x00, 0x88, 0x64,	/* ICE_VLAN_OFOS 14; ethertype 0x8864 = PPPoE session */

	0x11, 0x00, 0x00, 0x00,	/* ICE_PPPOE 18 */

	0x00, 0x57,		/* PPP Link Layer 24; 0x0057 = IPv6 */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 26 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* this is a recipe to profile association bitmap */
static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
			  ICE_MAX_NUM_PROFILES);

/* this is a profile to recipe association bitmap */
static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
			  ICE_MAX_NUM_RECIPES);

/* forward declaration: defined later in this file */
static void ice_get_recp_to_prof_map(struct ice_hw *hw);
604 * ice_collect_result_idx - copy result index values
605 * @buf: buffer that contains the result index
606 * @recp: the recipe struct to copy data into
608 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
609 struct ice_sw_recipe *recp)
611 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
612 ice_set_bit(buf->content.result_indx &
613 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
/**
 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
 * @hw: pointer to hardware structure
 * @recps: struct that we need to populate
 * @rid: recipe ID that we are populating
 * @refresh_required: true if we should get recipe to profile mapping from FW
 *
 * This function is used to populate all the necessary entries into our
 * bookkeeping so that we have a current list of all the recipes that are
 * programmed in the firmware.
 *
 * NOTE(review): several lines (braces, error-path guards, some local
 * declarations such as sub_recps/fv_word_idx/is_root/off) appear elided
 * from this excerpt; code below is kept verbatim.
 */
static enum ice_status
ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
		    bool *refresh_required)
	ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	u16 num_recps = ICE_MAX_NUM_RECIPES;
	struct ice_prot_lkup_ext *lkup_exts;
	enum ice_status status;

	/* accumulates every result index seen across the recipe chain */
	ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);

	/* we need a buffer big enough to accommodate all the recipes */
	tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
		ICE_MAX_NUM_RECIPES, sizeof(*tmp));
		return ICE_ERR_NO_MEMORY;

	tmp[0].recipe_indx = rid;
	status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
	/* non-zero status meaning recipe doesn't exist */

	/* Get recipe to profile map so that we can get the fv from lkups that
	 * we read for a recipe from FW. Since we want to minimize the number of
	 * times we make this FW call, just make one call and cache the copy
	 * until a new recipe is added. This operation is only required the
	 * first time to get the changes from FW. Then to search existing
	 * entries we don't need to update the cache again until another recipe
	 * is added.
	 */
	if (*refresh_required) {
		ice_get_recp_to_prof_map(hw);
		*refresh_required = false;

	/* Start populating all the entries for recps[rid] based on lkups from
	 * firmware. Note that we are only creating the root recipe in our
	 * database.
	 */
	lkup_exts = &recps[rid].lkup_exts;

	for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
		struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
		struct ice_recp_grp_entry *rg_entry;
		u8 i, prof, idx, prot = 0;

		rg_entry = (struct ice_recp_grp_entry *)
			ice_malloc(hw, sizeof(*rg_entry));
			status = ICE_ERR_NO_MEMORY;

		idx = root_bufs.recipe_indx;
		is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;

		/* Mark all result indices in this chain */
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
			ice_set_bit(root_bufs.content.result_indx &
				    ~ICE_AQ_RECIPE_RESULT_EN, result_bm);

		/* get the first profile that is associated with rid */
		prof = ice_find_first_bit(recipe_to_profile[idx],
					  ICE_MAX_NUM_PROFILES);
		for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
			/* lkup_indx[0] is skipped: index 0 is not a lookup word */
			u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];

			rg_entry->fv_idx[i] = lkup_indx;
			rg_entry->fv_mask[i] =
				LE16_TO_CPU(root_bufs.content.mask[i + 1]);

			/* If the recipe is a chained recipe then all its
			 * child recipe's result will have a result index.
			 * To fill fv_words we should not use those result
			 * index, we only need the protocol ids and offsets.
			 * We will skip all the fv_idx which stores result
			 * index in them. We also need to skip any fv_idx which
			 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
			 * valid offset value.
			 */
			if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
					   rg_entry->fv_idx[i]) ||
			    rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
			    rg_entry->fv_idx[i] == 0)

			/* translate field-vector index to protocol id/offset */
			ice_find_prot_off(hw, ICE_BLK_SW, prof,
					  rg_entry->fv_idx[i], &prot, &off);
			lkup_exts->fv_words[fv_word_idx].prot_id = prot;
			lkup_exts->fv_words[fv_word_idx].off = off;

		/* populate rg_list with the data from the child entry of this
		 * recipe
		 */
		LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);

		/* Propagate some data to the recipe database */
		recps[idx].is_root = !!is_root;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
		ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
			recps[idx].chain_idx = root_bufs.content.result_indx &
				~ICE_AQ_RECIPE_RESULT_EN;
			ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
		recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;

		/* Only do the following for root recipes entries */
		ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
			   sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
		recps[idx].root_rid = root_bufs.content.rid &
			~ICE_AQ_RECIPE_ID_IS_ROOT;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;

	/* Complete initialization of the root recipe entry */
	lkup_exts->n_val_words = fv_word_idx;
	recps[rid].big_recp = (num_recps > 1);
	recps[rid].n_grp_count = (u8)num_recps;
	/* keep a private copy of the raw FW recipe buffers */
	recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
		ice_memdup(hw, tmp, recps[rid].n_grp_count *
			   sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
	if (!recps[rid].root_buf)

	/* Copy result indexes */
	ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
	recps[rid].recp_created = true;
/**
 * ice_get_recp_to_prof_map - updates recipe to profile mapping
 * @hw: pointer to hardware structure
 *
 * This function is used to populate recipe_to_profile matrix where index to
 * this array is the recipe ID and the element is the mapping of which profiles
 * is this recipe mapped to.
 *
 * NOTE(review): return-type line, braces, and loop-variable declarations
 * appear elided from this excerpt; code below is kept verbatim.
 */
ice_get_recp_to_prof_map(struct ice_hw *hw)
	ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);

	/* query the recipe associations of every profile */
	for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
		ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
		ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
		/* a failed query leaves this profile's mapping zeroed */
		if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
		ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
			      ICE_MAX_NUM_RECIPES);
		/* mirror the association into the recipe_to_profile matrix */
		for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
			if (ice_is_bit_set(r_bitmap, j))
				ice_set_bit(i, recipe_to_profile[j]);
/**
 * ice_init_def_sw_recp - initialize the recipe book keeping tables
 * @hw: pointer to the HW struct
 * @recp_list: pointer to sw recipe list
 *
 * Allocate memory for the entire recipe table and initialize the structures/
 * entries corresponding to basic recipes.
 *
 * NOTE(review): return-type line, allocation-failure guard, and function
 * tail appear elided from this excerpt; code below is kept verbatim.
 */
ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
	struct ice_sw_recipe *recps;

	recps = (struct ice_sw_recipe *)
		ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
		return ICE_ERR_NO_MEMORY;

	/* seed every recipe slot with its ID, empty rule lists, and a lock */
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		recps[i].root_rid = i;
		INIT_LIST_HEAD(&recps[i].filt_rules);
		INIT_LIST_HEAD(&recps[i].filt_replay_rules);
		INIT_LIST_HEAD(&recps[i].rg_list);
		ice_init_lock(&recps[i].filt_rule_lock);
/**
 * ice_aq_get_sw_cfg - get switch configuration
 * @hw: pointer to the hardware structure
 * @buf: pointer to the result buffer
 * @buf_size: length of the buffer available for response
 * @req_desc: pointer to requested descriptor
 * @num_elems: pointer to number of elements
 * @cd: pointer to command details structure or NULL
 *
 * Get switch configuration (0x0200) to be placed in 'buff'.
 * This admin command returns information such as initial VSI/port number
 * and switch ID it belongs to.
 *
 * NOTE: *req_desc is both an input/output parameter.
 * The caller of this function first calls this function with *request_desc set
 * to 0. If the response from f/w has *req_desc set to 0, all the switch
 * configuration information has been returned; if non-zero (meaning not all
 * the information was returned), the caller should call this function again
 * with *req_desc set to the previous value returned by f/w to get the
 * next block of switch configuration information.
 *
 * *num_elems is output only parameter. This reflects the number of elements
 * in response buffer. The caller of this function to use *num_elems while
 * parsing the response buffer.
 */
static enum ice_status
ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
		  u16 buf_size, u16 *req_desc, u16 *num_elems,
		  struct ice_sq_cd *cd)
	struct ice_aqc_get_sw_cfg *cmd;
	enum ice_status status;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
	cmd = &desc.params.get_sw_conf;
	cmd->element = CPU_TO_LE16(*req_desc);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	/* pass back continuation cookie and element count from the response */
	*req_desc = LE16_TO_CPU(cmd->element);
	*num_elems = LE16_TO_CPU(cmd->num_elems);
/**
 * ice_alloc_sw - allocate resources specific to switch
 * @hw: pointer to the HW struct
 * @ena_stats: true to turn on VEB stats
 * @shared_res: true for shared resource, false for dedicated resource
 * @sw_id: switch ID returned
 * @counter_id: VEB counter ID returned
 *
 * allocates switch resources (SWID and VEB counter) (0x0208)
 *
 * NOTE(review): return-type line, braces, and some guards/locals (e.g.
 * buf_len declaration, "if (!sw_buf)") appear elided from this excerpt;
 * code below is kept verbatim.
 */
ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	struct ice_aqc_res_elem *sw_ele;
	enum ice_status status;

	buf_len = sizeof(*sw_buf);
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
		return ICE_ERR_NO_MEMORY;

	/* Prepare buffer for switch ID.
	 * The number of resource entries in buffer is passed as 1 since only a
	 * single switch/VEB instance is allocated, and hence a single sw_id
	 * is requested.
	 */
	sw_buf->num_elems = CPU_TO_LE16(1);
		CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
			    (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
			    ICE_AQC_RES_TYPE_FLAG_DEDICATED));

	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
		goto ice_alloc_sw_exit;

	/* SWID handed back by firmware in the response element */
	sw_ele = &sw_buf->elem[0];
	*sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);

	/* Prepare buffer for VEB Counter */
	enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
	struct ice_aqc_alloc_free_res_elem *counter_buf;
	struct ice_aqc_res_elem *counter_ele;

	counter_buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
		status = ICE_ERR_NO_MEMORY;
		goto ice_alloc_sw_exit;

	/* The number of resource entries in buffer is passed as 1 since
	 * only a single switch/VEB instance is allocated, and hence a
	 * single VEB counter is requested.
	 */
	counter_buf->num_elems = CPU_TO_LE16(1);
	counter_buf->res_type =
		CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
			    ICE_AQC_RES_TYPE_FLAG_DEDICATED);
	status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
		ice_free(hw, counter_buf);
		goto ice_alloc_sw_exit;

	/* counter ID handed back by firmware in the response element */
	counter_ele = &counter_buf->elem[0];
	*counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
	ice_free(hw, counter_buf);

	ice_free(hw, sw_buf);
/**
 * ice_free_sw - free resources specific to switch
 * @hw: pointer to the HW struct
 * @sw_id: switch ID returned
 * @counter_id: VEB counter ID returned
 *
 * free switch resources (SWID and VEB counter) (0x0209)
 *
 * NOTE: This function frees multiple resources. It continues
 * releasing other resources even after it encounters error.
 * The error code returned is the last error it encountered.
 *
 * NOTE(review): braces and some guards/locals (e.g. buf_len declaration,
 * "if (!sw_buf)") appear elided from this excerpt; code below is kept
 * verbatim.
 */
enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
	struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
	enum ice_status status, ret_status;

	buf_len = sizeof(*sw_buf);
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
		return ICE_ERR_NO_MEMORY;

	/* Prepare buffer to free for switch ID res.
	 * The number of resource entries in buffer is passed as 1 since only a
	 * single switch/VEB instance is freed, and hence a single sw_id
	 * is released.
	 */
	sw_buf->num_elems = CPU_TO_LE16(1);
	sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
	sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);

	ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
					   ice_aqc_opc_free_res, NULL);

		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");

	/* Prepare buffer to free for VEB Counter resource */
	counter_buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
		ice_free(hw, sw_buf);
		return ICE_ERR_NO_MEMORY;

	/* The number of resource entries in buffer is passed as 1 since only a
	 * single switch/VEB instance is freed, and hence a single VEB counter
	 * is released.
	 */
	counter_buf->num_elems = CPU_TO_LE16(1);
	counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
	counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);

	status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
				       ice_aqc_opc_free_res, NULL);
		ice_debug(hw, ICE_DBG_SW,
			  "VEB counter resource could not be freed\n");
		ret_status = status;

	ice_free(hw, counter_buf);
	ice_free(hw, sw_buf);
/**
 * ice_aq_add_vsi - add a VSI context to the hardware
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware (0x0210)
 */
ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
	struct ice_aqc_add_update_free_vsi_resp *res;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.vsi_cmd;
	res = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);

	/* when not allocating from the pool, tell FW which VSI number to use */
	if (!vsi_ctx->alloc_from_pool)
		cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
					   ICE_AQ_VSI_IS_VALID);

	cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);

	/* command carries a read buffer (the VSI info) */
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);
	/* record FW-assigned VSI number and pool accounting */
	vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
	vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
	vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
/**
 * ice_aq_free_vsi - free a VSI context from the hardware
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware (0x0213)
 */
ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		bool keep_vsi_alloc, struct ice_sq_cd *cd)
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);

	cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
	/* keeps the VSI allocation with this PF when keep_vsi_alloc is set */
	cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	/* pool accounting reported in the response */
	vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
	vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
/**
 * ice_aq_update_vsi - update a VSI context in the hardware
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Update VSI context in the hardware (0x0211)
 */
ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		  struct ice_sq_cd *cd)
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);

	cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);

	/* command carries a read buffer (the VSI info) */
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);
	/* pool accounting reported in the response */
	vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
	vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1147 * ice_is_vsi_valid - check whether the VSI is valid or not
1148 * @hw: pointer to the HW struct
1149 * @vsi_handle: VSI handle
1151 * check whether the VSI is valid or not
1153 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1155 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1159 * ice_get_hw_vsi_num - return the HW VSI number
1160 * @hw: pointer to the HW struct
1161 * @vsi_handle: VSI handle
1163 * return the HW VSI number
1164 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1166 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1168 return hw->vsi_ctx[vsi_handle]->vsi_num;
1172 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1173 * @hw: pointer to the HW struct
1174 * @vsi_handle: VSI handle
1176 * return the VSI context entry for a given VSI handle
1178 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1180 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1184 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1185 * @hw: pointer to the HW struct
1186 * @vsi_handle: VSI handle
1187 * @vsi: VSI context pointer
1189 * save the VSI context entry for a given VSI handle
1192 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1194 hw->vsi_ctx[vsi_handle] = vsi;
1198 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1199 * @hw: pointer to the HW struct
1200 * @vsi_handle: VSI handle
1202 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1204 struct ice_vsi_ctx *vsi;
1207 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1210 ice_for_each_traffic_class(i) {
1211 if (vsi->lan_q_ctx[i]) {
1212 ice_free(hw, vsi->lan_q_ctx[i]);
1213 vsi->lan_q_ctx[i] = NULL;
1219 * ice_clear_vsi_ctx - clear the VSI context entry
1220 * @hw: pointer to the HW struct
1221 * @vsi_handle: VSI handle
1223 * clear the VSI context entry
1225 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1227 struct ice_vsi_ctx *vsi;
1229 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1231 ice_clear_vsi_q_ctx(hw, vsi_handle);
1233 hw->vsi_ctx[vsi_handle] = NULL;
1238 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1239 * @hw: pointer to the HW struct
1241 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1245 for (i = 0; i < ICE_MAX_VSI; i++)
1246 ice_clear_vsi_ctx(hw, i);
1250 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1251 * @hw: pointer to the HW struct
1252 * @vsi_handle: unique VSI handle provided by drivers
1253 * @vsi_ctx: pointer to a VSI context struct
1254 * @cd: pointer to command details structure or NULL
1256 * Add a VSI context to the hardware also add it into the VSI handle list.
1257 * If this function gets called after reset for existing VSIs then update
1258 * with the new HW VSI number in the corresponding VSI handle list entry.
1261 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1262 struct ice_sq_cd *cd)
1264 struct ice_vsi_ctx *tmp_vsi_ctx;
1265 enum ice_status status;
/* reject out-of-range handles before touching firmware */
1267 if (vsi_handle >= ICE_MAX_VSI)
1268 return ICE_ERR_PARAM;
1269 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
/* NOTE(review): the status check after the AQ call is on a line elided
 * from this listing - confirm against the full source.
 */
1272 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
/* no saved context for this handle yet: allocate and store one */
1274 /* Create a new VSI context */
1275 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1276 ice_malloc(hw, sizeof(*tmp_vsi_ctx))_
/* on allocation failure, undo the firmware add so no VSI leaks */
1278 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1279 return ICE_ERR_NO_MEMORY;
1281 *tmp_vsi_ctx = *vsi_ctx;
1283 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
/* handle already known (e.g. after reset): refresh the HW VSI number */
1285 /* update with new HW VSI num */
1286 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1293 * ice_free_vsi- free VSI context from hardware and VSI handle list
1294 * @hw: pointer to the HW struct
1295 * @vsi_handle: unique VSI handle
1296 * @vsi_ctx: pointer to a VSI context struct
1297 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1298 * @cd: pointer to command details structure or NULL
1300 * Free VSI context info from hardware as well as from VSI handle list
1303 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1304 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1306 enum ice_status status;
1308 if (!ice_is_vsi_valid(hw, vsi_handle))
1309 return ICE_ERR_PARAM;
1310 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1311 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1313 ice_clear_vsi_ctx(hw, vsi_handle);
1319 * @hw: pointer to the HW struct
1320 * @vsi_handle: unique VSI handle
1321 * @vsi_ctx: pointer to a VSI context struct
1322 * @cd: pointer to command details structure or NULL
1324 * Update VSI context in the hardware
1327 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1328 struct ice_sq_cd *cd)
1330 if (!ice_is_vsi_valid(hw, vsi_handle))
1331 return ICE_ERR_PARAM;
1332 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1333 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1337 * ice_aq_get_vsi_params
1338 * @hw: pointer to the HW struct
1339 * @vsi_ctx: pointer to a VSI context struct
1340 * @cd: pointer to command details structure or NULL
1342 * Get VSI context info from hardware (0x0212)
1345 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1346 struct ice_sq_cd *cd)
1348 struct ice_aqc_add_get_update_free_vsi *cmd;
1349 struct ice_aqc_get_vsi_resp *resp;
1350 struct ice_aq_desc desc;
1351 enum ice_status status;
1353 cmd = &desc.params.vsi_cmd;
1354 resp = &desc.params.get_vsi_resp;
1356 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1358 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1360 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1361 sizeof(vsi_ctx->info), cd);
1363 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1365 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1366 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1373 * ice_aq_add_update_mir_rule - add/update a mirror rule
1374 * @hw: pointer to the HW struct
1375 * @rule_type: Rule Type
1376 * @dest_vsi: VSI number to which packets will be mirrored
1377 * @count: length of the list
1378 * @mr_buf: buffer for list of mirrored VSI numbers
1379 * @cd: pointer to command details structure or NULL
1382 * Add/Update Mirror Rule (0x260).
1385 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1386 u16 count, struct ice_mir_rule_buf *mr_buf,
1387 struct ice_sq_cd *cd, u16 *rule_id)
1389 struct ice_aqc_add_update_mir_rule *cmd;
1390 struct ice_aq_desc desc;
1391 enum ice_status status;
1392 __le16 *mr_list = NULL;
/* Validate parameter combinations per rule type: virtual-port rules
 * require a VSI list; physical-port rules must not carry one.
 */
1395 switch (rule_type) {
1396 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1397 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1398 /* Make sure count and mr_buf are set for these rule_types */
1399 if (!(count && mr_buf))
1400 return ICE_ERR_PARAM;
1402 buf_size = count * sizeof(__le16);
1403 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
/* NOTE(review): the NULL check on mr_list sits on a line elided from
 * this listing - ICE_ERR_NO_MEMORY below is its failure path.
 */
1405 return ICE_ERR_NO_MEMORY;
1407 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1408 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1409 /* Make sure count and mr_buf are not set for these
1412 if (count || mr_buf)
1413 return ICE_ERR_PARAM;
1416 ice_debug(hw, ICE_DBG_SW,
1417 "Error due to unsupported rule_type %u\n", rule_type);
1418 return ICE_ERR_OUT_OF_RANGE;
1421 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1423 /* Pre-process 'mr_buf' items for add/update of virtual port
1424 * ingress/egress mirroring (but not physical port ingress/egress
1430 for (i = 0; i < count; i++) {
1433 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1435 /* Validate specified VSI number, make sure it is less
1436 * than ICE_MAX_VSI, if not return with error.
1438 if (id >= ICE_MAX_VSI) {
1439 ice_debug(hw, ICE_DBG_SW,
1440 "Error VSI index (%u) out-of-range\n",
/* bail out without leaking the VSI list buffer */
1442 ice_free(hw, mr_list);
1443 return ICE_ERR_OUT_OF_RANGE;
1446 /* add VSI to mirror rule */
1449 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1450 else /* remove VSI from mirror rule */
1451 mr_list[i] = CPU_TO_LE16(id);
/* ICE_INVAL_MIRROR_RULE_ID means "allocate a new rule"; otherwise the
 * existing rule identified by *rule_id is updated.
 */
1455 cmd = &desc.params.add_update_rule;
1456 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1457 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1458 ICE_AQC_RULE_ID_VALID_M);
1459 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1460 cmd->num_entries = CPU_TO_LE16(count);
1461 cmd->dest = CPU_TO_LE16(dest_vsi);
1463 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
/* on success firmware echoes the (possibly new) rule ID back */
1465 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1467 ice_free(hw, mr_list);
1473 * ice_aq_delete_mir_rule - delete a mirror rule
1474 * @hw: pointer to the HW struct
1475 * @rule_id: Mirror rule ID (to be deleted)
1476 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1477 * otherwise it is returned to the shared pool
1478 * @cd: pointer to command details structure or NULL
1480 * Delete Mirror Rule (0x261).
1483 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1484 struct ice_sq_cd *cd)
1486 struct ice_aqc_delete_mir_rule *cmd;
1487 struct ice_aq_desc desc;
1489 /* rule_id should be in the range 0...63 */
1490 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1491 return ICE_ERR_OUT_OF_RANGE;
1493 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1495 cmd = &desc.params.del_rule;
1496 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1497 cmd->rule_id = CPU_TO_LE16(rule_id);
1500 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1502 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1506 * ice_aq_alloc_free_vsi_list
1507 * @hw: pointer to the HW struct
1508 * @vsi_list_id: VSI list ID returned or used for lookup
1509 * @lkup_type: switch rule filter lookup type
1510 * @opc: switch rules population command type - pass in the command opcode
1512 * allocates or free a VSI list resource
1514 static enum ice_status
1515 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1516 enum ice_sw_lkup_type lkup_type,
1517 enum ice_adminq_opc opc)
1519 struct ice_aqc_alloc_free_res_elem *sw_buf;
1520 struct ice_aqc_res_elem *vsi_ele;
1521 enum ice_status status;
/* a single-element alloc/free request buffer */
1524 buf_len = sizeof(*sw_buf);
1525 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1526 ice_malloc(hw, buf_len);
/* NOTE(review): allocation-failure guard is on a line elided from this
 * listing; ICE_ERR_NO_MEMORY below is its failure path.
 */
1528 return ICE_ERR_NO_MEMORY;
1529 sw_buf->num_elems = CPU_TO_LE16(1);
/* replication lists serve most lookup types; VLAN lookups use a
 * pruning list instead, anything else is a parameter error.
 */
1531 if (lkup_type == ICE_SW_LKUP_MAC ||
1532 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1533 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1534 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1535 lkup_type == ICE_SW_LKUP_PROMISC ||
1536 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1537 lkup_type == ICE_SW_LKUP_LAST) {
1538 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1539 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1541 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1543 status = ICE_ERR_PARAM;
1544 goto ice_aq_alloc_free_vsi_list_exit;
/* freeing requires naming the list to release */
1547 if (opc == ice_aqc_opc_free_res)
1548 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1550 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1552 goto ice_aq_alloc_free_vsi_list_exit;
/* on allocation, firmware returns the new list ID in the element */
1554 if (opc == ice_aqc_opc_alloc_res) {
1555 vsi_ele = &sw_buf->elem[0];
1556 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1559 ice_aq_alloc_free_vsi_list_exit:
1560 ice_free(hw, sw_buf);
1565 * ice_aq_set_storm_ctrl - Sets storm control configuration
1566 * @hw: pointer to the HW struct
1567 * @bcast_thresh: represents the upper threshold for broadcast storm control
1568 * @mcast_thresh: represents the upper threshold for multicast storm control
1569 * @ctl_bitmask: storm control control knobs
1571 * Sets the storm control configuration (0x0280)
1574 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1577 struct ice_aqc_storm_cfg *cmd;
1578 struct ice_aq_desc desc;
1580 cmd = &desc.params.storm_conf;
1582 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
1584 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1585 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1586 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1588 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1592 * ice_aq_get_storm_ctrl - gets storm control configuration
1593 * @hw: pointer to the HW struct
1594 * @bcast_thresh: represents the upper threshold for broadcast storm control
1595 * @mcast_thresh: represents the upper threshold for multicast storm control
1596 * @ctl_bitmask: storm control control knobs
1598 * Gets the storm control configuration (0x0281)
1601 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
1604 enum ice_status status;
1605 struct ice_aq_desc desc;
1607 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1609 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1611 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
1614 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1617 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1620 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1627 * ice_aq_sw_rules - add/update/remove switch rules
1628 * @hw: pointer to the HW struct
1629 * @rule_list: pointer to switch rule population list
1630 * @rule_list_sz: total size of the rule list in bytes
1631 * @num_rules: number of switch rules in the rule_list
1632 * @opc: switch rules population command type - pass in the command opcode
1633 * @cd: pointer to command details structure or NULL
1635 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1637 static enum ice_status
1638 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1639 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1641 struct ice_aq_desc desc;
1643 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1645 if (opc != ice_aqc_opc_add_sw_rules &&
1646 opc != ice_aqc_opc_update_sw_rules &&
1647 opc != ice_aqc_opc_remove_sw_rules)
1648 return ICE_ERR_PARAM;
1650 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1652 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1653 desc.params.sw_rules.num_rules_fltr_entry_index =
1654 CPU_TO_LE16(num_rules);
1655 return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1659 * ice_aq_add_recipe - add switch recipe
1660 * @hw: pointer to the HW struct
1661 * @s_recipe_list: pointer to switch rule population list
1662 * @num_recipes: number of switch recipes in the list
1663 * @cd: pointer to command details structure or NULL
1668 ice_aq_add_recipe(struct ice_hw *hw,
1669 struct ice_aqc_recipe_data_elem *s_recipe_list,
1670 u16 num_recipes, struct ice_sq_cd *cd)
1672 struct ice_aqc_add_get_recipe *cmd;
1673 struct ice_aq_desc desc;
1676 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1677 cmd = &desc.params.add_get_recipe;
1678 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1680 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
1681 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1683 buf_size = num_recipes * sizeof(*s_recipe_list);
1685 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1689 * ice_aq_get_recipe - get switch recipe
1690 * @hw: pointer to the HW struct
1691 * @s_recipe_list: pointer to switch rule population list
1692 * @num_recipes: pointer to the number of recipes (input and output)
1693 * @recipe_root: root recipe number of recipe(s) to retrieve
1694 * @cd: pointer to command details structure or NULL
1698 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1699 * On output, *num_recipes will equal the number of entries returned in
1702 * The caller must supply enough space in s_recipe_list to hold all possible
1703 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1706 ice_aq_get_recipe(struct ice_hw *hw,
1707 struct ice_aqc_recipe_data_elem *s_recipe_list,
1708 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1710 struct ice_aqc_add_get_recipe *cmd;
1711 struct ice_aq_desc desc;
1712 enum ice_status status;
1715 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1716 return ICE_ERR_PARAM;
1718 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1719 cmd = &desc.params.add_get_recipe;
1720 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1722 cmd->return_index = CPU_TO_LE16(recipe_root);
1723 cmd->num_sub_recipes = 0;
1725 buf_size = *num_recipes * sizeof(*s_recipe_list);
1727 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1728 /* cppcheck-suppress constArgument */
1729 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1735 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1736 * @hw: pointer to the HW struct
1737 * @profile_id: package profile ID to associate the recipe with
1738 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1739 * @cd: pointer to command details structure or NULL
1740 * Recipe to profile association (0x0291)
1743 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1744 struct ice_sq_cd *cd)
1746 struct ice_aqc_recipe_to_profile *cmd;
1747 struct ice_aq_desc desc;
1749 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1750 cmd = &desc.params.recipe_to_profile;
1751 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1752 cmd->profile_id = CPU_TO_LE16(profile_id);
1753 /* Set the recipe ID bit in the bitmask to let the device know which
1754 * profile we are associating the recipe to
1756 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1757 ICE_NONDMA_TO_NONDMA);
1759 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1763 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1764 * @hw: pointer to the HW struct
1765 * @profile_id: package profile ID to associate the recipe with
1766 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1767 * @cd: pointer to command details structure or NULL
1768 * Associate profile ID with given recipe (0x0293)
1771 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1772 struct ice_sq_cd *cd)
1774 struct ice_aqc_recipe_to_profile *cmd;
1775 struct ice_aq_desc desc;
1776 enum ice_status status;
1778 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1779 cmd = &desc.params.recipe_to_profile;
1780 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
1781 cmd->profile_id = CPU_TO_LE16(profile_id);
1783 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1785 ice_memcpy(r_bitmap, cmd->recipe_assoc,
1786 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
1792 * ice_alloc_recipe - add recipe resource
1793 * @hw: pointer to the hardware structure
1794 * @rid: recipe ID returned as response to AQ call
1796 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1798 struct ice_aqc_alloc_free_res_elem *sw_buf;
1799 enum ice_status status;
1802 buf_len = sizeof(*sw_buf);
1803 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1805 return ICE_ERR_NO_MEMORY;
1807 sw_buf->num_elems = CPU_TO_LE16(1);
1808 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
1809 ICE_AQC_RES_TYPE_S) |
1810 ICE_AQC_RES_TYPE_FLAG_SHARED);
1811 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1812 ice_aqc_opc_alloc_res, NULL);
1814 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
1815 ice_free(hw, sw_buf);
1820 /* ice_init_port_info - Initialize port_info with switch configuration data
1821 * @pi: pointer to port_info
1822 * @vsi_port_num: VSI number or port number
1823 * @type: Type of switch element (port or VSI)
1824 * @swid: switch ID of the switch the element is attached to
1825 * @pf_vf_num: PF or VF number
1826 * @is_vf: true if the element is a VF, false otherwise
1829 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
1830 u16 swid, u16 pf_vf_num, bool is_vf)
1833 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1834 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
1836 pi->pf_vf_num = pf_vf_num;
1838 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
1839 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
1842 ice_debug(pi->hw, ICE_DBG_SW,
1843 "incorrect VSI/port type received\n");
1848 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
1849 * @hw: pointer to the hardware structure
1851 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
1853 struct ice_aqc_get_sw_cfg_resp *rbuf;
1854 enum ice_status status;
/* this device exposes a single physical port per function */
1861 num_total_ports = 1;
1863 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
1864 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
/* NOTE(review): the NULL check on rbuf is on a line elided from this
 * listing; ICE_ERR_NO_MEMORY below is its failure path.
 */
1867 return ICE_ERR_NO_MEMORY;
1869 /* Multiple calls to ice_aq_get_sw_cfg may be required
1870 * to get all the switch configuration information. The need
1871 * for additional calls is indicated by ice_aq_get_sw_cfg
1872 * writing a non-zero value in req_desc
1875 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
1876 &req_desc, &num_elems, NULL);
1881 for (i = 0; i < num_elems; i++) {
1882 struct ice_aqc_get_sw_cfg_resp_elem *ele;
1883 u16 pf_vf_num, swid, vsi_port_num;
1887 ele = rbuf[i].elements;
/* decode the packed response element fields */
1888 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
1889 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
1891 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
1892 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
1894 swid = LE16_TO_CPU(ele->swid);
1896 if (LE16_TO_CPU(ele->pf_vf_num) &
1897 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
/* element type lives in the upper bits of vsi_port_num */
1900 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
1901 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
1904 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1905 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
1906 if (j == num_total_ports) {
1907 ice_debug(hw, ICE_DBG_SW,
1908 "more ports than expected\n");
1909 status = ICE_ERR_CFG;
1912 ice_init_port_info(hw->port_info,
1913 vsi_port_num, res_type, swid,
/* keep requesting pages until firmware clears req_desc */
1921 } while (req_desc && !status);
1924 ice_free(hw, (void *)rbuf);
1929 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
1930 * @hw: pointer to the hardware structure
1931 * @fi: filter info structure to fill/update
1933 * This helper function populates the lb_en and lan_en elements of the provided
1934 * ice_fltr_info struct using the switch's type and characteristics of the
1935 * switch rule being configured.
1937 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
/* only Tx-direction forwarding filters need loopback/LAN-enable tuning */
1941 if ((fi->flag & ICE_FLTR_TX) &&
1942 (fi->fltr_act == ICE_FWD_TO_VSI ||
1943 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1944 fi->fltr_act == ICE_FWD_TO_Q ||
1945 fi->fltr_act == ICE_FWD_TO_QGRP)) {
1946 /* Setting LB for prune actions will result in replicated
1947 * packets to the internal switch that will be dropped.
1949 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
1952 /* Set lan_en to TRUE if
1953 * 1. The switch is a VEB AND
1955 * 2.1 The lookup is a directional lookup like ethertype,
1956 * promiscuous, ethertype-MAC, promiscuous-VLAN
1957 * and default-port OR
1958 * 2.2 The lookup is VLAN, OR
1959 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
1960 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
1964 * The switch is a VEPA.
1966 * In all other cases, the LAN enable has to be set to false.
1969 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1970 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
1971 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1972 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1973 fi->lkup_type == ICE_SW_LKUP_DFLT ||
1974 fi->lkup_type == ICE_SW_LKUP_VLAN ||
1975 (fi->lkup_type == ICE_SW_LKUP_MAC &&
1976 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
1977 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
1978 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
1987 * ice_fill_sw_rule - Helper function to fill switch rule structure
1988 * @hw: pointer to the hardware structure
1989 * @f_info: entry containing packet forwarding information
1990 * @s_rule: switch rule structure to be filled in based on mac_entry
1991 * @opc: switch rules population command type - pass in the command opcode
1994 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
1995 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
/* sentinel: any value > ICE_MAX_VLAN_ID means "no VLAN to program" */
1997 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
/* removal only needs the rule index; no header or action is built */
2005 if (opc == ice_aqc_opc_remove_sw_rules) {
2006 s_rule->pdata.lkup_tx_rx.act = 0;
2007 s_rule->pdata.lkup_tx_rx.index =
2008 CPU_TO_LE16(f_info->fltr_rule_id);
2009 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
2013 eth_hdr_sz = sizeof(dummy_eth_header);
2014 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
2016 /* initialize the ether header with a dummy header */
2017 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
2018 ice_fill_sw_info(hw, f_info);
/* build the 32-bit single action word from the forwarding action */
2020 switch (f_info->fltr_act) {
2021 case ICE_FWD_TO_VSI:
2022 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
2023 ICE_SINGLE_ACT_VSI_ID_M;
2024 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2025 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2026 ICE_SINGLE_ACT_VALID_BIT;
2028 case ICE_FWD_TO_VSI_LIST:
2029 act |= ICE_SINGLE_ACT_VSI_LIST;
2030 act |= (f_info->fwd_id.vsi_list_id <<
2031 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
2032 ICE_SINGLE_ACT_VSI_LIST_ID_M;
2033 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2034 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2035 ICE_SINGLE_ACT_VALID_BIT;
2038 act |= ICE_SINGLE_ACT_TO_Q;
2039 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2040 ICE_SINGLE_ACT_Q_INDEX_M;
2042 case ICE_DROP_PACKET:
2043 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
2044 ICE_SINGLE_ACT_VALID_BIT;
2046 case ICE_FWD_TO_QGRP:
/* queue-group size is encoded as log2 of the group size */
2047 q_rgn = f_info->qgrp_size > 0 ?
2048 (u8)ice_ilog2(f_info->qgrp_size) : 0;
2049 act |= ICE_SINGLE_ACT_TO_Q;
2050 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2051 ICE_SINGLE_ACT_Q_INDEX_M;
2052 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
2053 ICE_SINGLE_ACT_Q_REGION_M;
2060 act |= ICE_SINGLE_ACT_LB_ENABLE;
2062 act |= ICE_SINGLE_ACT_LAN_ENABLE;
/* patch the dummy header per lookup type: DA, ethertype and/or VLAN */
2064 switch (f_info->lkup_type) {
2065 case ICE_SW_LKUP_MAC:
2066 daddr = f_info->l_data.mac.mac_addr;
2068 case ICE_SW_LKUP_VLAN:
2069 vlan_id = f_info->l_data.vlan.vlan_id;
2070 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
2071 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
2072 act |= ICE_SINGLE_ACT_PRUNE;
2073 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
2076 case ICE_SW_LKUP_ETHERTYPE_MAC:
2077 daddr = f_info->l_data.ethertype_mac.mac_addr;
2079 case ICE_SW_LKUP_ETHERTYPE:
2080 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2081 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
2083 case ICE_SW_LKUP_MAC_VLAN:
2084 daddr = f_info->l_data.mac_vlan.mac_addr;
2085 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2087 case ICE_SW_LKUP_PROMISC_VLAN:
2088 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2090 case ICE_SW_LKUP_PROMISC:
2091 daddr = f_info->l_data.mac_vlan.mac_addr;
2097 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
2098 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
2099 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
2101 /* Recipe set depending on lookup type */
2102 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
2103 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
2104 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2107 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
2108 ICE_NONDMA_TO_NONDMA);
/* program the VLAN TCI only when a valid VLAN ID was selected above */
2110 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
2111 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
2112 *off = CPU_TO_BE16(vlan_id);
2115 /* Create the switch rule with the final dummy Ethernet header */
2116 if (opc != ice_aqc_opc_update_sw_rules)
2117 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
2121 * ice_add_marker_act
2122 * @hw: pointer to the hardware structure
2123 * @m_ent: the management entry for which sw marker needs to be added
2124 * @sw_marker: sw marker to tag the Rx descriptor with
2125 * @l_id: large action resource ID
2127 * Create a large action to hold software marker and update the switch rule
2128 * entry pointed by m_ent with newly created large action
2130 static enum ice_status
2131 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2132 u16 sw_marker, u16 l_id)
2134 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
2135 /* For software marker we need 3 large actions
2136 * 1. FWD action: FWD TO VSI or VSI LIST
2137 * 2. GENERIC VALUE action to hold the profile ID
2138 * 3. GENERIC VALUE action to hold the software marker ID
2140 const u16 num_lg_acts = 3;
2141 enum ice_status status;
/* markers are only supported on MAC lookup rules */
2147 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2148 return ICE_ERR_PARAM;
2150 /* Create two back-to-back switch rules and submit them to the HW using
2151 * one memory buffer:
2155 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2156 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2157 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2159 return ICE_ERR_NO_MEMORY;
/* second rule lives immediately after the large action in the buffer */
2161 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2163 /* Fill in the first switch rule i.e. large action */
2164 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2165 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2166 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2168 /* First action VSI forwarding or VSI list forwarding depending on how
2171 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2172 m_ent->fltr_info.fwd_id.hw_vsi_id;
2174 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2175 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2176 ICE_LG_ACT_VSI_LIST_ID_M;
2177 if (m_ent->vsi_count > 1)
2178 act |= ICE_LG_ACT_VSI_LIST;
2179 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2181 /* Second action descriptor type */
2182 act = ICE_LG_ACT_GENERIC;
2184 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2185 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
/* place the marker in the Rx descriptor profile-index slot */
2187 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2188 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2190 /* Third action Marker value */
2191 act |= ICE_LG_ACT_GENERIC;
2192 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2193 ICE_LG_ACT_GENERIC_VALUE_M;
2195 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
2197 /* call the fill switch rule to fill the lookup Tx Rx structure */
2198 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2199 ice_aqc_opc_update_sw_rules);
2201 /* Update the action to point to the large action ID */
2202 rx_tx->pdata.lkup_tx_rx.act =
2203 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
2204 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2205 ICE_SINGLE_ACT_PTR_VAL_M));
2207 /* Use the filter rule ID of the previously created rule with single
2208 * act. Once the update happens, hardware will treat this as large
2211 rx_tx->pdata.lkup_tx_rx.index =
2212 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
2214 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2215 ice_aqc_opc_update_sw_rules, NULL);
/* remember the large action and marker on success */
2217 m_ent->lg_act_idx = l_id;
2218 m_ent->sw_marker_id = sw_marker;
2221 ice_free(hw, lg_act);
2226 * ice_add_counter_act - add/update filter rule with counter action
2227 * @hw: pointer to the hardware structure
2228 * @m_ent: the management entry for which counter needs to be added
2229 * @counter_id: VLAN counter ID returned as part of allocate resource
2230 * @l_id: large action resource ID
2232 static enum ice_status
2233 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2234 u16 counter_id, u16 l_id)
2236 struct ice_aqc_sw_rules_elem *lg_act;
2237 struct ice_aqc_sw_rules_elem *rx_tx;
2238 enum ice_status status;
2239 /* 2 actions will be added while adding a large action counter */
2240 const int num_acts = 2;
/* counters are only supported on MAC lookup rules */
2247 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2248 return ICE_ERR_PARAM;
2250 /* Create two back-to-back switch rules and submit them to the HW using
2251 * one memory buffer:
2255 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2256 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2257 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
2260 return ICE_ERR_NO_MEMORY;
/* second rule lives immediately after the large action in the buffer */
2262 rx_tx = (struct ice_aqc_sw_rules_elem *)
2263 ((u8 *)lg_act + lg_act_size);
2265 /* Fill in the first switch rule i.e. large action */
2266 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2267 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2268 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2270 /* First action VSI forwarding or VSI list forwarding depending on how
2273 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2274 m_ent->fltr_info.fwd_id.hw_vsi_id;
2276 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2277 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2278 ICE_LG_ACT_VSI_LIST_ID_M;
2279 if (m_ent->vsi_count > 1)
2280 act |= ICE_LG_ACT_VSI_LIST;
2281 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2283 /* Second action counter ID */
2284 act = ICE_LG_ACT_STAT_COUNT;
2285 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2286 ICE_LG_ACT_STAT_COUNT_M;
2287 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2289 /* call the fill switch rule to fill the lookup Tx Rx structure */
2290 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2291 ice_aqc_opc_update_sw_rules);
/* redirect the lookup rule to the large action just built */
2293 act = ICE_SINGLE_ACT_PTR;
2294 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2295 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2297 /* Use the filter rule ID of the previously created rule with single
2298 * act. Once the update happens, hardware will treat this as large
2301 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2302 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
2304 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2305 ice_aqc_opc_update_sw_rules, NULL);
/* remember the large action and counter on success */
2307 m_ent->lg_act_idx = l_id;
2308 m_ent->counter_index = counter_id;
2311 ice_free(hw, lg_act);
2316 * ice_create_vsi_list_map
2317 * @hw: pointer to the hardware structure
2318 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2319 * @num_vsi: number of VSI handles in the array
2320 * @vsi_list_id: VSI list ID generated as part of allocate resource
2322 * Helper function to create a new entry of VSI list ID to VSI mapping
2323 * using the given VSI list ID
/* ice_create_vsi_list_map - allocate a VSI-list-ID -> VSI bitmap entry and
 * link it into sw->vsi_list_map_head for later lookup.
 * NOTE(review): listing elides lines (numbering gaps 2327-2328, 2331-2332,
 * 2334-2337: parameter line, NULL check, return); code kept verbatim.
 */
2325 static struct ice_vsi_list_map_info *
2326 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2329 struct ice_switch_info *sw = hw->switch_info;
2330 struct ice_vsi_list_map_info *v_map;
2333 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2338 v_map->vsi_list_id = vsi_list_id;
/* Record each member VSI handle in the map's bitmap. */
2340 for (i = 0; i < num_vsi; i++)
2341 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
2343 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2348 * ice_update_vsi_list_rule
2349 * @hw: pointer to the hardware structure
2350 * @vsi_handle_arr: array of VSI handles to form a VSI list
2351 * @num_vsi: number of VSI handles in the array
2352 * @vsi_list_id: VSI list ID generated as part of allocate resource
2353 * @remove: Boolean value to indicate if this is a remove action
2354 * @opc: switch rules population command type - pass in the command opcode
2355 * @lkup_type: lookup type of the filter
2357 * Call AQ command to add a new switch rule or update existing switch rule
2358 * using the given VSI list ID
/* ice_update_vsi_list_rule - build and send one VSI-list (or prune-list)
 * AQ switch rule that sets or clears the given VSI handles in list
 * vsi_list_id. Translates handles to HW VSI numbers before sending.
 * NOTE(review): listing elides lines (numbering gaps, e.g. the num_vsi==0
 * guard before 2372, the malloc NULL test before 2392, loop close and
 * return); code kept verbatim as extracted.
 */
2360 static enum ice_status
2361 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2362 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2363 enum ice_sw_lkup_type lkup_type)
2365 struct ice_aqc_sw_rules_elem *s_rule;
2366 enum ice_status status;
2372 return ICE_ERR_PARAM;
/* Pick rule type: most lookup types use a VSI list; VLAN uses the
 * pruning list variant. remove selects the CLEAR opcode form.
 */
2374 if (lkup_type == ICE_SW_LKUP_MAC ||
2375 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2376 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2377 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2378 lkup_type == ICE_SW_LKUP_PROMISC ||
2379 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2380 lkup_type == ICE_SW_LKUP_LAST)
2381 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2382 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2383 else if (lkup_type == ICE_SW_LKUP_VLAN)
2384 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2385 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2387 return ICE_ERR_PARAM;
2389 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2390 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2392 return ICE_ERR_NO_MEMORY;
2393 for (i = 0; i < num_vsi; i++) {
2394 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2395 status = ICE_ERR_PARAM;
2398 /* AQ call requires hw_vsi_id(s) */
2399 s_rule->pdata.vsi_list.vsi[i] =
2400 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2403 s_rule->type = CPU_TO_LE16(rule_type);
2404 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2405 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2407 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2410 ice_free(hw, s_rule);
2415 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2416 * @hw: pointer to the HW struct
2417 * @vsi_handle_arr: array of VSI handles to form a VSI list
2418 * @num_vsi: number of VSI handles in the array
2419 * @vsi_list_id: stores the ID of the VSI list to be created
2420 * @lkup_type: switch rule filter's lookup type
/* ice_create_vsi_list_rule - allocate a new VSI list resource from FW,
 * then populate it with the given VSI handles via an add-switch-rules call.
 * Returns the new list's ID through *vsi_list_id.
 * NOTE(review): listing elides lines (numbering gaps 2425, 2427, 2430-2432:
 * opening brace and status check after the alloc call); kept verbatim.
 */
2422 static enum ice_status
2423 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2424 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2426 enum ice_status status;
2428 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2429 ice_aqc_opc_alloc_res);
2433 /* Update the newly created VSI list to include the specified VSIs */
2434 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2435 *vsi_list_id, false,
2436 ice_aqc_opc_add_sw_rules, lkup_type);
2440 * ice_create_pkt_fwd_rule
2441 * @hw: pointer to the hardware structure
2442 * @recp_list: corresponding filter management list
2443 * @f_entry: entry containing packet forwarding information
2445 * Create switch rule with given filter information and add an entry
2446 * to the corresponding filter management list to track this switch rule
/* ice_create_pkt_fwd_rule - program a new forwarding switch rule in HW and
 * add a matching management entry to recp_list->filt_rules so the driver
 * can track (and later remove/update) the rule.
 * NOTE(review): listing elides lines (numbering gaps: NULL checks after
 * both allocations, status check after ice_aq_sw_rules, final return);
 * code kept verbatim as extracted.
 */
2449 static enum ice_status
2450 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2451 struct ice_fltr_list_entry *f_entry)
2453 struct ice_fltr_mgmt_list_entry *fm_entry;
2454 struct ice_aqc_sw_rules_elem *s_rule;
2455 enum ice_status status;
2457 s_rule = (struct ice_aqc_sw_rules_elem *)
2458 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2460 return ICE_ERR_NO_MEMORY;
2461 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2462 ice_malloc(hw, sizeof(*fm_entry));
2464 status = ICE_ERR_NO_MEMORY;
2465 goto ice_create_pkt_fwd_rule_exit;
2468 fm_entry->fltr_info = f_entry->fltr_info;
2470 /* Initialize all the fields for the management entry */
2471 fm_entry->vsi_count = 1;
2472 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2473 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2474 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2476 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2477 ice_aqc_opc_add_sw_rules);
2479 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2480 ice_aqc_opc_add_sw_rules, NULL);
2482 ice_free(hw, fm_entry);
2483 goto ice_create_pkt_fwd_rule_exit;
/* FW returns the rule index; record it in both the caller's entry and
 * the management entry.
 */
2486 f_entry->fltr_info.fltr_rule_id =
2487 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2488 fm_entry->fltr_info.fltr_rule_id =
2489 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2491 /* The book keeping entries will get removed when base driver
2492 * calls remove filter AQ command
2494 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
2496 ice_create_pkt_fwd_rule_exit:
2497 ice_free(hw, s_rule);
2502 * ice_update_pkt_fwd_rule
2503 * @hw: pointer to the hardware structure
2504 * @f_info: filter information for switch rule
2506 * Call AQ command to update a previously created switch rule with a
/* ice_update_pkt_fwd_rule - re-send an existing switch rule (identified by
 * f_info->fltr_rule_id) with updated forwarding information via the
 * update-switch-rules AQ opcode.
 * NOTE(review): listing elides lines (NULL check after ice_malloc, final
 * return and closing brace); code kept verbatim as extracted.
 */
2509 static enum ice_status
2510 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2512 struct ice_aqc_sw_rules_elem *s_rule;
2513 enum ice_status status;
2515 s_rule = (struct ice_aqc_sw_rules_elem *)
2516 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2518 return ICE_ERR_NO_MEMORY;
2520 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
/* Target the existing rule slot rather than allocating a new one. */
2522 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2524 /* Update switch rule with new rule set to forward VSI list */
2525 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2526 ice_aqc_opc_update_sw_rules, NULL);
2528 ice_free(hw, s_rule);
2533 * ice_update_sw_rule_bridge_mode
2534 * @hw: pointer to the HW struct
2536 * Updates unicast switch filter rules based on VEB/VEPA mode
/* ice_update_sw_rule_bridge_mode - walk every tracked MAC filter rule and
 * re-program the unicast Tx forwarding rules so they reflect the current
 * VEB/VEPA bridge mode. Holds the MAC recipe's rule lock for the walk.
 * NOTE(review): listing elides lines (loop-iteration macro argument,
 * error-break after the update call, final return); kept verbatim.
 */
2538 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2540 struct ice_switch_info *sw = hw->switch_info;
2541 struct ice_fltr_mgmt_list_entry *fm_entry;
2542 enum ice_status status = ICE_SUCCESS;
2543 struct LIST_HEAD_TYPE *rule_head;
2544 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2546 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2547 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2549 ice_acquire_lock(rule_lock);
2550 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2552 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2553 u8 *addr = fi->l_data.mac.mac_addr;
2555 /* Update unicast Tx rules to reflect the selected
/* Only Tx-direction unicast rules with a forwarding action need the
 * bridge-mode refresh; Rx and multicast rules are untouched.
 */
2558 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2559 (fi->fltr_act == ICE_FWD_TO_VSI ||
2560 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2561 fi->fltr_act == ICE_FWD_TO_Q ||
2562 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2563 status = ice_update_pkt_fwd_rule(hw, fi);
2569 ice_release_lock(rule_lock);
2575 * ice_add_update_vsi_list
2576 * @hw: pointer to the hardware structure
2577 * @m_entry: pointer to current filter management list entry
2578 * @cur_fltr: filter information from the book keeping entry
2579 * @new_fltr: filter information with the new VSI to be added
2581 * Call AQ command to add or update previously created VSI list with new VSI.
2583 * Helper function to do book keeping associated with adding filter information
2584 * The algorithm to do the book keeping is described below :
2585 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2586 * if only one VSI has been added till now
2587 * Allocate a new VSI list and add two VSIs
2588 * to this list using switch rule command
2589 * Update the previously created switch rule with the
2590 * newly created VSI list ID
2591 * if a VSI list was previously created
2592 * Add the new VSI to the previously created VSI list set
2593 * using the update switch rule command
/* ice_add_update_vsi_list - subscribe a new VSI to an existing filter.
 * First subscriber transition (1 -> 2 VSIs): create a 2-entry VSI list,
 * re-point the original rule at it, and create the VSI-list map.
 * Subsequent subscribers: add the VSI to the already-existing list.
 * NOTE(review): listing elides lines (status checks after several calls,
 * vsi_list_id argument lines, else-branch keyword, final return); code
 * kept verbatim as extracted.
 */
2595 static enum ice_status
2596 ice_add_update_vsi_list(struct ice_hw *hw,
2597 struct ice_fltr_mgmt_list_entry *m_entry,
2598 struct ice_fltr_info *cur_fltr,
2599 struct ice_fltr_info *new_fltr)
2601 enum ice_status status = ICE_SUCCESS;
2602 u16 vsi_list_id = 0;
/* Queue/queue-group forwarding rules cannot share a VSI list. */
2604 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2605 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2606 return ICE_ERR_NOT_IMPL;
2608 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2609 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2610 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2611 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2612 return ICE_ERR_NOT_IMPL;
2614 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2615 /* Only one entry existed in the mapping and it was not already
2616 * a part of a VSI list. So, create a VSI list with the old and
2619 struct ice_fltr_info tmp_fltr;
2620 u16 vsi_handle_arr[2];
2622 /* A rule already exists with the new VSI being added */
2623 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2624 return ICE_ERR_ALREADY_EXISTS;
2626 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2627 vsi_handle_arr[1] = new_fltr->vsi_handle;
2628 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2630 new_fltr->lkup_type);
2634 tmp_fltr = *new_fltr;
2635 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2636 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2637 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2638 /* Update the previous switch rule of "MAC forward to VSI" to
2639 * "MAC fwd to VSI list"
2641 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Keep the book-keeping entry in sync with the HW rule. */
2645 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2646 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2647 m_entry->vsi_list_info =
2648 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2651 /* If this entry was large action then the large action needs
2652 * to be updated to point to FWD to VSI list
2654 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2656 ice_add_marker_act(hw, m_entry,
2657 m_entry->sw_marker_id,
2658 m_entry->lg_act_idx);
2660 u16 vsi_handle = new_fltr->vsi_handle;
2661 enum ice_adminq_opc opcode;
2663 if (!m_entry->vsi_list_info)
2666 /* A rule already exists with the new VSI being added */
2667 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
2670 /* Update the previously created VSI list set with
2671 * the new VSI ID passed in
2673 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2674 opcode = ice_aqc_opc_update_sw_rules;
2676 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2677 vsi_list_id, false, opcode,
2678 new_fltr->lkup_type);
2679 /* update VSI list mapping info with new VSI ID */
2681 ice_set_bit(vsi_handle,
2682 m_entry->vsi_list_info->vsi_map);
2685 m_entry->vsi_count++;
2690 * ice_find_rule_entry - Search a rule entry
2691 * @list_head: head of rule list
2692 * @f_info: rule information
2694 * Helper function to search for a given rule entry
2695 * Returns pointer to entry storing the rule if found
/* ice_find_rule_entry - linear search of a recipe's rule list for an entry
 * whose lookup data and flag match f_info; returns it or NULL.
 * NOTE(review): listing elides lines (loop macro's list-entry argument,
 * the "ret = list_itr" assignment/break, and the return); kept verbatim.
 */
2697 static struct ice_fltr_mgmt_list_entry *
2698 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
2699 struct ice_fltr_info *f_info)
2701 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2703 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
/* Match on raw lookup data (MAC/VLAN/etc.) plus the Rx/Tx flag. */
2705 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2706 sizeof(f_info->l_data)) &&
2707 f_info->flag == list_itr->fltr_info.flag) {
2716 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2717 * @recp_list: VSI lists needs to be searched
2718 * @vsi_handle: VSI handle to be found in VSI list
2719 * @vsi_list_id: VSI list ID found containing vsi_handle
2721 * Helper function to search a VSI list with single entry containing given VSI
2722 * handle element. This can be extended further to search VSI list with more
2723 * than 1 vsi_count. Returns pointer to VSI list entry if found.
/* ice_find_vsi_list_entry - search the recipe's filter rules for a VSI
 * list map containing vsi_handle; on a hit, stores the list ID through
 * the (elided) vsi_list_id out-parameter and returns the map.
 * Advanced-rule recipes iterate ice_adv_fltr_mgmt_list_entry nodes;
 * legacy recipes iterate ice_fltr_mgmt_list_entry nodes with
 * vsi_count == 1 only (single-entry lists).
 * NOTE(review): listing elides lines (third parameter declaration, bit-set
 * second argument, break/return statements); code kept verbatim.
 */
2725 static struct ice_vsi_list_map_info *
2726 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
2729 struct ice_vsi_list_map_info *map_info = NULL;
2730 struct LIST_HEAD_TYPE *list_head;
2732 list_head = &recp_list->filt_rules;
2733 if (recp_list->adv_rule) {
2734 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2736 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2737 ice_adv_fltr_mgmt_list_entry,
2739 if (list_itr->vsi_list_info) {
2740 map_info = list_itr->vsi_list_info;
2741 if (ice_is_bit_set(map_info->vsi_map,
2743 *vsi_list_id = map_info->vsi_list_id;
2749 struct ice_fltr_mgmt_list_entry *list_itr;
2751 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2752 ice_fltr_mgmt_list_entry,
2754 if (list_itr->vsi_count == 1 &&
2755 list_itr->vsi_list_info) {
2756 map_info = list_itr->vsi_list_info;
2757 if (ice_is_bit_set(map_info->vsi_map,
2759 *vsi_list_id = map_info->vsi_list_id;
2769 * ice_add_rule_internal - add rule for a given lookup type
2770 * @hw: pointer to the hardware structure
2771 * @recp_list: recipe list for which rule has to be added
2772 * @lport: logic port number on which function add rule
2773 * @f_entry: structure containing MAC forwarding information
2775 * Adds or updates the rule lists for a given recipe
/* ice_add_rule_internal - add (or extend) a rule on the given recipe list.
 * If no matching rule exists, creates a new packet-forward rule; otherwise
 * folds the new VSI into the existing rule via the VSI-list path.
 * Sets the rule's source: logical port for Rx rules, HW VSI number for Tx.
 * NOTE(review): listing elides lines (opening brace, Tx-branch "new_fltr->src ="
 * line, !m_entry check, final return); code kept verbatim as extracted.
 */
2777 static enum ice_status
2778 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2779 u8 lport, struct ice_fltr_list_entry *f_entry)
2781 struct ice_fltr_info *new_fltr, *cur_fltr;
2782 struct ice_fltr_mgmt_list_entry *m_entry;
2783 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2784 enum ice_status status = ICE_SUCCESS;
2786 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2787 return ICE_ERR_PARAM;
2789 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
2790 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
2791 f_entry->fltr_info.fwd_id.hw_vsi_id =
2792 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2794 rule_lock = &recp_list->filt_rule_lock;
2796 ice_acquire_lock(rule_lock);
2797 new_fltr = &f_entry->fltr_info;
2798 if (new_fltr->flag & ICE_FLTR_RX)
2799 new_fltr->src = lport;
2800 else if (new_fltr->flag & ICE_FLTR_TX)
2802 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2804 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
2806 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
2807 goto exit_add_rule_internal;
2810 cur_fltr = &m_entry->fltr_info;
2811 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2813 exit_add_rule_internal:
2814 ice_release_lock(rule_lock);
2819 * ice_remove_vsi_list_rule
2820 * @hw: pointer to the hardware structure
2821 * @vsi_list_id: VSI list ID generated as part of allocate resource
2822 * @lkup_type: switch rule filter lookup type
2824 * The VSI list should be emptied before this function is called to remove the
/* ice_remove_vsi_list_rule - free the FW resource behind an (assumed empty)
 * VSI list. Builds a CLEAR-type rule buffer, then releases the list ID via
 * the free-resource AQ call.
 * NOTE(review): listing elides lines (opening brace, NULL check after
 * ice_malloc, final return); code kept verbatim as extracted.
 */
2827 static enum ice_status
2828 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2829 enum ice_sw_lkup_type lkup_type)
2831 struct ice_aqc_sw_rules_elem *s_rule;
2832 enum ice_status status;
2835 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
2836 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2838 return ICE_ERR_NO_MEMORY;
2840 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
2841 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2843 /* Free the vsi_list resource that we allocated. It is assumed that the
2844 * list is empty at this point.
2846 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2847 ice_aqc_opc_free_res);
2849 ice_free(hw, s_rule);
2854 * ice_rem_update_vsi_list
2855 * @hw: pointer to the hardware structure
2856 * @vsi_handle: VSI handle of the VSI to remove
2857 * @fm_list: filter management entry for which the VSI list management needs to
/* ice_rem_update_vsi_list - remove vsi_handle from the VSI list backing
 * fm_list. If only one VSI remains (non-VLAN), collapse the rule back to
 * plain FWD_TO_VSI; once the list is no longer needed, free the FW list
 * resource and the driver-side map.
 * NOTE(review): listing elides lines (locals, status checks after AQ
 * calls, remove-flag arguments, final return); code kept verbatim.
 */
2860 static enum ice_status
2861 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
2862 struct ice_fltr_mgmt_list_entry *fm_list)
2864 enum ice_sw_lkup_type lkup_type;
2865 enum ice_status status = ICE_SUCCESS;
2868 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
2869 fm_list->vsi_count == 0)
2870 return ICE_ERR_PARAM;
2872 /* A rule with the VSI being removed does not exist */
2873 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
2874 return ICE_ERR_DOES_NOT_EXIST;
2876 lkup_type = fm_list->fltr_info.lkup_type;
2877 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
2878 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
2879 ice_aqc_opc_update_sw_rules,
/* Mirror the HW-side removal in the driver's book-keeping. */
2884 fm_list->vsi_count--;
2885 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
2887 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
2888 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
2889 struct ice_vsi_list_map_info *vsi_list_info =
2890 fm_list->vsi_list_info;
/* Identify the single remaining VSI so the rule can forward to
 * it directly instead of through the list.
 */
2893 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
2895 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
2896 return ICE_ERR_OUT_OF_RANGE;
2898 /* Make sure VSI list is empty before removing it below */
2899 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
2901 ice_aqc_opc_update_sw_rules,
2906 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
2907 tmp_fltr_info.fwd_id.hw_vsi_id =
2908 ice_get_hw_vsi_num(hw, rem_vsi_handle);
2909 tmp_fltr_info.vsi_handle = rem_vsi_handle;
2910 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
2912 ice_debug(hw, ICE_DBG_SW,
2913 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
2914 tmp_fltr_info.fwd_id.hw_vsi_id, status);
2918 fm_list->fltr_info = tmp_fltr_info;
/* VLAN lists are kept until fully empty; others once one VSI is left. */
2921 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
2922 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
2923 struct ice_vsi_list_map_info *vsi_list_info =
2924 fm_list->vsi_list_info;
2926 /* Remove the VSI list since it is no longer used */
2927 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
2929 ice_debug(hw, ICE_DBG_SW,
2930 "Failed to remove VSI list %d, error %d\n",
2931 vsi_list_id, status);
2935 LIST_DEL(&vsi_list_info->list_entry);
2936 ice_free(hw, vsi_list_info);
2937 fm_list->vsi_list_info = NULL;
2944 * ice_remove_rule_internal - Remove a filter rule of a given type
2946 * @hw: pointer to the hardware structure
2947 * @recp_list: recipe list for which the rule needs to removed
2948 * @f_entry: rule entry containing filter information
/* ice_remove_rule_internal - remove one filter from a recipe list.
 * For plain rules, removes immediately. For VSI-list rules, either
 * decrements a shared list's ref_cnt, or updates the list and removes
 * the rule only when no VSIs remain. The actual HW removal sends a
 * no-header lookup rule with the remove opcode.
 * NOTE(review): listing elides lines (locals, remove_rule assignments,
 * goto labels, status checks, final return); code kept verbatim.
 */
2950 static enum ice_status
2951 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2952 struct ice_fltr_list_entry *f_entry)
2954 struct ice_fltr_mgmt_list_entry *list_elem;
2955 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2956 enum ice_status status = ICE_SUCCESS;
2957 bool remove_rule = false;
2960 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2961 return ICE_ERR_PARAM;
2962 f_entry->fltr_info.fwd_id.hw_vsi_id =
2963 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2965 rule_lock = &recp_list->filt_rule_lock;
2966 ice_acquire_lock(rule_lock);
2967 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
2968 &f_entry->fltr_info);
2970 status = ICE_ERR_DOES_NOT_EXIST;
2974 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
2976 } else if (!list_elem->vsi_list_info) {
2977 status = ICE_ERR_DOES_NOT_EXIST;
2979 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
2980 /* a ref_cnt > 1 indicates that the vsi_list is being
2981 * shared by multiple rules. Decrement the ref_cnt and
2982 * remove this rule, but do not modify the list, as it
2983 * is in-use by other rules.
2985 list_elem->vsi_list_info->ref_cnt--;
2988 /* a ref_cnt of 1 indicates the vsi_list is only used
2989 * by one rule. However, the original removal request is only
2990 * for a single VSI. Update the vsi_list first, and only
2991 * remove the rule if there are no further VSIs in this list.
2993 vsi_handle = f_entry->fltr_info.vsi_handle;
2994 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
2997 /* if VSI count goes to zero after updating the VSI list */
2998 if (list_elem->vsi_count == 0)
3003 /* Remove the lookup rule */
3004 struct ice_aqc_sw_rules_elem *s_rule;
3006 s_rule = (struct ice_aqc_sw_rules_elem *)
3007 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
3009 status = ICE_ERR_NO_MEMORY;
3013 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3014 ice_aqc_opc_remove_sw_rules);
3016 status = ice_aq_sw_rules(hw, s_rule,
3017 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
3018 ice_aqc_opc_remove_sw_rules, NULL);
3020 /* Remove a book keeping from the list */
3021 ice_free(hw, s_rule);
3026 LIST_DEL(&list_elem->list_entry);
3027 ice_free(hw, list_elem);
3030 ice_release_lock(rule_lock);
3035 * ice_aq_get_res_alloc - get allocated resources
3036 * @hw: pointer to the HW struct
3037 * @num_entries: pointer to u16 to store the number of resource entries returned
3038 * @buf: pointer to user-supplied buffer
3039 * @buf_size: size of buff
3040 * @cd: pointer to command details structure or NULL
3042 * The user-supplied buffer must be large enough to store the resource
3043 * information for all resource types. Each resource type is an
3044 * ice_aqc_get_res_resp_data_elem structure.
/* ice_aq_get_res_alloc - issue the get-resource-allocation AQ command and,
 * on success, report the number of returned resource entries.
 * NOTE(review): listing elides lines (return-type line, opening brace,
 * the !buf check before 3055, final return); code kept verbatim.
 */
3047 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
3048 u16 buf_size, struct ice_sq_cd *cd)
3050 struct ice_aqc_get_res_alloc *resp;
3051 enum ice_status status;
3052 struct ice_aq_desc desc;
3055 return ICE_ERR_BAD_PTR;
3057 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
3058 return ICE_ERR_INVAL_SIZE;
3060 resp = &desc.params.get_res;
3062 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
3063 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* num_entries is optional; only written on AQ success. */
3065 if (!status && num_entries)
3066 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
3072 * ice_aq_get_res_descs - get allocated resource descriptors
3073 * @hw: pointer to the hardware structure
3074 * @num_entries: number of resource entries in buffer
3075 * @buf: Indirect buffer to hold data parameters and response
3076 * @buf_size: size of buffer for indirect commands
3077 * @res_type: resource type
3078 * @res_shared: is resource shared
3079 * @desc_id: input - first desc ID to start; output - next desc ID
3080 * @cd: pointer to command details structure or NULL
/* ice_aq_get_res_descs - fetch allocated resource descriptors of a given
 * type starting at *desc_id; on return *desc_id holds the next descriptor
 * ID so callers can page through results.
 * NOTE(review): listing elides lines (return-type line, opening brace, the
 * !buf check before 3097, final return); code kept verbatim.
 */
3083 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
3084 struct ice_aqc_get_allocd_res_desc_resp *buf,
3085 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
3086 struct ice_sq_cd *cd)
3088 struct ice_aqc_get_allocd_res_desc *cmd;
3089 struct ice_aq_desc desc;
3090 enum ice_status status;
3092 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3094 cmd = &desc.params.get_res_desc;
3097 return ICE_ERR_PARAM;
/* Buffer must hold exactly num_entries response elements. */
3099 if (buf_size != (num_entries * sizeof(*buf)))
3100 return ICE_ERR_PARAM;
3102 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
3104 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
3105 ICE_AQC_RES_TYPE_M) | (res_shared ?
3106 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
3107 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
3109 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3111 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
3117 * ice_add_mac_rule - Add a MAC address based filter rule
3118 * @hw: pointer to the hardware structure
3119 * @m_list: list of MAC addresses and forwarding information
3120 * @sw: pointer to switch info struct for which function add rule
3121 * @lport: logic port number on which function add rule
3123 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
3124 * multiple unicast addresses, the function assumes that all the
3125 * addresses are unique in a given add_mac call. It doesn't
3126 * check for duplicates in this case, removing duplicates from a given
3127 * list should be taken care of in the caller of this function.
/* ice_add_mac_rule - add a list of MAC filters for one logical port.
 * Multicast (and shared unicast) entries go through the generic
 * ice_add_rule_internal path one at a time; exclusive unicast entries are
 * batched into a single buffer and sent to FW in ICE_AQ_MAX_BUF_LEN-sized
 * chunks, after which the FW-assigned rule IDs are copied back and a
 * management entry is created per rule.
 * NOTE(review): listing elides many lines (num_unicast accounting, loop
 * macro arguments, NULL checks, r_iter initializations, closing braces,
 * final return); code kept verbatim as extracted.
 */
3129 static enum ice_status
3130 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
3131 struct ice_switch_info *sw, u8 lport)
3133 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
3134 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3135 struct ice_fltr_list_entry *m_list_itr;
3136 struct LIST_HEAD_TYPE *rule_head;
3137 u16 total_elem_left, s_rule_size;
3138 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3139 enum ice_status status = ICE_SUCCESS;
3140 u16 num_unicast = 0;
3144 rule_lock = &recp_list->filt_rule_lock;
3145 rule_head = &recp_list->filt_rules;
/* First pass: validate every entry and dispatch the non-batched ones. */
3147 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3149 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3153 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3154 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3155 if (!ice_is_vsi_valid(hw, vsi_handle))
3156 return ICE_ERR_PARAM;
3157 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3158 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3159 /* update the src in case it is VSI num */
3160 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3161 return ICE_ERR_PARAM;
3162 m_list_itr->fltr_info.src = hw_vsi_id;
3163 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3164 IS_ZERO_ETHER_ADDR(add))
3165 return ICE_ERR_PARAM;
3166 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3167 /* Don't overwrite the unicast address */
3168 ice_acquire_lock(rule_lock);
3169 if (ice_find_rule_entry(rule_head,
3170 &m_list_itr->fltr_info)) {
3171 ice_release_lock(rule_lock);
3172 return ICE_ERR_ALREADY_EXISTS;
3174 ice_release_lock(rule_lock);
3176 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3177 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3178 m_list_itr->status =
3179 ice_add_rule_internal(hw, recp_list, lport,
3181 if (m_list_itr->status)
3182 return m_list_itr->status;
3186 ice_acquire_lock(rule_lock);
3187 /* Exit if no suitable entries were found for adding bulk switch rule */
3189 status = ICE_SUCCESS;
3190 goto ice_add_mac_exit;
3193 /* Allocate switch rule buffer for the bulk update for unicast */
3194 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3195 s_rule = (struct ice_aqc_sw_rules_elem *)
3196 ice_calloc(hw, num_unicast, s_rule_size);
3198 status = ICE_ERR_NO_MEMORY;
3199 goto ice_add_mac_exit;
/* Second pass: serialize each exclusive-unicast rule into the buffer. */
3203 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3205 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3206 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3208 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3209 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3210 ice_aqc_opc_add_sw_rules);
3211 r_iter = (struct ice_aqc_sw_rules_elem *)
3212 ((u8 *)r_iter + s_rule_size);
3216 /* Call AQ bulk switch rule update for all unicast addresses */
3218 /* Call AQ switch rule in AQ_MAX chunk */
3219 for (total_elem_left = num_unicast; total_elem_left > 0;
3220 total_elem_left -= elem_sent) {
3221 struct ice_aqc_sw_rules_elem *entry = r_iter;
3223 elem_sent = MIN_T(u8, total_elem_left,
3224 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
3225 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3226 elem_sent, ice_aqc_opc_add_sw_rules,
3229 goto ice_add_mac_exit;
3230 r_iter = (struct ice_aqc_sw_rules_elem *)
3231 ((u8 *)r_iter + (elem_sent * s_rule_size));
3234 /* Fill up rule ID based on the value returned from FW */
3236 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3238 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3239 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3240 struct ice_fltr_mgmt_list_entry *fm_entry;
3242 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3243 f_info->fltr_rule_id =
3244 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3245 f_info->fltr_act = ICE_FWD_TO_VSI;
3246 /* Create an entry to track this MAC address */
3247 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3248 ice_malloc(hw, sizeof(*fm_entry));
3250 status = ICE_ERR_NO_MEMORY;
3251 goto ice_add_mac_exit;
3253 fm_entry->fltr_info = *f_info;
3254 fm_entry->vsi_count = 1;
3255 /* The book keeping entries will get removed when
3256 * base driver calls remove filter AQ command
3259 LIST_ADD(&fm_entry->list_entry, rule_head);
3260 r_iter = (struct ice_aqc_sw_rules_elem *)
3261 ((u8 *)r_iter + s_rule_size);
3266 ice_release_lock(rule_lock);
3268 ice_free(hw, s_rule);
3273 * ice_add_mac - Add a MAC address based filter rule
3274 * @hw: pointer to the hardware structure
3275 * @m_list: list of MAC addresses and forwarding information
3277 * Function add MAC rule for logical port from HW struct
/* ice_add_mac - public wrapper: add MAC filters for the HW's own logical
 * port, delegating to ice_add_mac_rule with hw->switch_info.
 * NOTE(review): listing elides lines (return-type line, opening brace,
 * the NULL-m_list check before 3283, closing brace); kept verbatim.
 */
3280 ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3283 return ICE_ERR_PARAM;
3285 return ice_add_mac_rule(hw, m_list, hw->switch_info,
3286 hw->port_info->lport);
3290 * ice_add_vlan_internal - Add one VLAN based filter rule
3291 * @hw: pointer to the hardware structure
3292 * @recp_list: recipe list for which rule has to be added
3293 * @f_entry: filter entry containing one VLAN information
/* ice_add_vlan_internal - add one VLAN filter rule. VLAN rules always
 * forward through a VSI (pruning) list:
 *  - no existing rule: reuse a single-VSI list containing this VSI if one
 *    exists, else create a new list, then create the forwarding rule;
 *  - existing rule whose list has ref_cnt == 1: add the VSI to that list;
 *  - existing rule whose list is shared (ref_cnt > 1): build a fresh
 *    2-entry list and re-point the rule at it.
 * NOTE(review): listing elides many lines (goto labels/exits, status
 * checks, argument lines of several calls, final return); code kept
 * verbatim as extracted.
 */
3295 static enum ice_status
3296 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3297 struct ice_fltr_list_entry *f_entry)
3299 struct ice_fltr_mgmt_list_entry *v_list_itr;
3300 struct ice_fltr_info *new_fltr, *cur_fltr;
3301 enum ice_sw_lkup_type lkup_type;
3302 u16 vsi_list_id = 0, vsi_handle;
3303 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3304 enum ice_status status = ICE_SUCCESS;
3306 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3307 return ICE_ERR_PARAM;
3309 f_entry->fltr_info.fwd_id.hw_vsi_id =
3310 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3311 new_fltr = &f_entry->fltr_info;
3313 /* VLAN ID should only be 12 bits */
3314 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3315 return ICE_ERR_PARAM;
3317 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3318 return ICE_ERR_PARAM;
3320 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3321 lkup_type = new_fltr->lkup_type;
3322 vsi_handle = new_fltr->vsi_handle;
3323 rule_lock = &recp_list->filt_rule_lock;
3324 ice_acquire_lock(rule_lock);
3325 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
3327 struct ice_vsi_list_map_info *map_info = NULL;
3329 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3330 /* All VLAN pruning rules use a VSI list. Check if
3331 * there is already a VSI list containing VSI that we
3332 * want to add. If found, use the same vsi_list_id for
3333 * this new VLAN rule or else create a new list.
3335 map_info = ice_find_vsi_list_entry(recp_list,
3339 status = ice_create_vsi_list_rule(hw,
3347 /* Convert the action to forwarding to a VSI list. */
3348 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3349 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3352 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3354 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
3357 status = ICE_ERR_DOES_NOT_EXIST;
3360 /* reuse VSI list for new rule and increment ref_cnt */
3362 v_list_itr->vsi_list_info = map_info;
3363 map_info->ref_cnt++;
3365 v_list_itr->vsi_list_info =
3366 ice_create_vsi_list_map(hw, &vsi_handle,
3370 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3371 /* Update existing VSI list to add new VSI ID only if it used
3374 cur_fltr = &v_list_itr->fltr_info;
3375 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3378 /* If VLAN rule exists and VSI list being used by this rule is
3379 * referenced by more than 1 VLAN rule. Then create a new VSI
3380 * list appending previous VSI with new VSI and update existing
3381 * VLAN rule to point to new VSI list ID
3383 struct ice_fltr_info tmp_fltr;
3384 u16 vsi_handle_arr[2];
3387 /* Current implementation only supports reusing VSI list with
3388 * one VSI count. We should never hit below condition
3390 if (v_list_itr->vsi_count > 1 &&
3391 v_list_itr->vsi_list_info->ref_cnt > 1) {
3392 ice_debug(hw, ICE_DBG_SW,
3393 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3394 status = ICE_ERR_CFG;
3399 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3402 /* A rule already exists with the new VSI being added */
3403 if (cur_handle == vsi_handle) {
3404 status = ICE_ERR_ALREADY_EXISTS;
3408 vsi_handle_arr[0] = cur_handle;
3409 vsi_handle_arr[1] = vsi_handle;
3410 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3411 &vsi_list_id, lkup_type);
3415 tmp_fltr = v_list_itr->fltr_info;
3416 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3417 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3418 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3419 /* Update the previous switch rule to a new VSI list which
3420 * includes current VSI that is requested
3422 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3426 /* before overriding VSI list map info. decrement ref_cnt of
3429 v_list_itr->vsi_list_info->ref_cnt--;
3431 /* now update to newly created list */
3432 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3433 v_list_itr->vsi_list_info =
3434 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3436 v_list_itr->vsi_count++;
3440 ice_release_lock(rule_lock);
3445 * ice_add_vlan_rule - Add VLAN based filter rule
3446 * @hw: pointer to the hardware structure
3447 * @v_list: list of VLAN entries and forwarding information
3448 * @sw: pointer to switch info struct for which function add rule
3450 static enum ice_status
3451 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
3452 struct ice_switch_info *sw)
3454 struct ice_fltr_list_entry *v_list_itr;
3455 struct ice_sw_recipe *recp_list;
3457 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
3458 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
3460 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3461 return ICE_ERR_PARAM;
3462 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3463 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
3465 if (v_list_itr->status)
3466 return v_list_itr->status;
3472 * ice_add_vlan - Add a VLAN based filter rule
3473 * @hw: pointer to the hardware structure
3474 * @v_list: list of VLAN and forwarding information
3476 * Function add VLAN rule for logical port from HW struct
3479 ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3482 return ICE_ERR_PARAM;
3484 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
3488 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3489 * @hw: pointer to the hardware structure
3490 * @mv_list: list of MAC and VLAN filters
3491 * @sw: pointer to switch info struct for which function add rule
3492 * @lport: logic port number on which function add rule
3494 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3495 * pruning bits enabled, then it is the responsibility of the caller to make
3496 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3497 * VLAN won't be received on that VSI otherwise.
3499 static enum ice_status
3500 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
3501 struct ice_switch_info *sw, u8 lport)
3503 struct ice_fltr_list_entry *mv_list_itr;
3504 struct ice_sw_recipe *recp_list;
3506 if (!mv_list || !hw)
3507 return ICE_ERR_PARAM;
3509 recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
3510 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3512 enum ice_sw_lkup_type l_type =
3513 mv_list_itr->fltr_info.lkup_type;
3515 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3516 return ICE_ERR_PARAM;
3517 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3518 mv_list_itr->status =
3519 ice_add_rule_internal(hw, recp_list, lport,
3521 if (mv_list_itr->status)
3522 return mv_list_itr->status;
3528 * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
3529 * @hw: pointer to the hardware structure
3530 * @mv_list: list of MAC VLAN addresses and forwarding information
3532 * Function add MAC VLAN rule for logical port from HW struct
3535 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3537 if (!mv_list || !hw)
3538 return ICE_ERR_PARAM;
3540 return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
3541 hw->port_info->lport);
3545 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
3546 * @hw: pointer to the hardware structure
3547 * @em_list: list of ether type MAC filter, MAC is optional
3548 * @sw: pointer to switch info struct for which function add rule
3549 * @lport: logic port number on which function add rule
3551 * This function requires the caller to populate the entries in
3552 * the filter list with the necessary fields (including flags to
3553 * indicate Tx or Rx rules).
3555 static enum ice_status
3556 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
3557 struct ice_switch_info *sw, u8 lport)
3559 struct ice_fltr_list_entry *em_list_itr;
3561 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3563 struct ice_sw_recipe *recp_list;
3564 enum ice_sw_lkup_type l_type;
3566 l_type = em_list_itr->fltr_info.lkup_type;
3567 recp_list = &sw->recp_list[l_type];
3569 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3570 l_type != ICE_SW_LKUP_ETHERTYPE)
3571 return ICE_ERR_PARAM;
3573 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
3576 if (em_list_itr->status)
3577 return em_list_itr->status;
3584 * ice_add_eth_mac - Add a ethertype based filter rule
3585 * @hw: pointer to the hardware structure
3586 * @em_list: list of ethertype and forwarding information
3588 * Function add ethertype rule for logical port from HW struct
3590 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3592 if (!em_list || !hw)
3593 return ICE_ERR_PARAM;
3595 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
3596 hw->port_info->lport);
3600 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
3601 * @hw: pointer to the hardware structure
3602 * @em_list: list of ethertype or ethertype MAC entries
3603 * @sw: pointer to switch info struct for which function add rule
3605 static enum ice_status
3606 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
3607 struct ice_switch_info *sw)
3609 struct ice_fltr_list_entry *em_list_itr, *tmp;
3611 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3613 struct ice_sw_recipe *recp_list;
3614 enum ice_sw_lkup_type l_type;
3616 l_type = em_list_itr->fltr_info.lkup_type;
3618 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3619 l_type != ICE_SW_LKUP_ETHERTYPE)
3620 return ICE_ERR_PARAM;
3622 recp_list = &sw->recp_list[l_type];
3623 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
3625 if (em_list_itr->status)
3626 return em_list_itr->status;
3632 * ice_remove_eth_mac - remove a ethertype based filter rule
3633 * @hw: pointer to the hardware structure
3634 * @em_list: list of ethertype and forwarding information
3638 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3640 if (!em_list || !hw)
3641 return ICE_ERR_PARAM;
3643 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
3647 * ice_rem_sw_rule_info
3648 * @hw: pointer to the hardware structure
3649 * @rule_head: pointer to the switch list structure that we want to delete
3652 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3654 if (!LIST_EMPTY(rule_head)) {
3655 struct ice_fltr_mgmt_list_entry *entry;
3656 struct ice_fltr_mgmt_list_entry *tmp;
3658 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3659 ice_fltr_mgmt_list_entry, list_entry) {
3660 LIST_DEL(&entry->list_entry);
3661 ice_free(hw, entry);
3667 * ice_rem_adv_rule_info
3668 * @hw: pointer to the hardware structure
3669 * @rule_head: pointer to the switch list structure that we want to delete
3672 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3674 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3675 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3677 if (LIST_EMPTY(rule_head))
3680 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3681 ice_adv_fltr_mgmt_list_entry, list_entry) {
3682 LIST_DEL(&lst_itr->list_entry);
3683 ice_free(hw, lst_itr->lkups);
3684 ice_free(hw, lst_itr);
3689 * ice_rem_all_sw_rules_info
3690 * @hw: pointer to the hardware structure
3692 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3694 struct ice_switch_info *sw = hw->switch_info;
3697 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3698 struct LIST_HEAD_TYPE *rule_head;
3700 rule_head = &sw->recp_list[i].filt_rules;
3701 if (!sw->recp_list[i].adv_rule)
3702 ice_rem_sw_rule_info(hw, rule_head);
3704 ice_rem_adv_rule_info(hw, rule_head);
3709 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3710 * @pi: pointer to the port_info structure
3711 * @vsi_handle: VSI handle to set as default
3712 * @set: true to add the above mentioned switch rule, false to remove it
3713 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3715 * add filter rule to set/unset given VSI as default VSI for the switch
3716 * (represented by swid)
3719 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3722 struct ice_aqc_sw_rules_elem *s_rule;
3723 struct ice_fltr_info f_info;
3724 struct ice_hw *hw = pi->hw;
3725 enum ice_adminq_opc opcode;
3726 enum ice_status status;
3730 if (!ice_is_vsi_valid(hw, vsi_handle))
3731 return ICE_ERR_PARAM;
3732 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3734 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3735 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3736 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3738 return ICE_ERR_NO_MEMORY;
3740 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3742 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3743 f_info.flag = direction;
3744 f_info.fltr_act = ICE_FWD_TO_VSI;
3745 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3747 if (f_info.flag & ICE_FLTR_RX) {
3748 f_info.src = pi->lport;
3749 f_info.src_id = ICE_SRC_ID_LPORT;
3751 f_info.fltr_rule_id =
3752 pi->dflt_rx_vsi_rule_id;
3753 } else if (f_info.flag & ICE_FLTR_TX) {
3754 f_info.src_id = ICE_SRC_ID_VSI;
3755 f_info.src = hw_vsi_id;
3757 f_info.fltr_rule_id =
3758 pi->dflt_tx_vsi_rule_id;
3762 opcode = ice_aqc_opc_add_sw_rules;
3764 opcode = ice_aqc_opc_remove_sw_rules;
3766 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3768 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3769 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
3772 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3774 if (f_info.flag & ICE_FLTR_TX) {
3775 pi->dflt_tx_vsi_num = hw_vsi_id;
3776 pi->dflt_tx_vsi_rule_id = index;
3777 } else if (f_info.flag & ICE_FLTR_RX) {
3778 pi->dflt_rx_vsi_num = hw_vsi_id;
3779 pi->dflt_rx_vsi_rule_id = index;
3782 if (f_info.flag & ICE_FLTR_TX) {
3783 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3784 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3785 } else if (f_info.flag & ICE_FLTR_RX) {
3786 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3787 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3792 ice_free(hw, s_rule);
3797 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3798 * @list_head: head of rule list
3799 * @f_info: rule information
3801 * Helper function to search for a unicast rule entry - this is to be used
3802 * to remove unicast MAC filter that is not shared with other VSIs on the
3805 * Returns pointer to entry storing the rule if found
3807 static struct ice_fltr_mgmt_list_entry *
3808 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
3809 struct ice_fltr_info *f_info)
3811 struct ice_fltr_mgmt_list_entry *list_itr;
3813 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3815 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3816 sizeof(f_info->l_data)) &&
3817 f_info->fwd_id.hw_vsi_id ==
3818 list_itr->fltr_info.fwd_id.hw_vsi_id &&
3819 f_info->flag == list_itr->fltr_info.flag)
3826 * ice_remove_mac_rule - remove a MAC based filter rule
3827 * @hw: pointer to the hardware structure
3828 * @m_list: list of MAC addresses and forwarding information
3829 * @recp_list: list from which function remove MAC address
3831 * This function removes either a MAC filter rule or a specific VSI from a
3832 * VSI list for a multicast MAC address.
3834 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
3835 * ice_add_mac. Caller should be aware that this call will only work if all
3836 * the entries passed into m_list were added previously. It will not attempt to
3837 * do a partial remove of entries that were found.
3839 static enum ice_status
3840 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
3841 struct ice_sw_recipe *recp_list)
3843 struct ice_fltr_list_entry *list_itr, *tmp;
3844 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3847 return ICE_ERR_PARAM;
3849 rule_lock = &recp_list->filt_rule_lock;
3850 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
3852 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3853 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3856 if (l_type != ICE_SW_LKUP_MAC)
3857 return ICE_ERR_PARAM;
3859 vsi_handle = list_itr->fltr_info.vsi_handle;
3860 if (!ice_is_vsi_valid(hw, vsi_handle))
3861 return ICE_ERR_PARAM;
3863 list_itr->fltr_info.fwd_id.hw_vsi_id =
3864 ice_get_hw_vsi_num(hw, vsi_handle);
3865 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3866 /* Don't remove the unicast address that belongs to
3867 * another VSI on the switch, since it is not being
3870 ice_acquire_lock(rule_lock);
3871 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
3872 &list_itr->fltr_info)) {
3873 ice_release_lock(rule_lock);
3874 return ICE_ERR_DOES_NOT_EXIST;
3876 ice_release_lock(rule_lock);
3878 list_itr->status = ice_remove_rule_internal(hw, recp_list,
3880 if (list_itr->status)
3881 return list_itr->status;
3887 * ice_remove_mac - remove a MAC address based filter rule
3888 * @hw: pointer to the hardware structure
3889 * @m_list: list of MAC addresses and forwarding information
3893 ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3895 struct ice_sw_recipe *recp_list;
3897 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
3898 return ice_remove_mac_rule(hw, m_list, recp_list);
3902 * ice_remove_vlan_rule - Remove VLAN based filter rule
3903 * @hw: pointer to the hardware structure
3904 * @v_list: list of VLAN entries and forwarding information
3905 * @recp_list: list from which function remove VLAN
3907 static enum ice_status
3908 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
3909 struct ice_sw_recipe *recp_list)
3911 struct ice_fltr_list_entry *v_list_itr, *tmp;
3913 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3915 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3917 if (l_type != ICE_SW_LKUP_VLAN)
3918 return ICE_ERR_PARAM;
3919 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
3921 if (v_list_itr->status)
3922 return v_list_itr->status;
3928 * ice_remove_vlan - remove a VLAN address based filter rule
3929 * @hw: pointer to the hardware structure
3930 * @v_list: list of VLAN and forwarding information
3934 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3936 struct ice_sw_recipe *recp_list;
3939 return ICE_ERR_PARAM;
3941 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
3942 return ice_remove_vlan_rule(hw, v_list, recp_list);
3946 * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
3947 * @hw: pointer to the hardware structure
3948 * @v_list: list of MAC VLAN entries and forwarding information
3949 * @recp_list: list from which function remove MAC VLAN
3951 static enum ice_status
3952 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
3953 struct ice_sw_recipe *recp_list)
3955 struct ice_fltr_list_entry *v_list_itr, *tmp;
3957 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
3958 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3960 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3962 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3963 return ICE_ERR_PARAM;
3964 v_list_itr->status =
3965 ice_remove_rule_internal(hw, recp_list,
3967 if (v_list_itr->status)
3968 return v_list_itr->status;
3974 * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
3975 * @hw: pointer to the hardware structure
3976 * @mv_list: list of MAC VLAN and forwarding information
3979 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3981 struct ice_sw_recipe *recp_list;
3983 if (!mv_list || !hw)
3984 return ICE_ERR_PARAM;
3986 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
3987 return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
3991 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3992 * @fm_entry: filter entry to inspect
3993 * @vsi_handle: VSI handle to compare with filter info
3996 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3998 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3999 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
4000 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
4001 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
4006 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
4007 * @hw: pointer to the hardware structure
4008 * @vsi_handle: VSI handle to remove filters from
4009 * @vsi_list_head: pointer to the list to add entry to
4010 * @fi: pointer to fltr_info of filter entry to copy & add
4012 * Helper function, used when creating a list of filters to remove from
4013 * a specific VSI. The entry added to vsi_list_head is a COPY of the
4014 * original filter entry, with the exception of fltr_info.fltr_act and
4015 * fltr_info.fwd_id fields. These are set such that later logic can
4016 * extract which VSI to remove the fltr from, and pass on that information.
4018 static enum ice_status
4019 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4020 struct LIST_HEAD_TYPE *vsi_list_head,
4021 struct ice_fltr_info *fi)
4023 struct ice_fltr_list_entry *tmp;
4025 /* this memory is freed up in the caller function
4026 * once filters for this VSI are removed
4028 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
4030 return ICE_ERR_NO_MEMORY;
4032 tmp->fltr_info = *fi;
4034 /* Overwrite these fields to indicate which VSI to remove filter from,
4035 * so find and remove logic can extract the information from the
4036 * list entries. Note that original entries will still have proper
4039 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
4040 tmp->fltr_info.vsi_handle = vsi_handle;
4041 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4043 LIST_ADD(&tmp->list_entry, vsi_list_head);
4049 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4050 * @hw: pointer to the hardware structure
4051 * @vsi_handle: VSI handle to remove filters from
4052 * @lkup_list_head: pointer to the list that has certain lookup type filters
4053 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4055 * Locates all filters in lkup_list_head that are used by the given VSI,
4056 * and adds COPIES of those entries to vsi_list_head (intended to be used
4057 * to remove the listed filters).
4058 * Note that this means all entries in vsi_list_head must be explicitly
4059 * deallocated by the caller when done with list.
4061 static enum ice_status
4062 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4063 struct LIST_HEAD_TYPE *lkup_list_head,
4064 struct LIST_HEAD_TYPE *vsi_list_head)
4066 struct ice_fltr_mgmt_list_entry *fm_entry;
4067 enum ice_status status = ICE_SUCCESS;
4069 /* check to make sure VSI ID is valid and within boundary */
4070 if (!ice_is_vsi_valid(hw, vsi_handle))
4071 return ICE_ERR_PARAM;
4073 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
4074 ice_fltr_mgmt_list_entry, list_entry) {
4075 struct ice_fltr_info *fi;
4077 fi = &fm_entry->fltr_info;
4078 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
4081 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4090 * ice_determine_promisc_mask
4091 * @fi: filter info to parse
4093 * Helper function to determine which ICE_PROMISC_ mask corresponds
4094 * to given filter into.
4096 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4098 u16 vid = fi->l_data.mac_vlan.vlan_id;
4099 u8 *macaddr = fi->l_data.mac.mac_addr;
4100 bool is_tx_fltr = false;
4101 u8 promisc_mask = 0;
4103 if (fi->flag == ICE_FLTR_TX)
4106 if (IS_BROADCAST_ETHER_ADDR(macaddr))
4107 promisc_mask |= is_tx_fltr ?
4108 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4109 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
4110 promisc_mask |= is_tx_fltr ?
4111 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4112 else if (IS_UNICAST_ETHER_ADDR(macaddr))
4113 promisc_mask |= is_tx_fltr ?
4114 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
4116 promisc_mask |= is_tx_fltr ?
4117 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4119 return promisc_mask;
4123 * ice_get_vsi_promisc - get promiscuous mode of given VSI
4124 * @hw: pointer to the hardware structure
4125 * @vsi_handle: VSI handle to retrieve info from
4126 * @promisc_mask: pointer to mask to be filled in
4127 * @vid: VLAN ID of promisc VLAN VSI
4130 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4133 struct ice_switch_info *sw = hw->switch_info;
4134 struct ice_fltr_mgmt_list_entry *itr;
4135 struct LIST_HEAD_TYPE *rule_head;
4136 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4138 if (!ice_is_vsi_valid(hw, vsi_handle))
4139 return ICE_ERR_PARAM;
4143 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
4144 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
4146 ice_acquire_lock(rule_lock);
4147 LIST_FOR_EACH_ENTRY(itr, rule_head,
4148 ice_fltr_mgmt_list_entry, list_entry) {
4149 /* Continue if this filter doesn't apply to this VSI or the
4150 * VSI ID is not in the VSI map for this filter
4152 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4155 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4157 ice_release_lock(rule_lock);
4163 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4164 * @hw: pointer to the hardware structure
4165 * @vsi_handle: VSI handle to retrieve info from
4166 * @promisc_mask: pointer to mask to be filled in
4167 * @vid: VLAN ID of promisc VLAN VSI
4170 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4173 struct ice_switch_info *sw = hw->switch_info;
4174 struct ice_fltr_mgmt_list_entry *itr;
4175 struct LIST_HEAD_TYPE *rule_head;
4176 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4178 if (!ice_is_vsi_valid(hw, vsi_handle))
4179 return ICE_ERR_PARAM;
4183 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
4184 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
4186 ice_acquire_lock(rule_lock);
4187 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
4189 /* Continue if this filter doesn't apply to this VSI or the
4190 * VSI ID is not in the VSI map for this filter
4192 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4195 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4197 ice_release_lock(rule_lock);
4203 * ice_remove_promisc - Remove promisc based filter rules
4204 * @hw: pointer to the hardware structure
4205 * @recp_id: recipe ID for which the rule needs to removed
4206 * @v_list: list of promisc entries
4208 static enum ice_status
4209 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
4210 struct LIST_HEAD_TYPE *v_list)
4212 struct ice_fltr_list_entry *v_list_itr, *tmp;
4213 struct ice_sw_recipe *recp_list;
4215 recp_list = &hw->switch_info->recp_list[recp_id];
4216 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4218 v_list_itr->status =
4219 ice_remove_rule_internal(hw, recp_list, v_list_itr);
4220 if (v_list_itr->status)
4221 return v_list_itr->status;
4227 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
4228 * @hw: pointer to the hardware structure
4229 * @vsi_handle: VSI handle to clear mode
4230 * @promisc_mask: mask of promiscuous config bits to clear
4231 * @vid: VLAN ID to clear VLAN promiscuous
4234 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4237 struct ice_switch_info *sw = hw->switch_info;
4238 struct ice_fltr_list_entry *fm_entry, *tmp;
4239 struct LIST_HEAD_TYPE remove_list_head;
4240 struct ice_fltr_mgmt_list_entry *itr;
4241 struct LIST_HEAD_TYPE *rule_head;
4242 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4243 enum ice_status status = ICE_SUCCESS;
4246 if (!ice_is_vsi_valid(hw, vsi_handle))
4247 return ICE_ERR_PARAM;
4249 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
4250 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4252 recipe_id = ICE_SW_LKUP_PROMISC;
4254 rule_head = &sw->recp_list[recipe_id].filt_rules;
4255 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4257 INIT_LIST_HEAD(&remove_list_head);
4259 ice_acquire_lock(rule_lock);
4260 LIST_FOR_EACH_ENTRY(itr, rule_head,
4261 ice_fltr_mgmt_list_entry, list_entry) {
4262 struct ice_fltr_info *fltr_info;
4263 u8 fltr_promisc_mask = 0;
4265 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4267 fltr_info = &itr->fltr_info;
4269 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4270 vid != fltr_info->l_data.mac_vlan.vlan_id)
4273 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4275 /* Skip if filter is not completely specified by given mask */
4276 if (fltr_promisc_mask & ~promisc_mask)
4279 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4283 ice_release_lock(rule_lock);
4284 goto free_fltr_list;
4287 ice_release_lock(rule_lock);
4289 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
4292 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4293 ice_fltr_list_entry, list_entry) {
4294 LIST_DEL(&fm_entry->list_entry);
4295 ice_free(hw, fm_entry);
4302 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4303 * @hw: pointer to the hardware structure
4304 * @vsi_handle: VSI handle to configure
4305 * @promisc_mask: mask of promiscuous config bits
4306 * @vid: VLAN ID to set VLAN promiscuous
4309 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
4311 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4312 struct ice_fltr_list_entry f_list_entry;
4313 struct ice_fltr_info new_fltr;
4314 enum ice_status status = ICE_SUCCESS;
4320 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4322 if (!ice_is_vsi_valid(hw, vsi_handle))
4323 return ICE_ERR_PARAM;
4324 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4326 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
4328 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4329 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4330 new_fltr.l_data.mac_vlan.vlan_id = vid;
4331 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4333 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4334 recipe_id = ICE_SW_LKUP_PROMISC;
4337 /* Separate filters must be set for each direction/packet type
4338 * combination, so we will loop over the mask value, store the
4339 * individual type, and clear it out in the input mask as it
4342 while (promisc_mask) {
4343 struct ice_sw_recipe *recp_list;
4349 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4350 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4351 pkt_type = UCAST_FLTR;
4352 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4353 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4354 pkt_type = UCAST_FLTR;
4356 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4357 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4358 pkt_type = MCAST_FLTR;
4359 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4360 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4361 pkt_type = MCAST_FLTR;
4363 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4364 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4365 pkt_type = BCAST_FLTR;
4366 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4367 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4368 pkt_type = BCAST_FLTR;
4372 /* Check for VLAN promiscuous flag */
4373 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4374 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4375 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4376 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4380 /* Set filter DA based on packet type */
4381 mac_addr = new_fltr.l_data.mac.mac_addr;
4382 if (pkt_type == BCAST_FLTR) {
4383 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
4384 } else if (pkt_type == MCAST_FLTR ||
4385 pkt_type == UCAST_FLTR) {
4386 /* Use the dummy ether header DA */
4387 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
4388 ICE_NONDMA_TO_NONDMA);
4389 if (pkt_type == MCAST_FLTR)
4390 mac_addr[0] |= 0x1; /* Set multicast bit */
4393 /* Need to reset this to zero for all iterations */
4396 new_fltr.flag |= ICE_FLTR_TX;
4397 new_fltr.src = hw_vsi_id;
4399 new_fltr.flag |= ICE_FLTR_RX;
4400 new_fltr.src = hw->port_info->lport;
4403 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4404 new_fltr.vsi_handle = vsi_handle;
4405 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4406 f_list_entry.fltr_info = new_fltr;
4407 recp_list = &hw->switch_info->recp_list[recipe_id];
4409 status = ice_add_rule_internal(hw, recp_list,
4410 hw->port_info->lport,
4412 if (status != ICE_SUCCESS)
4413 goto set_promisc_exit;
4421 * ice_set_vlan_vsi_promisc
4422 * @hw: pointer to the hardware structure
4423 * @vsi_handle: VSI handle to configure
4424 * @promisc_mask: mask of promiscuous config bits
4425 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4427 * Configure VSI with all associated VLANs to given promiscuous mode(s)
4430 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4431 bool rm_vlan_promisc)
4433 struct ice_switch_info *sw = hw->switch_info;
4434 struct ice_fltr_list_entry *list_itr, *tmp;
4435 struct LIST_HEAD_TYPE vsi_list_head;
4436 struct LIST_HEAD_TYPE *vlan_head;
4437 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
4438 enum ice_status status;
4441 INIT_LIST_HEAD(&vsi_list_head);
4442 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4443 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4444 ice_acquire_lock(vlan_lock);
4445 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4447 ice_release_lock(vlan_lock);
4449 goto free_fltr_list;
4451 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
4453 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4454 if (rm_vlan_promisc)
4455 status = ice_clear_vsi_promisc(hw, vsi_handle,
4456 promisc_mask, vlan_id);
4458 status = ice_set_vsi_promisc(hw, vsi_handle,
4459 promisc_mask, vlan_id);
4465 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
4466 ice_fltr_list_entry, list_entry) {
4467 LIST_DEL(&list_itr->list_entry);
4468 ice_free(hw, list_itr);
 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @recp_list: recipe list from which the function removes filters
 * @lkup: switch rule filter lookup type
ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
			 struct ice_sw_recipe *recp_list,
			 enum ice_sw_lkup_type lkup)
	struct ice_fltr_list_entry *fm_entry;
	struct LIST_HEAD_TYPE remove_list_head;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_fltr_list_entry *tmp;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status;
	/* Collect this VSI's rules of the given lookup type into a local
	 * remove list while holding the per-recipe rule lock.
	 */
	INIT_LIST_HEAD(&remove_list_head);
	rule_lock = &recp_list[lkup].filt_rule_lock;
	rule_head = &recp_list[lkup].filt_rules;
	ice_acquire_lock(rule_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
	ice_release_lock(rule_lock);
	/* Dispatch to the removal routine matching the lookup type */
	case ICE_SW_LKUP_MAC:
		ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
	case ICE_SW_LKUP_VLAN:
		ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
	case ICE_SW_LKUP_PROMISC:
	case ICE_SW_LKUP_PROMISC_VLAN:
		ice_remove_promisc(hw, lkup, &remove_list_head);
	case ICE_SW_LKUP_MAC_VLAN:
		ice_remove_mac_vlan(hw, &remove_list_head);
	case ICE_SW_LKUP_ETHERTYPE:
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		ice_remove_eth_mac(hw, &remove_list_head);
	case ICE_SW_LKUP_DFLT:
		ice_debug(hw, ICE_DBG_SW,
			  "Remove filters for this lookup type hasn't been implemented yet\n");
	case ICE_SW_LKUP_LAST:
		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
	/* Free the temporary list entries built above */
	LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
				 ice_fltr_list_entry, list_entry) {
		LIST_DEL(&fm_entry->list_entry);
		ice_free(hw, fm_entry);
 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @sw: pointer to switch info struct
ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
			 struct ice_switch_info *sw)
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	/* Walk every supported lookup type and drop this VSI's filters
	 * from the corresponding recipe list, one type at a time.
	 */
	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
				 sw->recp_list, ICE_SW_LKUP_MAC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
				 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
				 sw->recp_list, ICE_SW_LKUP_PROMISC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
				 sw->recp_list, ICE_SW_LKUP_VLAN);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
				 sw->recp_list, ICE_SW_LKUP_DFLT);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
				 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
				 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
				 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
 * ice_remove_vsi_fltr - Remove all filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
	/* Convenience wrapper operating on the HW instance's switch info */
	ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
 * ice_alloc_res_cntr - allocating resource counter
 * @hw: pointer to the hardware structure
 * @type: type of resource
 * @alloc_shared: if set it is shared else dedicated
 * @num_items: number of entries requested for FD resource type
 * @counter_id: counter index returned by AQ call
ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
	struct ice_aqc_alloc_free_res_elem *buf;
	enum ice_status status;
	/* Allocate resource */
	buf_len = sizeof(*buf);
	buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
		return ICE_ERR_NO_MEMORY;
	buf->num_elems = CPU_TO_LE16(num_items);
	/* Encode the resource type field plus the shared/dedicated flag */
	buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
				     ICE_AQC_RES_TYPE_M) | alloc_shared);
	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
	/* Firmware returns the allocated index in the first element */
	*counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
 * ice_free_res_cntr - free resource counter
 * @hw: pointer to the hardware structure
 * @type: type of resource
 * @alloc_shared: if set it is shared else dedicated
 * @num_items: number of entries to be freed for FD resource type
 * @counter_id: counter ID resource which needs to be freed
ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
	struct ice_aqc_alloc_free_res_elem *buf;
	enum ice_status status;
	buf_len = sizeof(*buf);
	buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
		return ICE_ERR_NO_MEMORY;
	buf->num_elems = CPU_TO_LE16(num_items);
	/* Same type/flag encoding as ice_alloc_res_cntr(), plus the index
	 * of the counter being released.
	 */
	buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
				     ICE_AQC_RES_TYPE_M) | alloc_shared);
	buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
				       ice_aqc_opc_free_res, NULL);
		ice_debug(hw, ICE_DBG_SW,
			  "counter resource could not be freed\n");
 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
 * @hw: pointer to the hardware structure
 * @counter_id: returns counter index
enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
	/* Single dedicated (non-shared) VLAN counter */
	return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
				  ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
 * ice_free_vlan_res_counter - Free counter resource for VLAN type
 * @hw: pointer to the hardware structure
 * @counter_id: counter index to be freed
enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
	/* Mirror of ice_alloc_vlan_res_counter() */
	return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
				 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
 * ice_alloc_res_lg_act - add large action resource
 * @hw: pointer to the hardware structure
 * @l_id: large action ID to fill it in
 * @num_acts: number of actions to hold with a large action entry
static enum ice_status
ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	enum ice_status status;
	/* A large action entry holds between 1 and ICE_MAX_LG_ACT actions */
	if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
		return ICE_ERR_PARAM;
	/* Allocate resource for large action */
	buf_len = sizeof(*sw_buf);
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
		return ICE_ERR_NO_MEMORY;
	sw_buf->num_elems = CPU_TO_LE16(1);
	/* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
	 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
	 * If num_acts is greater than 2, then use
	 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
	 * The num_acts cannot exceed 4. This was ensured at the
	 * beginning of the function.
	sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
	else if (num_acts == 2)
		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
		*l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
	ice_free(hw, sw_buf);
 * ice_add_mac_with_sw_marker - add filter with sw marker
 * @hw: pointer to the hardware structure
 * @f_info: filter info structure containing the MAC filter information
 * @sw_marker: sw marker to tag the Rx descriptor with
ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
	struct ice_fltr_mgmt_list_entry *m_entry;
	struct ice_fltr_list_entry fl_info;
	struct ice_sw_recipe *recp_list;
	struct LIST_HEAD_TYPE l_head;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status ret;
	/* Only VSI-forwarding MAC filters with a valid marker and a valid
	 * VSI handle can carry a software marker large action.
	 */
	if (f_info->fltr_act != ICE_FWD_TO_VSI)
		return ICE_ERR_PARAM;
	if (f_info->lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;
	if (sw_marker == ICE_INVAL_SW_MARKER_ID)
		return ICE_ERR_PARAM;
	if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
		return ICE_ERR_PARAM;
	f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
	/* Add filter if it doesn't exist so then the adding of large
	 * action always results in update
	INIT_LIST_HEAD(&l_head);
	fl_info.fltr_info = *f_info;
	LIST_ADD(&fl_info.list_entry, &l_head);
	/* Remember whether the rule pre-existed so cleanup on failure only
	 * removes a rule this call itself created.
	 */
	entry_exists = false;
	ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
			       hw->port_info->lport);
	if (ret == ICE_ERR_ALREADY_EXISTS)
		entry_exists = true;
	recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
	rule_lock = &recp_list->filt_rule_lock;
	ice_acquire_lock(rule_lock);
	/* Get the book keeping entry for the filter */
	m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
	/* If counter action was enabled for this rule then don't enable
	 * sw marker large action
	if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
		ret = ICE_ERR_PARAM;
	/* if same marker was added before */
	if (m_entry->sw_marker_id == sw_marker) {
		ret = ICE_ERR_ALREADY_EXISTS;
	/* Allocate a hardware table entry to hold large act. Three actions
	 * for marker based large action
	ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
	if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
	/* Update the switch rule to add the marker action */
	ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
		ice_release_lock(rule_lock);
	ice_release_lock(rule_lock);
	/* only remove entry if it did not exist previously */
		ret = ice_remove_mac(hw, &l_head);
 * ice_add_mac_with_counter - add filter with counter enabled
 * @hw: pointer to the hardware structure
 * @f_info: pointer to filter info structure containing the MAC filter
ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
	struct ice_fltr_mgmt_list_entry *m_entry;
	struct ice_fltr_list_entry fl_info;
	struct ice_sw_recipe *recp_list;
	struct LIST_HEAD_TYPE l_head;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status ret;
	/* Only VSI-forwarding MAC filters on a valid VSI may have a
	 * counter large action attached.
	 */
	if (f_info->fltr_act != ICE_FWD_TO_VSI)
		return ICE_ERR_PARAM;
	if (f_info->lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;
	if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
		return ICE_ERR_PARAM;
	f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
	recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
	/* Track pre-existence so failure cleanup only removes a rule this
	 * call created itself.
	 */
	entry_exist = false;
	rule_lock = &recp_list->filt_rule_lock;
	/* Add filter if it doesn't exist so then the adding of large
	 * action always results in update
	INIT_LIST_HEAD(&l_head);
	fl_info.fltr_info = *f_info;
	LIST_ADD(&fl_info.list_entry, &l_head);
	ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
			       hw->port_info->lport);
	if (ret == ICE_ERR_ALREADY_EXISTS)
	ice_acquire_lock(rule_lock);
	m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
		ret = ICE_ERR_BAD_PTR;
	/* Don't enable counter for a filter for which sw marker was enabled */
	if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
		ret = ICE_ERR_PARAM;
	/* If a counter was already enabled then don't need to add again */
	if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
		ret = ICE_ERR_ALREADY_EXISTS;
	/* Allocate a hardware table entry to VLAN counter */
	ret = ice_alloc_vlan_res_counter(hw, &counter_id);
	/* Allocate a hardware table entry to hold large act. Two actions for
	 * counter based large action
	ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
	if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
	/* Update the switch rule to add the counter action */
	ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
		ice_release_lock(rule_lock);
	ice_release_lock(rule_lock);
	/* only remove entry if it did not exist previously */
		ret = ice_remove_mac(hw, &l_head);
/* This is mapping table entry that maps every word within a given protocol
 * structure to the real byte offset as per the specification of that
 * for example dst address is 3 words in ethertype header and corresponding
 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
 * matching entry describing its field. This needs to be updated if new
 * structure is added to that union.
static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_MAC_IL,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_ETYPE_OL,		{ 0 } },
	{ ICE_VLAN_OFOS,	{ 0, 2 } },
	{ ICE_IPV4_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV4_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV6_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_IPV6_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_TCP_IL,		{ 0, 2 } },
	{ ICE_UDP_OF,		{ 0, 2 } },
	{ ICE_UDP_ILOS,		{ 0, 2 } },
	{ ICE_SCTP_IL,		{ 0, 2 } },
	/* For tunnel headers the offsets start at 8 (past the common
	 * UDP header words for VXLAN/GENEVE/VXLAN-GPE).
	 */
	{ ICE_VXLAN,		{ 8, 10, 12, 14 } },
	{ ICE_GENEVE,		{ 8, 10, 12, 14 } },
	{ ICE_VXLAN_GPE,	{ 8, 10, 12, 14 } },
	{ ICE_NVGRE,		{ 0, 2, 4, 6 } },
	{ ICE_GTP,		{ 8, 10, 12, 14, 16, 18, 20 } },
	{ ICE_PPPOE,		{ 0, 2, 4, 6 } },
/* The following table describes preferred grouping of recipes.
 * If a recipe that needs to be programmed is a superset or matches one of the
 * following combinations, then the recipe needs to be chained as per the
static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		ICE_MAC_OFOS_HW },
	{ ICE_MAC_IL,		ICE_MAC_IL_HW },
	{ ICE_ETYPE_OL,		ICE_ETYPE_OL_HW },
	{ ICE_VLAN_OFOS,	ICE_VLAN_OL_HW },
	{ ICE_IPV4_OFOS,	ICE_IPV4_OFOS_HW },
	{ ICE_IPV4_IL,		ICE_IPV4_IL_HW },
	{ ICE_IPV6_OFOS,	ICE_IPV6_OFOS_HW },
	{ ICE_IPV6_IL,		ICE_IPV6_IL_HW },
	{ ICE_TCP_IL,		ICE_TCP_IL_HW },
	{ ICE_UDP_OF,		ICE_UDP_OF_HW },
	{ ICE_UDP_ILOS,		ICE_UDP_ILOS_HW },
	{ ICE_SCTP_IL,		ICE_SCTP_IL_HW },
	/* UDP-based tunnels (and GTP) all share the outer-UDP HW
	 * protocol ID; NVGRE uses the GRE HW protocol ID.
	 */
	{ ICE_VXLAN,		ICE_UDP_OF_HW },
	{ ICE_GENEVE,		ICE_UDP_OF_HW },
	{ ICE_VXLAN_GPE,	ICE_UDP_OF_HW },
	{ ICE_NVGRE,		ICE_GRE_OF_HW },
	{ ICE_GTP,		ICE_UDP_OF_HW },
	{ ICE_PPPOE,		ICE_PPPOE_HW },
 * ice_find_recp - find a recipe
 * @hw: pointer to the hardware structure
 * @lkup_exts: extension sequence to match
 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
	bool refresh_required = true;
	struct ice_sw_recipe *recp;
	/* Walk through existing recipes to find a match */
	recp = hw->switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		/* If recipe was not created for this ID, in SW bookkeeping,
		 * check if FW has an entry for this recipe. If the FW has an
		 * entry update it in our SW bookkeeping and continue with the
		if (!recp[i].recp_created)
			if (ice_get_recp_frm_fw(hw,
						hw->switch_info->recp_list, i,
		/* Skip inverse action recipes */
		if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
		    ICE_AQ_RECIPE_ACT_INV_ACT)
		/* if number of words we are looking for match */
		if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
			struct ice_fv_word *a = lkup_exts->fv_words;
			struct ice_fv_word *b = recp[i].lkup_exts.fv_words;
			/* Order-insensitive comparison: every requested
			 * (prot_id, off) word must appear somewhere in the
			 * candidate recipe's word list.
			 */
			for (p = 0; p < lkup_exts->n_val_words; p++) {
				for (q = 0; q < recp[i].lkup_exts.n_val_words;
					if (a[p].off == b[q].off &&
					    a[p].prot_id == b[q].prot_id)
						/* Found the "p"th word in the
				/* After walking through all the words in the
				 * "i"th recipe if "p"th word was not found then
				 * this recipe is not what we are looking for.
				 * So break out from this loop and try the next
				if (q >= recp[i].lkup_exts.n_val_words) {
		/* If for "i"th recipe the found was never set to false
		 * then it means we found our match
				return i; /* Return the recipe ID */
	return ICE_MAX_NUM_RECIPES;
 * ice_prot_type_to_id - get protocol ID from protocol type
 * @type: protocol type
 * @id: pointer to variable that will receive the ID
 * Returns true if found, false otherwise
static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
	/* Linear scan of the static type -> HW protocol ID table */
	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
		if (ice_prot_id_tbl[i].type == type) {
			*id = ice_prot_id_tbl[i].protocol_id;
 * ice_fill_valid_words - count valid words
 * @rule: advanced rule with lookup information
 * @lkup_exts: byte offset extractions of the words that are valid
 * calculate valid words in a lookup rule using mask value
ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
		     struct ice_prot_lkup_ext *lkup_exts)
	u8 j, word, prot_id, ret_val;
	if (!ice_prot_type_to_id(rule->type, &prot_id))
	word = lkup_exts->n_val_words;
	/* A word is "valid" when the rule's mask has any bit set for it;
	 * each valid word is appended to lkup_exts with its protocol ID,
	 * byte offset and mask.
	 */
	for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
		if (((u16 *)&rule->m_u)[j] &&
		    rule->type < ARRAY_SIZE(ice_prot_ext)) {
			/* No more space to accommodate */
			if (word >= ICE_MAX_CHAIN_WORDS)
			lkup_exts->fv_words[word].off =
				ice_prot_ext[rule->type].offs[j];
			lkup_exts->fv_words[word].prot_id =
				ice_prot_id_tbl[rule->type].protocol_id;
			lkup_exts->field_mask[word] = ((u16 *)&rule->m_u)[j];
	/* Return the number of words added by this rule */
	ret_val = word - lkup_exts->n_val_words;
	lkup_exts->n_val_words = word;
 * ice_create_first_fit_recp_def - Create a recipe grouping
 * @hw: pointer to the hardware structure
 * @lkup_exts: an array of protocol header extractions
 * @rg_list: pointer to a list that stores new recipe groups
 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
 * Using first fit algorithm, take all the words that are still not done
 * and start grouping them in 4-word groups. Each group makes up one
static enum ice_status
ice_create_first_fit_recp_def(struct ice_hw *hw,
			      struct ice_prot_lkup_ext *lkup_exts,
			      struct LIST_HEAD_TYPE *rg_list,
	struct ice_pref_recipe_group *grp = NULL;
	/* Even a rule with zero pending words gets one (empty) group so a
	 * recipe entry exists for it.
	 */
	if (!lkup_exts->n_val_words) {
		struct ice_recp_grp_entry *entry;
		entry = (struct ice_recp_grp_entry *)
			ice_malloc(hw, sizeof(*entry));
			return ICE_ERR_NO_MEMORY;
		LIST_ADD(&entry->l_entry, rg_list);
		grp = &entry->r_group;
		grp->n_val_pairs = 0;
	/* Walk through every word in the rule to check if it is not done. If so
	 * then this word needs to be part of a new recipe.
	for (j = 0; j < lkup_exts->n_val_words; j++)
		if (!ice_is_bit_set(lkup_exts->done, j)) {
			/* Start a fresh group when none exists yet or the
			 * current one already holds ICE_NUM_WORDS_RECIPE
			 * words (first-fit packing).
			 */
			    grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
				struct ice_recp_grp_entry *entry;
				entry = (struct ice_recp_grp_entry *)
					ice_malloc(hw, sizeof(*entry));
					return ICE_ERR_NO_MEMORY;
				LIST_ADD(&entry->l_entry, rg_list);
				grp = &entry->r_group;
			grp->pairs[grp->n_val_pairs].prot_id =
				lkup_exts->fv_words[j].prot_id;
			grp->pairs[grp->n_val_pairs].off =
				lkup_exts->fv_words[j].off;
			grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
 * @hw: pointer to the hardware structure
 * @fv_list: field vector with the extraction sequence information
 * @rg_list: recipe groupings with protocol-offset pairs
 * Helper function to fill in the field vector indices for protocol-offset
 * pairs. These indexes are then ultimately programmed into a recipe.
static enum ice_status
ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
		       struct LIST_HEAD_TYPE *rg_list)
	struct ice_sw_fv_list_entry *fv;
	struct ice_recp_grp_entry *rg;
	struct ice_fv_word *fv_ext;
	if (LIST_EMPTY(fv_list))
	/* Only the first field vector in the list is used to resolve the
	 * extraction-word indices.
	 */
	fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
	fv_ext = fv->fv_ptr->ew;
	LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
		for (i = 0; i < rg->r_group.n_val_pairs; i++) {
			struct ice_fv_word *pr;
			pr = &rg->r_group.pairs[i];
			mask = rg->r_group.mask[i];
			/* Search the FV extraction words for a matching
			 * (prot_id, off) pair.
			 */
			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
				if (fv_ext[j].prot_id == pr->prot_id &&
				    fv_ext[j].off == pr->off) {
					/* Store index of field vector */
					/* Mask is given by caller as big
					 * endian, but sent to FW as little
					rg->fv_mask[i] = mask << 8 | mask >> 8;
			/* Protocol/offset could not be found, caller gave an
			return ICE_ERR_PARAM;
 * ice_find_free_recp_res_idx - find free result indexes for recipe
 * @hw: pointer to hardware structure
 * @profiles: bitmap of profiles that will be associated with the new recipe
 * @free_idx: pointer to variable to receive the free index bitmap
 * The algorithm used here is:
 * 1. When creating a new recipe, create a set P which contains all
 *    Profiles that will be associated with our new recipe
 * 2. For each Profile p in set P:
 *    a. Add all recipes associated with Profile p into set R
 *    b. Optional : PossibleIndexes &= profile[p].possibleIndexes
 *	[initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
 *	i. Or just assume they all have the same possible indexes:
 *		i.e., PossibleIndexes = 0x0000F00000000000
 * 3. For each Recipe r in set R:
 *    a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
 *    b. FreeIndexes = UsedIndexes ^ PossibleIndexes
 * FreeIndexes will contain the bits indicating the indexes free for use,
 * then the code needs to update the recipe[r].used_result_idx_bits to
 * indicate which indexes were selected for use by this recipe.
ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
			   ice_bitmap_t *free_idx)
	ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
	ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
	ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
	ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
	ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
	ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
	ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
	/* Start from "all indexes possible", then constrain per profile */
	for (count = 0; count < ICE_MAX_FV_WORDS; count++)
		ice_set_bit(count, possible_idx);
	/* For each profile we are going to associate the recipe with, add the
	 * recipes that are associated with that profile. This will give us
	 * the set of recipes that our recipe may collide with. Also, determine
	 * what possible result indexes are usable given this set of profiles.
	while (ICE_MAX_NUM_PROFILES >
	       (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
		ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
			      ICE_MAX_NUM_RECIPES);
		ice_and_bitmap(possible_idx, possible_idx,
			       hw->switch_info->prof_res_bm[bit],
	/* For each recipe that our new recipe may collide with, determine
	 * which indexes have been used.
	for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
		if (ice_is_bit_set(recipes, bit)) {
			ice_or_bitmap(used_idx, used_idx,
				      hw->switch_info->recp_list[bit].res_idxs,
	/* free = possible XOR used (used is a subset of possible) */
	ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
	/* return number of free indexes */
	while (ICE_MAX_FV_WORDS >
	       (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
 * @hw: pointer to hardware structure
 * @rm: recipe management list entry
 * @match_tun: if field vector index for tunnel needs to be programmed
 * @profiles: bitmap of profiles that will be associated.
static enum ice_status
ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
		  bool match_tun, ice_bitmap_t *profiles)
	ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	struct ice_aqc_recipe_data_elem *buf;
	struct ice_recp_grp_entry *entry;
	enum ice_status status;
	/* When more than one recipe are required, another recipe is needed to
	 * chain them together. Matching a tunnel metadata ID takes up one of
	 * the match fields in the chaining recipe reducing the number of
	 * chained recipes by one.
	/* check number of free result indices */
	ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
	free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
	ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
		  free_res_idx, rm->n_grp_count);
	if (rm->n_grp_count > 1) {
		if (rm->n_grp_count > free_res_idx)
			return ICE_ERR_MAX_LIMIT;
	tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
							    ICE_MAX_NUM_RECIPES,
		return ICE_ERR_NO_MEMORY;
	buf = (struct ice_aqc_recipe_data_elem *)
		ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
		status = ICE_ERR_NO_MEMORY;
	ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
	recipe_count = ICE_MAX_NUM_RECIPES;
	/* Read back the current recipe table from FW into tmp[] so a fresh
	 * template entry can be used as the base for each new recipe.
	 */
	status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
	if (status || recipe_count == 0)
	/* Allocate the recipe resources, and configure them according to the
	 * match fields from protocol headers and extracted field vectors.
	chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
	LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
		status = ice_alloc_recipe(hw, &entry->rid);
		/* Clear the result index of the located recipe, as this will be
		 * updated, if needed, later in the recipe creation process.
		tmp[0].content.result_indx = 0;
		buf[recps] = tmp[0];
		buf[recps].recipe_indx = (u8)entry->rid;
		/* if the recipe is a non-root recipe RID should be programmed
		 * as 0 for the rules to be applied correctly.
		buf[recps].content.rid = 0;
		ice_memset(&buf[recps].content.lkup_indx, 0,
			   sizeof(buf[recps].content.lkup_indx),
		/* All recipes use look-up index 0 to match switch ID. */
		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		buf[recps].content.mask[0] =
			CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
		/* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			buf[recps].content.lkup_indx[i] = 0x80;
			buf[recps].content.mask[i] = 0;
		/* Program the group's resolved FV indices/masks starting at
		 * lookup index 1 (index 0 is the switch ID above).
		 */
		for (i = 0; i < entry->r_group.n_val_pairs; i++) {
			buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
			buf[recps].content.mask[i + 1] =
				CPU_TO_LE16(entry->fv_mask[i]);
		if (rm->n_grp_count > 1) {
			/* Checks to see if there really is a valid result index
			if (chain_idx >= ICE_MAX_FV_WORDS) {
				ice_debug(hw, ICE_DBG_SW,
					  "No chain index available\n");
				status = ICE_ERR_MAX_LIMIT;
			entry->chain_idx = chain_idx;
			buf[recps].content.result_indx =
				ICE_AQ_RECIPE_RESULT_EN |
				((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
				 ICE_AQ_RECIPE_RESULT_DATA_M);
			/* Consume this result index and pick the next free
			 * one for the following group.
			 */
			ice_clear_bit(chain_idx, result_idx_bm);
			chain_idx = ice_find_first_bit(result_idx_bm,
		/* fill recipe dependencies */
		ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
				ICE_MAX_NUM_RECIPES);
		ice_set_bit(buf[recps].recipe_indx,
			    (ice_bitmap_t *)buf[recps].recipe_bitmap);
		buf[recps].content.act_ctrl_fwd_priority = rm->priority;
	if (rm->n_grp_count == 1) {
		/* Single-group case: the one recipe is also the root */
		rm->root_rid = buf[0].recipe_indx;
		ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
		buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
		if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
			ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
				   sizeof(buf[0].recipe_bitmap),
				   ICE_NONDMA_TO_NONDMA);
			status = ICE_ERR_BAD_PTR;
		/* Applicable only for ROOT_RECIPE, set the fwd_priority for
		 * the recipe which is getting created if specified
		 * by user. Usually any advanced switch filter, which results
		 * into new extraction sequence, ended up creating a new recipe
		 * of type ROOT and usually recipes are associated with profiles
		 * Switch rule referring newly created recipe, needs to have
		 * either/or 'fwd' or 'join' priority, otherwise switch rule
		 * evaluation will not happen correctly. In other words, if
		 * switch rule to be evaluated on priority basis, then recipe
		 * needs to have priority, otherwise it will be evaluated last.
		buf[0].content.act_ctrl_fwd_priority = rm->priority;
		struct ice_recp_grp_entry *last_chain_entry;
		/* Allocate the last recipe that will chain the outcomes of the
		 * other recipes together
		status = ice_alloc_recipe(hw, &rid);
		buf[recps].recipe_indx = (u8)rid;
		buf[recps].content.rid = (u8)rid;
		buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
		/* the new entry created should also be part of rg_list to
		 * make sure we have complete recipe
		last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
			sizeof(*last_chain_entry));
		if (!last_chain_entry) {
			status = ICE_ERR_NO_MEMORY;
		last_chain_entry->rid = rid;
		ice_memset(&buf[recps].content.lkup_indx, 0,
			   sizeof(buf[recps].content.lkup_indx),
		/* All recipes use look-up index 0 to match switch ID. */
		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		buf[recps].content.mask[0] =
			CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			buf[recps].content.lkup_indx[i] =
				ICE_AQ_RECIPE_LKUP_IGNORE;
			buf[recps].content.mask[i] = 0;
		/* update r_bitmap with the recp that is used for chaining */
		ice_set_bit(rid, rm->r_bitmap);
		/* this is the recipe that chains all the other recipes so it
		 * should not have a chaining ID to indicate the same
		last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
		/* The chaining recipe matches every sub-recipe's result
		 * (chain_idx) word with a full 0xFFFF mask.
		 */
		LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
			last_chain_entry->fv_idx[i] = entry->chain_idx;
			buf[recps].content.lkup_indx[i] = entry->chain_idx;
			buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
			ice_set_bit(entry->rid, rm->r_bitmap);
		LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
		if (sizeof(buf[recps].recipe_bitmap) >=
		    sizeof(rm->r_bitmap)) {
			ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
				   sizeof(buf[recps].recipe_bitmap),
				   ICE_NONDMA_TO_NONDMA);
			status = ICE_ERR_BAD_PTR;
		buf[recps].content.act_ctrl_fwd_priority = rm->priority;
		/* To differentiate among different UDP tunnels, a meta data ID
			buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
			buf[recps].content.mask[i] =
				CPU_TO_LE16(ICE_TUN_FLAG_MASK);
		rm->root_rid = (u8)rid;
	/* Program the assembled recipe buffer under the FW change lock */
	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
	status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
	ice_release_change_lock(hw);
	/* Every recipe that just got created add it to the recipe
	LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
		struct ice_switch_info *sw = hw->switch_info;
		bool is_root, idx_found = false;
		struct ice_sw_recipe *recp;
		u16 idx, buf_idx = 0;
		/* find buffer index for copying some data */
		for (idx = 0; idx < rm->n_grp_count; idx++)
			if (buf[idx].recipe_indx == entry->rid) {
			status = ICE_ERR_OUT_OF_RANGE;
		/* Mirror the programmed recipe into SW bookkeeping */
		recp = &sw->recp_list[entry->rid];
		is_root = (rm->root_rid == entry->rid);
		recp->is_root = is_root;
			recp->root_rid = entry->rid;
		recp->big_recp = (is_root && rm->n_grp_count > 1);
		ice_memcpy(&recp->ext_words, entry->r_group.pairs,
			   entry->r_group.n_val_pairs *
			   sizeof(struct ice_fv_word),
			   ICE_NONDMA_TO_NONDMA);
		ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
			   sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
		/* Copy non-result fv index values and masks to recipe. This
		 * call will also update the result recipe bitmask.
		ice_collect_result_idx(&buf[buf_idx], recp);
		/* for non-root recipes, also copy to the root, this allows
		 * easier matching of a complete chained recipe
			ice_collect_result_idx(&buf[buf_idx],
					       &sw->recp_list[rm->root_rid]);
		recp->n_ext_words = entry->r_group.n_val_pairs;
		recp->chain_idx = entry->chain_idx;
		recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
		recp->n_grp_count = rm->n_grp_count;
		recp->tun_type = rm->tun_type;
		recp->recp_created = true;
/* NOTE(review): this excerpt appears lossily sampled — embedded original line
 * numbers are non-contiguous, so statements/braces are missing. Code lines
 * below are left byte-identical; only comments are added.
 *
 * ice_create_recipe_group - packs the valid lookup words in @lkup_exts into
 * recipe group entries on rm->rg_list (via ice_create_first_fit_recp_def),
 * then records the total group count and copies the extraction words and
 * field masks into @rm for later programming.
 */
5625 * ice_create_recipe_group - creates recipe group
5626 * @hw: pointer to hardware structure
5627 * @rm: recipe management list entry
5628 * @lkup_exts: lookup elements
5630 static enum ice_status
5631 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5632 struct ice_prot_lkup_ext *lkup_exts)
5634 enum ice_status status;
5637 rm->n_grp_count = 0;
5639 /* Create recipes for words that are marked not done by packing them
5642 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5643 &rm->rg_list, &recp_count);
5645 rm->n_grp_count += recp_count;
5646 rm->n_ext_words = lkup_exts->n_val_words;
5647 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
5648 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
5649 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
5650 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
/* NOTE(review): lossily sampled excerpt (gapped line numbers); code lines
 * kept byte-identical, comments only added.
 *
 * ice_get_fv - allocates a per-lookup protocol-ID array, converts each
 * lkups[i].type to a HW protocol ID (failing with ICE_ERR_CFG on an unknown
 * type), then asks ice_get_sw_fv_list() for the field vectors that contain
 * all requested protocols, restricted to the profiles set in @bm. The
 * temporary prot_ids array is freed before returning.
 */
5657 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
5658 * @hw: pointer to hardware structure
5659 * @lkups: lookup elements or match criteria for the advanced recipe, one
5660 * structure per protocol header
5661 * @lkups_cnt: number of protocols
5662 * @bm: bitmap of field vectors to consider
5663 * @fv_list: pointer to a list that holds the returned field vectors
5665 static enum ice_status
5666 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5667 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
5669 enum ice_status status;
5676 prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
5678 return ICE_ERR_NO_MEMORY;
5680 for (i = 0; i < lkups_cnt; i++)
5681 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
5682 status = ICE_ERR_CFG;
5686 /* Find field vectors that include all specified protocol types */
5687 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
5690 ice_free(hw, prot_ids);
/* NOTE(review): lossily sampled excerpt (gapped line numbers); code lines
 * kept byte-identical, comments only added.
 *
 * ice_add_special_words - for tunneled rules (tun_type != ICE_NON_TUN),
 * appends one extra lookup word to @lkup_exts that matches the tunnel bit
 * in the packet metadata (prot_id ICE_META_DATA_ID_HW, offset derived from
 * ICE_TUN_FLAG_MDID, mask ICE_TUN_FLAG_MASK). Returns ICE_ERR_MAX_LIMIT
 * when the word table is already at ICE_MAX_CHAIN_WORDS.
 */
5695 * ice_add_special_words - Add words that are not protocols, such as metadata
5696 * @rinfo: other information regarding the rule e.g. priority and action info
5697 * @lkup_exts: lookup word structure
5699 static enum ice_status
5700 ice_add_special_words(struct ice_adv_rule_info *rinfo,
5701 struct ice_prot_lkup_ext *lkup_exts)
5703 /* If this is a tunneled packet, then add recipe index to match the
5704 * tunnel bit in the packet metadata flags.
5706 if (rinfo->tun_type != ICE_NON_TUN) {
5707 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
5708 u8 word = lkup_exts->n_val_words++;
5710 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5711 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID *
5713 lkup_exts->field_mask[word] = ICE_TUN_FLAG_MASK;
5715 return ICE_ERR_MAX_LIMIT;
/* NOTE(review): lossily sampled excerpt (gapped line numbers; the `break`
 * statements between cases are among the missing lines). Code kept
 * byte-identical, comments only added.
 *
 * ice_get_compat_fv_bitmap - zeroes @bm, then switches on rinfo->tun_type:
 * for generic tunnel classes it selects an ice_prof_type (NON_TUN, TUN_ALL,
 * TUN_UDP, TUN_GRE, TUN_PPPOE, or ALL) and resolves it to profile bits via
 * ice_get_sw_fv_bitmap(); for the ICE_SW_TUN_PROFID_* types it sets the
 * single corresponding profile bit directly.
 */
5722 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5723 * @hw: pointer to hardware structure
5724 * @rinfo: other information regarding the rule e.g. priority and action info
5725 * @bm: pointer to memory for returning the bitmap of field vectors
5728 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5731 enum ice_prof_type prof_type;
5733 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
5735 switch (rinfo->tun_type) {
5737 prof_type = ICE_PROF_NON_TUN;
5739 case ICE_ALL_TUNNELS:
5740 prof_type = ICE_PROF_TUN_ALL;
5742 case ICE_SW_TUN_VXLAN_GPE:
5743 case ICE_SW_TUN_GENEVE:
5744 case ICE_SW_TUN_VXLAN:
5745 case ICE_SW_TUN_UDP:
5746 case ICE_SW_TUN_GTP:
5747 prof_type = ICE_PROF_TUN_UDP;
5749 case ICE_SW_TUN_NVGRE:
5750 prof_type = ICE_PROF_TUN_GRE;
5752 case ICE_SW_TUN_PPPOE:
5753 prof_type = ICE_PROF_TUN_PPPOE;
5755 case ICE_SW_TUN_PROFID_IPV6_ESP:
5756 ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
5758 case ICE_SW_TUN_PROFID_IPV6_AH:
5759 ice_set_bit(ICE_PROFID_IPV6_AH, bm);
5761 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
5762 ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
5764 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
5765 ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
5767 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
5768 ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
5770 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
5771 ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
5773 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
5774 ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
5776 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
5777 ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
5779 case ICE_SW_TUN_AND_NON_TUN:
5781 prof_type = ICE_PROF_ALL;
5785 ice_get_sw_fv_bitmap(hw, prof_type, bm);
/* NOTE(review): lossily sampled excerpt — the switch opener, the `return
 * true` for the listed cases, and the default `return false` are among the
 * missing lines. Code kept byte-identical, comments only added.
 *
 * ice_is_prof_rule - predicate: per its header comment, returns whether
 * @type is one of the ICE_SW_TUN_PROFID_* tunnel types, i.e. a rule that
 * only requires a profile hit and no field-value match.
 */
5789 * ice_is_prof_rule - determine if rule type is a profile rule
5790 * @type: the rule type
5792 * if the rule type is a profile rule, that means that there no field value
5793 * match required, in this case just a profile hit is required.
5795 static bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
5798 case ICE_SW_TUN_PROFID_IPV6_ESP:
5799 case ICE_SW_TUN_PROFID_IPV6_AH:
5800 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
5801 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
5802 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
5803 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
5804 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
5805 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
/* NOTE(review): lossily sampled excerpt — many statements (error checks,
 * braces, some gotos and frees) are missing between the embedded line
 * numbers. Code kept byte-identical, comments only added.
 *
 * ice_add_adv_recipe - visible flow: validate lkups (reject types >=
 * ICE_PROTOCOL_LAST), fill lkup_exts from the lookups, allocate a working
 * ice_sw_recipe, compute the compatible profile bitmap, fetch matching
 * field vectors, group words into recipes, add the tunnel metadata word,
 * reuse an existing recipe via ice_find_recp() if one matches, otherwise
 * create one with ice_add_sw_recipe() and map it to each profile via the
 * recipe-to-profile AQ commands under the change lock; finally update the
 * global profile_to_recipe / recipe_to_profile tables, store lkup_exts in
 * the recipe list entry, and free the rg_list/fv_list bookkeeping.
 */
5815 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5816 * @hw: pointer to hardware structure
5817 * @lkups: lookup elements or match criteria for the advanced recipe, one
5818 * structure per protocol header
5819 * @lkups_cnt: number of protocols
5820 * @rinfo: other information regarding the rule e.g. priority and action info
5821 * @rid: return the recipe ID of the recipe created
5823 static enum ice_status
5824 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5825 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5827 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
5828 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5829 struct ice_prot_lkup_ext *lkup_exts;
5830 struct ice_recp_grp_entry *r_entry;
5831 struct ice_sw_fv_list_entry *fvit;
5832 struct ice_recp_grp_entry *r_tmp;
5833 struct ice_sw_fv_list_entry *tmp;
5834 enum ice_status status = ICE_SUCCESS;
5835 struct ice_sw_recipe *rm;
5836 bool match_tun = false;
5839 if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
5840 return ICE_ERR_PARAM;
5842 lkup_exts = (struct ice_prot_lkup_ext *)
5843 ice_malloc(hw, sizeof(*lkup_exts));
5845 return ICE_ERR_NO_MEMORY;
5847 /* Determine the number of words to be matched and if it exceeds a
5848 * recipe's restrictions
5850 for (i = 0; i < lkups_cnt; i++) {
5853 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5854 status = ICE_ERR_CFG;
5855 goto err_free_lkup_exts;
5858 count = ice_fill_valid_words(&lkups[i], lkup_exts);
5860 status = ICE_ERR_CFG;
5861 goto err_free_lkup_exts;
5865 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
5867 status = ICE_ERR_NO_MEMORY;
5868 goto err_free_lkup_exts;
5871 /* Get field vectors that contain fields extracted from all the protocol
5872 * headers being programmed.
5874 INIT_LIST_HEAD(&rm->fv_list);
5875 INIT_LIST_HEAD(&rm->rg_list);
5877 /* Get bitmap of field vectors (profiles) that are compatible with the
5878 * rule request; only these will be searched in the subsequent call to
5881 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
5883 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
5887 /* Group match words into recipes using preferred recipe grouping
5890 status = ice_create_recipe_group(hw, rm, lkup_exts);
5894 /* There is only profile for UDP tunnels. So, it is necessary to use a
5895 * metadata ID flag to differentiate different tunnel types. A separate
5896 * recipe needs to be used for the metadata.
5898 if ((rinfo->tun_type == ICE_SW_TUN_VXLAN_GPE ||
5899 rinfo->tun_type == ICE_SW_TUN_GENEVE ||
5900 rinfo->tun_type == ICE_SW_TUN_VXLAN) && rm->n_grp_count > 1)
5903 /* set the recipe priority if specified */
5904 rm->priority = (u8)rinfo->priority;
5906 /* Find offsets from the field vector. Pick the first one for all the
5909 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5913 /* An empty FV list means to use all the profiles returned in the
5916 if (LIST_EMPTY(&rm->fv_list)) {
5919 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++)
5920 if (ice_is_bit_set(fv_bitmap, j)) {
5921 struct ice_sw_fv_list_entry *fvl;
5923 fvl = (struct ice_sw_fv_list_entry *)
5924 ice_malloc(hw, sizeof(*fvl));
5928 fvl->profile_id = j;
5929 LIST_ADD(&fvl->list_entry, &rm->fv_list);
5933 /* get bitmap of all profiles the recipe will be associated with */
5934 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5935 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5937 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
5938 ice_set_bit((u16)fvit->profile_id, profiles);
5941 /* Create any special protocol/offset pairs, such as looking at tunnel
5942 * bits by extracting metadata
5944 status = ice_add_special_words(rinfo, lkup_exts);
5946 goto err_free_lkup_exts;
5948 /* Look for a recipe which matches our requested fv / mask list */
5949 *rid = ice_find_recp(hw, lkup_exts);
5950 if (*rid < ICE_MAX_NUM_RECIPES)
5951 /* Success if found a recipe that match the existing criteria */
5954 /* Recipe we need does not exist, add a recipe */
5955 status = ice_add_sw_recipe(hw, rm, match_tun, profiles);
5959 /* Associate all the recipes created with all the profiles in the
5960 * common field vector.
5962 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5964 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
5967 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5968 (u8 *)r_bitmap, NULL);
5972 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
5973 ICE_MAX_NUM_RECIPES);
5974 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5978 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5981 ice_release_change_lock(hw);
5986 /* Update profile to recipe bitmap array */
5987 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
5988 ICE_MAX_NUM_RECIPES);
5990 /* Update recipe to profile bitmap array */
5991 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
5992 if (ice_is_bit_set(r_bitmap, j))
5993 ice_set_bit((u16)fvit->profile_id,
5994 recipe_to_profile[j]);
5997 *rid = rm->root_rid;
5998 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
5999 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
6001 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
6002 ice_recp_grp_entry, l_entry) {
6003 LIST_DEL(&r_entry->l_entry);
6004 ice_free(hw, r_entry);
6007 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
6009 LIST_DEL(&fvit->list_entry);
6014 ice_free(hw, rm->root_buf);
6019 ice_free(hw, lkup_exts);
/* NOTE(review): lossily sampled excerpt — flag assignments inside the scan
 * loop (udp/tcp/ipv6/vlan/gre/pppoe_ipv6 = true) and several if/else lines
 * are missing. Code kept byte-identical, comments only added.
 *
 * ice_find_dummy_packet - scans @lkups to classify the rule (UDP vs TCP,
 * IPv6, VLAN, NVGRE via the IPv4 protocol field, PPPoE-carried IPv6, IPv6
 * ethertype), then selects the matching static dummy packet, its length
 * and its protocol-offset table based on @tun_type and the derived flags:
 * GTP, PPPoE (v4/v6), GRE (TCP/UDP inner), UDP tunnels (VXLAN/GENEVE/GPE),
 * and finally the plain UDP/TCP x IPv4/IPv6 x VLAN combinations.
 */
6025 * ice_find_dummy_packet - find dummy packet by tunnel type
6027 * @lkups: lookup elements or match criteria for the advanced recipe, one
6028 * structure per protocol header
6029 * @lkups_cnt: number of protocols
6030 * @tun_type: tunnel type from the match criteria
6031 * @pkt: dummy packet to fill according to filter match criteria
6032 * @pkt_len: packet length of dummy packet
6033 * @offsets: pointer to receive the pointer to the offsets for the packet
6036 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6037 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
6039 const struct ice_dummy_pkt_offsets **offsets)
6041 bool tcp = false, udp = false, ipv6 = false, vlan = false;
6045 for (i = 0; i < lkups_cnt; i++) {
6046 if (lkups[i].type == ICE_UDP_ILOS)
6048 else if (lkups[i].type == ICE_TCP_IL)
6050 else if (lkups[i].type == ICE_IPV6_OFOS)
6052 else if (lkups[i].type == ICE_VLAN_OFOS)
6054 else if (lkups[i].type == ICE_IPV4_OFOS &&
6055 lkups[i].h_u.ipv4_hdr.protocol ==
6056 ICE_IPV4_NVGRE_PROTO_ID &&
6057 lkups[i].m_u.ipv4_hdr.protocol ==
6060 else if (lkups[i].type == ICE_PPPOE &&
6061 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
6062 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
6063 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
6066 else if (lkups[i].type == ICE_ETYPE_OL &&
6067 lkups[i].h_u.ethertype.ethtype_id ==
6068 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
6069 lkups[i].m_u.ethertype.ethtype_id ==
6074 if (tun_type == ICE_SW_TUN_GTP) {
6075 *pkt = dummy_udp_gtp_packet;
6076 *pkt_len = sizeof(dummy_udp_gtp_packet);
6077 *offsets = dummy_udp_gtp_packet_offsets;
6080 if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
6081 *pkt = dummy_pppoe_ipv6_packet;
6082 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
6083 *offsets = dummy_pppoe_packet_offsets;
6085 } else if (tun_type == ICE_SW_TUN_PPPOE) {
6086 *pkt = dummy_pppoe_ipv4_packet;
6087 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
6088 *offsets = dummy_pppoe_packet_offsets;
6092 if (tun_type == ICE_ALL_TUNNELS) {
6093 *pkt = dummy_gre_udp_packet;
6094 *pkt_len = sizeof(dummy_gre_udp_packet);
6095 *offsets = dummy_gre_udp_packet_offsets;
6099 if (tun_type == ICE_SW_TUN_NVGRE || gre) {
6101 *pkt = dummy_gre_tcp_packet;
6102 *pkt_len = sizeof(dummy_gre_tcp_packet);
6103 *offsets = dummy_gre_tcp_packet_offsets;
6107 *pkt = dummy_gre_udp_packet;
6108 *pkt_len = sizeof(dummy_gre_udp_packet);
6109 *offsets = dummy_gre_udp_packet_offsets;
6113 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
6114 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP) {
6116 *pkt = dummy_udp_tun_tcp_packet;
6117 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
6118 *offsets = dummy_udp_tun_tcp_packet_offsets;
6122 *pkt = dummy_udp_tun_udp_packet;
6123 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
6124 *offsets = dummy_udp_tun_udp_packet_offsets;
6130 *pkt = dummy_vlan_udp_packet;
6131 *pkt_len = sizeof(dummy_vlan_udp_packet);
6132 *offsets = dummy_vlan_udp_packet_offsets;
6135 *pkt = dummy_udp_packet;
6136 *pkt_len = sizeof(dummy_udp_packet);
6137 *offsets = dummy_udp_packet_offsets;
6139 } else if (udp && ipv6) {
6141 *pkt = dummy_vlan_udp_ipv6_packet;
6142 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
6143 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
6146 *pkt = dummy_udp_ipv6_packet;
6147 *pkt_len = sizeof(dummy_udp_ipv6_packet);
6148 *offsets = dummy_udp_ipv6_packet_offsets;
6150 } else if ((tcp && ipv6) || ipv6) {
6152 *pkt = dummy_vlan_tcp_ipv6_packet;
6153 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
6154 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
6157 *pkt = dummy_tcp_ipv6_packet;
6158 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
6159 *offsets = dummy_tcp_ipv6_packet_offsets;
6164 *pkt = dummy_vlan_tcp_packet;
6165 *pkt_len = sizeof(dummy_vlan_tcp_packet);
6166 *offsets = dummy_vlan_tcp_packet_offsets;
6168 *pkt = dummy_tcp_packet;
6169 *pkt_len = sizeof(dummy_tcp_packet);
6170 *offsets = dummy_tcp_packet_offsets;
/* NOTE(review): lossily sampled excerpt — several case labels and `break`s
 * in the length switch are missing. Code kept byte-identical, comments only
 * added.
 *
 * ice_fill_adv_dummy_packet - copies @dummy_pkt into the s_rule header
 * buffer, then for each lookup finds its header start via @offsets, picks
 * the header length from the lookup type, and merges the caller's header
 * values into the packet word-by-word (u16), touching only bits set in the
 * corresponding mask word. Finally stores pkt_len in hdr_len (LE16).
 * Returns ICE_ERR_PARAM for an unknown type or an offset not in @offsets.
 */
6175 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
6177 * @lkups: lookup elements or match criteria for the advanced recipe, one
6178 * structure per protocol header
6179 * @lkups_cnt: number of protocols
6180 * @s_rule: stores rule information from the match criteria
6181 * @dummy_pkt: dummy packet to fill according to filter match criteria
6182 * @pkt_len: packet length of dummy packet
6183 * @offsets: offset info for the dummy packet
6185 static enum ice_status
6186 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6187 struct ice_aqc_sw_rules_elem *s_rule,
6188 const u8 *dummy_pkt, u16 pkt_len,
6189 const struct ice_dummy_pkt_offsets *offsets)
6194 /* Start with a packet with a pre-defined/dummy content. Then, fill
6195 * in the header values to be looked up or matched.
6197 pkt = s_rule->pdata.lkup_tx_rx.hdr;
6199 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
6201 for (i = 0; i < lkups_cnt; i++) {
6202 enum ice_protocol_type type;
6203 u16 offset = 0, len = 0, j;
6206 /* find the start of this layer; it should be found since this
6207 * was already checked when search for the dummy packet
6209 type = lkups[i].type;
6210 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
6211 if (type == offsets[j].type) {
6212 offset = offsets[j].offset;
6217 /* this should never happen in a correct calling sequence */
6219 return ICE_ERR_PARAM;
6221 switch (lkups[i].type) {
6224 len = sizeof(struct ice_ether_hdr);
6227 len = sizeof(struct ice_ethtype_hdr);
6230 len = sizeof(struct ice_vlan_hdr);
6234 len = sizeof(struct ice_ipv4_hdr);
6238 len = sizeof(struct ice_ipv6_hdr);
6243 len = sizeof(struct ice_l4_hdr);
6246 len = sizeof(struct ice_sctp_hdr);
6249 len = sizeof(struct ice_nvgre);
6254 len = sizeof(struct ice_udp_tnl_hdr);
6258 len = sizeof(struct ice_udp_gtp_hdr);
6261 len = sizeof(struct ice_pppoe_hdr);
6264 return ICE_ERR_PARAM;
6267 /* the length should be a word multiple */
6268 if (len % ICE_BYTES_PER_WORD)
6271 /* We have the offset to the header start, the length, the
6272 * caller's header values and mask. Use this information to
6273 * copy the data into the dummy packet appropriately based on
6274 * the mask. Note that we need to only write the bits as
6275 * indicated by the mask to make sure we don't improperly write
6276 * over any significant packet data.
6278 for (j = 0; j < len / sizeof(u16); j++)
6279 if (((u16 *)&lkups[i].m_u)[j])
6280 ((u16 *)(pkt + offset))[j] =
6281 (((u16 *)(pkt + offset))[j] &
6282 ~((u16 *)&lkups[i].m_u)[j]) |
6283 (((u16 *)&lkups[i].h_u)[j] &
6284 ((u16 *)&lkups[i].m_u)[j]);
6287 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
/* NOTE(review): lossily sampled excerpt (gapped line numbers); code kept
 * byte-identical, comments only added.
 *
 * ice_fill_adv_packet_tun - for VXLAN-family or GENEVE tunnel rules, looks
 * up the currently open tunnel UDP port (ice_get_open_tunnel_port) and
 * writes it, big-endian, into the destination port of the outer UDP header
 * (the ICE_UDP_OF entry in @offsets) of the dummy packet @pkt. Other
 * tunnel types need no fixup.
 */
6293 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
6294 * @hw: pointer to the hardware structure
6295 * @tun_type: tunnel type
6296 * @pkt: dummy packet to fill in
6297 * @offsets: offset info for the dummy packet
6299 static enum ice_status
6300 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
6301 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
6306 case ICE_SW_TUN_AND_NON_TUN:
6307 case ICE_SW_TUN_VXLAN_GPE:
6308 case ICE_SW_TUN_VXLAN:
6309 case ICE_SW_TUN_UDP:
6310 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
6314 case ICE_SW_TUN_GENEVE:
6315 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
6320 /* Nothing needs to be done for this tunnel type */
6324 /* Find the outer UDP protocol header and insert the port number */
6325 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
6326 if (offsets[i].type == ICE_UDP_OF) {
6327 struct ice_l4_hdr *hdr;
6330 offset = offsets[i].offset;
6331 hdr = (struct ice_l4_hdr *)&pkt[offset];
6332 hdr->dst_port = CPU_TO_BE16(open_port);
/* NOTE(review): lossily sampled excerpt — the `continue`, the memcmp size
 * argument, `break`, and the `return list_itr` / `return NULL` lines are
 * missing. Code kept byte-identical, comments only added.
 *
 * ice_find_adv_rule_entry - walks recp_list[recp_id].filt_rules and (per
 * its header comment) returns the entry whose lookup count and lookup
 * contents match @lkups and whose sw_act.flag and tun_type match @rinfo;
 * otherwise no entry is returned.
 */
6342 * ice_find_adv_rule_entry - Search a rule entry
6343 * @hw: pointer to the hardware structure
6344 * @lkups: lookup elements or match criteria for the advanced recipe, one
6345 * structure per protocol header
6346 * @lkups_cnt: number of protocols
6347 * @recp_id: recipe ID for which we are finding the rule
6348 * @rinfo: other information regarding the rule e.g. priority and action info
6350 * Helper function to search for a given advance rule entry
6351 * Returns pointer to entry storing the rule if found
6353 static struct ice_adv_fltr_mgmt_list_entry *
6354 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6355 u16 lkups_cnt, u16 recp_id,
6356 struct ice_adv_rule_info *rinfo)
6358 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6359 struct ice_switch_info *sw = hw->switch_info;
6362 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
6363 ice_adv_fltr_mgmt_list_entry, list_entry) {
6364 bool lkups_matched = true;
6366 if (lkups_cnt != list_itr->lkups_cnt)
6368 for (i = 0; i < list_itr->lkups_cnt; i++)
6369 if (memcmp(&list_itr->lkups[i], &lkups[i],
6371 lkups_matched = false;
6374 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
6375 rinfo->tun_type == list_itr->rule_info.tun_type &&
/* NOTE(review): lossily sampled excerpt — error checks, some returns, and
 * closing braces are missing between the embedded line numbers. Code kept
 * byte-identical, comments only added.
 *
 * ice_adv_add_update_vsi_list - visible flow: rejects unsupported action
 * combinations (queue/drop actions mixed with VSI forwarding); when the
 * entry has a single VSI and no VSI list, creates a 2-entry VSI list from
 * the current and new VSI handles, rewrites the existing switch rule to
 * ICE_FWD_TO_VSI_LIST, and records the list map; otherwise adds the new
 * VSI to the already-existing list via ice_update_vsi_list_rule() and sets
 * its bit in the vsi_map. vsi_count is incremented on success.
 */
6383 * ice_adv_add_update_vsi_list
6384 * @hw: pointer to the hardware structure
6385 * @m_entry: pointer to current adv filter management list entry
6386 * @cur_fltr: filter information from the book keeping entry
6387 * @new_fltr: filter information with the new VSI to be added
6389 * Call AQ command to add or update previously created VSI list with new VSI.
6391 * Helper function to do book keeping associated with adding filter information
6392 * The algorithm to do the booking keeping is described below :
6393 * When a VSI needs to subscribe to a given advanced filter
6394 * if only one VSI has been added till now
6395 * Allocate a new VSI list and add two VSIs
6396 * to this list using switch rule command
6397 * Update the previously created switch rule with the
6398 * newly created VSI list ID
6399 * if a VSI list was previously created
6400 * Add the new VSI to the previously created VSI list set
6401 * using the update switch rule command
6403 static enum ice_status
6404 ice_adv_add_update_vsi_list(struct ice_hw *hw,
6405 struct ice_adv_fltr_mgmt_list_entry *m_entry,
6406 struct ice_adv_rule_info *cur_fltr,
6407 struct ice_adv_rule_info *new_fltr)
6409 enum ice_status status;
6410 u16 vsi_list_id = 0;
6412 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6413 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6414 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
6415 return ICE_ERR_NOT_IMPL;
6417 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6418 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
6419 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6420 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
6421 return ICE_ERR_NOT_IMPL;
6423 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
6424 /* Only one entry existed in the mapping and it was not already
6425 * a part of a VSI list. So, create a VSI list with the old and
6428 struct ice_fltr_info tmp_fltr;
6429 u16 vsi_handle_arr[2];
6431 /* A rule already exists with the new VSI being added */
6432 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
6433 new_fltr->sw_act.fwd_id.hw_vsi_id)
6434 return ICE_ERR_ALREADY_EXISTS;
6436 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
6437 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
6438 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
6444 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
6445 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
6446 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
6447 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
6448 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
6450 /* Update the previous switch rule of "forward to VSI" to
6453 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6457 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
6458 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
6459 m_entry->vsi_list_info =
6460 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
6463 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
6465 if (!m_entry->vsi_list_info)
6468 /* A rule already exists with the new VSI being added */
6469 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
6472 /* Update the previously created VSI list set with
6473 * the new VSI ID passed in
6475 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
6477 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
6479 ice_aqc_opc_update_sw_rules,
6481 /* update VSI list mapping info with new VSI ID */
6483 ice_set_bit(vsi_handle,
6484 m_entry->vsi_list_info->vsi_map);
6487 m_entry->vsi_count++;
/* NOTE(review): lossily sampled excerpt — numerous statements (variable
 * declarations for act/word_cnt/q_rgn/prof_rule, error checks, breaks,
 * frees, the RX/TX branch condition) are missing between the embedded line
 * numbers. Code kept byte-identical, comments only added.
 *
 * ice_add_adv_rule - visible flow: lazily initializes the profile-to-result
 * bitmap, validates the lookup word count against ICE_MAX_CHAIN_WORDS,
 * selects a dummy packet, validates the filter action and VSI handle,
 * creates/finds the recipe (ice_add_adv_recipe), and if the rule already
 * exists defers to ice_adv_add_update_vsi_list; otherwise allocates an
 * s_rule buffer, encodes the action bits (forward-to-VSI / queue / queue
 * group / drop), sets LOOKUP_RX or LOOKUP_TX with the matching source,
 * fills the dummy packet (plus the tunnel UDP port when tunneled),
 * programs the rule via ice_aq_sw_rules, and records it on the recipe's
 * filt_rules bookkeeping list, returning IDs through @added_entry.
 * On error the adv_fltr entry and s_rule buffer are freed.
 */
6492 * ice_add_adv_rule - helper function to create an advanced switch rule
6493 * @hw: pointer to the hardware structure
6494 * @lkups: information on the words that needs to be looked up. All words
6495 * together makes one recipe
6496 * @lkups_cnt: num of entries in the lkups array
6497 * @rinfo: other information related to the rule that needs to be programmed
6498 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
6499 * ignored is case of error.
6501 * This function can program only 1 rule at a time. The lkups is used to
6502 * describe the all the words that forms the "lookup" portion of the recipe.
6503 * These words can span multiple protocols. Callers to this function need to
6504 * pass in a list of protocol headers with lookup information along and mask
6505 * that determines which words are valid from the given protocol header.
6506 * rinfo describes other information related to this rule such as forwarding
6507 * IDs, priority of this rule, etc.
6510 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6511 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
6512 struct ice_rule_query_data *added_entry)
6514 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
6515 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
6516 const struct ice_dummy_pkt_offsets *pkt_offsets;
6517 struct ice_aqc_sw_rules_elem *s_rule = NULL;
6518 struct LIST_HEAD_TYPE *rule_head;
6519 struct ice_switch_info *sw;
6520 enum ice_status status;
6521 const u8 *pkt = NULL;
6527 /* Initialize profile to result index bitmap */
6528 if (!hw->switch_info->prof_res_bm_init) {
6529 hw->switch_info->prof_res_bm_init = 1;
6530 ice_init_prof_result_bm(hw);
6533 prof_rule = ice_is_prof_rule(rinfo->tun_type);
6534 if (!prof_rule && !lkups_cnt)
6535 return ICE_ERR_PARAM;
6537 /* get # of words we need to match */
6539 for (i = 0; i < lkups_cnt; i++) {
6542 ptr = (u16 *)&lkups[i].m_u;
6543 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
6549 if (word_cnt > ICE_MAX_CHAIN_WORDS)
6550 return ICE_ERR_PARAM;
6552 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
6553 return ICE_ERR_PARAM;
6556 /* make sure that we can locate a dummy packet */
6557 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
6560 status = ICE_ERR_PARAM;
6561 goto err_ice_add_adv_rule;
6564 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6565 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
6566 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6567 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
6570 vsi_handle = rinfo->sw_act.vsi_handle;
6571 if (!ice_is_vsi_valid(hw, vsi_handle))
6572 return ICE_ERR_PARAM;
6574 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6575 rinfo->sw_act.fwd_id.hw_vsi_id =
6576 ice_get_hw_vsi_num(hw, vsi_handle);
6577 if (rinfo->sw_act.flag & ICE_FLTR_TX)
6578 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
6580 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
6583 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6585 /* we have to add VSI to VSI_LIST and increment vsi_count.
6586 * Also Update VSI list so that we can change forwarding rule
6587 * if the rule already exists, we will check if it exists with
6588 * same vsi_id, if not then add it to the VSI list if it already
6589 * exists if not then create a VSI list and add the existing VSI
6590 * ID and the new VSI ID to the list
6591 * We will add that VSI to the list
6593 status = ice_adv_add_update_vsi_list(hw, m_entry,
6594 &m_entry->rule_info,
6597 added_entry->rid = rid;
6598 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
6599 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
6603 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
6604 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
6606 return ICE_ERR_NO_MEMORY;
6607 act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
6608 switch (rinfo->sw_act.fltr_act) {
6609 case ICE_FWD_TO_VSI:
6610 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
6611 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
6612 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
6615 act |= ICE_SINGLE_ACT_TO_Q;
6616 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6617 ICE_SINGLE_ACT_Q_INDEX_M;
6619 case ICE_FWD_TO_QGRP:
6620 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
6621 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
6622 act |= ICE_SINGLE_ACT_TO_Q;
6623 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6624 ICE_SINGLE_ACT_Q_INDEX_M;
6625 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
6626 ICE_SINGLE_ACT_Q_REGION_M;
6628 case ICE_DROP_PACKET:
6629 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
6630 ICE_SINGLE_ACT_VALID_BIT;
6633 status = ICE_ERR_CFG;
6634 goto err_ice_add_adv_rule;
6637 /* set the rule LOOKUP type based on caller specified 'RX'
6638 * instead of hardcoding it to be either LOOKUP_TX/RX
6640 * for 'RX' set the source to be the port number
6641 * for 'TX' set the source to be the source HW VSI number (determined
6645 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
6646 s_rule->pdata.lkup_tx_rx.src =
6647 CPU_TO_LE16(hw->port_info->lport);
6649 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
6650 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
6653 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
6654 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
6656 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
6657 pkt_len, pkt_offsets);
6659 goto err_ice_add_adv_rule;
6661 if (rinfo->tun_type != ICE_NON_TUN &&
6662 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
6663 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
6664 s_rule->pdata.lkup_tx_rx.hdr,
6667 goto err_ice_add_adv_rule;
6670 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6671 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
6674 goto err_ice_add_adv_rule;
6675 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
6676 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
6678 status = ICE_ERR_NO_MEMORY;
6679 goto err_ice_add_adv_rule;
6682 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
6683 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
6684 ICE_NONDMA_TO_NONDMA);
6685 if (!adv_fltr->lkups && !prof_rule) {
6686 status = ICE_ERR_NO_MEMORY;
6687 goto err_ice_add_adv_rule;
6690 adv_fltr->lkups_cnt = lkups_cnt;
6691 adv_fltr->rule_info = *rinfo;
6692 adv_fltr->rule_info.fltr_rule_id =
6693 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6694 sw = hw->switch_info;
6695 sw->recp_list[rid].adv_rule = true;
6696 rule_head = &sw->recp_list[rid].filt_rules;
6698 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6699 adv_fltr->vsi_count = 1;
6701 /* Add rule entry to book keeping list */
6702 LIST_ADD(&adv_fltr->list_entry, rule_head);
6704 added_entry->rid = rid;
6705 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
6706 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
6708 err_ice_add_adv_rule:
6709 if (status && adv_fltr) {
6710 ice_free(hw, adv_fltr->lkups);
6711 ice_free(hw, adv_fltr);
6714 ice_free(hw, s_rule);
/* NOTE(review): lossily sampled excerpt — some error checks, the final
 * return, and closing braces are missing between the embedded line numbers.
 * Code kept byte-identical, comments only added.
 *
 * ice_adv_rem_update_vsi_list - visible flow: validates that the rule
 * forwards to a VSI list and that @vsi_handle is actually in the list,
 * removes it via ice_update_vsi_list_rule() and clears its vsi_map bit;
 * if exactly one VSI remains, finds it, empties the list, rewrites the
 * switch rule back to plain ICE_FWD_TO_VSI targeting the remaining VSI,
 * removes the now-unused VSI list rule, and frees the list map entry.
 */
6720 * ice_adv_rem_update_vsi_list
6721 * @hw: pointer to the hardware structure
6722 * @vsi_handle: VSI handle of the VSI to remove
6723 * @fm_list: filter management entry for which the VSI list management needs to
6726 static enum ice_status
6727 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
6728 struct ice_adv_fltr_mgmt_list_entry *fm_list)
6730 struct ice_vsi_list_map_info *vsi_list_info;
6731 enum ice_sw_lkup_type lkup_type;
6732 enum ice_status status;
6735 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
6736 fm_list->vsi_count == 0)
6737 return ICE_ERR_PARAM;
6739 /* A rule with the VSI being removed does not exist */
6740 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
6741 return ICE_ERR_DOES_NOT_EXIST;
6743 lkup_type = ICE_SW_LKUP_LAST;
6744 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
6745 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
6746 ice_aqc_opc_update_sw_rules,
6751 fm_list->vsi_count--;
6752 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
6753 vsi_list_info = fm_list->vsi_list_info;
6754 if (fm_list->vsi_count == 1) {
6755 struct ice_fltr_info tmp_fltr;
6758 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
6760 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
6761 return ICE_ERR_OUT_OF_RANGE;
6763 /* Make sure VSI list is empty before removing it below */
6764 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
6766 ice_aqc_opc_update_sw_rules,
6771 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
6772 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
6773 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
6774 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6775 tmp_fltr.fwd_id.hw_vsi_id =
6776 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6777 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
6778 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6780 /* Update the previous switch rule of "MAC forward to VSI" to
6781 * "MAC fwd to VSI list"
6783 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6785 ice_debug(hw, ICE_DBG_SW,
6786 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
6787 tmp_fltr.fwd_id.hw_vsi_id, status);
6791 /* Remove the VSI list since it is no longer used */
6792 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
6794 ice_debug(hw, ICE_DBG_SW,
6795 "Failed to remove VSI list %d, error %d\n",
6796 vsi_list_id, status);
6800 LIST_DEL(&vsi_list_info->list_entry);
6801 ice_free(hw, vsi_list_info);
6802 fm_list->vsi_list_info = NULL;
6809 * ice_rem_adv_rule - removes existing advanced switch rule
6810 * @hw: pointer to the hardware structure
6811 * @lkups: information on the words that needs to be looked up. All words
6812 * together makes one recipe
6813 * @lkups_cnt: num of entries in the lkups array
6814 * @rinfo: Its the pointer to the rule information for the rule
6816 * This function can be used to remove 1 rule at a time. The lkups is
6817 * used to describe all the words that forms the "lookup" portion of the
6818 * rule. These words can span multiple protocols. Callers to this function
6819 * need to pass in a list of protocol headers with lookup information along
6820 * and mask that determines which words are valid from the given protocol
6821 * header. rinfo describes other information related to this rule such as
6822 * forwarding IDs, priority of this rule, etc.
 *
 * NOTE(review): embedded source line numbers are not contiguous — the
 * return type line, several continue/early-exit statements, closing braces
 * and the final return are elided from this view.
6825 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6826 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
6828 struct ice_adv_fltr_mgmt_list_entry *list_elem;
6829 struct ice_prot_lkup_ext lkup_exts;
6830 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6831 enum ice_status status = ICE_SUCCESS;
6832 bool remove_rule = false;
6833 u16 i, rid, vsi_handle;
 /* Rebuild the protocol/offset extraction words from the caller's
  * lookups so the matching recipe can be located.
  */
6835 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
6836 for (i = 0; i < lkups_cnt; i++) {
6839 if (lkups[i].type >= ICE_PROTOCOL_LAST)
6842 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
6847 /* Create any special protocol/offset pairs, such as looking at tunnel
6848 * bits by extracting metadata
6850 status = ice_add_special_words(rinfo, &lkup_exts);
6854 rid = ice_find_recp(hw, &lkup_exts);
6855 /* If did not find a recipe that match the existing criteria */
6856 if (rid == ICE_MAX_NUM_RECIPES)
6857 return ICE_ERR_PARAM;
6859 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
6860 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6861 /* the rule is already removed */
 /* Decide between removing the whole rule or just detaching one VSI
  * from the rule's VSI list, under the per-recipe filter lock.
  */
6864 ice_acquire_lock(rule_lock);
6865 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
6867 } else if (list_elem->vsi_count > 1) {
 /* Other VSIs still use this rule — drop list ref and detach
  * only the requested VSI; the rule itself stays.
  */
6868 list_elem->vsi_list_info->ref_cnt--;
6869 remove_rule = false;
6870 vsi_handle = rinfo->sw_act.vsi_handle;
6871 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6873 vsi_handle = rinfo->sw_act.vsi_handle;
6874 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6876 ice_release_lock(rule_lock);
6879 if (list_elem->vsi_count == 0)
6882 ice_release_lock(rule_lock);
6884 struct ice_aqc_sw_rules_elem *s_rule;
 /* Removal only needs the rule index, so a no-header-sized
  * rule buffer with act = 0 and hdr_len = 0 is sufficient.
  */
6887 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
6889 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
6892 return ICE_ERR_NO_MEMORY;
6893 s_rule->pdata.lkup_tx_rx.act = 0;
6894 s_rule->pdata.lkup_tx_rx.index =
6895 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
6896 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
6897 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6899 ice_aqc_opc_remove_sw_rules, NULL);
6900 if (status == ICE_SUCCESS) {
 /* HW accepted the removal — drop the SW bookkeeping too */
6901 ice_acquire_lock(rule_lock);
6902 LIST_DEL(&list_elem->list_entry);
6903 ice_free(hw, list_elem->lkups);
6904 ice_free(hw, list_elem);
6905 ice_release_lock(rule_lock);
6907 ice_free(hw, s_rule);
6913 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6914 * @hw: pointer to the hardware structure
6915 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6917 * This function is used to remove 1 rule at a time. The removal is based on
6918 * the remove_entry parameter. This function will remove rule for a given
6919 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
 *
 * Looks the rule up by fltr_rule_id in the recipe's filter list and
 * delegates the actual removal to ice_rem_adv_rule() with the stored
 * lookups. Returns ICE_ERR_PARAM when the recipe was never created or no
 * rule with the given ID exists.
 * NOTE(review): return-type line and some braces are elided from this
 * listing (embedded line numbers are non-contiguous).
6922 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6923 struct ice_rule_query_data *remove_entry)
6925 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6926 struct LIST_HEAD_TYPE *list_head;
6927 struct ice_adv_rule_info rinfo;
6928 struct ice_switch_info *sw;
6930 sw = hw->switch_info;
 /* A rule can only exist under a recipe that was actually created */
6931 if (!sw->recp_list[remove_entry->rid].recp_created)
6932 return ICE_ERR_PARAM;
6933 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
6934 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
6936 if (list_itr->rule_info.fltr_rule_id ==
6937 remove_entry->rule_id) {
 /* Copy rule info so the caller-requested VSI handle can
  * be substituted without mutating the stored entry.
  */
6938 rinfo = list_itr->rule_info;
6939 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6940 return ice_rem_adv_rule(hw, list_itr->lkups,
6941 list_itr->lkups_cnt, &rinfo);
 /* No matching rule_id found under this recipe */
6944 return ICE_ERR_PARAM;
6948 * ice_rem_adv_for_vsi - removes existing advanced switch rules for a
6950 * @hw: pointer to the hardware structure
6951 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6953 * This function is used to remove all the rules for a given VSI and as soon
6954 * as removing a rule fails, it will return immediately with the error code,
6955 * else it will return ICE_SUCCESS
 *
 * NOTE(review): embedded line numbers are non-contiguous — continue
 * statements after the recp_created/adv_rule checks, the use of map_info /
 * vsi_list_id in the membership test, and the function's closing lines are
 * elided from this view.
6958 ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6960 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6961 struct ice_vsi_list_map_info *map_info;
6962 struct LIST_HEAD_TYPE *list_head;
6963 struct ice_adv_rule_info rinfo;
6964 struct ice_switch_info *sw;
6965 enum ice_status status;
6966 u16 vsi_list_id = 0;
6969 sw = hw->switch_info;
 /* Walk every recipe; only created, advanced-rule recipes can hold
  * rules that reference this VSI.
  */
6970 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
6971 if (!sw->recp_list[rid].recp_created)
6973 if (!sw->recp_list[rid].adv_rule)
6975 list_head = &sw->recp_list[rid].filt_rules;
6977 LIST_FOR_EACH_ENTRY(list_itr, list_head,
6978 ice_adv_fltr_mgmt_list_entry, list_entry) {
 /* Presumably used to test whether this VSI belongs to the
  * rule's VSI list — the check itself is elided from this
  * listing; verify against the full source.
  */
6979 map_info = ice_find_vsi_list_entry(&sw->recp_list[rid],
6984 rinfo = list_itr->rule_info;
6985 rinfo.sw_act.vsi_handle = vsi_handle;
6986 status = ice_rem_adv_rule(hw, list_itr->lkups,
6987 list_itr->lkups_cnt, &rinfo);
6997 * ice_replay_fltr - Replay all the filters stored by a specific list head
6998 * @hw: pointer to the hardware structure
6999 * @list_head: list for which filters needs to be replayed
7000 * @recp_id: Recipe ID for which rules need to be replayed
 *
 * Moves the stored entries onto a temporary list, then re-adds each filter
 * through ice_add_rule_internal()/ice_add_vlan_internal() so that the
 * "already exists" check in those helpers does not reject the replay.
 * NOTE(review): non-contiguous embedded line numbers — early returns,
 * inner per-VSI loop header, and cleanup/return lines are elided here.
7002 static enum ice_status
7003 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
7005 struct ice_fltr_mgmt_list_entry *itr;
7006 enum ice_status status = ICE_SUCCESS;
7007 struct ice_sw_recipe *recp_list;
7008 u8 lport = hw->port_info->lport;
7009 struct LIST_HEAD_TYPE l_head;
 /* Nothing stored — nothing to replay */
7011 if (LIST_EMPTY(list_head))
7014 recp_list = &hw->switch_info->recp_list[recp_id];
7015 /* Move entries from the given list_head to a temporary l_head so that
7016 * they can be replayed. Otherwise when trying to re-add the same
7017 * filter, the function will return already exists
7019 LIST_REPLACE_INIT(list_head, &l_head);
7021 /* Mark the given list_head empty by reinitializing it so filters
7022 * could be added again by *handler
7024 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
7026 struct ice_fltr_list_entry f_entry;
7028 f_entry.fltr_info = itr->fltr_info;
 /* Simple case: rule targets a single VSI (and is not VLAN) —
  * re-add it directly.
  */
7029 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
7030 status = ice_add_rule_internal(hw, recp_list, lport,
7032 if (status != ICE_SUCCESS)
7037 /* Add a filter per VSI separately */
 /* Pull VSIs one at a time out of the saved vsi_map, re-adding
  * each as an individual ICE_FWD_TO_VSI rule.
  */
7042 ice_find_first_bit(itr->vsi_list_info->vsi_map,
7044 if (!ice_is_vsi_valid(hw, vsi_handle))
7047 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
7048 f_entry.fltr_info.vsi_handle = vsi_handle;
7049 f_entry.fltr_info.fwd_id.hw_vsi_id =
7050 ice_get_hw_vsi_num(hw, vsi_handle);
7051 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
 /* VLAN rules have their own add path */
7052 if (recp_id == ICE_SW_LKUP_VLAN)
7053 status = ice_add_vlan_internal(hw, recp_list,
7056 status = ice_add_rule_internal(hw, recp_list,
7059 if (status != ICE_SUCCESS)
7064 /* Clear the filter management list */
7065 ice_rem_sw_rule_info(hw, &l_head);
7070 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
7071 * @hw: pointer to the hardware structure
7073 * NOTE: This function does not clean up partially added filters on error.
7074 * It is up to caller of the function to issue a reset or fail early.
 *
 * Iterates every recipe's filt_rules list and replays it via
 * ice_replay_fltr(), stopping at the first failure.
 * NOTE(review): the loop-break/return lines are elided from this listing.
7076 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
7078 struct ice_switch_info *sw = hw->switch_info;
7079 enum ice_status status = ICE_SUCCESS;
7082 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7083 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
7085 status = ice_replay_fltr(hw, i, head);
7086 if (status != ICE_SUCCESS)
7093 * ice_replay_vsi_fltr - Replay filters for requested VSI
7094 * @hw: pointer to the hardware structure
7095 * @vsi_handle: driver VSI handle
7096 * @recp_id: Recipe ID for which rules need to be replayed
7097 * @list_head: list for which filters need to be replayed
7099 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
7100 * It is required to pass valid VSI handle.
 *
 * NOTE(review): embedded line numbers are non-contiguous — early return
 * on empty list, some continue statements, closing braces and the final
 * return are elided from this view.
7102 static enum ice_status
7103 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
7104 struct LIST_HEAD_TYPE *list_head)
7106 struct ice_fltr_mgmt_list_entry *itr;
7107 enum ice_status status = ICE_SUCCESS;
7108 struct ice_sw_recipe *recp_list;
7111 if (LIST_EMPTY(list_head))
7113 recp_list = &hw->switch_info->recp_list[recp_id];
7114 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
7116 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
7118 struct ice_fltr_list_entry f_entry;
7120 f_entry.fltr_info = itr->fltr_info;
 /* Single-VSI, non-VLAN rule that belongs to this VSI —
  * re-add it directly.
  */
7121 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
7122 itr->fltr_info.vsi_handle == vsi_handle) {
7123 /* update the src in case it is VSI num */
7124 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
7125 f_entry.fltr_info.src = hw_vsi_id;
7126 status = ice_add_rule_internal(hw, recp_list,
7127 hw->port_info->lport,
7129 if (status != ICE_SUCCESS)
 /* Multi-VSI case: skip rules whose VSI list does not contain
  * this VSI.
  */
7133 if (!itr->vsi_list_info ||
7134 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
7136 /* Clearing it so that the logic can add it back */
7137 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
7138 f_entry.fltr_info.vsi_handle = vsi_handle;
7139 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
7140 /* update the src in case it is VSI num */
7141 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
7142 f_entry.fltr_info.src = hw_vsi_id;
 /* VLAN rules have their own add path */
7143 if (recp_id == ICE_SW_LKUP_VLAN)
7144 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
7146 status = ice_add_rule_internal(hw, recp_list,
7147 hw->port_info->lport,
7149 if (status != ICE_SUCCESS)
7157 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
7158 * @hw: pointer to the hardware structure
7159 * @vsi_handle: driver VSI handle
7160 * @list_head: list for which filters need to be replayed
7162 * Replay the advanced rule for the given VSI.
 *
 * Re-adds, via ice_add_adv_rule(), every stored advanced filter whose
 * switch action targets @vsi_handle; entries for other VSIs are skipped.
 * NOTE(review): the continue statement, loop closing, and final return
 * are elided from this listing (non-contiguous embedded line numbers).
7164 static enum ice_status
7165 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
7166 struct LIST_HEAD_TYPE *list_head)
7168 struct ice_rule_query_data added_entry = { 0 };
7169 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
7170 enum ice_status status = ICE_SUCCESS;
7172 if (LIST_EMPTY(list_head))
7174 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
7176 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
7177 u16 lk_cnt = adv_fltr->lkups_cnt;
 /* Only replay rules that belong to the requested VSI */
7179 if (vsi_handle != rinfo->sw_act.vsi_handle)
7181 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
7190 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
7191 * @hw: pointer to the hardware structure
7192 * @vsi_handle: driver VSI handle
7194 * Replays filters for requested VSI via vsi_handle.
 *
 * For each recipe, dispatches to the normal-filter replay
 * (ice_replay_vsi_fltr) or the advanced-rule replay
 * (ice_replay_vsi_adv_rule) based on the recipe's adv_rule flag, using the
 * saved filt_replay_rules list. Stops at the first failure.
 * NOTE(review): loop-break/return lines are elided from this listing.
7196 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
7198 struct ice_switch_info *sw = hw->switch_info;
7199 enum ice_status status;
7202 /* Update the recipes that were created */
7203 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7204 struct LIST_HEAD_TYPE *head;
7206 head = &sw->recp_list[i].filt_replay_rules;
7207 if (!sw->recp_list[i].adv_rule)
7208 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
7210 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
7211 if (status != ICE_SUCCESS)
7219 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
7220 * @hw: pointer to the HW struct
7222 * Deletes the filter replay rules.
7224 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
7226 struct ice_switch_info *sw = hw->switch_info;
7232 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7233 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
7234 struct LIST_HEAD_TYPE *l_head;
7236 l_head = &sw->recp_list[i].filt_replay_rules;
7237 if (!sw->recp_list[i].adv_rule)
7238 ice_rem_sw_rule_info(hw, l_head);
7240 ice_rem_adv_rule_info(hw, l_head);