1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
/* Byte offsets into the dummy Ethernet header and well-known protocol IDs
 * used while building switch filter rules.
 */
9 #define ICE_ETH_DA_OFFSET 0 /* destination MAC starts at byte 0 */
10 #define ICE_ETH_ETHTYPE_OFFSET 12 /* EtherType follows the two 6-byte MACs */
11 #define ICE_ETH_VLAN_TCI_OFFSET 14 /* VLAN TCI when an 802.1Q tag is present */
12 #define ICE_MAX_VLAN_ID 0xFFF /* VLAN ID is a 12-bit field */
13 #define ICE_IPV4_NVGRE_PROTO_ID 0x002F /* IP protocol 47 = GRE (NVGRE) */
14 #define ICE_PPP_IPV6_PROTO_ID 0x0057 /* PPP protocol field value for IPv6 */
15 #define ICE_IPV6_ETHER_ID 0x86DD /* EtherType for IPv6 */
17 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
18 * struct to configure any switch filter rules.
19 * {DA (6 bytes), SA(6 bytes),
20 * Ether type (2 bytes for header without VLAN tag) OR
21 * VLAN tag (4 bytes for header with VLAN tag) }
23 * Word on Hardcoded values
24 * byte 0 = 0x2: to identify it as locally administered DA MAC
25 * byte 6 = 0x2: to identify it as locally administered SA MAC
26 * byte 12 = 0x81 & byte 13 = 0x00:
27 * In case of VLAN filter first two bytes defines ether type (0x8100)
28 * and remaining two bytes are placeholder for programming a given VLAN ID
29 * In case of Ether type filter it is treated as header without VLAN tag
30 * and byte 12 and 13 is used to program a given Ether type instead
/* Template header; match fields are patched into this skeleton at rule
 * creation time (see the offset #defines above).
 */
32 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* One (protocol, byte offset) pair describing where a protocol header
 * begins inside a dummy packet template below.
 */
36 struct ice_dummy_pkt_offsets {
37 enum ice_protocol_type type;
38 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
/* offset info for MAC + IPv4 + NVGRE + MAC + IPv4 + TCP dummy packet */
41 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
44 { ICE_IPV4_OFOS, 14 },
49 { ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + IPv4 + NVGRE + MAC + IPv4 + TCP */
52 static const u8 dummy_gre_tcp_packet[] = {
53 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
54 0x00, 0x00, 0x00, 0x00,
55 0x00, 0x00, 0x00, 0x00,
57 0x08, 0x00, /* ICE_ETYPE_OL 12 */
59 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
60 0x00, 0x00, 0x00, 0x00,
61 0x00, 0x2F, 0x00, 0x00, /* protocol 0x2F = GRE */
62 0x00, 0x00, 0x00, 0x00,
63 0x00, 0x00, 0x00, 0x00,
65 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
66 0x00, 0x00, 0x00, 0x00,
68 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
69 0x00, 0x00, 0x00, 0x00,
70 0x00, 0x00, 0x00, 0x00,
73 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
74 0x00, 0x00, 0x00, 0x00,
75 0x00, 0x06, 0x00, 0x00, /* protocol 0x06 = TCP */
76 0x00, 0x00, 0x00, 0x00,
77 0x00, 0x00, 0x00, 0x00,
79 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
80 0x00, 0x00, 0x00, 0x00,
81 0x00, 0x00, 0x00, 0x00,
82 0x50, 0x02, 0x20, 0x00,
83 0x00, 0x00, 0x00, 0x00
/* offset info for MAC + IPv4 + NVGRE + MAC + IPv4 + UDP dummy packet */
86 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
89 { ICE_IPV4_OFOS, 14 },
94 { ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + IPv4 + NVGRE + MAC + IPv4 + UDP */
97 static const u8 dummy_gre_udp_packet[] = {
98 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
99 0x00, 0x00, 0x00, 0x00,
100 0x00, 0x00, 0x00, 0x00,
102 0x08, 0x00, /* ICE_ETYPE_OL 12 */
104 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
105 0x00, 0x00, 0x00, 0x00,
106 0x00, 0x2F, 0x00, 0x00, /* protocol 0x2F = GRE */
107 0x00, 0x00, 0x00, 0x00,
108 0x00, 0x00, 0x00, 0x00,
110 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
111 0x00, 0x00, 0x00, 0x00,
113 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
114 0x00, 0x00, 0x00, 0x00,
115 0x00, 0x00, 0x00, 0x00,
118 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
119 0x00, 0x00, 0x00, 0x00,
120 0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
121 0x00, 0x00, 0x00, 0x00,
122 0x00, 0x00, 0x00, 0x00,
124 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
125 0x00, 0x08, 0x00, 0x00,
/* offset info for MAC + IPv4 + UDP + VXLAN/GPE + MAC + IPv4 + TCP packet */
128 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
130 { ICE_ETYPE_OL, 12 },
131 { ICE_IPV4_OFOS, 14 },
135 { ICE_VXLAN_GPE, 42 },
139 { ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + IPv4 + UDP + VXLAN + MAC + IPv4 + TCP */
142 static const u8 dummy_udp_tun_tcp_packet[] = {
143 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
144 0x00, 0x00, 0x00, 0x00,
145 0x00, 0x00, 0x00, 0x00,
147 0x08, 0x00, /* ICE_ETYPE_OL 12 */
149 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
150 0x00, 0x01, 0x00, 0x00,
151 0x40, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
152 0x00, 0x00, 0x00, 0x00,
153 0x00, 0x00, 0x00, 0x00,
155 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34, dst port 0x12b5 = 4789 (VXLAN) */
156 0x00, 0x46, 0x00, 0x00,
158 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
159 0x00, 0x00, 0x00, 0x00,
161 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
162 0x00, 0x00, 0x00, 0x00,
163 0x00, 0x00, 0x00, 0x00,
166 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
167 0x00, 0x01, 0x00, 0x00,
168 0x40, 0x06, 0x00, 0x00, /* protocol 0x06 = TCP */
169 0x00, 0x00, 0x00, 0x00,
170 0x00, 0x00, 0x00, 0x00,
172 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
173 0x00, 0x00, 0x00, 0x00,
174 0x00, 0x00, 0x00, 0x00,
175 0x50, 0x02, 0x20, 0x00,
176 0x00, 0x00, 0x00, 0x00
/* offset info for MAC + IPv4 + UDP + VXLAN/GPE + MAC + IPv4 + UDP packet */
179 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
181 { ICE_ETYPE_OL, 12 },
182 { ICE_IPV4_OFOS, 14 },
186 { ICE_VXLAN_GPE, 42 },
189 { ICE_UDP_ILOS, 84 },
190 { ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + IPv4 + UDP + VXLAN + MAC + IPv4 + UDP */
193 static const u8 dummy_udp_tun_udp_packet[] = {
194 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
195 0x00, 0x00, 0x00, 0x00,
196 0x00, 0x00, 0x00, 0x00,
198 0x08, 0x00, /* ICE_ETYPE_OL 12 */
200 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
201 0x00, 0x01, 0x00, 0x00,
202 0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
203 0x00, 0x00, 0x00, 0x00,
204 0x00, 0x00, 0x00, 0x00,
206 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34, dst port 0x12b5 = 4789 (VXLAN) */
207 0x00, 0x3a, 0x00, 0x00,
209 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
210 0x00, 0x00, 0x00, 0x00,
212 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
213 0x00, 0x00, 0x00, 0x00,
214 0x00, 0x00, 0x00, 0x00,
217 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
218 0x00, 0x01, 0x00, 0x00,
219 0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
220 0x00, 0x00, 0x00, 0x00,
221 0x00, 0x00, 0x00, 0x00,
223 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
224 0x00, 0x08, 0x00, 0x00,
227 /* offset info for MAC + IPv4 + UDP dummy packet */
228 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
230 { ICE_ETYPE_OL, 12 },
231 { ICE_IPV4_OFOS, 14 },
232 { ICE_UDP_ILOS, 34 },
233 { ICE_PROTOCOL_LAST, 0 },

236 /* Dummy packet for MAC + IPv4 + UDP */
237 static const u8 dummy_udp_packet[] = {
238 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
239 0x00, 0x00, 0x00, 0x00,
240 0x00, 0x00, 0x00, 0x00,
242 0x08, 0x00, /* ICE_ETYPE_OL 12 */
244 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
245 0x00, 0x01, 0x00, 0x00,
246 0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
247 0x00, 0x00, 0x00, 0x00,
248 0x00, 0x00, 0x00, 0x00,
250 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
251 0x00, 0x08, 0x00, 0x00,
253 0x00, 0x00, /* 2 bytes for 4 byte alignment */
256 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
257 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
259 { ICE_ETYPE_OL, 12 },
260 { ICE_VLAN_OFOS, 14 },
261 { ICE_IPV4_OFOS, 18 },
262 { ICE_UDP_ILOS, 38 },
263 { ICE_PROTOCOL_LAST, 0 },

266 /* C-tag (802.1Q), IPv4:UDP dummy packet */
267 static const u8 dummy_vlan_udp_packet[] = {
268 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
269 0x00, 0x00, 0x00, 0x00,
270 0x00, 0x00, 0x00, 0x00,
272 0x81, 0x00, /* ICE_ETYPE_OL 12, 0x8100 = 802.1Q C-tag TPID */
274 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
276 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
277 0x00, 0x01, 0x00, 0x00,
278 0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
279 0x00, 0x00, 0x00, 0x00,
280 0x00, 0x00, 0x00, 0x00,
282 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
283 0x00, 0x08, 0x00, 0x00,
285 0x00, 0x00, /* 2 bytes for 4 byte alignment */
288 /* offset info for MAC + IPv4 + TCP dummy packet */
289 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
291 { ICE_ETYPE_OL, 12 },
292 { ICE_IPV4_OFOS, 14 },
294 { ICE_PROTOCOL_LAST, 0 },

297 /* Dummy packet for MAC + IPv4 + TCP */
298 static const u8 dummy_tcp_packet[] = {
299 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
300 0x00, 0x00, 0x00, 0x00,
301 0x00, 0x00, 0x00, 0x00,
303 0x08, 0x00, /* ICE_ETYPE_OL 12 */
305 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
306 0x00, 0x01, 0x00, 0x00,
307 0x00, 0x06, 0x00, 0x00, /* protocol 0x06 = TCP */
308 0x00, 0x00, 0x00, 0x00,
309 0x00, 0x00, 0x00, 0x00,
311 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
312 0x00, 0x00, 0x00, 0x00,
313 0x00, 0x00, 0x00, 0x00,
314 0x50, 0x00, 0x00, 0x00,
315 0x00, 0x00, 0x00, 0x00,
317 0x00, 0x00, /* 2 bytes for 4 byte alignment */
320 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
321 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
323 { ICE_ETYPE_OL, 12 },
324 { ICE_VLAN_OFOS, 14 },
325 { ICE_IPV4_OFOS, 18 },
327 { ICE_PROTOCOL_LAST, 0 },

330 /* C-tag (802.1Q), IPv4:TCP dummy packet */
331 static const u8 dummy_vlan_tcp_packet[] = {
332 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
333 0x00, 0x00, 0x00, 0x00,
334 0x00, 0x00, 0x00, 0x00,
336 0x81, 0x00, /* ICE_ETYPE_OL 12, 0x8100 = 802.1Q C-tag TPID */
338 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
340 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
341 0x00, 0x01, 0x00, 0x00,
342 0x00, 0x06, 0x00, 0x00, /* protocol 0x06 = TCP */
343 0x00, 0x00, 0x00, 0x00,
344 0x00, 0x00, 0x00, 0x00,
346 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
347 0x00, 0x00, 0x00, 0x00,
348 0x00, 0x00, 0x00, 0x00,
349 0x50, 0x00, 0x00, 0x00,
350 0x00, 0x00, 0x00, 0x00,
352 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv6 + TCP dummy packet */
355 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
357 { ICE_ETYPE_OL, 12 },
358 { ICE_IPV6_OFOS, 14 },
360 { ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + IPv6 + TCP */
363 static const u8 dummy_tcp_ipv6_packet[] = {
364 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
365 0x00, 0x00, 0x00, 0x00,
366 0x00, 0x00, 0x00, 0x00,
368 0x86, 0xDD, /* ICE_ETYPE_OL 12, 0x86DD = IPv6 */
370 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
371 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
372 0x00, 0x00, 0x00, 0x00,
373 0x00, 0x00, 0x00, 0x00,
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00,
379 0x00, 0x00, 0x00, 0x00,
381 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
382 0x00, 0x00, 0x00, 0x00,
383 0x00, 0x00, 0x00, 0x00,
384 0x50, 0x00, 0x00, 0x00,
385 0x00, 0x00, 0x00, 0x00,
387 0x00, 0x00, /* 2 bytes for 4 byte alignment */
390 /* C-tag (802.1Q): IPv6 + TCP */
391 static const struct ice_dummy_pkt_offsets
392 dummy_vlan_tcp_ipv6_packet_offsets[] = {
394 { ICE_ETYPE_OL, 12 },
395 { ICE_VLAN_OFOS, 14 },
396 { ICE_IPV6_OFOS, 18 },
398 { ICE_PROTOCOL_LAST, 0 },

401 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
402 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
403 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
404 0x00, 0x00, 0x00, 0x00,
405 0x00, 0x00, 0x00, 0x00,
407 0x81, 0x00, /* ICE_ETYPE_OL 12, 0x8100 = 802.1Q C-tag TPID */
409 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
411 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
412 0x00, 0x14, 0x06, 0x00, /* Next header is TCP, payload len 0x14 = 20 */
413 0x00, 0x00, 0x00, 0x00,
414 0x00, 0x00, 0x00, 0x00,
415 0x00, 0x00, 0x00, 0x00,
416 0x00, 0x00, 0x00, 0x00,
417 0x00, 0x00, 0x00, 0x00,
418 0x00, 0x00, 0x00, 0x00,
419 0x00, 0x00, 0x00, 0x00,
420 0x00, 0x00, 0x00, 0x00,
422 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
423 0x00, 0x00, 0x00, 0x00,
424 0x00, 0x00, 0x00, 0x00,
425 0x50, 0x00, 0x00, 0x00,
426 0x00, 0x00, 0x00, 0x00,
428 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv6 + UDP dummy packet */
432 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
434 { ICE_ETYPE_OL, 12 },
435 { ICE_IPV6_OFOS, 14 },
436 { ICE_UDP_ILOS, 54 },
437 { ICE_PROTOCOL_LAST, 0 },

440 /* IPv6 + UDP dummy packet */
441 static const u8 dummy_udp_ipv6_packet[] = {
442 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
443 0x00, 0x00, 0x00, 0x00,
444 0x00, 0x00, 0x00, 0x00,
446 0x86, 0xDD, /* ICE_ETYPE_OL 12, 0x86DD = IPv6 */
448 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
449 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
450 0x00, 0x00, 0x00, 0x00,
451 0x00, 0x00, 0x00, 0x00,
452 0x00, 0x00, 0x00, 0x00,
453 0x00, 0x00, 0x00, 0x00,
454 0x00, 0x00, 0x00, 0x00,
455 0x00, 0x00, 0x00, 0x00,
456 0x00, 0x00, 0x00, 0x00,
457 0x00, 0x00, 0x00, 0x00,
459 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
460 0x00, 0x08, 0x00, 0x00,
462 0x00, 0x00, /* 2 bytes for 4 byte alignment */
465 /* C-tag (802.1Q): IPv6 + UDP */
466 static const struct ice_dummy_pkt_offsets
467 dummy_vlan_udp_ipv6_packet_offsets[] = {
469 { ICE_ETYPE_OL, 12 },
470 { ICE_VLAN_OFOS, 14 },
471 { ICE_IPV6_OFOS, 18 },
472 { ICE_UDP_ILOS, 58 },
473 { ICE_PROTOCOL_LAST, 0 },

476 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
477 static const u8 dummy_vlan_udp_ipv6_packet[] = {
478 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
479 0x00, 0x00, 0x00, 0x00,
480 0x00, 0x00, 0x00, 0x00,
482 0x81, 0x00, /* ICE_ETYPE_OL 12, 0x8100 = 802.1Q C-tag TPID */
484 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
486 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
487 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
488 0x00, 0x00, 0x00, 0x00,
489 0x00, 0x00, 0x00, 0x00,
490 0x00, 0x00, 0x00, 0x00,
491 0x00, 0x00, 0x00, 0x00,
492 0x00, 0x00, 0x00, 0x00,
493 0x00, 0x00, 0x00, 0x00,
494 0x00, 0x00, 0x00, 0x00,
495 0x00, 0x00, 0x00, 0x00,
497 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
498 0x00, 0x08, 0x00, 0x00,
500 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv4 + UDP + GTP-U dummy packet */
503 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
505 { ICE_IPV4_OFOS, 14 },
508 { ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + IPv4 + UDP + GTP-U with PDU session container */
511 static const u8 dummy_udp_gtp_packet[] = {
512 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
513 0x00, 0x00, 0x00, 0x00,
514 0x00, 0x00, 0x00, 0x00,
517 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
518 0x00, 0x00, 0x00, 0x00,
519 0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
520 0x00, 0x00, 0x00, 0x00,
521 0x00, 0x00, 0x00, 0x00,
523 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34, dst port 0x0868 = 2152 (GTP-U) */
524 0x00, 0x1c, 0x00, 0x00,
526 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
527 0x00, 0x00, 0x00, 0x00,
528 0x00, 0x00, 0x00, 0x85,
530 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
531 0x00, 0x00, 0x00, 0x00,
/* offset info shared by the PPPoE session dummy packets below */
534 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
536 { ICE_ETYPE_OL, 12 },
537 { ICE_VLAN_OFOS, 14},
539 { ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for VLAN + PPPoE session + IPv4 */
542 static const u8 dummy_pppoe_ipv4_packet[] = {
543 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
544 0x00, 0x00, 0x00, 0x00,
545 0x00, 0x00, 0x00, 0x00,
547 0x81, 0x00, /* ICE_ETYPE_OL 12, 0x8100 = 802.1Q C-tag TPID */
549 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14, 0x8864 = PPPoE session */
551 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
554 0x00, 0x21, /* PPP Link Layer 24, PPP protocol 0x0021 = IPv4 */
556 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
557 0x00, 0x00, 0x00, 0x00,
558 0x00, 0x00, 0x00, 0x00,
559 0x00, 0x00, 0x00, 0x00,
560 0x00, 0x00, 0x00, 0x00,
562 0x00, 0x00, /* 2 bytes for 4 bytes alignment */

/* Dummy packet for VLAN + PPPoE session + IPv6 */
565 static const u8 dummy_pppoe_ipv6_packet[] = {
566 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
567 0x00, 0x00, 0x00, 0x00,
568 0x00, 0x00, 0x00, 0x00,
570 0x81, 0x00, /* ICE_ETYPE_OL 12, 0x8100 = 802.1Q C-tag TPID */
572 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14, 0x8864 = PPPoE session */
574 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
577 0x00, 0x57, /* PPP Link Layer 24, ICE_PPP_IPV6_PROTO_ID (IPv6) */
579 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
580 0x00, 0x00, 0x00, 0x00,
581 0x00, 0x00, 0x00, 0x00,
582 0x00, 0x00, 0x00, 0x00,
583 0x00, 0x00, 0x00, 0x00,
584 0x00, 0x00, 0x00, 0x00,
585 0x00, 0x00, 0x00, 0x00,
586 0x00, 0x00, 0x00, 0x00,
587 0x00, 0x00, 0x00, 0x00,
588 0x00, 0x00, 0x00, 0x00,
590 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* offset info for MAC + IPv4 + ESP dummy packet */
593 static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
595 { ICE_IPV4_OFOS, 14 },
597 { ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + IPv4 + ESP */
600 static const u8 dummy_ipv4_esp_pkt[] = {
601 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
602 0x00, 0x00, 0x00, 0x00,
603 0x00, 0x00, 0x00, 0x00,
606 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
607 0x00, 0x00, 0x40, 0x00,
608 0x40, 0x32, 0x00, 0x00, /* protocol 0x32 = 50 (ESP) */
609 0x00, 0x00, 0x00, 0x00,
610 0x00, 0x00, 0x00, 0x00,
612 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
613 0x00, 0x00, 0x00, 0x00,
614 0x00, 0x00, /* 2 bytes for 4 bytes alignment */

/* offset info for MAC + IPv6 + ESP dummy packet */
617 static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
619 { ICE_IPV6_OFOS, 14 },
621 { ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + IPv6 + ESP */
624 static const u8 dummy_ipv6_esp_pkt[] = {
625 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
626 0x00, 0x00, 0x00, 0x00,
627 0x00, 0x00, 0x00, 0x00,
630 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
631 0x00, 0x08, 0x32, 0x00, /* Next header ESP */
632 0x00, 0x00, 0x00, 0x00,
633 0x00, 0x00, 0x00, 0x00,
634 0x00, 0x00, 0x00, 0x00,
635 0x00, 0x00, 0x00, 0x00,
636 0x00, 0x00, 0x00, 0x00,
637 0x00, 0x00, 0x00, 0x00,
638 0x00, 0x00, 0x00, 0x00,
639 0x00, 0x00, 0x00, 0x00,
641 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
642 0x00, 0x00, 0x00, 0x00,
643 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* offset info for MAC + IPv4 + AH dummy packet */
646 static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
648 { ICE_IPV4_OFOS, 14 },
650 { ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + IPv4 + AH */
653 static const u8 dummy_ipv4_ah_pkt[] = {
654 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
655 0x00, 0x00, 0x00, 0x00,
656 0x00, 0x00, 0x00, 0x00,
659 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_OFOS 14 */
660 0x00, 0x00, 0x40, 0x00,
661 0x40, 0x33, 0x00, 0x00, /* protocol 0x33 = 51 (AH) */
662 0x00, 0x00, 0x00, 0x00,
663 0x00, 0x00, 0x00, 0x00,
665 0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
666 0x00, 0x00, 0x00, 0x00,
667 0x00, 0x00, 0x00, 0x00,
668 0x00, 0x00, /* 2 bytes for 4 bytes alignment */

/* offset info for MAC + IPv6 + AH dummy packet */
671 static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
673 { ICE_IPV6_OFOS, 14 },
675 { ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + IPv6 + AH */
678 static const u8 dummy_ipv6_ah_pkt[] = {
679 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
680 0x00, 0x00, 0x00, 0x00,
681 0x00, 0x00, 0x00, 0x00,
684 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
685 0x00, 0x0c, 0x33, 0x00, /* Next header AH */
686 0x00, 0x00, 0x00, 0x00,
687 0x00, 0x00, 0x00, 0x00,
688 0x00, 0x00, 0x00, 0x00,
689 0x00, 0x00, 0x00, 0x00,
690 0x00, 0x00, 0x00, 0x00,
691 0x00, 0x00, 0x00, 0x00,
692 0x00, 0x00, 0x00, 0x00,
693 0x00, 0x00, 0x00, 0x00,
695 0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
696 0x00, 0x00, 0x00, 0x00,
697 0x00, 0x00, 0x00, 0x00,
698 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* offset info for MAC + IPv4 + UDP (NAT-T) dummy packet */
701 static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
703 { ICE_IPV4_OFOS, 14 },
704 { ICE_UDP_ILOS, 34 },
706 { ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + IPv4 + UDP-encapsulated ESP (NAT-T) */
709 static const u8 dummy_ipv4_nat_pkt[] = {
710 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
711 0x00, 0x00, 0x00, 0x00,
712 0x00, 0x00, 0x00, 0x00,
715 0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_OFOS 14 */
716 0x00, 0x00, 0x40, 0x00,
717 0x40, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
718 0x00, 0x00, 0x00, 0x00,
719 0x00, 0x00, 0x00, 0x00,
721 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34, dst port 0x1194 = 4500 */
722 0x00, 0x00, 0x00, 0x00,
724 0x00, 0x00, 0x00, 0x00,
725 0x00, 0x00, 0x00, 0x00,
726 0x00, 0x00, /* 2 bytes for 4 bytes alignment */

/* offset info for MAC + IPv6 + UDP (NAT-T) dummy packet */
729 static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
731 { ICE_IPV6_OFOS, 14 },
732 { ICE_UDP_ILOS, 54 },
734 { ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + IPv6 + UDP-encapsulated ESP (NAT-T) */
737 static const u8 dummy_ipv6_nat_pkt[] = {
738 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
739 0x00, 0x00, 0x00, 0x00,
740 0x00, 0x00, 0x00, 0x00,
743 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
744 0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */
745 0x00, 0x00, 0x00, 0x00,
746 0x00, 0x00, 0x00, 0x00,
747 0x00, 0x00, 0x00, 0x00,
748 0x00, 0x00, 0x00, 0x00,
749 0x00, 0x00, 0x00, 0x00,
750 0x00, 0x00, 0x00, 0x00,
751 0x00, 0x00, 0x00, 0x00,
752 0x00, 0x00, 0x00, 0x00,
754 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54, dst port 0x1194 = 4500 */
755 0x00, 0x00, 0x00, 0x00,
757 0x00, 0x00, 0x00, 0x00,
758 0x00, 0x00, 0x00, 0x00,
759 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
763 /* this is a recipe to profile association bitmap */
764 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
765 ICE_MAX_NUM_PROFILES);

767 /* this is a profile to recipe association bitmap */
768 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
769 ICE_MAX_NUM_RECIPES);

/* forward declaration: defined later in this file, called from
 * ice_get_recp_frm_fw() to refresh the two maps above
 */
771 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
774 * ice_collect_result_idx - copy result index values
775 * @buf: buffer that contains the result index
776 * @recp: the recipe struct to copy data into
778 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
779 struct ice_sw_recipe *recp)
/* result_indx is only meaningful when ICE_AQ_RECIPE_RESULT_EN is set;
 * strip the enable flag to recover the raw index before recording it
 * in the recipe's result-index bitmap.
 */
781 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
782 ice_set_bit(buf->content.result_indx &
783 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs)
787 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
788 * @hw: pointer to hardware structure
789 * @recps: struct that we need to populate
790 * @rid: recipe ID that we are populating
791 * @refresh_required: true if we should get recipe to profile mapping from FW
793 * This function is used to populate all the necessary entries into our
794 * bookkeeping so that we have a current list of all the recipes that are
795 * programmed in the firmware.
 *
 * Returns ICE_ERR_NO_MEMORY on allocation failure; on success the recipe
 * (and its sub-recipes for a chained/"big" recipe) is mirrored into recps[].
797 static enum ice_status
798 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
799 bool *refresh_required)
801 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
802 struct ice_aqc_recipe_data_elem *tmp;
803 u16 num_recps = ICE_MAX_NUM_RECIPES;
804 struct ice_prot_lkup_ext *lkup_exts;
805 enum ice_status status;
809 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
811 /* we need a buffer big enough to accommodate all the recipes */
812 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
813 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
815 return ICE_ERR_NO_MEMORY;
/* ask FW for the recipe chain rooted at 'rid'; num_recps is updated to
 * the number of sub-recipes actually returned
 */
817 tmp[0].recipe_indx = rid;
818 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
819 /* non-zero status meaning recipe doesn't exist */
823 /* Get recipe to profile map so that we can get the fv from lkups that
824 * we read for a recipe from FW. Since we want to minimize the number of
825 * times we make this FW call, just make one call and cache the copy
826 * until a new recipe is added. This operation is only required the
827 * first time to get the changes from FW. Then to search existing
828 * entries we don't need to update the cache again until another recipe
831 if (*refresh_required) {
832 ice_get_recp_to_prof_map(hw);
833 *refresh_required = false;
836 /* Start populating all the entries for recps[rid] based on lkups from
837 * firmware. Note that we are only creating the root recipe in our
840 lkup_exts = &recps[rid].lkup_exts;
842 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
843 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
844 struct ice_recp_grp_entry *rg_entry;
845 u8 i, prof, idx, prot = 0;
849 rg_entry = (struct ice_recp_grp_entry *)
850 ice_malloc(hw, sizeof(*rg_entry));
852 status = ICE_ERR_NO_MEMORY;
/* each sub-recipe's recipe_indx is its slot in the recps[] array */
856 idx = root_bufs.recipe_indx;
857 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
859 /* Mark all result indices in this chain */
860 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
861 ice_set_bit(root_bufs.content.result_indx &
862 ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
864 /* get the first profile that is associated with rid */
865 prof = ice_find_first_bit(recipe_to_profile[idx],
866 ICE_MAX_NUM_PROFILES);
867 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
868 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
870 rg_entry->fv_idx[i] = lkup_indx;
871 rg_entry->fv_mask[i] =
872 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
874 /* If the recipe is a chained recipe then all its
875 * child recipe's result will have a result index.
876 * To fill fv_words we should not use those result
877 * index, we only need the protocol ids and offsets.
878 * We will skip all the fv_idx which stores result
879 * index in them. We also need to skip any fv_idx which
880 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
881 * valid offset value.
883 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
884 rg_entry->fv_idx[i]) ||
885 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
886 rg_entry->fv_idx[i] == 0)
/* translate the field-vector index into a (protocol id, offset)
 * pair for this profile
 */
889 ice_find_prot_off(hw, ICE_BLK_SW, prof,
890 rg_entry->fv_idx[i], &prot, &off);
891 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
892 lkup_exts->fv_words[fv_word_idx].off = off;
895 /* populate rg_list with the data from the child entry of this
898 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
900 /* Propagate some data to the recipe database */
901 recps[idx].is_root = !!is_root;
902 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
903 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
904 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
905 recps[idx].chain_idx = root_bufs.content.result_indx &
906 ~ICE_AQ_RECIPE_RESULT_EN;
907 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
909 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
915 /* Only do the following for root recipes entries */
916 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
917 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
918 recps[idx].root_rid = root_bufs.content.rid &
919 ~ICE_AQ_RECIPE_ID_IS_ROOT;
920 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
923 /* Complete initialization of the root recipe entry */
924 lkup_exts->n_val_words = fv_word_idx;
925 recps[rid].big_recp = (num_recps > 1);
926 recps[rid].n_grp_count = (u8)num_recps;
/* keep a private copy of the raw FW buffers for later reuse */
927 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
928 ice_memdup(hw, tmp, recps[rid].n_grp_count *
929 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
930 if (!recps[rid].root_buf)
933 /* Copy result indexes */
934 ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
935 recps[rid].recp_created = true;
943 * ice_get_recp_to_prof_map - updates recipe to profile mapping
944 * @hw: pointer to hardware structure
946 * This function is used to populate recipe_to_profile matrix where index to
947 * this array is the recipe ID and the element is the mapping of which profiles
948 * is this recipe mapped to.
 *
 * Also rebuilds the inverse profile_to_recipe map in the same pass.
951 ice_get_recp_to_prof_map(struct ice_hw *hw)
953 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
956 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
959 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
960 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* best effort: a profile whose query fails is simply left with an
 * empty recipe map
 */
961 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
963 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
964 ICE_MAX_NUM_RECIPES);
/* mirror each set bit into the inverse recipe_to_profile map */
965 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
966 if (ice_is_bit_set(r_bitmap, j))
967 ice_set_bit(i, recipe_to_profile[j]);
972 * ice_init_def_sw_recp - initialize the recipe book keeping tables
973 * @hw: pointer to the HW struct
974 * @recp_list: pointer to sw recipe list
976 * Allocate memory for the entire recipe table and initialize the structures/
977 * entries corresponding to basic recipes.
 *
 * Returns ICE_ERR_NO_MEMORY if the table cannot be allocated.
980 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
982 struct ice_sw_recipe *recps;
985 recps = (struct ice_sw_recipe *)
986 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
988 return ICE_ERR_NO_MEMORY;
/* seed every slot with its own root recipe ID, empty rule lists and a
 * per-recipe filter-rule lock
 */
990 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
991 recps[i].root_rid = i;
992 INIT_LIST_HEAD(&recps[i].filt_rules);
993 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
994 INIT_LIST_HEAD(&recps[i].rg_list);
995 ice_init_lock(&recps[i].filt_rule_lock);
1004 * ice_aq_get_sw_cfg - get switch configuration
1005 * @hw: pointer to the hardware structure
1006 * @buf: pointer to the result buffer
1007 * @buf_size: length of the buffer available for response
1008 * @req_desc: pointer to requested descriptor
1009 * @num_elems: pointer to number of elements
1010 * @cd: pointer to command details structure or NULL
1012 * Get switch configuration (0x0200) to be placed in 'buff'.
1013 * This admin command returns information such as initial VSI/port number
1014 * and switch ID it belongs to.
1016 * NOTE: *req_desc is both an input/output parameter.
1017 * The caller of this function first calls this function with *request_desc set
1018 * to 0. If the response from f/w has *req_desc set to 0, all the switch
1019 * configuration information has been returned; if non-zero (meaning not all
1020 * the information was returned), the caller should call this function again
1021 * with *req_desc set to the previous value returned by f/w to get the
1022 * next block of switch configuration information.
1024 * *num_elems is output only parameter. This reflects the number of elements
1025 * in response buffer. The caller of this function to use *num_elems while
1026 * parsing the response buffer.
1028 static enum ice_status
1029 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
1030 u16 buf_size, u16 *req_desc, u16 *num_elems,
1031 struct ice_sq_cd *cd)
1033 struct ice_aqc_get_sw_cfg *cmd;
1034 enum ice_status status;
1035 struct ice_aq_desc desc;
1037 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
1038 cmd = &desc.params.get_sw_conf;
1039 cmd->element = CPU_TO_LE16(*req_desc);
1041 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* FW echoes the continuation token and element count back through the
 * command descriptor fields
 */
1043 *req_desc = LE16_TO_CPU(cmd->element);
1044 *num_elems = LE16_TO_CPU(cmd->num_elems);
1051 * ice_alloc_sw - allocate resources specific to switch
1052 * @hw: pointer to the HW struct
1053 * @ena_stats: true to turn on VEB stats
1054 * @shared_res: true for shared resource, false for dedicated resource
1055 * @sw_id: switch ID returned
1056 * @counter_id: VEB counter ID returned
1058 * allocates switch resources (SWID and VEB counter) (0x0208)
 *
 * Returns ICE_ERR_NO_MEMORY if a request buffer cannot be allocated;
 * otherwise the status of the admin queue resource allocation.
1061 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
1064 struct ice_aqc_alloc_free_res_elem *sw_buf;
1065 struct ice_aqc_res_elem *sw_ele;
1066 enum ice_status status;
1069 buf_len = sizeof(*sw_buf);
1070 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1071 ice_malloc(hw, buf_len);
1073 return ICE_ERR_NO_MEMORY;
1075 /* Prepare buffer for switch ID.
1076 * The number of resource entries in buffer is passed as 1 since only a
1077 * single switch/VEB instance is allocated, and hence a single sw_id
1080 sw_buf->num_elems = CPU_TO_LE16(1);
1082 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
1083 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
1084 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
1086 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1087 ice_aqc_opc_alloc_res, NULL);
1090 goto ice_alloc_sw_exit;
/* FW returns the allocated switch ID in the first response element */
1092 sw_ele = &sw_buf->elem[0];
1093 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
1096 /* Prepare buffer for VEB Counter */
1097 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
1098 struct ice_aqc_alloc_free_res_elem *counter_buf;
1099 struct ice_aqc_res_elem *counter_ele;
1101 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1102 ice_malloc(hw, buf_len);
1104 status = ICE_ERR_NO_MEMORY;
1105 goto ice_alloc_sw_exit;
1108 /* The number of resource entries in buffer is passed as 1 since
1109 * only a single switch/VEB instance is allocated, and hence a
1110 * single VEB counter is requested.
1112 counter_buf->num_elems = CPU_TO_LE16(1);
1113 counter_buf->res_type =
1114 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
1115 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
1116 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
/* counter buffer is freed on both the error and success paths below */
1120 ice_free(hw, counter_buf);
1121 goto ice_alloc_sw_exit;
1123 counter_ele = &counter_buf->elem[0];
1124 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
1125 ice_free(hw, counter_buf);
1129 ice_free(hw, sw_buf);
1134 * ice_free_sw - free resources specific to switch
1135 * @hw: pointer to the HW struct
1136 * @sw_id: switch ID returned
1137 * @counter_id: VEB counter ID returned
1139 * free switch resources (SWID and VEB counter) (0x0209)
1141 * NOTE: This function frees multiple resources. It continues
1142 * releasing other resources even after it encounters error.
1143 * The error code returned is the last error it encountered.
1145 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
1147 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
1148 enum ice_status status, ret_status;
1151 buf_len = sizeof(*sw_buf);
1152 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1153 ice_malloc(hw, buf_len);
1155 return ICE_ERR_NO_MEMORY;
1157 /* Prepare buffer to free for switch ID res.
1158 * The number of resource entries in buffer is passed as 1 since only a
1159 * single switch/VEB instance is freed, and hence a single sw_id
1162 sw_buf->num_elems = CPU_TO_LE16(1);
1163 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
1164 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
/* keep going even if the SWID free fails; per the contract above we
 * still attempt to free the VEB counter
 */
1166 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1167 ice_aqc_opc_free_res, NULL);
1170 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1172 /* Prepare buffer to free for VEB Counter resource */
1173 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1174 ice_malloc(hw, buf_len);
1176 ice_free(hw, sw_buf);
1177 return ICE_ERR_NO_MEMORY;
1180 /* The number of resource entries in buffer is passed as 1 since only a
1181 * single switch/VEB instance is freed, and hence a single VEB counter
1184 counter_buf->num_elems = CPU_TO_LE16(1);
1185 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
1186 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
1188 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1189 ice_aqc_opc_free_res, NULL);
/* the last error encountered wins, per the function contract */
1191 ice_debug(hw, ICE_DBG_SW,
1192 "VEB counter resource could not be freed\n");
1193 ret_status = status;
1196 ice_free(hw, counter_buf);
1197 ice_free(hw, sw_buf);
1203 * @hw: pointer to the HW struct
1204 * @vsi_ctx: pointer to a VSI context struct
1205 * @cd: pointer to command details structure or NULL
1207 * Add a VSI context to the hardware (0x0210)
1210 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1211 struct ice_sq_cd *cd)
1213 struct ice_aqc_add_update_free_vsi_resp *res;
1214 struct ice_aqc_add_get_update_free_vsi *cmd;
1215 struct ice_aq_desc desc;
1216 enum ice_status status;
1218 cmd = &desc.params.vsi_cmd;
1219 res = &desc.params.add_update_free_vsi_res;
1221 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
1223 if (!vsi_ctx->alloc_from_pool)
1224 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
1225 ICE_AQ_VSI_IS_VALID);
1227 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
1229 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1231 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1232 sizeof(vsi_ctx->info), cd);
1235 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1236 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
1237 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
1245 * @hw: pointer to the HW struct
1246 * @vsi_ctx: pointer to a VSI context struct
1247 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1248 * @cd: pointer to command details structure or NULL
1250 * Free VSI context info from hardware (0x0213)
1253 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1254 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1256 struct ice_aqc_add_update_free_vsi_resp *resp;
1257 struct ice_aqc_add_get_update_free_vsi *cmd;
1258 struct ice_aq_desc desc;
1259 enum ice_status status;
1261 cmd = &desc.params.vsi_cmd;
1262 resp = &desc.params.add_update_free_vsi_res;
1264 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1266 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1268 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
1270 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1272 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1273 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1281 * @hw: pointer to the HW struct
1282 * @vsi_ctx: pointer to a VSI context struct
1283 * @cd: pointer to command details structure or NULL
1285 * Update VSI context in the hardware (0x0211)
1288 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1289 struct ice_sq_cd *cd)
1291 struct ice_aqc_add_update_free_vsi_resp *resp;
1292 struct ice_aqc_add_get_update_free_vsi *cmd;
1293 struct ice_aq_desc desc;
1294 enum ice_status status;
1296 cmd = &desc.params.vsi_cmd;
1297 resp = &desc.params.add_update_free_vsi_res;
1299 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1301 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1303 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1305 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1306 sizeof(vsi_ctx->info), cd);
1309 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1310 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1317 * ice_is_vsi_valid - check whether the VSI is valid or not
1318 * @hw: pointer to the HW struct
1319 * @vsi_handle: VSI handle
1321 * check whether the VSI is valid or not
1323 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1325 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1329 * ice_get_hw_vsi_num - return the HW VSI number
1330 * @hw: pointer to the HW struct
1331 * @vsi_handle: VSI handle
1333 * return the HW VSI number
1334 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1336 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1338 return hw->vsi_ctx[vsi_handle]->vsi_num;
1342 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1343 * @hw: pointer to the HW struct
1344 * @vsi_handle: VSI handle
1346 * return the VSI context entry for a given VSI handle
1348 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1350 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1354 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1355 * @hw: pointer to the HW struct
1356 * @vsi_handle: VSI handle
1357 * @vsi: VSI context pointer
1359 * save the VSI context entry for a given VSI handle
1362 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1364 hw->vsi_ctx[vsi_handle] = vsi;
1368 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1369 * @hw: pointer to the HW struct
1370 * @vsi_handle: VSI handle
1372 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1374 struct ice_vsi_ctx *vsi;
1377 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1380 ice_for_each_traffic_class(i) {
1381 if (vsi->lan_q_ctx[i]) {
1382 ice_free(hw, vsi->lan_q_ctx[i]);
1383 vsi->lan_q_ctx[i] = NULL;
1389 * ice_clear_vsi_ctx - clear the VSI context entry
1390 * @hw: pointer to the HW struct
1391 * @vsi_handle: VSI handle
1393 * clear the VSI context entry
1395 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1397 struct ice_vsi_ctx *vsi;
1399 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1401 ice_clear_vsi_q_ctx(hw, vsi_handle);
1403 hw->vsi_ctx[vsi_handle] = NULL;
1408 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1409 * @hw: pointer to the HW struct
1411 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1415 for (i = 0; i < ICE_MAX_VSI; i++)
1416 ice_clear_vsi_ctx(hw, i);
1420 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1421 * @hw: pointer to the HW struct
1422 * @vsi_handle: unique VSI handle provided by drivers
1423 * @vsi_ctx: pointer to a VSI context struct
1424 * @cd: pointer to command details structure or NULL
1426 * Add a VSI context to the hardware also add it into the VSI handle list.
1427 * If this function gets called after reset for existing VSIs then update
1428 * with the new HW VSI number in the corresponding VSI handle list entry.
1431 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1432 struct ice_sq_cd *cd)
1434 struct ice_vsi_ctx *tmp_vsi_ctx;
1435 enum ice_status status;
1437 if (vsi_handle >= ICE_MAX_VSI)
1438 return ICE_ERR_PARAM;
1439 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1442 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1444 /* Create a new VSI context */
1445 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1446 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
1448 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1449 return ICE_ERR_NO_MEMORY;
1451 *tmp_vsi_ctx = *vsi_ctx;
1453 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1455 /* update with new HW VSI num */
1456 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1463 * ice_free_vsi- free VSI context from hardware and VSI handle list
1464 * @hw: pointer to the HW struct
1465 * @vsi_handle: unique VSI handle
1466 * @vsi_ctx: pointer to a VSI context struct
1467 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1468 * @cd: pointer to command details structure or NULL
1470 * Free VSI context info from hardware as well as from VSI handle list
1473 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1474 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1476 enum ice_status status;
1478 if (!ice_is_vsi_valid(hw, vsi_handle))
1479 return ICE_ERR_PARAM;
1480 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1481 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1483 ice_clear_vsi_ctx(hw, vsi_handle);
1489 * @hw: pointer to the HW struct
1490 * @vsi_handle: unique VSI handle
1491 * @vsi_ctx: pointer to a VSI context struct
1492 * @cd: pointer to command details structure or NULL
1494 * Update VSI context in the hardware
1497 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1498 struct ice_sq_cd *cd)
1500 if (!ice_is_vsi_valid(hw, vsi_handle))
1501 return ICE_ERR_PARAM;
1502 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1503 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1507 * ice_aq_get_vsi_params
1508 * @hw: pointer to the HW struct
1509 * @vsi_ctx: pointer to a VSI context struct
1510 * @cd: pointer to command details structure or NULL
1512 * Get VSI context info from hardware (0x0212)
1515 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1516 struct ice_sq_cd *cd)
1518 struct ice_aqc_add_get_update_free_vsi *cmd;
1519 struct ice_aqc_get_vsi_resp *resp;
1520 struct ice_aq_desc desc;
1521 enum ice_status status;
1523 cmd = &desc.params.vsi_cmd;
1524 resp = &desc.params.get_vsi_resp;
1526 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1528 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1530 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1531 sizeof(vsi_ctx->info), cd);
1533 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1535 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1536 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1543 * ice_aq_add_update_mir_rule - add/update a mirror rule
1544 * @hw: pointer to the HW struct
1545 * @rule_type: Rule Type
1546 * @dest_vsi: VSI number to which packets will be mirrored
1547 * @count: length of the list
1548 * @mr_buf: buffer for list of mirrored VSI numbers
1549 * @cd: pointer to command details structure or NULL
1552 * Add/Update Mirror Rule (0x260).
1555 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1556 u16 count, struct ice_mir_rule_buf *mr_buf,
1557 struct ice_sq_cd *cd, u16 *rule_id)
1559 struct ice_aqc_add_update_mir_rule *cmd;
1560 struct ice_aq_desc desc;
1561 enum ice_status status;
1562 __le16 *mr_list = NULL;
1565 switch (rule_type) {
1566 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1567 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1568 /* Make sure count and mr_buf are set for these rule_types */
1569 if (!(count && mr_buf))
1570 return ICE_ERR_PARAM;
1572 buf_size = count * sizeof(__le16);
1573 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1575 return ICE_ERR_NO_MEMORY;
1577 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1578 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1579 /* Make sure count and mr_buf are not set for these
1582 if (count || mr_buf)
1583 return ICE_ERR_PARAM;
1586 ice_debug(hw, ICE_DBG_SW,
1587 "Error due to unsupported rule_type %u\n", rule_type);
1588 return ICE_ERR_OUT_OF_RANGE;
1591 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1593 /* Pre-process 'mr_buf' items for add/update of virtual port
1594 * ingress/egress mirroring (but not physical port ingress/egress
1600 for (i = 0; i < count; i++) {
1603 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1605 /* Validate specified VSI number, make sure it is less
1606 * than ICE_MAX_VSI, if not return with error.
1608 if (id >= ICE_MAX_VSI) {
1609 ice_debug(hw, ICE_DBG_SW,
1610 "Error VSI index (%u) out-of-range\n",
1612 ice_free(hw, mr_list);
1613 return ICE_ERR_OUT_OF_RANGE;
1616 /* add VSI to mirror rule */
1619 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1620 else /* remove VSI from mirror rule */
1621 mr_list[i] = CPU_TO_LE16(id);
1625 cmd = &desc.params.add_update_rule;
1626 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1627 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1628 ICE_AQC_RULE_ID_VALID_M);
1629 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1630 cmd->num_entries = CPU_TO_LE16(count);
1631 cmd->dest = CPU_TO_LE16(dest_vsi);
1633 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
1635 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1637 ice_free(hw, mr_list);
1643 * ice_aq_delete_mir_rule - delete a mirror rule
1644 * @hw: pointer to the HW struct
1645 * @rule_id: Mirror rule ID (to be deleted)
1646 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1647 * otherwise it is returned to the shared pool
1648 * @cd: pointer to command details structure or NULL
1650 * Delete Mirror Rule (0x261).
1653 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1654 struct ice_sq_cd *cd)
1656 struct ice_aqc_delete_mir_rule *cmd;
1657 struct ice_aq_desc desc;
1659 /* rule_id should be in the range 0...63 */
1660 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1661 return ICE_ERR_OUT_OF_RANGE;
1663 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1665 cmd = &desc.params.del_rule;
1666 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1667 cmd->rule_id = CPU_TO_LE16(rule_id);
1670 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1672 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1676 * ice_aq_alloc_free_vsi_list
1677 * @hw: pointer to the HW struct
1678 * @vsi_list_id: VSI list ID returned or used for lookup
1679 * @lkup_type: switch rule filter lookup type
1680 * @opc: switch rules population command type - pass in the command opcode
1682 * allocates or free a VSI list resource
1684 static enum ice_status
1685 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1686 enum ice_sw_lkup_type lkup_type,
1687 enum ice_adminq_opc opc)
1689 struct ice_aqc_alloc_free_res_elem *sw_buf;
1690 struct ice_aqc_res_elem *vsi_ele;
1691 enum ice_status status;
1694 buf_len = sizeof(*sw_buf);
1695 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1696 ice_malloc(hw, buf_len);
1698 return ICE_ERR_NO_MEMORY;
1699 sw_buf->num_elems = CPU_TO_LE16(1);
1701 if (lkup_type == ICE_SW_LKUP_MAC ||
1702 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1703 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1704 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1705 lkup_type == ICE_SW_LKUP_PROMISC ||
1706 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1707 lkup_type == ICE_SW_LKUP_LAST) {
1708 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1709 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1711 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1713 status = ICE_ERR_PARAM;
1714 goto ice_aq_alloc_free_vsi_list_exit;
1717 if (opc == ice_aqc_opc_free_res)
1718 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1720 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1722 goto ice_aq_alloc_free_vsi_list_exit;
1724 if (opc == ice_aqc_opc_alloc_res) {
1725 vsi_ele = &sw_buf->elem[0];
1726 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1729 ice_aq_alloc_free_vsi_list_exit:
1730 ice_free(hw, sw_buf);
1735 * ice_aq_set_storm_ctrl - Sets storm control configuration
1736 * @hw: pointer to the HW struct
1737 * @bcast_thresh: represents the upper threshold for broadcast storm control
1738 * @mcast_thresh: represents the upper threshold for multicast storm control
1739 * @ctl_bitmask: storm control control knobs
1741 * Sets the storm control configuration (0x0280)
1744 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1747 struct ice_aqc_storm_cfg *cmd;
1748 struct ice_aq_desc desc;
1750 cmd = &desc.params.storm_conf;
1752 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
1754 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1755 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1756 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1758 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1762 * ice_aq_get_storm_ctrl - gets storm control configuration
1763 * @hw: pointer to the HW struct
1764 * @bcast_thresh: represents the upper threshold for broadcast storm control
1765 * @mcast_thresh: represents the upper threshold for multicast storm control
1766 * @ctl_bitmask: storm control control knobs
1768 * Gets the storm control configuration (0x0281)
1771 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
1774 enum ice_status status;
1775 struct ice_aq_desc desc;
1777 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1779 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1781 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
1784 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1787 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1790 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1797 * ice_aq_sw_rules - add/update/remove switch rules
1798 * @hw: pointer to the HW struct
1799 * @rule_list: pointer to switch rule population list
1800 * @rule_list_sz: total size of the rule list in bytes
1801 * @num_rules: number of switch rules in the rule_list
1802 * @opc: switch rules population command type - pass in the command opcode
1803 * @cd: pointer to command details structure or NULL
1805 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1807 static enum ice_status
1808 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1809 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1811 struct ice_aq_desc desc;
1813 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1815 if (opc != ice_aqc_opc_add_sw_rules &&
1816 opc != ice_aqc_opc_update_sw_rules &&
1817 opc != ice_aqc_opc_remove_sw_rules)
1818 return ICE_ERR_PARAM;
1820 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1822 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1823 desc.params.sw_rules.num_rules_fltr_entry_index =
1824 CPU_TO_LE16(num_rules);
1825 return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1829 * ice_aq_add_recipe - add switch recipe
1830 * @hw: pointer to the HW struct
1831 * @s_recipe_list: pointer to switch rule population list
1832 * @num_recipes: number of switch recipes in the list
1833 * @cd: pointer to command details structure or NULL
1838 ice_aq_add_recipe(struct ice_hw *hw,
1839 struct ice_aqc_recipe_data_elem *s_recipe_list,
1840 u16 num_recipes, struct ice_sq_cd *cd)
1842 struct ice_aqc_add_get_recipe *cmd;
1843 struct ice_aq_desc desc;
1846 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1847 cmd = &desc.params.add_get_recipe;
1848 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1850 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
1851 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1853 buf_size = num_recipes * sizeof(*s_recipe_list);
1855 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1859 * ice_aq_get_recipe - get switch recipe
1860 * @hw: pointer to the HW struct
1861 * @s_recipe_list: pointer to switch rule population list
1862 * @num_recipes: pointer to the number of recipes (input and output)
1863 * @recipe_root: root recipe number of recipe(s) to retrieve
1864 * @cd: pointer to command details structure or NULL
1868 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1869 * On output, *num_recipes will equal the number of entries returned in
1872 * The caller must supply enough space in s_recipe_list to hold all possible
1873 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1876 ice_aq_get_recipe(struct ice_hw *hw,
1877 struct ice_aqc_recipe_data_elem *s_recipe_list,
1878 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1880 struct ice_aqc_add_get_recipe *cmd;
1881 struct ice_aq_desc desc;
1882 enum ice_status status;
1885 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1886 return ICE_ERR_PARAM;
1888 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1889 cmd = &desc.params.add_get_recipe;
1890 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1892 cmd->return_index = CPU_TO_LE16(recipe_root);
1893 cmd->num_sub_recipes = 0;
1895 buf_size = *num_recipes * sizeof(*s_recipe_list);
1897 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1898 /* cppcheck-suppress constArgument */
1899 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1905 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1906 * @hw: pointer to the HW struct
1907 * @profile_id: package profile ID to associate the recipe with
1908 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1909 * @cd: pointer to command details structure or NULL
1910 * Recipe to profile association (0x0291)
1913 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1914 struct ice_sq_cd *cd)
1916 struct ice_aqc_recipe_to_profile *cmd;
1917 struct ice_aq_desc desc;
1919 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1920 cmd = &desc.params.recipe_to_profile;
1921 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1922 cmd->profile_id = CPU_TO_LE16(profile_id);
1923 /* Set the recipe ID bit in the bitmask to let the device know which
1924 * profile we are associating the recipe to
1926 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1927 ICE_NONDMA_TO_NONDMA);
1929 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1933 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1934 * @hw: pointer to the HW struct
1935 * @profile_id: package profile ID to associate the recipe with
1936 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1937 * @cd: pointer to command details structure or NULL
1938 * Associate profile ID with given recipe (0x0293)
1941 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1942 struct ice_sq_cd *cd)
1944 struct ice_aqc_recipe_to_profile *cmd;
1945 struct ice_aq_desc desc;
1946 enum ice_status status;
1948 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1949 cmd = &desc.params.recipe_to_profile;
1950 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
1951 cmd->profile_id = CPU_TO_LE16(profile_id);
1953 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1955 ice_memcpy(r_bitmap, cmd->recipe_assoc,
1956 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
1962 * ice_alloc_recipe - add recipe resource
1963 * @hw: pointer to the hardware structure
1964 * @rid: recipe ID returned as response to AQ call
1966 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1968 struct ice_aqc_alloc_free_res_elem *sw_buf;
1969 enum ice_status status;
1972 buf_len = sizeof(*sw_buf);
1973 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1975 return ICE_ERR_NO_MEMORY;
1977 sw_buf->num_elems = CPU_TO_LE16(1);
1978 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
1979 ICE_AQC_RES_TYPE_S) |
1980 ICE_AQC_RES_TYPE_FLAG_SHARED);
1981 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1982 ice_aqc_opc_alloc_res, NULL);
1984 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
1985 ice_free(hw, sw_buf);
1990 /* ice_init_port_info - Initialize port_info with switch configuration data
1991 * @pi: pointer to port_info
1992 * @vsi_port_num: VSI number or port number
1993 * @type: Type of switch element (port or VSI)
1994 * @swid: switch ID of the switch the element is attached to
1995 * @pf_vf_num: PF or VF number
1996 * @is_vf: true if the element is a VF, false otherwise
1999 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2000 u16 swid, u16 pf_vf_num, bool is_vf)
2003 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2004 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2006 pi->pf_vf_num = pf_vf_num;
2008 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
2009 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
2012 ice_debug(pi->hw, ICE_DBG_SW,
2013 "incorrect VSI/port type received\n");
2018 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
2019 * @hw: pointer to the hardware structure
2021 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
2023 struct ice_aqc_get_sw_cfg_resp *rbuf;
2024 enum ice_status status;
2031 num_total_ports = 1;
2033 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
2034 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
2037 return ICE_ERR_NO_MEMORY;
2039 /* Multiple calls to ice_aq_get_sw_cfg may be required
2040 * to get all the switch configuration information. The need
2041 * for additional calls is indicated by ice_aq_get_sw_cfg
2042 * writing a non-zero value in req_desc
2045 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
2046 &req_desc, &num_elems, NULL);
2051 for (i = 0; i < num_elems; i++) {
2052 struct ice_aqc_get_sw_cfg_resp_elem *ele;
2053 u16 pf_vf_num, swid, vsi_port_num;
2057 ele = rbuf[i].elements;
2058 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
2059 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
2061 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
2062 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
2064 swid = LE16_TO_CPU(ele->swid);
2066 if (LE16_TO_CPU(ele->pf_vf_num) &
2067 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
2070 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
2071 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
2074 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2075 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
2076 if (j == num_total_ports) {
2077 ice_debug(hw, ICE_DBG_SW,
2078 "more ports than expected\n");
2079 status = ICE_ERR_CFG;
2082 ice_init_port_info(hw->port_info,
2083 vsi_port_num, res_type, swid,
2091 } while (req_desc && !status);
2094 ice_free(hw, (void *)rbuf);
2099 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2100 * @hw: pointer to the hardware structure
2101 * @fi: filter info structure to fill/update
2103 * This helper function populates the lb_en and lan_en elements of the provided
2104 * ice_fltr_info struct using the switch's type and characteristics of the
2105 * switch rule being configured.
2107 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
2111 if ((fi->flag & ICE_FLTR_TX) &&
2112 (fi->fltr_act == ICE_FWD_TO_VSI ||
2113 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2114 fi->fltr_act == ICE_FWD_TO_Q ||
2115 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2116 /* Setting LB for prune actions will result in replicated
2117 * packets to the internal switch that will be dropped.
2119 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2122 /* Set lan_en to TRUE if
2123 * 1. The switch is a VEB AND
2125 * 2.1 The lookup is a directional lookup like ethertype,
2126 * promiscuous, ethertype-MAC, promiscuous-VLAN
2127 * and default-port OR
2128 * 2.2 The lookup is VLAN, OR
2129 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2130 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
2134 * The switch is a VEPA.
2136 * In all other cases, the LAN enable has to be set to false.
2139 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2140 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
2141 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2142 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2143 fi->lkup_type == ICE_SW_LKUP_DFLT ||
2144 fi->lkup_type == ICE_SW_LKUP_VLAN ||
2145 (fi->lkup_type == ICE_SW_LKUP_MAC &&
2146 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
2147 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
2148 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
2157 * ice_fill_sw_rule - Helper function to fill switch rule structure
2158 * @hw: pointer to the hardware structure
2159 * @f_info: entry containing packet forwarding information
2160 * @s_rule: switch rule structure to be filled in based on mac_entry
2161 * @opc: switch rules population command type - pass in the command opcode
2164 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
2165 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
2167 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
2175 if (opc == ice_aqc_opc_remove_sw_rules) {
2176 s_rule->pdata.lkup_tx_rx.act = 0;
2177 s_rule->pdata.lkup_tx_rx.index =
2178 CPU_TO_LE16(f_info->fltr_rule_id);
2179 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
2183 eth_hdr_sz = sizeof(dummy_eth_header);
2184 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
2186 /* initialize the ether header with a dummy header */
2187 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
2188 ice_fill_sw_info(hw, f_info);
2190 switch (f_info->fltr_act) {
2191 case ICE_FWD_TO_VSI:
2192 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
2193 ICE_SINGLE_ACT_VSI_ID_M;
2194 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2195 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2196 ICE_SINGLE_ACT_VALID_BIT;
2198 case ICE_FWD_TO_VSI_LIST:
2199 act |= ICE_SINGLE_ACT_VSI_LIST;
2200 act |= (f_info->fwd_id.vsi_list_id <<
2201 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
2202 ICE_SINGLE_ACT_VSI_LIST_ID_M;
2203 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2204 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2205 ICE_SINGLE_ACT_VALID_BIT;
2208 act |= ICE_SINGLE_ACT_TO_Q;
2209 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2210 ICE_SINGLE_ACT_Q_INDEX_M;
2212 case ICE_DROP_PACKET:
2213 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
2214 ICE_SINGLE_ACT_VALID_BIT;
2216 case ICE_FWD_TO_QGRP:
2217 q_rgn = f_info->qgrp_size > 0 ?
2218 (u8)ice_ilog2(f_info->qgrp_size) : 0;
2219 act |= ICE_SINGLE_ACT_TO_Q;
2220 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2221 ICE_SINGLE_ACT_Q_INDEX_M;
2222 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
2223 ICE_SINGLE_ACT_Q_REGION_M;
2230 act |= ICE_SINGLE_ACT_LB_ENABLE;
2232 act |= ICE_SINGLE_ACT_LAN_ENABLE;
2234 switch (f_info->lkup_type) {
2235 case ICE_SW_LKUP_MAC:
2236 daddr = f_info->l_data.mac.mac_addr;
2238 case ICE_SW_LKUP_VLAN:
2239 vlan_id = f_info->l_data.vlan.vlan_id;
2240 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
2241 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
2242 act |= ICE_SINGLE_ACT_PRUNE;
2243 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
2246 case ICE_SW_LKUP_ETHERTYPE_MAC:
2247 daddr = f_info->l_data.ethertype_mac.mac_addr;
2249 case ICE_SW_LKUP_ETHERTYPE:
2250 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2251 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
2253 case ICE_SW_LKUP_MAC_VLAN:
2254 daddr = f_info->l_data.mac_vlan.mac_addr;
2255 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2257 case ICE_SW_LKUP_PROMISC_VLAN:
2258 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2260 case ICE_SW_LKUP_PROMISC:
2261 daddr = f_info->l_data.mac_vlan.mac_addr;
2267 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
2268 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
2269 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
2271 /* Recipe set depending on lookup type */
2272 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
2273 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
2274 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2277 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
2278 ICE_NONDMA_TO_NONDMA);
2280 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
2281 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
2282 *off = CPU_TO_BE16(vlan_id);
2285 /* Create the switch rule with the final dummy Ethernet header */
2286 if (opc != ice_aqc_opc_update_sw_rules)
2287 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
2291 * ice_add_marker_act
2292 * @hw: pointer to the hardware structure
2293 * @m_ent: the management entry for which sw marker needs to be added
2294 * @sw_marker: sw marker to tag the Rx descriptor with
2295 * @l_id: large action resource ID
2297 * Create a large action to hold software marker and update the switch rule
2298 * entry pointed by m_ent with newly created large action
2300 static enum ice_status
2301 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2302 u16 sw_marker, u16 l_id)
2304 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
2305 /* For software marker we need 3 large actions
2306 * 1. FWD action: FWD TO VSI or VSI LIST
2307 * 2. GENERIC VALUE action to hold the profile ID
2308 * 3. GENERIC VALUE action to hold the software marker ID
2310 const u16 num_lg_acts = 3;
2311 enum ice_status status;
2317 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2318 return ICE_ERR_PARAM;
2320 /* Create two back-to-back switch rules and submit them to the HW using
2321 * one memory buffer:
2325 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2326 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2327 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2329 return ICE_ERR_NO_MEMORY;
2331 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2333 /* Fill in the first switch rule i.e. large action */
2334 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2335 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2336 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2338 /* First action VSI forwarding or VSI list forwarding depending on how
2341 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2342 m_ent->fltr_info.fwd_id.hw_vsi_id;
2344 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2345 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2346 ICE_LG_ACT_VSI_LIST_ID_M;
2347 if (m_ent->vsi_count > 1)
2348 act |= ICE_LG_ACT_VSI_LIST;
2349 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2351 /* Second action descriptor type */
2352 act = ICE_LG_ACT_GENERIC;
2354 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2355 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2357 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2358 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2360 /* Third action Marker value */
2361 act |= ICE_LG_ACT_GENERIC;
2362 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2363 ICE_LG_ACT_GENERIC_VALUE_M;
2365 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
2367 /* call the fill switch rule to fill the lookup Tx Rx structure */
2368 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2369 ice_aqc_opc_update_sw_rules);
2371 /* Update the action to point to the large action ID */
2372 rx_tx->pdata.lkup_tx_rx.act =
2373 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
2374 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2375 ICE_SINGLE_ACT_PTR_VAL_M));
2377 /* Use the filter rule ID of the previously created rule with single
2378 * act. Once the update happens, hardware will treat this as large
2381 rx_tx->pdata.lkup_tx_rx.index =
2382 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
2384 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2385 ice_aqc_opc_update_sw_rules, NULL);
2387 m_ent->lg_act_idx = l_id;
2388 m_ent->sw_marker_id = sw_marker;
2391 ice_free(hw, lg_act);
2396 * ice_add_counter_act - add/update filter rule with counter action
2397 * @hw: pointer to the hardware structure
2398 * @m_ent: the management entry for which counter needs to be added
2399 * @counter_id: VLAN counter ID returned as part of allocate resource
2400 * @l_id: large action resource ID
2402 static enum ice_status
2403 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2404 u16 counter_id, u16 l_id)
2406 struct ice_aqc_sw_rules_elem *lg_act;
2407 struct ice_aqc_sw_rules_elem *rx_tx;
2408 enum ice_status status;
2409 /* 2 actions will be added while adding a large action counter */
2410 const int num_acts = 2;
2417 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2418 return ICE_ERR_PARAM;
2420 /* Create two back-to-back switch rules and submit them to the HW using
2421 * one memory buffer:
2425 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2426 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2427 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
2430 return ICE_ERR_NO_MEMORY;
2432 rx_tx = (struct ice_aqc_sw_rules_elem *)
2433 ((u8 *)lg_act + lg_act_size);
2435 /* Fill in the first switch rule i.e. large action */
2436 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2437 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2438 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2440 /* First action VSI forwarding or VSI list forwarding depending on how
2443 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2444 m_ent->fltr_info.fwd_id.hw_vsi_id;
2446 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2447 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2448 ICE_LG_ACT_VSI_LIST_ID_M;
2449 if (m_ent->vsi_count > 1)
2450 act |= ICE_LG_ACT_VSI_LIST;
2451 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2453 /* Second action counter ID */
2454 act = ICE_LG_ACT_STAT_COUNT;
2455 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2456 ICE_LG_ACT_STAT_COUNT_M;
2457 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2459 /* call the fill switch rule to fill the lookup Tx Rx structure */
2460 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2461 ice_aqc_opc_update_sw_rules);
2463 act = ICE_SINGLE_ACT_PTR;
2464 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2465 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2467 /* Use the filter rule ID of the previously created rule with single
2468 * act. Once the update happens, hardware will treat this as large
2471 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2472 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
2474 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2475 ice_aqc_opc_update_sw_rules, NULL);
2477 m_ent->lg_act_idx = l_id;
2478 m_ent->counter_index = counter_id;
2481 ice_free(hw, lg_act);
2486 * ice_create_vsi_list_map
2487 * @hw: pointer to the hardware structure
2488 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2489 * @num_vsi: number of VSI handles in the array
2490 * @vsi_list_id: VSI list ID generated as part of allocate resource
2492 * Helper function to create a new entry of VSI list ID to VSI mapping
2493 * using the given VSI list ID
2495 static struct ice_vsi_list_map_info *
2496 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2499 struct ice_switch_info *sw = hw->switch_info;
2500 struct ice_vsi_list_map_info *v_map;
2503 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2508 v_map->vsi_list_id = vsi_list_id;
2510 for (i = 0; i < num_vsi; i++)
2511 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
2513 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2518 * ice_update_vsi_list_rule
2519 * @hw: pointer to the hardware structure
2520 * @vsi_handle_arr: array of VSI handles to form a VSI list
2521 * @num_vsi: number of VSI handles in the array
2522 * @vsi_list_id: VSI list ID generated as part of allocate resource
2523 * @remove: Boolean value to indicate if this is a remove action
2524 * @opc: switch rules population command type - pass in the command opcode
2525 * @lkup_type: lookup type of the filter
2527 * Call AQ command to add a new switch rule or update existing switch rule
2528 * using the given VSI list ID
2530 static enum ice_status
2531 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2532 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2533 enum ice_sw_lkup_type lkup_type)
2535 struct ice_aqc_sw_rules_elem *s_rule;
2536 enum ice_status status;
2542 return ICE_ERR_PARAM;
2544 if (lkup_type == ICE_SW_LKUP_MAC ||
2545 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2546 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2547 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2548 lkup_type == ICE_SW_LKUP_PROMISC ||
2549 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2550 lkup_type == ICE_SW_LKUP_LAST)
2551 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2552 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2553 else if (lkup_type == ICE_SW_LKUP_VLAN)
2554 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2555 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2557 return ICE_ERR_PARAM;
2559 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2560 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2562 return ICE_ERR_NO_MEMORY;
2563 for (i = 0; i < num_vsi; i++) {
2564 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2565 status = ICE_ERR_PARAM;
2568 /* AQ call requires hw_vsi_id(s) */
2569 s_rule->pdata.vsi_list.vsi[i] =
2570 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2573 s_rule->type = CPU_TO_LE16(rule_type);
2574 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2575 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2577 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2580 ice_free(hw, s_rule);
2585 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2586 * @hw: pointer to the HW struct
2587 * @vsi_handle_arr: array of VSI handles to form a VSI list
2588 * @num_vsi: number of VSI handles in the array
2589 * @vsi_list_id: stores the ID of the VSI list to be created
2590 * @lkup_type: switch rule filter's lookup type
2592 static enum ice_status
2593 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2594 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2596 enum ice_status status;
2598 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2599 ice_aqc_opc_alloc_res);
2603 /* Update the newly created VSI list to include the specified VSIs */
2604 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2605 *vsi_list_id, false,
2606 ice_aqc_opc_add_sw_rules, lkup_type);
2610 * ice_create_pkt_fwd_rule
2611 * @hw: pointer to the hardware structure
2612 * @recp_list: corresponding filter management list
2613 * @f_entry: entry containing packet forwarding information
2615 * Create switch rule with given filter information and add an entry
2616 * to the corresponding filter management list to track this switch rule
2619 static enum ice_status
2620 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2621 struct ice_fltr_list_entry *f_entry)
2623 struct ice_fltr_mgmt_list_entry *fm_entry;
2624 struct ice_aqc_sw_rules_elem *s_rule;
2625 enum ice_status status;
2627 s_rule = (struct ice_aqc_sw_rules_elem *)
2628 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2630 return ICE_ERR_NO_MEMORY;
2631 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2632 ice_malloc(hw, sizeof(*fm_entry));
2634 status = ICE_ERR_NO_MEMORY;
2635 goto ice_create_pkt_fwd_rule_exit;
2638 fm_entry->fltr_info = f_entry->fltr_info;
2640 /* Initialize all the fields for the management entry */
2641 fm_entry->vsi_count = 1;
2642 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2643 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2644 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2646 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2647 ice_aqc_opc_add_sw_rules);
2649 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2650 ice_aqc_opc_add_sw_rules, NULL);
2652 ice_free(hw, fm_entry);
2653 goto ice_create_pkt_fwd_rule_exit;
2656 f_entry->fltr_info.fltr_rule_id =
2657 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2658 fm_entry->fltr_info.fltr_rule_id =
2659 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2661 /* The book keeping entries will get removed when base driver
2662 * calls remove filter AQ command
2664 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
2666 ice_create_pkt_fwd_rule_exit:
2667 ice_free(hw, s_rule);
2672 * ice_update_pkt_fwd_rule
2673 * @hw: pointer to the hardware structure
2674 * @f_info: filter information for switch rule
2676 * Call AQ command to update a previously created switch rule with a
2679 static enum ice_status
2680 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2682 struct ice_aqc_sw_rules_elem *s_rule;
2683 enum ice_status status;
2685 s_rule = (struct ice_aqc_sw_rules_elem *)
2686 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2688 return ICE_ERR_NO_MEMORY;
2690 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
2692 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2694 /* Update switch rule with new rule set to forward VSI list */
2695 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2696 ice_aqc_opc_update_sw_rules, NULL);
2698 ice_free(hw, s_rule);
2703 * ice_update_sw_rule_bridge_mode
2704 * @hw: pointer to the HW struct
2706 * Updates unicast switch filter rules based on VEB/VEPA mode
2708 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2710 struct ice_switch_info *sw = hw->switch_info;
2711 struct ice_fltr_mgmt_list_entry *fm_entry;
2712 enum ice_status status = ICE_SUCCESS;
2713 struct LIST_HEAD_TYPE *rule_head;
2714 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2716 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2717 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2719 ice_acquire_lock(rule_lock);
2720 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2722 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2723 u8 *addr = fi->l_data.mac.mac_addr;
2725 /* Update unicast Tx rules to reflect the selected
2728 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2729 (fi->fltr_act == ICE_FWD_TO_VSI ||
2730 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2731 fi->fltr_act == ICE_FWD_TO_Q ||
2732 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2733 status = ice_update_pkt_fwd_rule(hw, fi);
2739 ice_release_lock(rule_lock);
2745 * ice_add_update_vsi_list
2746 * @hw: pointer to the hardware structure
2747 * @m_entry: pointer to current filter management list entry
2748 * @cur_fltr: filter information from the book keeping entry
2749 * @new_fltr: filter information with the new VSI to be added
2751 * Call AQ command to add or update previously created VSI list with new VSI.
2753 * Helper function to do book keeping associated with adding filter information
2754 * The algorithm to do the book keeping is described below :
2755 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2756 * if only one VSI has been added till now
2757 * Allocate a new VSI list and add two VSIs
2758 * to this list using switch rule command
2759 * Update the previously created switch rule with the
2760 * newly created VSI list ID
2761 * if a VSI list was previously created
2762 * Add the new VSI to the previously created VSI list set
2763 * using the update switch rule command
2765 static enum ice_status
2766 ice_add_update_vsi_list(struct ice_hw *hw,
2767 struct ice_fltr_mgmt_list_entry *m_entry,
2768 struct ice_fltr_info *cur_fltr,
2769 struct ice_fltr_info *new_fltr)
2771 enum ice_status status = ICE_SUCCESS;
2772 u16 vsi_list_id = 0;
2774 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2775 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2776 return ICE_ERR_NOT_IMPL;
2778 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2779 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2780 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2781 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2782 return ICE_ERR_NOT_IMPL;
2784 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2785 /* Only one entry existed in the mapping and it was not already
2786 * a part of a VSI list. So, create a VSI list with the old and
2789 struct ice_fltr_info tmp_fltr;
2790 u16 vsi_handle_arr[2];
2792 /* A rule already exists with the new VSI being added */
2793 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2794 return ICE_ERR_ALREADY_EXISTS;
2796 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2797 vsi_handle_arr[1] = new_fltr->vsi_handle;
2798 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2800 new_fltr->lkup_type);
2804 tmp_fltr = *new_fltr;
2805 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2806 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2807 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2808 /* Update the previous switch rule of "MAC forward to VSI" to
2809 * "MAC fwd to VSI list"
2811 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
2815 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2816 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2817 m_entry->vsi_list_info =
2818 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2821 /* If this entry was large action then the large action needs
2822 * to be updated to point to FWD to VSI list
2824 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2826 ice_add_marker_act(hw, m_entry,
2827 m_entry->sw_marker_id,
2828 m_entry->lg_act_idx);
2830 u16 vsi_handle = new_fltr->vsi_handle;
2831 enum ice_adminq_opc opcode;
2833 if (!m_entry->vsi_list_info)
2836 /* A rule already exists with the new VSI being added */
2837 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
2840 /* Update the previously created VSI list set with
2841 * the new VSI ID passed in
2843 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2844 opcode = ice_aqc_opc_update_sw_rules;
2846 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2847 vsi_list_id, false, opcode,
2848 new_fltr->lkup_type);
2849 /* update VSI list mapping info with new VSI ID */
2851 ice_set_bit(vsi_handle,
2852 m_entry->vsi_list_info->vsi_map);
2855 m_entry->vsi_count++;
2860 * ice_find_rule_entry - Search a rule entry
2861 * @list_head: head of rule list
2862 * @f_info: rule information
2864 * Helper function to search for a given rule entry
2865 * Returns pointer to entry storing the rule if found
2867 static struct ice_fltr_mgmt_list_entry *
2868 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
2869 struct ice_fltr_info *f_info)
2871 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2873 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
2875 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2876 sizeof(f_info->l_data)) &&
2877 f_info->flag == list_itr->fltr_info.flag) {
2886 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2887 * @recp_list: VSI lists needs to be searched
2888 * @vsi_handle: VSI handle to be found in VSI list
2889 * @vsi_list_id: VSI list ID found containing vsi_handle
2891 * Helper function to search a VSI list with single entry containing given VSI
2892 * handle element. This can be extended further to search VSI list with more
2893 * than 1 vsi_count. Returns pointer to VSI list entry if found.
2895 static struct ice_vsi_list_map_info *
2896 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
2899 struct ice_vsi_list_map_info *map_info = NULL;
2900 struct LIST_HEAD_TYPE *list_head;
2902 list_head = &recp_list->filt_rules;
2903 if (recp_list->adv_rule) {
2904 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2906 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2907 ice_adv_fltr_mgmt_list_entry,
2909 if (list_itr->vsi_list_info) {
2910 map_info = list_itr->vsi_list_info;
2911 if (ice_is_bit_set(map_info->vsi_map,
2913 *vsi_list_id = map_info->vsi_list_id;
2919 struct ice_fltr_mgmt_list_entry *list_itr;
2921 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2922 ice_fltr_mgmt_list_entry,
2924 if (list_itr->vsi_count == 1 &&
2925 list_itr->vsi_list_info) {
2926 map_info = list_itr->vsi_list_info;
2927 if (ice_is_bit_set(map_info->vsi_map,
2929 *vsi_list_id = map_info->vsi_list_id;
2939 * ice_add_rule_internal - add rule for a given lookup type
2940 * @hw: pointer to the hardware structure
2941 * @recp_list: recipe list for which rule has to be added
2942 * @lport: logic port number on which function add rule
2943 * @f_entry: structure containing MAC forwarding information
2945 * Adds or updates the rule lists for a given recipe
2947 static enum ice_status
2948 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2949 u8 lport, struct ice_fltr_list_entry *f_entry)
2951 struct ice_fltr_info *new_fltr, *cur_fltr;
2952 struct ice_fltr_mgmt_list_entry *m_entry;
2953 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2954 enum ice_status status = ICE_SUCCESS;
2956 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2957 return ICE_ERR_PARAM;
2959 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
2960 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
2961 f_entry->fltr_info.fwd_id.hw_vsi_id =
2962 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2964 rule_lock = &recp_list->filt_rule_lock;
2966 ice_acquire_lock(rule_lock);
2967 new_fltr = &f_entry->fltr_info;
2968 if (new_fltr->flag & ICE_FLTR_RX)
2969 new_fltr->src = lport;
2970 else if (new_fltr->flag & ICE_FLTR_TX)
2972 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2974 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
2976 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
2977 goto exit_add_rule_internal;
2980 cur_fltr = &m_entry->fltr_info;
2981 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2983 exit_add_rule_internal:
2984 ice_release_lock(rule_lock);
2989 * ice_remove_vsi_list_rule
2990 * @hw: pointer to the hardware structure
2991 * @vsi_list_id: VSI list ID generated as part of allocate resource
2992 * @lkup_type: switch rule filter lookup type
2994 * The VSI list should be emptied before this function is called to remove the
2997 static enum ice_status
2998 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2999 enum ice_sw_lkup_type lkup_type)
3001 struct ice_aqc_sw_rules_elem *s_rule;
3002 enum ice_status status;
3005 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
3006 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3008 return ICE_ERR_NO_MEMORY;
3010 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
3011 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
3013 /* Free the vsi_list resource that we allocated. It is assumed that the
3014 * list is empty at this point.
3016 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
3017 ice_aqc_opc_free_res);
3019 ice_free(hw, s_rule);
3024 * ice_rem_update_vsi_list
3025 * @hw: pointer to the hardware structure
3026 * @vsi_handle: VSI handle of the VSI to remove
3027 * @fm_list: filter management entry for which the VSI list management needs to
3030 static enum ice_status
3031 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
3032 struct ice_fltr_mgmt_list_entry *fm_list)
3034 enum ice_sw_lkup_type lkup_type;
3035 enum ice_status status = ICE_SUCCESS;
3038 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
3039 fm_list->vsi_count == 0)
3040 return ICE_ERR_PARAM;
3042 /* A rule with the VSI being removed does not exist */
3043 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
3044 return ICE_ERR_DOES_NOT_EXIST;
3046 lkup_type = fm_list->fltr_info.lkup_type;
3047 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
3048 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
3049 ice_aqc_opc_update_sw_rules,
3054 fm_list->vsi_count--;
3055 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
3057 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
3058 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
3059 struct ice_vsi_list_map_info *vsi_list_info =
3060 fm_list->vsi_list_info;
3063 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
3065 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
3066 return ICE_ERR_OUT_OF_RANGE;
3068 /* Make sure VSI list is empty before removing it below */
3069 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
3071 ice_aqc_opc_update_sw_rules,
3076 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
3077 tmp_fltr_info.fwd_id.hw_vsi_id =
3078 ice_get_hw_vsi_num(hw, rem_vsi_handle);
3079 tmp_fltr_info.vsi_handle = rem_vsi_handle;
3080 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
3082 ice_debug(hw, ICE_DBG_SW,
3083 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
3084 tmp_fltr_info.fwd_id.hw_vsi_id, status);
3088 fm_list->fltr_info = tmp_fltr_info;
3091 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
3092 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
3093 struct ice_vsi_list_map_info *vsi_list_info =
3094 fm_list->vsi_list_info;
3096 /* Remove the VSI list since it is no longer used */
3097 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
3099 ice_debug(hw, ICE_DBG_SW,
3100 "Failed to remove VSI list %d, error %d\n",
3101 vsi_list_id, status);
3105 LIST_DEL(&vsi_list_info->list_entry);
3106 ice_free(hw, vsi_list_info);
3107 fm_list->vsi_list_info = NULL;
3114 * ice_remove_rule_internal - Remove a filter rule of a given type
3116 * @hw: pointer to the hardware structure
3117 * @recp_list: recipe list for which the rule needs to removed
3118 * @f_entry: rule entry containing filter information
3120 static enum ice_status
3121 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3122 struct ice_fltr_list_entry *f_entry)
3124 struct ice_fltr_mgmt_list_entry *list_elem;
3125 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3126 enum ice_status status = ICE_SUCCESS;
3127 bool remove_rule = false;
3130 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3131 return ICE_ERR_PARAM;
3132 f_entry->fltr_info.fwd_id.hw_vsi_id =
3133 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3135 rule_lock = &recp_list->filt_rule_lock;
3136 ice_acquire_lock(rule_lock);
3137 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
3138 &f_entry->fltr_info);
3140 status = ICE_ERR_DOES_NOT_EXIST;
3144 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
3146 } else if (!list_elem->vsi_list_info) {
3147 status = ICE_ERR_DOES_NOT_EXIST;
3149 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
3150 /* a ref_cnt > 1 indicates that the vsi_list is being
3151 * shared by multiple rules. Decrement the ref_cnt and
3152 * remove this rule, but do not modify the list, as it
3153 * is in-use by other rules.
3155 list_elem->vsi_list_info->ref_cnt--;
3158 /* a ref_cnt of 1 indicates the vsi_list is only used
3159 * by one rule. However, the original removal request is only
3160 * for a single VSI. Update the vsi_list first, and only
3161 * remove the rule if there are no further VSIs in this list.
3163 vsi_handle = f_entry->fltr_info.vsi_handle;
3164 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
3167 /* if VSI count goes to zero after updating the VSI list */
3168 if (list_elem->vsi_count == 0)
3173 /* Remove the lookup rule */
3174 struct ice_aqc_sw_rules_elem *s_rule;
3176 s_rule = (struct ice_aqc_sw_rules_elem *)
3177 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
3179 status = ICE_ERR_NO_MEMORY;
3183 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3184 ice_aqc_opc_remove_sw_rules);
3186 status = ice_aq_sw_rules(hw, s_rule,
3187 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
3188 ice_aqc_opc_remove_sw_rules, NULL);
3190 /* Remove a book keeping from the list */
3191 ice_free(hw, s_rule);
3196 LIST_DEL(&list_elem->list_entry);
3197 ice_free(hw, list_elem);
3200 ice_release_lock(rule_lock);
3205 * ice_aq_get_res_alloc - get allocated resources
3206 * @hw: pointer to the HW struct
3207 * @num_entries: pointer to u16 to store the number of resource entries returned
3208 * @buf: pointer to user-supplied buffer
3209 * @buf_size: size of buff
3210 * @cd: pointer to command details structure or NULL
3212 * The user-supplied buffer must be large enough to store the resource
3213 * information for all resource types. Each resource type is an
3214 * ice_aqc_get_res_resp_data_elem structure.
3217 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
3218 u16 buf_size, struct ice_sq_cd *cd)
3220 struct ice_aqc_get_res_alloc *resp;
3221 enum ice_status status;
3222 struct ice_aq_desc desc;
3225 return ICE_ERR_BAD_PTR;
3227 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
3228 return ICE_ERR_INVAL_SIZE;
3230 resp = &desc.params.get_res;
3232 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
3233 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3235 if (!status && num_entries)
3236 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
3242 * ice_aq_get_res_descs - get allocated resource descriptors
3243 * @hw: pointer to the hardware structure
3244 * @num_entries: number of resource entries in buffer
3245 * @buf: Indirect buffer to hold data parameters and response
3246 * @buf_size: size of buffer for indirect commands
3247 * @res_type: resource type
3248 * @res_shared: is resource shared
3249 * @desc_id: input - first desc ID to start; output - next desc ID
3250 * @cd: pointer to command details structure or NULL
3253 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
3254 struct ice_aqc_get_allocd_res_desc_resp *buf,
3255 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
3256 struct ice_sq_cd *cd)
3258 struct ice_aqc_get_allocd_res_desc *cmd;
3259 struct ice_aq_desc desc;
3260 enum ice_status status;
3262 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3264 cmd = &desc.params.get_res_desc;
3267 return ICE_ERR_PARAM;
3269 if (buf_size != (num_entries * sizeof(*buf)))
3270 return ICE_ERR_PARAM;
3272 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
3274 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
3275 ICE_AQC_RES_TYPE_M) | (res_shared ?
3276 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
3277 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
3279 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3281 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
3287 * ice_add_mac_rule - Add a MAC address based filter rule
3288 * @hw: pointer to the hardware structure
3289 * @m_list: list of MAC addresses and forwarding information
3290 * @sw: pointer to switch info struct for which function add rule
3291 * @lport: logic port number on which function add rule
3293 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
3294 * multiple unicast addresses, the function assumes that all the
3295 * addresses are unique in a given add_mac call. It doesn't
3296 * check for duplicates in this case, removing duplicates from a given
3297 * list should be taken care of in the caller of this function.
3299 static enum ice_status
3300 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
3301 struct ice_switch_info *sw, u8 lport)
3303 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
3304 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3305 struct ice_fltr_list_entry *m_list_itr;
3306 struct LIST_HEAD_TYPE *rule_head;
3307 u16 total_elem_left, s_rule_size;
3308 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3309 enum ice_status status = ICE_SUCCESS;
3310 u16 num_unicast = 0;
3314 rule_lock = &recp_list->filt_rule_lock;
3315 rule_head = &recp_list->filt_rules;
3317 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3319 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3323 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3324 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3325 if (!ice_is_vsi_valid(hw, vsi_handle))
3326 return ICE_ERR_PARAM;
3327 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3328 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3329 /* update the src in case it is VSI num */
3330 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3331 return ICE_ERR_PARAM;
3332 m_list_itr->fltr_info.src = hw_vsi_id;
3333 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3334 IS_ZERO_ETHER_ADDR(add))
3335 return ICE_ERR_PARAM;
3336 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3337 /* Don't overwrite the unicast address */
3338 ice_acquire_lock(rule_lock);
3339 if (ice_find_rule_entry(rule_head,
3340 &m_list_itr->fltr_info)) {
3341 ice_release_lock(rule_lock);
3342 return ICE_ERR_ALREADY_EXISTS;
3344 ice_release_lock(rule_lock);
3346 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3347 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3348 m_list_itr->status =
3349 ice_add_rule_internal(hw, recp_list, lport,
3351 if (m_list_itr->status)
3352 return m_list_itr->status;
3356 ice_acquire_lock(rule_lock);
3357 /* Exit if no suitable entries were found for adding bulk switch rule */
3359 status = ICE_SUCCESS;
3360 goto ice_add_mac_exit;
3363 /* Allocate switch rule buffer for the bulk update for unicast */
3364 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3365 s_rule = (struct ice_aqc_sw_rules_elem *)
3366 ice_calloc(hw, num_unicast, s_rule_size);
3368 status = ICE_ERR_NO_MEMORY;
3369 goto ice_add_mac_exit;
3373 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3375 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3376 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3378 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3379 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3380 ice_aqc_opc_add_sw_rules);
3381 r_iter = (struct ice_aqc_sw_rules_elem *)
3382 ((u8 *)r_iter + s_rule_size);
3386 /* Call AQ bulk switch rule update for all unicast addresses */
3388 /* Call AQ switch rule in AQ_MAX chunk */
3389 for (total_elem_left = num_unicast; total_elem_left > 0;
3390 total_elem_left -= elem_sent) {
3391 struct ice_aqc_sw_rules_elem *entry = r_iter;
3393 elem_sent = MIN_T(u8, total_elem_left,
3394 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
3395 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3396 elem_sent, ice_aqc_opc_add_sw_rules,
3399 goto ice_add_mac_exit;
3400 r_iter = (struct ice_aqc_sw_rules_elem *)
3401 ((u8 *)r_iter + (elem_sent * s_rule_size));
3404 /* Fill up rule ID based on the value returned from FW */
3406 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3408 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3409 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3410 struct ice_fltr_mgmt_list_entry *fm_entry;
3412 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3413 f_info->fltr_rule_id =
3414 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3415 f_info->fltr_act = ICE_FWD_TO_VSI;
3416 /* Create an entry to track this MAC address */
3417 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3418 ice_malloc(hw, sizeof(*fm_entry));
3420 status = ICE_ERR_NO_MEMORY;
3421 goto ice_add_mac_exit;
3423 fm_entry->fltr_info = *f_info;
3424 fm_entry->vsi_count = 1;
3425 /* The book keeping entries will get removed when
3426 * base driver calls remove filter AQ command
3429 LIST_ADD(&fm_entry->list_entry, rule_head);
3430 r_iter = (struct ice_aqc_sw_rules_elem *)
3431 ((u8 *)r_iter + s_rule_size);
3436 ice_release_lock(rule_lock);
3438 ice_free(hw, s_rule);
3443 * ice_add_mac - Add a MAC address based filter rule
3444 * @hw: pointer to the hardware structure
3445 * @m_list: list of MAC addresses and forwarding information
3447 * Function add MAC rule for logical port from HW struct
/* NOTE(review): the guard condition for the early ICE_ERR_PARAM return below
 * is not visible in this excerpt (presumably a NULL check on hw/m_list) --
 * confirm against the full file.
 */
3450 ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3453 return ICE_ERR_PARAM;
/* Delegate to the rule-level helper using this HW's switch info and the
 * logical port number taken from hw->port_info.
 */
3455 return ice_add_mac_rule(hw, m_list, hw->switch_info,
3456 hw->port_info->lport);
3460 * ice_add_vlan_internal - Add one VLAN based filter rule
3461 * @hw: pointer to the hardware structure
3462 * @recp_list: recipe list for which rule has to be added
3463 * @f_entry: filter entry containing one VLAN information
/* Returns ICE_SUCCESS, or ICE_ERR_PARAM / ICE_ERR_DOES_NOT_EXIST /
 * ICE_ERR_ALREADY_EXISTS / ICE_ERR_CFG on failure (see the paths below).
 */
3465 static enum ice_status
3466 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3467 struct ice_fltr_list_entry *f_entry)
3469 struct ice_fltr_mgmt_list_entry *v_list_itr;
3470 struct ice_fltr_info *new_fltr, *cur_fltr;
3471 enum ice_sw_lkup_type lkup_type;
3472 u16 vsi_list_id = 0, vsi_handle;
3473 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3474 enum ice_status status = ICE_SUCCESS;
3476 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3477 return ICE_ERR_PARAM;
/* Resolve the caller's VSI handle to the HW VSI number once, up front */
3479 f_entry->fltr_info.fwd_id.hw_vsi_id =
3480 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3481 new_fltr = &f_entry->fltr_info;
3483 /* VLAN ID should only be 12 bits */
3484 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3485 return ICE_ERR_PARAM;
3487 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3488 return ICE_ERR_PARAM;
3490 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3491 lkup_type = new_fltr->lkup_type;
3492 vsi_handle = new_fltr->vsi_handle;
3493 rule_lock = &recp_list->filt_rule_lock;
3494 ice_acquire_lock(rule_lock);
3495 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
/* NOTE(review): the branch header guarding this scope (likely
 * "if (!v_list_itr) {" -- i.e. no rule exists yet for this VLAN) is not
 * visible in this excerpt; confirm against the full file.
 */
3497 struct ice_vsi_list_map_info *map_info = NULL;
3499 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3500 /* All VLAN pruning rules use a VSI list. Check if
3501 * there is already a VSI list containing VSI that we
3502 * want to add. If found, use the same vsi_list_id for
3503 * this new VLAN rule or else create a new list.
3505 map_info = ice_find_vsi_list_entry(recp_list,
3509 status = ice_create_vsi_list_rule(hw,
3517 /* Convert the action to forwarding to a VSI list. */
3518 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3519 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3522 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3524 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
3527 status = ICE_ERR_DOES_NOT_EXIST;
3530 /* reuse VSI list for new rule and increment ref_cnt */
3532 v_list_itr->vsi_list_info = map_info;
3533 map_info->ref_cnt++;
3535 v_list_itr->vsi_list_info =
3536 ice_create_vsi_list_map(hw, &vsi_handle,
3540 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3541 /* Update existing VSI list to add new VSI ID only if it used
3544 cur_fltr = &v_list_itr->fltr_info;
3545 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3548 /* If VLAN rule exists and VSI list being used by this rule is
3549 * referenced by more than 1 VLAN rule. Then create a new VSI
3550 * list appending previous VSI with new VSI and update existing
3551 * VLAN rule to point to new VSI list ID
3553 struct ice_fltr_info tmp_fltr;
3554 u16 vsi_handle_arr[2];
3557 /* Current implementation only supports reusing VSI list with
3558 * one VSI count. We should never hit below condition
3560 if (v_list_itr->vsi_count > 1 &&
3561 v_list_itr->vsi_list_info->ref_cnt > 1) {
3562 ice_debug(hw, ICE_DBG_SW,
3563 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3564 status = ICE_ERR_CFG;
/* Locate the single VSI currently on the existing list */
3569 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3572 /* A rule already exists with the new VSI being added */
3573 if (cur_handle == vsi_handle) {
3574 status = ICE_ERR_ALREADY_EXISTS;
3578 vsi_handle_arr[0] = cur_handle;
3579 vsi_handle_arr[1] = vsi_handle;
3580 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3581 &vsi_list_id, lkup_type);
3585 tmp_fltr = v_list_itr->fltr_info;
3586 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3587 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3588 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3589 /* Update the previous switch rule to a new VSI list which
3590 * includes current VSI that is requested
3592 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3596 /* before overriding VSI list map info. decrement ref_cnt of
3599 v_list_itr->vsi_list_info->ref_cnt--;
3601 /* now update to newly created list */
3602 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3603 v_list_itr->vsi_list_info =
3604 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3606 v_list_itr->vsi_count++;
/* Lock is held across all list/rule manipulation above */
3610 ice_release_lock(rule_lock);
3615 * ice_add_vlan_rule - Add VLAN based filter rule
3616 * @hw: pointer to the hardware structure
3617 * @v_list: list of VLAN entries and forwarding information
3618 * @sw: pointer to switch info struct for which function add rule
/* Walks v_list and programs each entry via ice_add_vlan_internal();
 * stops and returns at the first failing entry (no rollback of earlier ones).
 */
3620 static enum ice_status
3621 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
3622 struct ice_switch_info *sw)
3624 struct ice_fltr_list_entry *v_list_itr;
3625 struct ice_sw_recipe *recp_list;
3627 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
3628 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
3630 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3631 return ICE_ERR_PARAM;
/* VLAN rules are always installed as Tx-side (pruning) filters */
3632 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3633 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
3635 if (v_list_itr->status)
3636 return v_list_itr->status;
3642 * ice_add_vlan - Add a VLAN based filter rule
3643 * @hw: pointer to the hardware structure
3644 * @v_list: list of VLAN and forwarding information
3646 * Function add VLAN rule for logical port from HW struct
/* NOTE(review): the guard condition for the early ICE_ERR_PARAM return below
 * is not visible in this excerpt (presumably "if (!v_list || !hw)") --
 * confirm against the full file.
 */
3649 ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3652 return ICE_ERR_PARAM;
3654 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
3658 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3659 * @hw: pointer to the hardware structure
3660 * @mv_list: list of MAC and VLAN filters
3661 * @sw: pointer to switch info struct for which function add rule
3662 * @lport: logic port number on which function add rule
3664 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3665 * pruning bits enabled, then it is the responsibility of the caller to make
3666 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3667 * VLAN won't be received on that VSI otherwise.
3669 static enum ice_status
3670 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
3671 struct ice_switch_info *sw, u8 lport)
3673 struct ice_fltr_list_entry *mv_list_itr;
3674 struct ice_sw_recipe *recp_list;
3676 if (!mv_list || !hw)
3677 return ICE_ERR_PARAM;
3679 recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
/* Program each MAC+VLAN entry; stop at the first failure (no rollback) */
3680 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3682 enum ice_sw_lkup_type l_type =
3683 mv_list_itr->fltr_info.lkup_type;
3685 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3686 return ICE_ERR_PARAM;
/* MAC-VLAN rules are always installed as Tx-side filters */
3687 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3688 mv_list_itr->status =
3689 ice_add_rule_internal(hw, recp_list, lport,
3691 if (mv_list_itr->status)
3692 return mv_list_itr->status;
3698 * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
3699 * @hw: pointer to the hardware structure
3700 * @mv_list: list of MAC VLAN addresses and forwarding information
3702 * Function add MAC VLAN rule for logical port from HW struct
3705 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3707 if (!mv_list || !hw)
3708 return ICE_ERR_PARAM;
/* Delegate to the rule-level helper using this HW's switch info and lport */
3710 return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
3711 hw->port_info->lport);
3715 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
3716 * @hw: pointer to the hardware structure
3717 * @em_list: list of ether type MAC filter, MAC is optional
3718 * @sw: pointer to switch info struct for which function add rule
3719 * @lport: logic port number on which function add rule
3721 * This function requires the caller to populate the entries in
3722 * the filter list with the necessary fields (including flags to
3723 * indicate Tx or Rx rules).
3725 static enum ice_status
3726 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
3727 struct ice_switch_info *sw, u8 lport)
3729 struct ice_fltr_list_entry *em_list_itr;
3731 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3733 struct ice_sw_recipe *recp_list;
3734 enum ice_sw_lkup_type l_type;
3736 l_type = em_list_itr->fltr_info.lkup_type;
/* NOTE(review): recp_list is indexed with l_type before l_type is
 * validated below; safe only because enum values stay within
 * recp_list[] bounds -- the pointer is not dereferenced before the
 * check.
 */
3737 recp_list = &sw->recp_list[l_type];
3739 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3740 l_type != ICE_SW_LKUP_ETHERTYPE)
3741 return ICE_ERR_PARAM;
3743 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
3746 if (em_list_itr->status)
3747 return em_list_itr->status;
3754 * ice_add_eth_mac - Add a ethertype based filter rule
3755 * @hw: pointer to the hardware structure
3756 * @em_list: list of ethertype and forwarding information
3758 * Function add ethertype rule for logical port from HW struct
3760 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3762 if (!em_list || !hw)
3763 return ICE_ERR_PARAM;
/* Delegate to the rule-level helper using this HW's switch info and lport */
3765 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
3766 hw->port_info->lport);
3770 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
3771 * @hw: pointer to the hardware structure
3772 * @em_list: list of ethertype or ethertype MAC entries
3773 * @sw: pointer to switch info struct for which function add rule
3775 static enum ice_status
3776 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
3777 struct ice_switch_info *sw)
3779 struct ice_fltr_list_entry *em_list_itr, *tmp;
/* _SAFE iteration: ice_remove_rule_internal() may unlink the entry */
3781 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3783 struct ice_sw_recipe *recp_list;
3784 enum ice_sw_lkup_type l_type;
3786 l_type = em_list_itr->fltr_info.lkup_type;
3788 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3789 l_type != ICE_SW_LKUP_ETHERTYPE)
3790 return ICE_ERR_PARAM;
3792 recp_list = &sw->recp_list[l_type];
3793 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
3795 if (em_list_itr->status)
3796 return em_list_itr->status;
3802 * ice_remove_eth_mac - remove a ethertype based filter rule
3803 * @hw: pointer to the hardware structure
3804 * @em_list: list of ethertype and forwarding information
3808 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3810 if (!em_list || !hw)
3811 return ICE_ERR_PARAM;
/* Delegate to the rule-level helper using this HW's switch info */
3813 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
3817 * ice_rem_sw_rule_info
3818 * @hw: pointer to the hardware structure
3819 * @rule_head: pointer to the switch list structure that we want to delete
/* Unlinks and frees every ice_fltr_mgmt_list_entry on rule_head (bookkeeping
 * only -- no AQ commands are issued here). NOTE(review): locking, if any, is
 * assumed to be handled by the caller; not visible in this excerpt.
 */
3822 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3824 if (!LIST_EMPTY(rule_head)) {
3825 struct ice_fltr_mgmt_list_entry *entry;
3826 struct ice_fltr_mgmt_list_entry *tmp;
3828 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3829 ice_fltr_mgmt_list_entry, list_entry) {
3830 LIST_DEL(&entry->list_entry);
3831 ice_free(hw, entry);
3837 * ice_rem_adv_rule_info
3838 * @hw: pointer to the hardware structure
3839 * @rule_head: pointer to the switch list structure that we want to delete
/* Unlinks and frees every advanced-rule management entry, including each
 * entry's separately-allocated lkups array.
 */
3842 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3844 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3845 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3847 if (LIST_EMPTY(rule_head))
3850 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3851 ice_adv_fltr_mgmt_list_entry, list_entry) {
3852 LIST_DEL(&lst_itr->list_entry);
/* Free the lookup array before the entry that owns it */
3853 ice_free(hw, lst_itr->lkups);
3854 ice_free(hw, lst_itr);
3859 * ice_rem_all_sw_rules_info
3860 * @hw: pointer to the hardware structure
/* Frees the filter bookkeeping for every recipe: basic recipes go through
 * ice_rem_sw_rule_info(), advanced recipes through ice_rem_adv_rule_info().
 */
3862 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3864 struct ice_switch_info *sw = hw->switch_info;
3867 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3868 struct LIST_HEAD_TYPE *rule_head;
3870 rule_head = &sw->recp_list[i].filt_rules;
3871 if (!sw->recp_list[i].adv_rule)
3872 ice_rem_sw_rule_info(hw, rule_head);
3874 ice_rem_adv_rule_info(hw, rule_head);
3879 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3880 * @pi: pointer to the port_info structure
3881 * @vsi_handle: VSI handle to set as default
3882 * @set: true to add the above mentioned switch rule, false to remove it
3883 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3885 * add filter rule to set/unset given VSI as default VSI for the switch
3886 * (represented by swid)
3889 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3892 struct ice_aqc_sw_rules_elem *s_rule;
3893 struct ice_fltr_info f_info;
3894 struct ice_hw *hw = pi->hw;
3895 enum ice_adminq_opc opcode;
3896 enum ice_status status;
3900 if (!ice_is_vsi_valid(hw, vsi_handle))
3901 return ICE_ERR_PARAM;
3902 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
/* Add needs the full dummy ETH header in the rule buffer; remove only
 * needs the headerless element (rule is addressed by its index).
 */
3904 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3905 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3906 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3908 return ICE_ERR_NO_MEMORY;
3910 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3912 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3913 f_info.flag = direction;
3914 f_info.fltr_act = ICE_FWD_TO_VSI;
3915 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
/* Rx default rules match on the port; Tx default rules match on the VSI */
3917 if (f_info.flag & ICE_FLTR_RX) {
3918 f_info.src = pi->lport;
3919 f_info.src_id = ICE_SRC_ID_LPORT;
3921 f_info.fltr_rule_id =
3922 pi->dflt_rx_vsi_rule_id;
3923 } else if (f_info.flag & ICE_FLTR_TX) {
3924 f_info.src_id = ICE_SRC_ID_VSI;
3925 f_info.src = hw_vsi_id;
3927 f_info.fltr_rule_id =
3928 pi->dflt_tx_vsi_rule_id;
3932 opcode = ice_aqc_opc_add_sw_rules;
3934 opcode = ice_aqc_opc_remove_sw_rules;
3936 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3938 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3939 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
/* On success, record (set) or invalidate (clear) the cached default-VSI
 * rule bookkeeping in the port_info.
 */
3942 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3944 if (f_info.flag & ICE_FLTR_TX) {
3945 pi->dflt_tx_vsi_num = hw_vsi_id;
3946 pi->dflt_tx_vsi_rule_id = index;
3947 } else if (f_info.flag & ICE_FLTR_RX) {
3948 pi->dflt_rx_vsi_num = hw_vsi_id;
3949 pi->dflt_rx_vsi_rule_id = index;
3952 if (f_info.flag & ICE_FLTR_TX) {
3953 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3954 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3955 } else if (f_info.flag & ICE_FLTR_RX) {
3956 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3957 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3962 ice_free(hw, s_rule);
3967 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3968 * @list_head: head of rule list
3969 * @f_info: rule information
3971 * Helper function to search for a unicast rule entry - this is to be used
3972 * to remove unicast MAC filter that is not shared with other VSIs on the
3975 * Returns pointer to entry storing the rule if found
/* Match criteria: identical lookup data (l_data), same HW VSI ID, and same
 * direction flag. NOTE(review): the not-found return path (presumably NULL)
 * is not visible in this excerpt.
 */
3977 static struct ice_fltr_mgmt_list_entry *
3978 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
3979 struct ice_fltr_info *f_info)
3981 struct ice_fltr_mgmt_list_entry *list_itr;
3983 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3985 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3986 sizeof(f_info->l_data)) &&
3987 f_info->fwd_id.hw_vsi_id ==
3988 list_itr->fltr_info.fwd_id.hw_vsi_id &&
3989 f_info->flag == list_itr->fltr_info.flag)
3996 * ice_remove_mac_rule - remove a MAC based filter rule
3997 * @hw: pointer to the hardware structure
3998 * @m_list: list of MAC addresses and forwarding information
3999 * @recp_list: list from which function remove MAC address
4001 * This function removes either a MAC filter rule or a specific VSI from a
4002 * VSI list for a multicast MAC address.
4004 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
4005 * ice_add_mac. Caller should be aware that this call will only work if all
4006 * the entries passed into m_list were added previously. It will not attempt to
4007 * do a partial remove of entries that were found.
4009 static enum ice_status
4010 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4011 struct ice_sw_recipe *recp_list)
4013 struct ice_fltr_list_entry *list_itr, *tmp;
4014 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4017 return ICE_ERR_PARAM;
4019 rule_lock = &recp_list->filt_rule_lock;
4020 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
4022 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
4023 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
4026 if (l_type != ICE_SW_LKUP_MAC)
4027 return ICE_ERR_PARAM;
4029 vsi_handle = list_itr->fltr_info.vsi_handle;
4030 if (!ice_is_vsi_valid(hw, vsi_handle))
4031 return ICE_ERR_PARAM;
4033 list_itr->fltr_info.fwd_id.hw_vsi_id =
4034 ice_get_hw_vsi_num(hw, vsi_handle);
/* With exclusive (non-shared) unicast addresses, verify this exact
 * rule belongs to this VSI before removing it.
 */
4035 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4036 /* Don't remove the unicast address that belongs to
4037 * another VSI on the switch, since it is not being
4040 ice_acquire_lock(rule_lock);
4041 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
4042 &list_itr->fltr_info)) {
4043 ice_release_lock(rule_lock);
4044 return ICE_ERR_DOES_NOT_EXIST;
4046 ice_release_lock(rule_lock);
4048 list_itr->status = ice_remove_rule_internal(hw, recp_list,
4050 if (list_itr->status)
4051 return list_itr->status;
4057 * ice_remove_mac - remove a MAC address based filter rule
4058 * @hw: pointer to the hardware structure
4059 * @m_list: list of MAC addresses and forwarding information
/* Thin wrapper: resolves the MAC recipe from hw->switch_info and delegates
 * to ice_remove_mac_rule() (which performs the NULL m_list check).
 */
4063 ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4065 struct ice_sw_recipe *recp_list;
4067 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4068 return ice_remove_mac_rule(hw, m_list, recp_list);
4072 * ice_remove_vlan_rule - Remove VLAN based filter rule
4073 * @hw: pointer to the hardware structure
4074 * @v_list: list of VLAN entries and forwarding information
4075 * @recp_list: list from which function remove VLAN
4077 static enum ice_status
4078 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4079 struct ice_sw_recipe *recp_list)
4081 struct ice_fltr_list_entry *v_list_itr, *tmp;
/* _SAFE iteration: ice_remove_rule_internal() may unlink the entry */
4083 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4085 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4087 if (l_type != ICE_SW_LKUP_VLAN)
4088 return ICE_ERR_PARAM;
4089 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4091 if (v_list_itr->status)
4092 return v_list_itr->status;
4098 * ice_remove_vlan - remove a VLAN address based filter rule
4099 * @hw: pointer to the hardware structure
4100 * @v_list: list of VLAN and forwarding information
/* NOTE(review): the guard condition for the early ICE_ERR_PARAM return below
 * is not visible in this excerpt (presumably "if (!v_list || !hw)") --
 * confirm against the full file.
 */
4104 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4106 struct ice_sw_recipe *recp_list;
4109 return ICE_ERR_PARAM;
4111 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
4112 return ice_remove_vlan_rule(hw, v_list, recp_list);
4116 * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
4117 * @hw: pointer to the hardware structure
4118 * @v_list: list of MAC VLAN entries and forwarding information
4119 * @recp_list: list from which function remove MAC VLAN
4121 static enum ice_status
4122 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4123 struct ice_sw_recipe *recp_list)
4125 struct ice_fltr_list_entry *v_list_itr, *tmp;
/* NOTE(review): the recp_list parameter is unconditionally overwritten here,
 * so the caller's argument is ignored -- likely dead code or a latent bug;
 * verify against the upstream version of this file.
 */
4127 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4128 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4130 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4132 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4133 return ICE_ERR_PARAM;
4134 v_list_itr->status =
4135 ice_remove_rule_internal(hw, recp_list,
4137 if (v_list_itr->status)
4138 return v_list_itr->status;
4144 * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
4145 * @hw: pointer to the hardware structure
4146 * @mv_list: list of MAC VLAN and forwarding information
4149 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4151 struct ice_sw_recipe *recp_list;
4153 if (!mv_list || !hw)
4154 return ICE_ERR_PARAM;
/* Resolve the MAC-VLAN recipe and delegate to the rule-level helper */
4156 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4157 return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
4161 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
4162 * @fm_entry: filter entry to inspect
4163 * @vsi_handle: VSI handle to compare with filter info
/* True when the filter forwards directly to this VSI, or forwards to a VSI
 * list whose bitmap contains this VSI handle.
 */
4166 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
4168 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
4169 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
4170 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
4171 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
4176 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
4177 * @hw: pointer to the hardware structure
4178 * @vsi_handle: VSI handle to remove filters from
4179 * @vsi_list_head: pointer to the list to add entry to
4180 * @fi: pointer to fltr_info of filter entry to copy & add
4182 * Helper function, used when creating a list of filters to remove from
4183 * a specific VSI. The entry added to vsi_list_head is a COPY of the
4184 * original filter entry, with the exception of fltr_info.fltr_act and
4185 * fltr_info.fwd_id fields. These are set such that later logic can
4186 * extract which VSI to remove the fltr from, and pass on that information.
4188 static enum ice_status
4189 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4190 struct LIST_HEAD_TYPE *vsi_list_head,
4191 struct ice_fltr_info *fi)
4193 struct ice_fltr_list_entry *tmp;
4195 /* this memory is freed up in the caller function
4196 * once filters for this VSI are removed
4198 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
4200 return ICE_ERR_NO_MEMORY;
/* Shallow struct copy of the filter info */
4202 tmp->fltr_info = *fi;
4204 /* Overwrite these fields to indicate which VSI to remove filter from,
4205 * so find and remove logic can extract the information from the
4206 * list entries. Note that original entries will still have proper
4209 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
4210 tmp->fltr_info.vsi_handle = vsi_handle;
4211 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4213 LIST_ADD(&tmp->list_entry, vsi_list_head);
4219 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4220 * @hw: pointer to the hardware structure
4221 * @vsi_handle: VSI handle to remove filters from
4222 * @lkup_list_head: pointer to the list that has certain lookup type filters
4223 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4225 * Locates all filters in lkup_list_head that are used by the given VSI,
4226 * and adds COPIES of those entries to vsi_list_head (intended to be used
4227 * to remove the listed filters).
4228 * Note that this means all entries in vsi_list_head must be explicitly
4229 * deallocated by the caller when done with list.
4231 static enum ice_status
4232 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4233 struct LIST_HEAD_TYPE *lkup_list_head,
4234 struct LIST_HEAD_TYPE *vsi_list_head)
4236 struct ice_fltr_mgmt_list_entry *fm_entry;
4237 enum ice_status status = ICE_SUCCESS;
4239 /* check to make sure VSI ID is valid and within boundary */
4240 if (!ice_is_vsi_valid(hw, vsi_handle))
4241 return ICE_ERR_PARAM;
4243 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
4244 ice_fltr_mgmt_list_entry, list_entry) {
4245 struct ice_fltr_info *fi;
4247 fi = &fm_entry->fltr_info;
/* NOTE(review): "!fi" is always false (address of a struct member),
 * so only the ice_vsi_uses_fltr() check is effective here.
 */
4248 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
4251 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4260 * ice_determine_promisc_mask
4261 * @fi: filter info to parse
4263 * Helper function to determine which ICE_PROMISC_ mask corresponds
4264 * to given filter into.
/* Classifies the filter's DA as broadcast/multicast/unicast (falling through
 * to the VLAN promisc bits otherwise) and picks the Tx or Rx flavor of the
 * bit based on fi->flag.
 */
4266 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
/* NOTE(review): vid is read here but its use is not visible in this excerpt
 * (likely consulted in the VLAN branch) -- confirm against the full file.
 */
4268 u16 vid = fi->l_data.mac_vlan.vlan_id;
4269 u8 *macaddr = fi->l_data.mac.mac_addr;
4270 bool is_tx_fltr = false;
4271 u8 promisc_mask = 0;
4273 if (fi->flag == ICE_FLTR_TX)
4276 if (IS_BROADCAST_ETHER_ADDR(macaddr))
4277 promisc_mask |= is_tx_fltr ?
4278 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4279 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
4280 promisc_mask |= is_tx_fltr ?
4281 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4282 else if (IS_UNICAST_ETHER_ADDR(macaddr))
4283 promisc_mask |= is_tx_fltr ?
4284 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
4286 promisc_mask |= is_tx_fltr ?
4287 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4289 return promisc_mask;
4293 * ice_get_vsi_promisc - get promiscuous mode of given VSI
4294 * @hw: pointer to the hardware structure
4295 * @vsi_handle: VSI handle to retrieve info from
4296 * @promisc_mask: pointer to mask to be filled in
4297 * @vid: VLAN ID of promisc VLAN VSI
/* ORs the promisc classification of every ICE_SW_LKUP_PROMISC rule that
 * applies to this VSI into *promisc_mask, under the recipe's rule lock.
 */
4300 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4303 struct ice_switch_info *sw = hw->switch_info;
4304 struct ice_fltr_mgmt_list_entry *itr;
4305 struct LIST_HEAD_TYPE *rule_head;
4306 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4308 if (!ice_is_vsi_valid(hw, vsi_handle))
4309 return ICE_ERR_PARAM;
4313 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
4314 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
4316 ice_acquire_lock(rule_lock);
4317 LIST_FOR_EACH_ENTRY(itr, rule_head,
4318 ice_fltr_mgmt_list_entry, list_entry) {
4319 /* Continue if this filter doesn't apply to this VSI or the
4320 * VSI ID is not in the VSI map for this filter
4322 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4325 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4327 ice_release_lock(rule_lock);
4333 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4334 * @hw: pointer to the hardware structure
4335 * @vsi_handle: VSI handle to retrieve info from
4336 * @promisc_mask: pointer to mask to be filled in
4337 * @vid: VLAN ID of promisc VLAN VSI
/* Same shape as ice_get_vsi_promisc() but scans the PROMISC_VLAN recipe's
 * rules instead.
 */
4340 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4343 struct ice_switch_info *sw = hw->switch_info;
4344 struct ice_fltr_mgmt_list_entry *itr;
4345 struct LIST_HEAD_TYPE *rule_head;
4346 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4348 if (!ice_is_vsi_valid(hw, vsi_handle))
4349 return ICE_ERR_PARAM;
4353 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
4354 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
4356 ice_acquire_lock(rule_lock);
4357 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
4359 /* Continue if this filter doesn't apply to this VSI or the
4360 * VSI ID is not in the VSI map for this filter
4362 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4365 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4367 ice_release_lock(rule_lock);
4373 * ice_remove_promisc - Remove promisc based filter rules
4374 * @hw: pointer to the hardware structure
4375 * @recp_id: recipe ID for which the rule needs to removed
4376 * @v_list: list of promisc entries
4378 static enum ice_status
4379 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
4380 struct LIST_HEAD_TYPE *v_list)
4382 struct ice_fltr_list_entry *v_list_itr, *tmp;
4383 struct ice_sw_recipe *recp_list;
4385 recp_list = &hw->switch_info->recp_list[recp_id];
/* _SAFE iteration: ice_remove_rule_internal() may unlink the entry.
 * Stops at the first failure (no rollback of already-removed rules).
 */
4386 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4388 v_list_itr->status =
4389 ice_remove_rule_internal(hw, recp_list, v_list_itr);
4390 if (v_list_itr->status)
4391 return v_list_itr->status;
4397 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
4398 * @hw: pointer to the hardware structure
4399 * @vsi_handle: VSI handle to clear mode
4400 * @promisc_mask: mask of promiscuous config bits to clear
4401 * @vid: VLAN ID to clear VLAN promiscuous
4404 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4407 struct ice_switch_info *sw = hw->switch_info;
4408 struct ice_fltr_list_entry *fm_entry, *tmp;
4409 struct LIST_HEAD_TYPE remove_list_head;
4410 struct ice_fltr_mgmt_list_entry *itr;
4411 struct LIST_HEAD_TYPE *rule_head;
4412 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4413 enum ice_status status = ICE_SUCCESS;
4416 if (!ice_is_vsi_valid(hw, vsi_handle))
4417 return ICE_ERR_PARAM;
/* Any VLAN promisc bit selects the PROMISC_VLAN recipe; otherwise the
 * plain PROMISC recipe is scanned.
 */
4419 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
4420 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4422 recipe_id = ICE_SW_LKUP_PROMISC;
4424 rule_head = &sw->recp_list[recipe_id].filt_rules;
4425 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4427 INIT_LIST_HEAD(&remove_list_head);
/* Pass 1 (under lock): collect COPIES of matching rules to remove */
4429 ice_acquire_lock(rule_lock);
4430 LIST_FOR_EACH_ENTRY(itr, rule_head,
4431 ice_fltr_mgmt_list_entry, list_entry) {
4432 struct ice_fltr_info *fltr_info;
4433 u8 fltr_promisc_mask = 0;
4435 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4437 fltr_info = &itr->fltr_info;
4439 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4440 vid != fltr_info->l_data.mac_vlan.vlan_id)
4443 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4445 /* Skip if filter is not completely specified by given mask */
4446 if (fltr_promisc_mask & ~promisc_mask)
4449 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4453 ice_release_lock(rule_lock);
4454 goto free_fltr_list;
4457 ice_release_lock(rule_lock);
/* Pass 2 (lock released): remove the collected rules via AQ */
4459 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
/* Free the temporary copies regardless of removal status */
4462 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4463 ice_fltr_list_entry, list_entry) {
4464 LIST_DEL(&fm_entry->list_entry);
4465 ice_free(hw, fm_entry);
4472 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4473 * @hw: pointer to the hardware structure
4474 * @vsi_handle: VSI handle to configure
4475 * @promisc_mask: mask of promiscuous config bits
4476 * @vid: VLAN ID to set VLAN promiscuous
/* ice_set_vsi_promisc - program promiscuous switch rule(s) for one VSI.
 * For every packet-type/direction bit in @promisc_mask a dedicated filter
 * rule is built and submitted through ice_add_rule_internal(); when a VLAN
 * promisc bit is present the ICE_SW_LKUP_PROMISC_VLAN recipe is used and
 * @vid is matched as well.
 * NOTE(review): several physical source lines (else arms, closing braces,
 * some declarations such as hw_vsi_id/recipe_id/pkt_type/is_tx_fltr) are
 * not visible in this excerpt; comments describe only the code shown.
 */
4479 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
4481 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4482 struct ice_fltr_list_entry f_list_entry;
4483 struct ice_fltr_info new_fltr;
4484 enum ice_status status = ICE_SUCCESS;
4490 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
/* Validate the software VSI handle before any hardware translation. */
4492 if (!ice_is_vsi_valid(hw, vsi_handle))
4493 return ICE_ERR_PARAM;
4494 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4496 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
/* VLAN promiscuous mode uses its own lookup type and matches @vid;
 * otherwise a plain PROMISC lookup is used (else arm not visible here).
 */
4498 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4499 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4500 new_fltr.l_data.mac_vlan.vlan_id = vid;
4501 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4503 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4504 recipe_id = ICE_SW_LKUP_PROMISC;
4507 /* Separate filters must be set for each direction/packet type
4508 * combination, so we will loop over the mask value, store the
4509 * individual type, and clear it out in the input mask as it
4512 while (promisc_mask) {
4513 struct ice_sw_recipe *recp_list;
/* Peel off exactly one RX/TX unicast/multicast/broadcast bit per
 * iteration; TX variants presumably also set a tx-filter flag on the
 * elided lines (4525/4532/4539) -- TODO confirm against full source.
 */
4519 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4520 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4521 pkt_type = UCAST_FLTR;
4522 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4523 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4524 pkt_type = UCAST_FLTR;
4526 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4527 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4528 pkt_type = MCAST_FLTR;
4529 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4530 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4531 pkt_type = MCAST_FLTR;
4533 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4534 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4535 pkt_type = BCAST_FLTR;
4536 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4537 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4538 pkt_type = BCAST_FLTR;
4542 /* Check for VLAN promiscuous flag */
4543 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4544 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4545 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4546 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4550 /* Set filter DA based on packet type */
4551 mac_addr = new_fltr.l_data.mac.mac_addr;
4552 if (pkt_type == BCAST_FLTR) {
/* Broadcast: DA = ff:ff:ff:ff:ff:ff */
4553 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
4554 } else if (pkt_type == MCAST_FLTR ||
4555 pkt_type == UCAST_FLTR) {
4556 /* Use the dummy ether header DA */
4557 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
4558 ICE_NONDMA_TO_NONDMA);
4559 if (pkt_type == MCAST_FLTR)
4560 mac_addr[0] |= 0x1; /* Set multicast bit */
4563 /* Need to reset this to zero for all iterations */
/* TX rules source from the VSI itself; RX rules source from the port
 * (the surrounding if/else structure is partly elided here).
 */
4566 new_fltr.flag |= ICE_FLTR_TX;
4567 new_fltr.src = hw_vsi_id;
4569 new_fltr.flag |= ICE_FLTR_RX;
4570 new_fltr.src = hw->port_info->lport;
4573 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4574 new_fltr.vsi_handle = vsi_handle;
4575 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4576 f_list_entry.fltr_info = new_fltr;
4577 recp_list = &hw->switch_info->recp_list[recipe_id];
4579 status = ice_add_rule_internal(hw, recp_list,
4580 hw->port_info->lport,
/* Bail out on the first rule that fails to program. */
4582 if (status != ICE_SUCCESS)
4583 goto set_promisc_exit;
4591 * ice_set_vlan_vsi_promisc
4592 * @hw: pointer to the hardware structure
4593 * @vsi_handle: VSI handle to configure
4594 * @promisc_mask: mask of promiscuous config bits
4595 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4597 * Configure VSI with all associated VLANs to given promiscuous mode(s)
/* ice_set_vlan_vsi_promisc - apply/clear promisc mode for all of a VSI's VLANs.
 * Snapshots the VSI's VLAN filter rules under the VLAN recipe lock, then for
 * each VLAN ID either clears (@rm_vlan_promisc true) or sets promiscuous
 * mode via ice_clear_vsi_promisc()/ice_set_vsi_promisc().
 * NOTE(review): some lines (opening brace, error check after list build,
 * free_fltr_list label, return) are elided in this excerpt.
 */
4600 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4601 bool rm_vlan_promisc)
4603 struct ice_switch_info *sw = hw->switch_info;
4604 struct ice_fltr_list_entry *list_itr, *tmp;
4605 struct LIST_HEAD_TYPE vsi_list_head;
4606 struct LIST_HEAD_TYPE *vlan_head;
4607 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
4608 enum ice_status status;
4611 INIT_LIST_HEAD(&vsi_list_head);
4612 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4613 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
/* Copy this VSI's VLAN rules to a private list so the lock can be
 * dropped before the (potentially slow) promisc AQ operations below.
 */
4614 ice_acquire_lock(vlan_lock);
4615 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4617 ice_release_lock(vlan_lock);
4619 goto free_fltr_list;
4621 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
4623 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4624 if (rm_vlan_promisc)
4625 status = ice_clear_vsi_promisc(hw, vsi_handle,
4626 promisc_mask, vlan_id);
4628 status = ice_set_vsi_promisc(hw, vsi_handle,
4629 promisc_mask, vlan_id);
/* Always release the temporary snapshot list entries. */
4635 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
4636 ice_fltr_list_entry, list_entry) {
4637 LIST_DEL(&list_itr->list_entry);
4638 ice_free(hw, list_itr);
4644 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4645 * @hw: pointer to the hardware structure
4646 * @vsi_handle: VSI handle to remove filters from
4647 * @recp_list: recipe list from which function remove fltr
4648 * @lkup: switch rule filter lookup type
/* ice_remove_vsi_lkup_fltr - remove all filters of one lookup type for a VSI.
 * Builds a snapshot list of the VSI's rules for @lkup under the recipe lock,
 * dispatches to the type-specific remove helper, then frees the snapshot.
 * NOTE(review): the switch statement's opening line, break statements and
 * the default arm are not visible in this excerpt.
 */
4651 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4652 struct ice_sw_recipe *recp_list,
4653 enum ice_sw_lkup_type lkup)
4655 struct ice_fltr_list_entry *fm_entry;
4656 struct LIST_HEAD_TYPE remove_list_head;
4657 struct LIST_HEAD_TYPE *rule_head;
4658 struct ice_fltr_list_entry *tmp;
4659 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4660 enum ice_status status;
4662 INIT_LIST_HEAD(&remove_list_head);
/* @lkup indexes directly into the per-recipe bookkeeping arrays. */
4663 rule_lock = &recp_list[lkup].filt_rule_lock;
4664 rule_head = &recp_list[lkup].filt_rules;
4665 ice_acquire_lock(rule_lock);
4666 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4668 ice_release_lock(rule_lock);
/* Dispatch removal per lookup type (switch header elided above). */
4673 case ICE_SW_LKUP_MAC:
4674 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
4676 case ICE_SW_LKUP_VLAN:
4677 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
4679 case ICE_SW_LKUP_PROMISC:
4680 case ICE_SW_LKUP_PROMISC_VLAN:
4681 ice_remove_promisc(hw, lkup, &remove_list_head);
4683 case ICE_SW_LKUP_MAC_VLAN:
4684 ice_remove_mac_vlan(hw, &remove_list_head);
4686 case ICE_SW_LKUP_ETHERTYPE:
4687 case ICE_SW_LKUP_ETHERTYPE_MAC:
4688 ice_remove_eth_mac(hw, &remove_list_head);
4690 case ICE_SW_LKUP_DFLT:
4691 ice_debug(hw, ICE_DBG_SW,
4692 "Remove filters for this lookup type hasn't been implemented yet\n")
4694 case ICE_SW_LKUP_LAST:
4695 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
/* Free the snapshot list regardless of which helper ran. */
4699 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4700 ice_fltr_list_entry, list_entry) {
4701 LIST_DEL(&fm_entry->list_entry);
4702 ice_free(hw, fm_entry);
4707 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
4708 * @hw: pointer to the hardware structure
4709 * @vsi_handle: VSI handle to remove filters from
4710 * @sw: pointer to switch info struct
/* ice_remove_vsi_fltr_rule - remove all switch filters owned by a VSI.
 * Iterates every supported lookup type and delegates the per-type removal
 * to ice_remove_vsi_lkup_fltr() against @sw's recipe list.
 */
4713 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
4714 struct ice_switch_info *sw)
4716 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4718 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4719 sw->recp_list, ICE_SW_LKUP_MAC);
4720 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4721 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
4722 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4723 sw->recp_list, ICE_SW_LKUP_PROMISC);
4724 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4725 sw->recp_list, ICE_SW_LKUP_VLAN);
4726 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4727 sw->recp_list, ICE_SW_LKUP_DFLT);
4728 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4729 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
4730 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4731 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
4732 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4733 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
4737 * ice_remove_vsi_fltr - Remove all filters for a VSI
4738 * @hw: pointer to the hardware structure
4739 * @vsi_handle: VSI handle to remove filters from
/* ice_remove_vsi_fltr - public wrapper: remove all filters for a VSI using
 * the hw's own switch_info bookkeeping.
 */
4741 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4743 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
4747 * ice_alloc_res_cntr - allocating resource counter
4748 * @hw: pointer to the hardware structure
4749 * @type: type of resource
4750 * @alloc_shared: if set it is shared else dedicated
4751 * @num_items: number of entries requested for FD resource type
4752 * @counter_id: counter index returned by AQ call
/* ice_alloc_res_cntr - allocate a counter resource via the alloc-res AQ cmd.
 * Builds a one-element alloc/free buffer for @type (shared or dedicated per
 * @alloc_shared) and, on success, returns the firmware-assigned index
 * through *counter_id.
 * NOTE(review): the NULL check after ice_malloc and the status check before
 * reading sw_resp are on lines elided from this excerpt.
 */
4755 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4758 struct ice_aqc_alloc_free_res_elem *buf;
4759 enum ice_status status;
4762 /* Allocate resource */
4763 buf_len = sizeof(*buf);
4764 buf = (struct ice_aqc_alloc_free_res_elem *)
4765 ice_malloc(hw, buf_len);
4767 return ICE_ERR_NO_MEMORY;
4769 buf->num_elems = CPU_TO_LE16(num_items);
/* res_type field carries both the resource type and the shared flag. */
4770 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4771 ICE_AQC_RES_TYPE_M) | alloc_shared);
4773 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4774 ice_aqc_opc_alloc_res, NULL);
4778 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
4786 * ice_free_res_cntr - free resource counter
4787 * @hw: pointer to the hardware structure
4788 * @type: type of resource
4789 * @alloc_shared: if set it is shared else dedicated
4790 * @num_items: number of entries to be freed for FD resource type
4791 * @counter_id: counter ID resource which needs to be freed
/* ice_free_res_cntr - release a previously allocated counter resource.
 * Mirror of ice_alloc_res_cntr(): same buffer layout, but the element
 * carries the counter index to free and the free-res opcode is used.
 * NOTE(review): the NULL check after ice_malloc and the ice_free of the
 * buffer are on lines elided from this excerpt.
 */
4794 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4797 struct ice_aqc_alloc_free_res_elem *buf;
4798 enum ice_status status;
4802 buf_len = sizeof(*buf);
4803 buf = (struct ice_aqc_alloc_free_res_elem *)
4804 ice_malloc(hw, buf_len);
4806 return ICE_ERR_NO_MEMORY;
4808 buf->num_elems = CPU_TO_LE16(num_items);
4809 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4810 ICE_AQC_RES_TYPE_M) | alloc_shared);
/* The resource being freed is identified in the element itself. */
4811 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
4813 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4814 ice_aqc_opc_free_res, NULL);
/* Failure is logged but still propagated via status (return elided). */
4816 ice_debug(hw, ICE_DBG_SW,
4817 "counter resource could not be freed\n");
4824 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
4825 * @hw: pointer to the hardware structure
4826 * @counter_id: returns counter index
/* ice_alloc_vlan_res_counter - convenience wrapper: allocate one dedicated
 * VLAN-type counter; index returned through *counter_id (trailing argument
 * and closing brace elided in this excerpt).
 */
4828 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
4830 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4831 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4836 * ice_free_vlan_res_counter - Free counter resource for VLAN type
4837 * @hw: pointer to the hardware structure
4838 * @counter_id: counter index to be freed
/* ice_free_vlan_res_counter - convenience wrapper: free one dedicated
 * VLAN-type counter identified by @counter_id (trailing argument and
 * closing brace elided in this excerpt).
 */
4840 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
4842 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4843 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4848 * ice_alloc_res_lg_act - add large action resource
4849 * @hw: pointer to the hardware structure
4850 * @l_id: large action ID to fill it in
4851 * @num_acts: number of actions to hold with a large action entry
/* ice_alloc_res_lg_act - allocate a wide-table entry for a large action.
 * Picks the wide-table resource type from @num_acts (1..ICE_MAX_LG_ACT)
 * and returns the allocated entry index through *l_id on AQ success.
 * NOTE(review): the `if (num_acts == 1)` line, the else before
 * WIDE_TABLE_4, the status check before reading sw_resp, and the final
 * return are on lines elided from this excerpt.
 */
4853 static enum ice_status
4854 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
4856 struct ice_aqc_alloc_free_res_elem *sw_buf;
4857 enum ice_status status;
/* Bound check: zero actions or more than the hardware maximum is invalid. */
4860 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
4861 return ICE_ERR_PARAM;
4863 /* Allocate resource for large action */
4864 buf_len = sizeof(*sw_buf);
4865 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
4866 ice_malloc(hw, buf_len);
4868 return ICE_ERR_NO_MEMORY;
4870 sw_buf->num_elems = CPU_TO_LE16(1);
4872 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
4873 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
4874 * If num_acts is greater than 2, then use
4875 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
4876 * The num_acts cannot exceed 4. This was ensured at the
4877 * beginning of the function.
/* NOTE(review): original comment said WIDE_TABLE_3 for num_acts == 2 but
 * the code below selects WIDE_TABLE_2; comment corrected to match code.
 */
4880 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
4881 else if (num_acts == 2)
4882 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
4884 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
4886 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
4887 ice_aqc_opc_alloc_res, NULL);
4889 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
4891 ice_free(hw, sw_buf);
4896 * ice_add_mac_with_sw_marker - add filter with sw marker
4897 * @hw: pointer to the hardware structure
4898 * @f_info: filter info structure containing the MAC filter information
4899 * @sw_marker: sw marker to tag the Rx descriptor with
/* ice_add_mac_with_sw_marker - add a MAC forward rule tagged with a marker.
 * Validates the filter (FWD_TO_VSI, MAC lookup, valid marker and VSI),
 * ensures the base MAC rule exists, then upgrades it to a 3-action large
 * action that stamps @sw_marker into the Rx descriptor.  Marker and counter
 * actions are mutually exclusive on a rule.
 * NOTE(review): several lines (opening brace, some locals, error-path
 * labels, final return) are elided from this excerpt.
 */
4902 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
4905 struct ice_fltr_mgmt_list_entry *m_entry;
4906 struct ice_fltr_list_entry fl_info;
4907 struct ice_sw_recipe *recp_list;
4908 struct LIST_HEAD_TYPE l_head;
4909 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4910 enum ice_status ret;
/* Parameter validation: only forward-to-VSI MAC rules can carry a marker. */
4914 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4915 return ICE_ERR_PARAM;
4917 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4918 return ICE_ERR_PARAM;
4920 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
4921 return ICE_ERR_PARAM;
4923 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4924 return ICE_ERR_PARAM;
4925 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4927 /* Add filter if it doesn't exist so then the adding of large
4928 * action always results in update
4931 INIT_LIST_HEAD(&l_head);
4932 fl_info.fltr_info = *f_info;
4933 LIST_ADD(&fl_info.list_entry, &l_head);
4935 entry_exists = false;
4936 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
4937 hw->port_info->lport);
/* ALREADY_EXISTS is not an error here - remember it so the cleanup path
 * below does not remove a rule this call did not create.
 */
4938 if (ret == ICE_ERR_ALREADY_EXISTS)
4939 entry_exists = true;
4943 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4944 rule_lock = &recp_list->filt_rule_lock;
4945 ice_acquire_lock(rule_lock);
4946 /* Get the book keeping entry for the filter */
4947 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
4951 /* If counter action was enabled for this rule then don't enable
4952 * sw marker large action
4954 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4955 ret = ICE_ERR_PARAM;
4959 /* if same marker was added before */
4960 if (m_entry->sw_marker_id == sw_marker) {
4961 ret = ICE_ERR_ALREADY_EXISTS;
4965 /* Allocate a hardware table entry to hold large act. Three actions
4966 * for marker based large action
4968 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
4972 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4975 /* Update the switch rule to add the marker action */
4976 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
4978 ice_release_lock(rule_lock);
4983 ice_release_lock(rule_lock);
4984 /* only remove entry if it did not exist previously */
4986 ret = ice_remove_mac(hw, &l_head);
4992 * ice_add_mac_with_counter - add filter with counter enabled
4993 * @hw: pointer to the hardware structure
4994 * @f_info: pointer to filter info structure containing the MAC filter
/* ice_add_mac_with_counter - add a MAC forward rule with a hit counter.
 * Validates the filter, ensures the base MAC rule exists, allocates a VLAN
 * counter resource plus a 2-action large action, and attaches the counter
 * to the rule.  Counter and sw-marker actions are mutually exclusive.
 * NOTE(review): several lines (opening brace, counter_id/lg_act_id locals,
 * error-path labels, final return) are elided from this excerpt.
 */
4998 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
5000 struct ice_fltr_mgmt_list_entry *m_entry;
5001 struct ice_fltr_list_entry fl_info;
5002 struct ice_sw_recipe *recp_list;
5003 struct LIST_HEAD_TYPE l_head;
5004 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5005 enum ice_status ret;
/* Parameter validation mirrors ice_add_mac_with_sw_marker. */
5010 if (f_info->fltr_act != ICE_FWD_TO_VSI)
5011 return ICE_ERR_PARAM;
5013 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5014 return ICE_ERR_PARAM;
5016 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5017 return ICE_ERR_PARAM;
5018 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5019 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5021 entry_exist = false;
5023 rule_lock = &recp_list->filt_rule_lock;
5025 /* Add filter if it doesn't exist so then the adding of large
5026 * action always results in update
5028 INIT_LIST_HEAD(&l_head);
5030 fl_info.fltr_info = *f_info;
5031 LIST_ADD(&fl_info.list_entry, &l_head);
5033 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5034 hw->port_info->lport);
/* Pre-existing rule is fine; presumably entry_exist is set true on the
 * elided line 5036 so cleanup won't remove it -- TODO confirm.
 */
5035 if (ret == ICE_ERR_ALREADY_EXISTS)
5040 ice_acquire_lock(rule_lock);
5041 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5043 ret = ICE_ERR_BAD_PTR;
5047 /* Don't enable counter for a filter for which sw marker was enabled */
5048 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
5049 ret = ICE_ERR_PARAM;
5053 /* If a counter was already enabled then don't need to add again */
5054 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5055 ret = ICE_ERR_ALREADY_EXISTS;
5059 /* Allocate a hardware table entry to VLAN counter */
5060 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
5064 /* Allocate a hardware table entry to hold large act. Two actions for
5065 * counter based large action
5067 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
5071 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5074 /* Update the switch rule to add the counter action */
5075 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
5077 ice_release_lock(rule_lock);
5082 ice_release_lock(rule_lock);
5083 /* only remove entry if it did not exist previously */
5085 ret = ice_remove_mac(hw, &l_head);
5090 /* This is mapping table entry that maps every word within a given protocol
5091 * structure to the real byte offset as per the specification of that
5093 * for example dst address is 3 words in ethertype header and corresponding
5094 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
5095 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
5096 * matching entry describing its field. This needs to be updated if new
5097 * structure is added to that union.
/* Per-protocol word-extraction table: for each protocol type, the byte
 * offsets (within that protocol's header) of every 16-bit word that can be
 * matched on.  Indexed by enum ice_protocol_type; must stay in sync with
 * the ice_prot_hdr union (see comment above this table in the file).
 */
5099 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
5100 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
5101 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
5102 { ICE_ETYPE_OL, { 0 } },
5103 { ICE_VLAN_OFOS, { 0, 2 } },
5104 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
5105 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
5106 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
5107 26, 28, 30, 32, 34, 36, 38 } },
5108 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
5109 26, 28, 30, 32, 34, 36, 38 } },
5110 { ICE_TCP_IL, { 0, 2 } },
5111 { ICE_UDP_OF, { 0, 2 } },
5112 { ICE_UDP_ILOS, { 0, 2 } },
5113 { ICE_SCTP_IL, { 0, 2 } },
/* Tunnel headers start extracting at offset 8 (past their fixed flags). */
5114 { ICE_VXLAN, { 8, 10, 12, 14 } },
5115 { ICE_GENEVE, { 8, 10, 12, 14 } },
5116 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
5117 { ICE_NVGRE, { 0, 2, 4, 6 } },
5118 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
5119 { ICE_PPPOE, { 0, 2, 4, 6 } },
5120 { ICE_PFCP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
5121 { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
5122 { ICE_ESP, { 0, 2, 4, 6 } },
5123 { ICE_AH, { 0, 2, 4, 6, 8, 10 } },
5124 { ICE_NAT_T, { 8, 10, 12, 14 } },
5127 /* The following table describes preferred grouping of recipes.
5128 * If a recipe that needs to be programmed is a superset or matches one of the
5129 * following combinations, then the recipe needs to be chained as per the
/* Software protocol type -> hardware protocol ID mapping.  Note that
 * several tunnel types deliberately share a hardware ID (e.g. VXLAN,
 * GENEVE, VXLAN-GPE and GTP all map to the outer-UDP hardware protocol).
 */
5133 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
5134 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
5135 { ICE_MAC_IL, ICE_MAC_IL_HW },
5136 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
5137 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
5138 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
5139 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
5140 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
5141 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
5142 { ICE_TCP_IL, ICE_TCP_IL_HW },
5143 { ICE_UDP_OF, ICE_UDP_OF_HW },
5144 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
5145 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
5146 { ICE_VXLAN, ICE_UDP_OF_HW },
5147 { ICE_GENEVE, ICE_UDP_OF_HW },
5148 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
5149 { ICE_NVGRE, ICE_GRE_OF_HW },
5150 { ICE_GTP, ICE_UDP_OF_HW },
5151 { ICE_PPPOE, ICE_PPPOE_HW },
5152 { ICE_PFCP, ICE_UDP_ILOS_HW },
5153 { ICE_L2TPV3, ICE_L2TPV3_HW },
5154 { ICE_ESP, ICE_ESP_HW },
5155 { ICE_AH, ICE_AH_HW },
5156 { ICE_NAT_T, ICE_UDP_ILOS_HW },
5160 * ice_find_recp - find a recipe
5161 * @hw: pointer to the hardware structure
5162 * @lkup_exts: extension sequence to match
5164 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
/* ice_find_recp - search existing recipes for one matching @lkup_exts.
 * A recipe matches when it has the same number of valid words and every
 * (prot_id, off) word of @lkup_exts is present in the recipe (the mask
 * comparison appears on elided lines).  For profile-rule tunnel types the
 * recipe's tun_type must match as well.  Returns the recipe index or
 * ICE_MAX_NUM_RECIPES when no match exists.
 * NOTE(review): several lines (found flag handling, inner-loop break,
 * refresh_required usage) are elided from this excerpt.
 */
5166 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
5167 enum ice_sw_tunnel_type tun_type)
5169 bool refresh_required = true;
5170 struct ice_sw_recipe *recp;
5173 /* Walk through existing recipes to find a match */
5174 recp = hw->switch_info->recp_list;
5175 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5176 /* If recipe was not created for this ID, in SW bookkeeping,
5177 * check if FW has an entry for this recipe. If the FW has an
5178 * entry update it in our SW bookkeeping and continue with the
5181 if (!recp[i].recp_created)
5182 if (ice_get_recp_frm_fw(hw,
5183 hw->switch_info->recp_list, i,
5187 /* Skip inverse action recipes */
5188 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
5189 ICE_AQ_RECIPE_ACT_INV_ACT)
5192 /* if number of words we are looking for match */
5193 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
5194 struct ice_fv_word *a = lkup_exts->fv_words;
5195 struct ice_fv_word *b = recp[i].lkup_exts.fv_words;
5196 u16 *c = recp[i].lkup_exts.field_mask;
5197 u16 *d = lkup_exts->field_mask;
/* O(p*q) word-set comparison: for each requested word p, scan the
 * recipe's words q for an identical (off, prot_id) pair.
 */
5201 for (p = 0; p < lkup_exts->n_val_words; p++) {
5202 for (q = 0; q < recp[i].lkup_exts.n_val_words;
5204 if (a[p].off == b[q].off &&
5205 a[p].prot_id == b[q].prot_id &&
5207 /* Found the "p"th word in the
5212 /* After walking through all the words in the
5213 * "i"th recipe if "p"th word was not found then
5214 * this recipe is not what we are looking for.
5215 * So break out from this loop and try the next
5218 if (q >= recp[i].lkup_exts.n_val_words) {
5223 /* If for "i"th recipe the found was never set to false
5224 * then it means we found our match
5226 if (ice_is_prof_rule(tun_type) &&
5227 tun_type == recp[i].tun_type && found)
5228 return i; /* Return the recipe ID */
5229 else if (!ice_is_prof_rule(tun_type) && found)
5230 return i; /* Return the recipe ID */
5233 return ICE_MAX_NUM_RECIPES;
5237 * ice_prot_type_to_id - get protocol ID from protocol type
5238 * @type: protocol type
5239 * @id: pointer to variable that will receive the ID
5241 * Returns true if found, false otherwise
/* ice_prot_type_to_id - linear lookup of the hardware protocol ID for a
 * software protocol @type via ice_prot_id_tbl; stores the ID through *id.
 * Returns true on a hit, false otherwise (return statements elided here).
 */
5243 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
5247 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
5248 if (ice_prot_id_tbl[i].type == type) {
5249 *id = ice_prot_id_tbl[i].protocol_id;
5256 * ice_fill_valid_words - count valid words
5257 * @rule: advanced rule with lookup information
5258 * @lkup_exts: byte offset extractions of the words that are valid
5260 * calculate valid words in a lookup rule using mask value
/* ice_fill_valid_words - append the words of @rule with non-zero mask to
 * @lkup_exts.  Each masked 16-bit word of the rule's match union is
 * recorded as a (prot_id, offset, mask) triple.  Returns the number of
 * words added (ret_val).
 * NOTE(review): the early-return when the protocol type is unknown, the
 * break on overflow, and the word++ increment are on elided lines.
 */
5263 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
5264 struct ice_prot_lkup_ext *lkup_exts)
5266 u8 j, word, prot_id, ret_val;
5268 if (!ice_prot_type_to_id(rule->type, &prot_id))
5271 word = lkup_exts->n_val_words;
/* Scan the mask union one u16 at a time; only masked words matter. */
5273 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
5274 if (((u16 *)&rule->m_u)[j] &&
5275 rule->type < ARRAY_SIZE(ice_prot_ext)) {
5276 /* No more space to accommodate */
5277 if (word >= ICE_MAX_CHAIN_WORDS)
5279 lkup_exts->fv_words[word].off =
5280 ice_prot_ext[rule->type].offs[j];
5281 lkup_exts->fv_words[word].prot_id =
5282 ice_prot_id_tbl[rule->type].protocol_id;
5283 lkup_exts->field_mask[word] = ((u16 *)&rule->m_u)[j];
5287 ret_val = word - lkup_exts->n_val_words;
5288 lkup_exts->n_val_words = word;
5294 * ice_create_first_fit_recp_def - Create a recipe grouping
5295 * @hw: pointer to the hardware structure
5296 * @lkup_exts: an array of protocol header extractions
5297 * @rg_list: pointer to a list that stores new recipe groups
5298 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
5300 * Using first fit algorithm, take all the words that are still not done
5301 * and start grouping them in 4-word groups. Each group makes up one
/* ice_create_first_fit_recp_def - first-fit grouping of lookup words.
 * Packs the not-yet-done words of @lkup_exts into groups of up to
 * ICE_NUM_WORDS_RECIPE words, allocating one ice_recp_grp_entry per group
 * onto @rg_list; each group later becomes one recipe.
 * NOTE(review): the *recp_cnt updates, the grp==NULL condition in the loop
 * and the final return are on elided lines.
 */
5304 static enum ice_status
5305 ice_create_first_fit_recp_def(struct ice_hw *hw,
5306 struct ice_prot_lkup_ext *lkup_exts,
5307 struct LIST_HEAD_TYPE *rg_list,
5310 struct ice_pref_recipe_group *grp = NULL;
/* Special case: no valid words still produces one (empty) group. */
5315 if (!lkup_exts->n_val_words) {
5316 struct ice_recp_grp_entry *entry;
5318 entry = (struct ice_recp_grp_entry *)
5319 ice_malloc(hw, sizeof(*entry));
5321 return ICE_ERR_NO_MEMORY;
5322 LIST_ADD(&entry->l_entry, rg_list);
5323 grp = &entry->r_group;
5325 grp->n_val_pairs = 0;
5328 /* Walk through every word in the rule to check if it is not done. If so
5329 * then this word needs to be part of a new recipe.
5331 for (j = 0; j < lkup_exts->n_val_words; j++)
5332 if (!ice_is_bit_set(lkup_exts->done, j)) {
/* Start a fresh group when none exists or the current one is full. */
5334 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
5335 struct ice_recp_grp_entry *entry;
5337 entry = (struct ice_recp_grp_entry *)
5338 ice_malloc(hw, sizeof(*entry));
5340 return ICE_ERR_NO_MEMORY;
5341 LIST_ADD(&entry->l_entry, rg_list);
5342 grp = &entry->r_group;
5346 grp->pairs[grp->n_val_pairs].prot_id =
5347 lkup_exts->fv_words[j].prot_id;
5348 grp->pairs[grp->n_val_pairs].off =
5349 lkup_exts->fv_words[j].off;
5350 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
5358 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
5359 * @hw: pointer to the hardware structure
5360 * @fv_list: field vector with the extraction sequence information
5361 * @rg_list: recipe groupings with protocol-offset pairs
5363 * Helper function to fill in the field vector indices for protocol-offset
5364 * pairs. These indexes are then ultimately programmed into a recipe.
/* ice_fill_fv_word_index - resolve each group's (prot_id, off) pairs to
 * field-vector indices.  Uses the first FV in @fv_list as the extraction
 * sequence; for every pair in every group, scans the FV words and records
 * the matching index (fv_idx, elided line 5398) and a byte-swapped mask.
 * Returns ICE_ERR_PARAM if a pair cannot be located in the FV.
 * NOTE(review): the early return for an empty list and the found flag are
 * on elided lines.
 */
5366 static enum ice_status
5367 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
5368 struct LIST_HEAD_TYPE *rg_list)
5370 struct ice_sw_fv_list_entry *fv;
5371 struct ice_recp_grp_entry *rg;
5372 struct ice_fv_word *fv_ext;
5374 if (LIST_EMPTY(fv_list))
/* Only the first field vector is consulted; presumably all FVs in the
 * list share the same extraction layout -- TODO confirm.
 */
5377 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
5378 fv_ext = fv->fv_ptr->ew;
5380 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
5383 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
5384 struct ice_fv_word *pr;
5389 pr = &rg->r_group.pairs[i];
5390 mask = rg->r_group.mask[i];
5392 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
5393 if (fv_ext[j].prot_id == pr->prot_id &&
5394 fv_ext[j].off == pr->off) {
5397 /* Store index of field vector */
5399 /* Mask is given by caller as big
5400 * endian, but sent to FW as little
/* Swap the two bytes of the 16-bit mask for firmware consumption. */
5403 rg->fv_mask[i] = mask << 8 | mask >> 8;
5407 /* Protocol/offset could not be found, caller gave an
5411 return ICE_ERR_PARAM;
5419 * ice_find_free_recp_res_idx - find free result indexes for recipe
5420 * @hw: pointer to hardware structure
5421 * @profiles: bitmap of profiles that will be associated with the new recipe
5422 * @free_idx: pointer to variable to receive the free index bitmap
5424 * The algorithm used here is:
5425 * 1. When creating a new recipe, create a set P which contains all
5426 * Profiles that will be associated with our new recipe
5428 * 2. For each Profile p in set P:
5429 * a. Add all recipes associated with Profile p into set R
5430 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
5431 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
5432 * i. Or just assume they all have the same possible indexes:
5434 * i.e., PossibleIndexes = 0x0000F00000000000
5436 * 3. For each Recipe r in set R:
5437 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
5438 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
5440 * FreeIndexes will contain the bits indicating the indexes free for use,
5441 * then the code needs to update the recipe[r].used_result_idx_bits to
5442 * indicate which indexes were selected for use by this recipe.
/* ice_find_free_recp_res_idx - compute the free result-index bitmap.
 * Implements the PossibleIndexes / UsedIndexes algorithm documented above:
 * intersect the possible indexes of all @profiles, OR together the indexes
 * used by every recipe already tied to those profiles, and XOR to obtain
 * @free_idx.  Returns the count of free indexes (count accumulation loop
 * partially elided).
 */
5445 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
5446 ice_bitmap_t *free_idx)
5448 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5449 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5450 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
5454 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5455 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5456 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
5457 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
/* Start with every FV word index marked possible. */
5459 for (count = 0; count < ICE_MAX_FV_WORDS; count++)
5460 ice_set_bit(count, possible_idx);
5462 /* For each profile we are going to associate the recipe with, add the
5463 * recipes that are associated with that profile. This will give us
5464 * the set of recipes that our recipe may collide with. Also, determine
5465 * what possible result indexes are usable given this set of profiles.
5468 while (ICE_MAX_NUM_PROFILES >
5469 (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
5470 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
5471 ICE_MAX_NUM_RECIPES);
5472 ice_and_bitmap(possible_idx, possible_idx,
5473 hw->switch_info->prof_res_bm[bit],
5478 /* For each recipe that our new recipe may collide with, determine
5479 * which indexes have been used.
5481 for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
5482 if (ice_is_bit_set(recipes, bit)) {
5483 ice_or_bitmap(used_idx, used_idx,
5484 hw->switch_info->recp_list[bit].res_idxs,
/* free = possible XOR used (used is a subset of possible). */
5488 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
5490 /* return number of free indexes */
5493 while (ICE_MAX_FV_WORDS >
5494 (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
5503 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
5504 * @hw: pointer to hardware structure
5505 * @rm: recipe management list entry
5506 * @match_tun: if field vector index for tunnel needs to be programmed
5507 * @profiles: bitmap of profiles that will be assocated.
5509 static enum ice_status
5510 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
5511 bool match_tun, ice_bitmap_t *profiles)
5513 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5514 struct ice_aqc_recipe_data_elem *tmp;
5515 struct ice_aqc_recipe_data_elem *buf;
5516 struct ice_recp_grp_entry *entry;
5517 enum ice_status status;
5523 /* When more than one recipe are required, another recipe is needed to
5524 * chain them together. Matching a tunnel metadata ID takes up one of
5525 * the match fields in the chaining recipe reducing the number of
5526 * chained recipes by one.
5528 /* check number of free result indices */
5529 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5530 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
5532 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
5533 free_res_idx, rm->n_grp_count);
5535 if (rm->n_grp_count > 1) {
5536 if (rm->n_grp_count > free_res_idx)
5537 return ICE_ERR_MAX_LIMIT;
5542 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
5543 return ICE_ERR_MAX_LIMIT;
5545 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
5546 ICE_MAX_NUM_RECIPES,
5549 return ICE_ERR_NO_MEMORY;
5551 buf = (struct ice_aqc_recipe_data_elem *)
5552 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
5554 status = ICE_ERR_NO_MEMORY;
5558 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
5559 recipe_count = ICE_MAX_NUM_RECIPES;
5560 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5562 if (status || recipe_count == 0)
5565 /* Allocate the recipe resources, and configure them according to the
5566 * match fields from protocol headers and extracted field vectors.
5568 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
5569 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5572 status = ice_alloc_recipe(hw, &entry->rid);
5576 /* Clear the result index of the located recipe, as this will be
5577 * updated, if needed, later in the recipe creation process.
5579 tmp[0].content.result_indx = 0;
5581 buf[recps] = tmp[0];
5582 buf[recps].recipe_indx = (u8)entry->rid;
5583 /* if the recipe is a non-root recipe RID should be programmed
5584 * as 0 for the rules to be applied correctly.
5586 buf[recps].content.rid = 0;
5587 ice_memset(&buf[recps].content.lkup_indx, 0,
5588 sizeof(buf[recps].content.lkup_indx),
5591 /* All recipes use look-up index 0 to match switch ID. */
5592 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5593 buf[recps].content.mask[0] =
5594 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5595 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5598 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5599 buf[recps].content.lkup_indx[i] = 0x80;
5600 buf[recps].content.mask[i] = 0;
5603 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5604 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5605 buf[recps].content.mask[i + 1] =
5606 CPU_TO_LE16(entry->fv_mask[i]);
5609 if (rm->n_grp_count > 1) {
5610 /* Checks to see if there really is a valid result index
5613 if (chain_idx >= ICE_MAX_FV_WORDS) {
5614 ice_debug(hw, ICE_DBG_SW,
5615 "No chain index available\n");
5616 status = ICE_ERR_MAX_LIMIT;
5620 entry->chain_idx = chain_idx;
5621 buf[recps].content.result_indx =
5622 ICE_AQ_RECIPE_RESULT_EN |
5623 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5624 ICE_AQ_RECIPE_RESULT_DATA_M);
5625 ice_clear_bit(chain_idx, result_idx_bm);
5626 chain_idx = ice_find_first_bit(result_idx_bm,
5630 /* fill recipe dependencies */
5631 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
5632 ICE_MAX_NUM_RECIPES);
5633 ice_set_bit(buf[recps].recipe_indx,
5634 (ice_bitmap_t *)buf[recps].recipe_bitmap);
5635 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5639 if (rm->n_grp_count == 1) {
5640 rm->root_rid = buf[0].recipe_indx;
5641 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
5642 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5643 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5644 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5645 sizeof(buf[0].recipe_bitmap),
5646 ICE_NONDMA_TO_NONDMA);
5648 status = ICE_ERR_BAD_PTR;
5651 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
5652 * the recipe which is getting created if specified
5653 * by user. Usually any advanced switch filter, which results
5654 * into new extraction sequence, ended up creating a new recipe
5655 * of type ROOT and usually recipes are associated with profiles
5656 * Switch rule referreing newly created recipe, needs to have
5657 * either/or 'fwd' or 'join' priority, otherwise switch rule
5658 * evaluation will not happen correctly. In other words, if
5659 * switch rule to be evaluated on priority basis, then recipe
5660 * needs to have priority, otherwise it will be evaluated last.
5662 buf[0].content.act_ctrl_fwd_priority = rm->priority;
5664 struct ice_recp_grp_entry *last_chain_entry;
5667 /* Allocate the last recipe that will chain the outcomes of the
5668 * other recipes together
5670 status = ice_alloc_recipe(hw, &rid);
5674 buf[recps].recipe_indx = (u8)rid;
5675 buf[recps].content.rid = (u8)rid;
5676 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5677 /* the new entry created should also be part of rg_list to
5678 * make sure we have complete recipe
5680 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
5681 sizeof(*last_chain_entry));
5682 if (!last_chain_entry) {
5683 status = ICE_ERR_NO_MEMORY;
5686 last_chain_entry->rid = rid;
5687 ice_memset(&buf[recps].content.lkup_indx, 0,
5688 sizeof(buf[recps].content.lkup_indx),
5690 /* All recipes use look-up index 0 to match switch ID. */
5691 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5692 buf[recps].content.mask[0] =
5693 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5694 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5695 buf[recps].content.lkup_indx[i] =
5696 ICE_AQ_RECIPE_LKUP_IGNORE;
5697 buf[recps].content.mask[i] = 0;
5701 /* update r_bitmap with the recp that is used for chaining */
5702 ice_set_bit(rid, rm->r_bitmap);
5703 /* this is the recipe that chains all the other recipes so it
5704 * should not have a chaining ID to indicate the same
5706 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
5707 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
5709 last_chain_entry->fv_idx[i] = entry->chain_idx;
5710 buf[recps].content.lkup_indx[i] = entry->chain_idx;
5711 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
5712 ice_set_bit(entry->rid, rm->r_bitmap);
5714 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
5715 if (sizeof(buf[recps].recipe_bitmap) >=
5716 sizeof(rm->r_bitmap)) {
5717 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5718 sizeof(buf[recps].recipe_bitmap),
5719 ICE_NONDMA_TO_NONDMA);
5721 status = ICE_ERR_BAD_PTR;
5724 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5726 /* To differentiate among different UDP tunnels, a meta data ID
5730 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
5731 buf[recps].content.mask[i] =
5732 CPU_TO_LE16(ICE_TUN_FLAG_MASK);
5736 rm->root_rid = (u8)rid;
5738 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5742 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5743 ice_release_change_lock(hw);
5747 /* Every recipe that just got created add it to the recipe
5750 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5751 struct ice_switch_info *sw = hw->switch_info;
5752 bool is_root, idx_found = false;
5753 struct ice_sw_recipe *recp;
5754 u16 idx, buf_idx = 0;
5756 /* find buffer index for copying some data */
5757 for (idx = 0; idx < rm->n_grp_count; idx++)
5758 if (buf[idx].recipe_indx == entry->rid) {
5764 status = ICE_ERR_OUT_OF_RANGE;
5768 recp = &sw->recp_list[entry->rid];
5769 is_root = (rm->root_rid == entry->rid);
5770 recp->is_root = is_root;
5772 recp->root_rid = entry->rid;
5773 recp->big_recp = (is_root && rm->n_grp_count > 1);
5775 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
5776 entry->r_group.n_val_pairs *
5777 sizeof(struct ice_fv_word),
5778 ICE_NONDMA_TO_NONDMA);
5780 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5781 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
5783 /* Copy non-result fv index values and masks to recipe. This
5784 * call will also update the result recipe bitmask.
5786 ice_collect_result_idx(&buf[buf_idx], recp);
5788 /* for non-root recipes, also copy to the root, this allows
5789 * easier matching of a complete chained recipe
5792 ice_collect_result_idx(&buf[buf_idx],
5793 &sw->recp_list[rm->root_rid]);
5795 recp->n_ext_words = entry->r_group.n_val_pairs;
5796 recp->chain_idx = entry->chain_idx;
5797 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5798 recp->n_grp_count = rm->n_grp_count;
5799 recp->tun_type = rm->tun_type;
5800 recp->recp_created = true;
/* NOTE(review): this excerpt is an elided listing — some original lines
 * (e.g. the recp_count declaration and the status check guarding the
 * copies below) are not visible here; only visible statements are
 * annotated. Embedded leading numbers are listing artifacts.
 */
5815 * ice_create_recipe_group - creates recipe group
5816 * @hw: pointer to hardware structure
5817 * @rm: recipe management list entry
5818 * @lkup_exts: lookup elements
5820 static enum ice_status
5821 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5822 struct ice_prot_lkup_ext *lkup_exts)
5824 enum ice_status status;
/* Start with no groups; the first-fit packing below adds however many
 * recipe groups it produced (recp_count).
 */
5827 rm->n_grp_count = 0;
5829 /* Create recipes for words that are marked not done by packing them
5832 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5833 &rm->rg_list, &recp_count);
/* Mirror the lookup-extension word count, extraction words and field
 * masks into the recipe-management entry so later stages can build the
 * recipe content from @rm alone.
 */
5835 rm->n_grp_count += recp_count;
5836 rm->n_ext_words = lkup_exts->n_val_words;
5837 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
5838 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
5839 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
5840 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
/* NOTE(review): elided listing — error-path braces/labels between the
 * visible lines are missing from this excerpt.
 */
5847 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
5848 * @hw: pointer to hardware structure
5849 * @lkups: lookup elements or match criteria for the advanced recipe, one
5850 * structure per protocol header
5851 * @lkups_cnt: number of protocols
5852 * @bm: bitmap of field vectors to consider
5853 * @fv_list: pointer to a list that holds the returned field vectors
5855 static enum ice_status
5856 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5857 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
5859 enum ice_status status;
/* Scratch array of HW protocol IDs, one per lookup element; zeroed by
 * ice_calloc and released via ice_free before returning.
 * NOTE(review): lkups_cnt == 0 would make this a zero-sized allocation
 * whose NULL result is reported as ICE_ERR_NO_MEMORY — confirm callers
 * never pass 0 here (the caller visible below checks lkups_cnt first).
 */
5866 prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
5868 return ICE_ERR_NO_MEMORY;
/* Translate each protocol-type lookup into its HW protocol ID; an
 * unknown type aborts with a configuration error.
 */
5870 for (i = 0; i < lkups_cnt; i++)
5871 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
5872 status = ICE_ERR_CFG;
5876 /* Find field vectors that include all specified protocol types */
5877 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
/* Scratch array is no longer needed regardless of status */
5880 ice_free(hw, prot_ids);
/* NOTE(review): elided listing — the else branch / success return are
 * not visible in this excerpt.
 */
5885 * ice_add_special_words - Add words that are not protocols, such as metadata
5886 * @rinfo: other information regarding the rule e.g. priority and action info
5887 * @lkup_exts: lookup word structure
5889 static enum ice_status
5890 ice_add_special_words(struct ice_adv_rule_info *rinfo,
5891 struct ice_prot_lkup_ext *lkup_exts)
5893 /* If this is a tunneled packet, then add recipe index to match the
5894 * tunnel bit in the packet metadata flags.
5896 if (rinfo->tun_type != ICE_NON_TUN) {
/* Append the metadata word only if there is room left in the
 * lookup-extension table; otherwise fail with ICE_ERR_MAX_LIMIT.
 */
5897 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
5898 u8 word = lkup_exts->n_val_words++;
/* The tunnel flag lives in packet metadata: addressed via the metadata
 * protocol ID with the MDID index scaled to a byte offset.
 */
5900 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5901 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID *
5903 lkup_exts->field_mask[word] = ICE_TUN_FLAG_MASK;
5905 return ICE_ERR_MAX_LIMIT;
/* NOTE(review): elided listing — case labels for the default/non-tunnel
 * arm, break/return statements and closing braces are missing from this
 * excerpt. The PROFID_* cases appear to set an explicit profile bit
 * directly (presumably returning without the generic lookup below) —
 * confirm against the full source.
 */
5912 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5913 * @hw: pointer to hardware structure
5914 * @rinfo: other information regarding the rule e.g. priority and action info
5915 * @bm: pointer to memory for returning the bitmap of field vectors
5918 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5921 enum ice_prof_type prof_type;
/* Caller's bitmap starts empty; bits are set either directly (profile
 * rules) or via ice_get_sw_fv_bitmap() from the derived profile type.
 */
5923 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
/* Map the rule's tunnel type onto a profile category */
5925 switch (rinfo->tun_type) {
5927 prof_type = ICE_PROF_NON_TUN;
5929 case ICE_ALL_TUNNELS:
5930 prof_type = ICE_PROF_TUN_ALL;
/* All UDP-encapsulated tunnels share one UDP-tunnel profile category */
5932 case ICE_SW_TUN_VXLAN_GPE:
5933 case ICE_SW_TUN_GENEVE:
5934 case ICE_SW_TUN_VXLAN:
5935 case ICE_SW_TUN_UDP:
5936 case ICE_SW_TUN_GTP:
5937 prof_type = ICE_PROF_TUN_UDP;
5939 case ICE_SW_TUN_NVGRE:
5940 prof_type = ICE_PROF_TUN_GRE;
5942 case ICE_SW_TUN_PPPOE:
5943 prof_type = ICE_PROF_TUN_PPPOE;
/* Profile-rule tunnel types select one specific HW profile ID */
5945 case ICE_SW_TUN_PROFID_IPV6_ESP:
5946 ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
5948 case ICE_SW_TUN_PROFID_IPV6_AH:
5949 ice_set_bit(ICE_PROFID_IPV6_AH, bm);
5951 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
5952 ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
5954 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
5955 case ICE_SW_TUN_IPV6_NAT_T:
5956 ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
5958 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
5959 ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
5961 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
5962 ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
5964 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
5965 ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
5967 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
5968 ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
5970 case ICE_SW_TUN_IPV4_NAT_T:
5971 ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
5973 case ICE_SW_TUN_AND_NON_TUN:
5975 prof_type = ICE_PROF_ALL;
/* Expand the chosen profile category into individual profile bits */
5979 ice_get_sw_fv_bitmap(hw, prof_type, bm);
/* NOTE(review): elided listing — the `switch (type)` opener and the
 * `return true;` / `default: return false;` arms are not visible here;
 * only the case labels remain. All listed types are PROFID_* variants,
 * i.e. rules matched by profile hit alone.
 */
5983 * ice_is_prof_rule - determine if rule type is a profile rule
5984 * @type: the rule type
5986 * if the rule type is a profile rule, that means that there no field value
5987 * match required, in this case just a profile hit is required.
5989 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
5992 case ICE_SW_TUN_PROFID_IPV6_ESP:
5993 case ICE_SW_TUN_PROFID_IPV6_AH:
5994 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
5995 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
5996 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
5997 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
5998 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
5999 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
/* NOTE(review): elided listing — many guard lines (status checks,
 * `if (!rm)`, braces, goto labels such as err_unroll/err_free_lkup_exts)
 * are missing from this excerpt; annotations below cover visible
 * statements only.
 */
6009 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
6010 * @hw: pointer to hardware structure
6011 * @lkups: lookup elements or match criteria for the advanced recipe, one
6012 * structure per protocol header
6013 * @lkups_cnt: number of protocols
6014 * @rinfo: other information regarding the rule e.g. priority and action info
6015 * @rid: return the recipe ID of the recipe created
6017 static enum ice_status
6018 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6019 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
6021 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
6022 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
6023 struct ice_prot_lkup_ext *lkup_exts;
6024 struct ice_recp_grp_entry *r_entry;
6025 struct ice_sw_fv_list_entry *fvit;
6026 struct ice_recp_grp_entry *r_tmp;
6027 struct ice_sw_fv_list_entry *tmp;
6028 enum ice_status status = ICE_SUCCESS;
6029 struct ice_sw_recipe *rm;
6030 bool match_tun = false;
/* Non-profile rules must supply at least one lookup element */
6033 if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
6034 return ICE_ERR_PARAM;
6036 lkup_exts = (struct ice_prot_lkup_ext *)
6037 ice_malloc(hw, sizeof(*lkup_exts))
6039 return ICE_ERR_NO_MEMORY;
6041 /* Determine the number of words to be matched and if it exceeds a
6042 * recipe's restrictions
6044 for (i = 0; i < lkups_cnt; i++) {
6047 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
6048 status = ICE_ERR_CFG;
6049 goto err_free_lkup_exts;
6052 count = ice_fill_valid_words(&lkups[i], lkup_exts);
6054 status = ICE_ERR_CFG;
6055 goto err_free_lkup_exts;
6059 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
6061 status = ICE_ERR_NO_MEMORY;
6062 goto err_free_lkup_exts;
6065 /* Get field vectors that contain fields extracted from all the protocol
6066 * headers being programmed.
6068 INIT_LIST_HEAD(&rm->fv_list);
6069 INIT_LIST_HEAD(&rm->rg_list);
6071 /* Get bitmap of field vectors (profiles) that are compatible with the
6072 * rule request; only these will be searched in the subsequent call to
6075 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
6077 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
6081 /* Group match words into recipes using preferred recipe grouping
6084 status = ice_create_recipe_group(hw, rm, lkup_exts);
6088 /* There is only profile for UDP tunnels. So, it is necessary to use a
6089 * metadata ID flag to differentiate different tunnel types. A separate
6090 * recipe needs to be used for the metadata.
6092 if ((rinfo->tun_type == ICE_SW_TUN_VXLAN_GPE ||
6093 rinfo->tun_type == ICE_SW_TUN_GENEVE ||
6094 rinfo->tun_type == ICE_SW_TUN_VXLAN) && rm->n_grp_count > 1)
6097 /* set the recipe priority if specified */
6098 rm->priority = (u8)rinfo->priority;
6100 /* Find offsets from the field vector. Pick the first one for all the
6103 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
6107 /* An empty FV list means to use all the profiles returned in the
6110 if (LIST_EMPTY(&rm->fv_list)) {
/* Materialize one fv_list entry per profile bit set in fv_bitmap */
6113 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++)
6114 if (ice_is_bit_set(fv_bitmap, j)) {
6115 struct ice_sw_fv_list_entry *fvl;
6117 fvl = (struct ice_sw_fv_list_entry *)
6118 ice_malloc(hw, sizeof(*fvl));
6122 fvl->profile_id = j;
6123 LIST_ADD(&fvl->list_entry, &rm->fv_list);
6127 /* get bitmap of all profiles the recipe will be associated with */
6128 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
6129 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
6131 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
6132 ice_set_bit((u16)fvit->profile_id, profiles);
6135 /* Create any special protocol/offset pairs, such as looking at tunnel
6136 * bits by extracting metadata
6138 status = ice_add_special_words(rinfo, lkup_exts);
6140 goto err_free_lkup_exts;
6142 /* Look for a recipe which matches our requested fv / mask list */
6143 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
6144 if (*rid < ICE_MAX_NUM_RECIPES)
6145 /* Success if found a recipe that match the existing criteria */
6148 rm->tun_type = rinfo->tun_type;
6149 /* Recipe we need does not exist, add a recipe */
6150 status = ice_add_sw_recipe(hw, rm, match_tun, profiles);
6154 /* Associate all the recipes created with all the profiles in the
6155 * common field vector.
6157 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
6159 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Read-modify-write: merge the new recipe bits into the profile's
 * existing recipe association under the change lock.
 */
6162 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
6163 (u8 *)r_bitmap, NULL);
6167 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
6168 ICE_MAX_NUM_RECIPES);
6169 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
6173 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
6176 ice_release_change_lock(hw);
6181 /* Update profile to recipe bitmap array */
6182 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
6183 ICE_MAX_NUM_RECIPES);
6185 /* Update recipe to profile bitmap array */
6186 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
6187 if (ice_is_bit_set(r_bitmap, j))
6188 ice_set_bit((u16)fvit->profile_id,
6189 recipe_to_profile[j]);
/* Report the root recipe ID and cache the lookup extensions for it */
6192 *rid = rm->root_rid;
6193 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
6194 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
/* Cleanup: drain the recipe-group and field-vector lists built above */
6196 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
6197 ice_recp_grp_entry, l_entry) {
6198 LIST_DEL(&r_entry->l_entry);
6199 ice_free(hw, r_entry);
6202 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
6204 LIST_DEL(&fvit->list_entry);
6209 ice_free(hw, rm->root_buf);
6214 ice_free(hw, lkup_exts);
/* NOTE(review): elided listing — flag assignments (udp = true; etc.),
 * returns after each packet selection, and else/closing braces are
 * missing from this excerpt.
 */
6220 * ice_find_dummy_packet - find dummy packet by tunnel type
6222 * @lkups: lookup elements or match criteria for the advanced recipe, one
6223 * structure per protocol header
6224 * @lkups_cnt: number of protocols
6225 * @tun_type: tunnel type from the match criteria
6226 * @pkt: dummy packet to fill according to filter match criteria
6227 * @pkt_len: packet length of dummy packet
6228 * @offsets: pointer to receive the pointer to the offsets for the packet
6231 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6232 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
6234 const struct ice_dummy_pkt_offsets **offsets)
6236 bool tcp = false, udp = false, ipv6 = false, vlan = false;
/* First pass: scan the lookup elements to learn which L3/L4/encap
 * headers the caller intends to match; these flags pick the template.
 */
6240 for (i = 0; i < lkups_cnt; i++) {
6241 if (lkups[i].type == ICE_UDP_ILOS)
6243 else if (lkups[i].type == ICE_TCP_IL)
6245 else if (lkups[i].type == ICE_IPV6_OFOS)
6247 else if (lkups[i].type == ICE_VLAN_OFOS)
/* NVGRE is inferred from an outer IPv4 header whose protocol field is
 * matched (value and mask) against the GRE protocol number 0x2F.
 */
6249 else if (lkups[i].type == ICE_IPV4_OFOS &&
6250 lkups[i].h_u.ipv4_hdr.protocol ==
6251 ICE_IPV4_NVGRE_PROTO_ID &&
6252 lkups[i].m_u.ipv4_hdr.protocol ==
/* PPPoE carrying IPv6 is detected via the PPP protocol ID 0x0057 */
6255 else if (lkups[i].type == ICE_PPPOE &&
6256 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
6257 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
6258 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
/* An outer ethertype of 0x86DD also implies IPv6 */
6261 else if (lkups[i].type == ICE_ETYPE_OL &&
6262 lkups[i].h_u.ethertype.ethtype_id ==
6263 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
6264 lkups[i].m_u.ethertype.ethtype_id ==
/* Second pass: fixed-priority selection — explicit tunnel types first
 * (ESP/AH/NAT-T/GTP/PPPoE), then GRE/UDP tunnels, then plain L3/L4
 * combinations with optional VLAN.
 */
6269 if (tun_type == ICE_SW_TUN_IPV4_ESP) {
6270 *pkt = dummy_ipv4_esp_pkt;
6271 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
6272 *offsets = dummy_ipv4_esp_packet_offsets;
6276 if (tun_type == ICE_SW_TUN_IPV6_ESP) {
6277 *pkt = dummy_ipv6_esp_pkt;
6278 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
6279 *offsets = dummy_ipv6_esp_packet_offsets;
6283 if (tun_type == ICE_SW_TUN_IPV4_AH) {
6284 *pkt = dummy_ipv4_ah_pkt;
6285 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
6286 *offsets = dummy_ipv4_ah_packet_offsets;
6290 if (tun_type == ICE_SW_TUN_IPV6_AH) {
6291 *pkt = dummy_ipv6_ah_pkt;
6292 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
6293 *offsets = dummy_ipv6_ah_packet_offsets;
6297 if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
6298 *pkt = dummy_ipv4_nat_pkt;
6299 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
6300 *offsets = dummy_ipv4_nat_packet_offsets;
6304 if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
6305 *pkt = dummy_ipv6_nat_pkt;
6306 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
6307 *offsets = dummy_ipv6_nat_packet_offsets;
6311 if (tun_type == ICE_SW_TUN_GTP) {
6312 *pkt = dummy_udp_gtp_packet;
6313 *pkt_len = sizeof(dummy_udp_gtp_packet);
6314 *offsets = dummy_udp_gtp_packet_offsets;
6317 if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
6318 *pkt = dummy_pppoe_ipv6_packet;
6319 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
6320 *offsets = dummy_pppoe_packet_offsets;
6322 } else if (tun_type == ICE_SW_TUN_PPPOE) {
6323 *pkt = dummy_pppoe_ipv4_packet;
6324 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
6325 *offsets = dummy_pppoe_packet_offsets;
6329 if (tun_type == ICE_ALL_TUNNELS) {
6330 *pkt = dummy_gre_udp_packet;
6331 *pkt_len = sizeof(dummy_gre_udp_packet);
6332 *offsets = dummy_gre_udp_packet_offsets;
/* GRE: inner TCP template when TCP was requested, UDP otherwise */
6336 if (tun_type == ICE_SW_TUN_NVGRE || gre) {
6338 *pkt = dummy_gre_tcp_packet;
6339 *pkt_len = sizeof(dummy_gre_tcp_packet);
6340 *offsets = dummy_gre_tcp_packet_offsets;
6344 *pkt = dummy_gre_udp_packet;
6345 *pkt_len = sizeof(dummy_gre_udp_packet);
6346 *offsets = dummy_gre_udp_packet_offsets;
6350 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
6351 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP) {
6353 *pkt = dummy_udp_tun_tcp_packet;
6354 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
6355 *offsets = dummy_udp_tun_tcp_packet_offsets;
6359 *pkt = dummy_udp_tun_udp_packet;
6360 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
6361 *offsets = dummy_udp_tun_udp_packet_offsets;
6367 *pkt = dummy_vlan_udp_packet;
6368 *pkt_len = sizeof(dummy_vlan_udp_packet);
6369 *offsets = dummy_vlan_udp_packet_offsets;
6372 *pkt = dummy_udp_packet;
6373 *pkt_len = sizeof(dummy_udp_packet);
6374 *offsets = dummy_udp_packet_offsets;
6376 } else if (udp && ipv6) {
6378 *pkt = dummy_vlan_udp_ipv6_packet;
6379 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
6380 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
6383 *pkt = dummy_udp_ipv6_packet;
6384 *pkt_len = sizeof(dummy_udp_ipv6_packet);
6385 *offsets = dummy_udp_ipv6_packet_offsets;
/* NOTE(review): `(tcp && ipv6) || ipv6` is logically identical to
 * plain `ipv6` — the first operand is redundant and could be dropped
 * without behavior change.
 */
6387 } else if ((tcp && ipv6) || ipv6) {
6389 *pkt = dummy_vlan_tcp_ipv6_packet;
6390 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
6391 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
6394 *pkt = dummy_tcp_ipv6_packet;
6395 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
6396 *offsets = dummy_tcp_ipv6_packet_offsets;
/* Final fallback: IPv4 TCP, with or without VLAN */
6401 *pkt = dummy_vlan_tcp_packet;
6402 *pkt_len = sizeof(dummy_vlan_tcp_packet);
6403 *offsets = dummy_vlan_tcp_packet_offsets;
6405 *pkt = dummy_tcp_packet;
6406 *pkt_len = sizeof(dummy_tcp_packet);
6407 *offsets = dummy_tcp_packet_offsets;
/* NOTE(review): elided listing — the `case ICE_xxx:` labels preceding
 * each `len = sizeof(...)` line, the `found` flag handling, and the
 * success return are missing from this excerpt.
 */
6412 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
6414 * @lkups: lookup elements or match criteria for the advanced recipe, one
6415 * structure per protocol header
6416 * @lkups_cnt: number of protocols
6417 * @s_rule: stores rule information from the match criteria
6418 * @dummy_pkt: dummy packet to fill according to filter match criteria
6419 * @pkt_len: packet length of dummy packet
6420 * @offsets: offset info for the dummy packet
6422 static enum ice_status
6423 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6424 struct ice_aqc_sw_rules_elem *s_rule,
6425 const u8 *dummy_pkt, u16 pkt_len,
6426 const struct ice_dummy_pkt_offsets *offsets)
6431 /* Start with a packet with a pre-defined/dummy content. Then, fill
6432 * in the header values to be looked up or matched.
6434 pkt = s_rule->pdata.lkup_tx_rx.hdr;
6436 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
6438 for (i = 0; i < lkups_cnt; i++) {
6439 enum ice_protocol_type type;
6440 u16 offset = 0, len = 0, j;
6443 /* find the start of this layer; it should be found since this
6444 * was already checked when search for the dummy packet
6446 type = lkups[i].type;
6447 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
6448 if (type == offsets[j].type) {
6449 offset = offsets[j].offset;
6454 /* this should never happen in a correct calling sequence */
6456 return ICE_ERR_PARAM;
/* Header length in bytes for this protocol layer (case labels elided
 * in this excerpt); an unknown type is rejected as ICE_ERR_PARAM.
 */
6458 switch (lkups[i].type) {
6461 len = sizeof(struct ice_ether_hdr);
6464 len = sizeof(struct ice_ethtype_hdr);
6467 len = sizeof(struct ice_vlan_hdr);
6471 len = sizeof(struct ice_ipv4_hdr);
6475 len = sizeof(struct ice_ipv6_hdr);
6480 len = sizeof(struct ice_l4_hdr);
6483 len = sizeof(struct ice_sctp_hdr);
6486 len = sizeof(struct ice_nvgre);
6491 len = sizeof(struct ice_udp_tnl_hdr);
6495 len = sizeof(struct ice_udp_gtp_hdr);
6498 len = sizeof(struct ice_pppoe_hdr);
6501 len = sizeof(struct ice_esp_hdr);
6504 len = sizeof(struct ice_nat_t_hdr);
6507 len = sizeof(struct ice_ah_hdr);
6510 return ICE_ERR_PARAM;
6513 /* the length should be a word multiple */
6514 if (len % ICE_BYTES_PER_WORD)
6517 /* We have the offset to the header start, the length, the
6518 * caller's header values and mask. Use this information to
6519 * copy the data into the dummy packet appropriately based on
6520 * the mask. Note that we need to only write the bits as
6521 * indicated by the mask to make sure we don't improperly write
6522 * over any significant packet data.
/* Merge formula per 16-bit word: (pkt & ~mask) | (hdr & mask).
 * NOTE(review): the u16* casts over the packet buffer and lookup
 * unions rely on 2-byte alignment and type-punning — presumably safe
 * given the AQ buffer layout, but worth confirming for strict-aliasing
 * sensitive builds.
 */
6524 for (j = 0; j < len / sizeof(u16); j++)
6525 if (((u16 *)&lkups[i].m_u)[j])
6526 ((u16 *)(pkt + offset))[j] =
6527 (((u16 *)(pkt + offset))[j] &
6528 ~((u16 *)&lkups[i].m_u)[j]) |
6529 (((u16 *)&lkups[i].h_u)[j] &
6530 ((u16 *)&lkups[i].m_u)[j]);
/* Record the final dummy-packet length in the switch rule (LE16) */
6533 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
/* NOTE(review): elided listing — the `switch (tun_type)` opener,
 * error returns when no tunnel port is open, breaks, and the final
 * success/not-found returns are missing from this excerpt.
 */
6539 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
6540 * @hw: pointer to the hardware structure
6541 * @tun_type: tunnel type
6542 * @pkt: dummy packet to fill in
6543 * @offsets: offset info for the dummy packet
6545 static enum ice_status
6546 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
6547 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
/* VXLAN-family tunnel types use the currently open VXLAN UDP port;
 * GENEVE uses the GENEVE port. Other types need no port patching.
 */
6552 case ICE_SW_TUN_AND_NON_TUN:
6553 case ICE_SW_TUN_VXLAN_GPE:
6554 case ICE_SW_TUN_VXLAN:
6555 case ICE_SW_TUN_UDP:
6556 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
6560 case ICE_SW_TUN_GENEVE:
6561 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
6566 /* Nothing needs to be done for this tunnel type */
6570 /* Find the outer UDP protocol header and insert the port number */
6571 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
6572 if (offsets[i].type == ICE_UDP_OF) {
6573 struct ice_l4_hdr *hdr;
6576 offset = offsets[i].offset;
/* Overlay an L4 header view on the packet bytes and write the
 * destination port in network byte order.
 */
6577 hdr = (struct ice_l4_hdr *)&pkt[offset];
6578 hdr->dst_port = CPU_TO_BE16(open_port);
/* NOTE(review): elided listing — `continue`, the memcmp size argument,
 * the final `return list_itr;` / `return NULL;`, and closing braces are
 * missing from this excerpt.
 */
6588 * ice_find_adv_rule_entry - Search a rule entry
6589 * @hw: pointer to the hardware structure
6590 * @lkups: lookup elements or match criteria for the advanced recipe, one
6591 * structure per protocol header
6592 * @lkups_cnt: number of protocols
6593 * @recp_id: recipe ID for which we are finding the rule
6594 * @rinfo: other information regarding the rule e.g. priority and action info
6596 * Helper function to search for a given advance rule entry
6597 * Returns pointer to entry storing the rule if found
6599 static struct ice_adv_fltr_mgmt_list_entry *
6600 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6601 u16 lkups_cnt, u16 recp_id,
6602 struct ice_adv_rule_info *rinfo)
6604 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6605 struct ice_switch_info *sw = hw->switch_info;
/* Linear scan of the recipe's filter-rule list: a match needs the same
 * lookup count, byte-identical lookup elements (order-sensitive), and
 * matching action flag / tunnel type.
 */
6608 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
6609 ice_adv_fltr_mgmt_list_entry, list_entry) {
6610 bool lkups_matched = true;
6612 if (lkups_cnt != list_itr->lkups_cnt)
6614 for (i = 0; i < list_itr->lkups_cnt; i++)
6615 if (memcmp(&list_itr->lkups[i], &lkups[i],
6617 lkups_matched = false;
6620 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
6621 rinfo->tun_type == list_itr->rule_info.tun_type &&
/* NOTE(review): elided listing — several status checks, the
 * ICE_ERR_DOES_NOT_EXIST / ICE_ERR_ALREADY_EXISTS returns in the else
 * branch, trailing arguments of some calls, and closing braces are
 * missing from this excerpt.
 */
6629 * ice_adv_add_update_vsi_list
6630 * @hw: pointer to the hardware structure
6631 * @m_entry: pointer to current adv filter management list entry
6632 * @cur_fltr: filter information from the book keeping entry
6633 * @new_fltr: filter information with the new VSI to be added
6635 * Call AQ command to add or update previously created VSI list with new VSI.
6637 * Helper function to do book keeping associated with adding filter information
6638 * The algorithm to do the booking keeping is described below :
6639 * When a VSI needs to subscribe to a given advanced filter
6640 * if only one VSI has been added till now
6641 * Allocate a new VSI list and add two VSIs
6642 * to this list using switch rule command
6643 * Update the previously created switch rule with the
6644 * newly created VSI list ID
6645 * if a VSI list was previously created
6646 * Add the new VSI to the previously created VSI list set
6647 * using the update switch rule command
6649 static enum ice_status
6650 ice_adv_add_update_vsi_list(struct ice_hw *hw,
6651 struct ice_adv_fltr_mgmt_list_entry *m_entry,
6652 struct ice_adv_rule_info *cur_fltr,
6653 struct ice_adv_rule_info *new_fltr)
6655 enum ice_status status;
6656 u16 vsi_list_id = 0;
/* Queue/queue-group/drop actions cannot be folded into a VSI list */
6658 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6659 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6660 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
6661 return ICE_ERR_NOT_IMPL;
/* Mixing a queue-directed new action with an existing VSI-directed
 * rule is likewise unsupported.
 */
6663 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6664 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
6665 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6666 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
6667 return ICE_ERR_NOT_IMPL;
6669 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
6670 /* Only one entry existed in the mapping and it was not already
6671 * a part of a VSI list. So, create a VSI list with the old and
6674 struct ice_fltr_info tmp_fltr;
6675 u16 vsi_handle_arr[2];
6677 /* A rule already exists with the new VSI being added */
6678 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
6679 new_fltr->sw_act.fwd_id.hw_vsi_id)
6680 return ICE_ERR_ALREADY_EXISTS;
/* Build a two-entry VSI list from the existing and new handles */
6682 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
6683 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
6684 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
6690 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
6691 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
6692 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
6693 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
6694 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
6696 /* Update the previous switch rule of "forward to VSI" to
6699 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Book-keeping: record the list ID/action and create the handle map */
6703 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
6704 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
6705 m_entry->vsi_list_info =
6706 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
6709 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
6711 if (!m_entry->vsi_list_info)
6714 /* A rule already exists with the new VSI being added */
6715 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
6718 /* Update the previously created VSI list set with
6719 * the new VSI ID passed in
6721 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
6723 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
6725 ice_aqc_opc_update_sw_rules,
6727 /* update VSI list mapping info with new VSI ID */
6729 ice_set_bit(vsi_handle,
6730 m_entry->vsi_list_info->vsi_map);
/* Presumably reached only on success — one more VSI subscribes */
6733 m_entry->vsi_count++;
6738 * ice_add_adv_rule - helper function to create an advanced switch rule
6739 * @hw: pointer to the hardware structure
6740 * @lkups: information on the words that needs to be looked up. All words
6741 * together makes one recipe
6742 * @lkups_cnt: num of entries in the lkups array
6743 * @rinfo: other information related to the rule that needs to be programmed
6744 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
6745 * ignored is case of error.
6747 * This function can program only 1 rule at a time. The lkups is used to
6748 * describe the all the words that forms the "lookup" portion of the recipe.
6749 * These words can span multiple protocols. Callers to this function need to
6750 * pass in a list of protocol headers with lookup information along and mask
6751 * that determines which words are valid from the given protocol header.
6752 * rinfo describes other information related to this rule such as forwarding
6753 * IDs, priority of this rule, etc.
6756 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6757 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
6758 struct ice_rule_query_data *added_entry)
6760 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
6761 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
6762 const struct ice_dummy_pkt_offsets *pkt_offsets;
6763 struct ice_aqc_sw_rules_elem *s_rule = NULL;
6764 struct LIST_HEAD_TYPE *rule_head;
6765 struct ice_switch_info *sw;
6766 enum ice_status status;
6767 const u8 *pkt = NULL;
6773 /* Initialize profile to result index bitmap */
6774 if (!hw->switch_info->prof_res_bm_init) {
6775 hw->switch_info->prof_res_bm_init = 1;
6776 ice_init_prof_result_bm(hw);
6779 prof_rule = ice_is_prof_rule(rinfo->tun_type);
6780 if (!prof_rule && !lkups_cnt)
6781 return ICE_ERR_PARAM;
6783 /* get # of words we need to match */
6785 for (i = 0; i < lkups_cnt; i++) {
6788 ptr = (u16 *)&lkups[i].m_u;
6789 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
6795 if (word_cnt > ICE_MAX_CHAIN_WORDS)
6796 return ICE_ERR_PARAM;
6798 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
6799 return ICE_ERR_PARAM;
6802 /* make sure that we can locate a dummy packet */
6803 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
6806 status = ICE_ERR_PARAM;
6807 goto err_ice_add_adv_rule;
6810 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6811 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
6812 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6813 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
6816 vsi_handle = rinfo->sw_act.vsi_handle;
6817 if (!ice_is_vsi_valid(hw, vsi_handle))
6818 return ICE_ERR_PARAM;
6820 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6821 rinfo->sw_act.fwd_id.hw_vsi_id =
6822 ice_get_hw_vsi_num(hw, vsi_handle);
6823 if (rinfo->sw_act.flag & ICE_FLTR_TX)
6824 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
6826 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
6829 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6831 /* we have to add VSI to VSI_LIST and increment vsi_count.
6832 * Also Update VSI list so that we can change forwarding rule
6833 * if the rule already exists, we will check if it exists with
6834 * same vsi_id, if not then add it to the VSI list if it already
6835 * exists if not then create a VSI list and add the existing VSI
6836 * ID and the new VSI ID to the list
6837 * We will add that VSI to the list
6839 status = ice_adv_add_update_vsi_list(hw, m_entry,
6840 &m_entry->rule_info,
6843 added_entry->rid = rid;
6844 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
6845 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
6849 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
6850 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
6852 return ICE_ERR_NO_MEMORY;
6853 act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
6854 switch (rinfo->sw_act.fltr_act) {
6855 case ICE_FWD_TO_VSI:
6856 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
6857 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
6858 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
6861 act |= ICE_SINGLE_ACT_TO_Q;
6862 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6863 ICE_SINGLE_ACT_Q_INDEX_M;
6865 case ICE_FWD_TO_QGRP:
6866 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
6867 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
6868 act |= ICE_SINGLE_ACT_TO_Q;
6869 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6870 ICE_SINGLE_ACT_Q_INDEX_M;
6871 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
6872 ICE_SINGLE_ACT_Q_REGION_M;
6874 case ICE_DROP_PACKET:
6875 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
6876 ICE_SINGLE_ACT_VALID_BIT;
6879 status = ICE_ERR_CFG;
6880 goto err_ice_add_adv_rule;
6883 /* set the rule LOOKUP type based on caller specified 'RX'
6884 * instead of hardcoding it to be either LOOKUP_TX/RX
6886 * for 'RX' set the source to be the port number
6887 * for 'TX' set the source to be the source HW VSI number (determined
6891 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
6892 s_rule->pdata.lkup_tx_rx.src =
6893 CPU_TO_LE16(hw->port_info->lport);
6895 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
6896 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
6899 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
6900 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
6902 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
6903 pkt_len, pkt_offsets);
6905 goto err_ice_add_adv_rule;
6907 if (rinfo->tun_type != ICE_NON_TUN &&
6908 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
6909 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
6910 s_rule->pdata.lkup_tx_rx.hdr,
6913 goto err_ice_add_adv_rule;
6916 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6917 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
6920 goto err_ice_add_adv_rule;
6921 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
6922 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
6924 status = ICE_ERR_NO_MEMORY;
6925 goto err_ice_add_adv_rule;
6928 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
6929 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
6930 ICE_NONDMA_TO_NONDMA);
6931 if (!adv_fltr->lkups && !prof_rule) {
6932 status = ICE_ERR_NO_MEMORY;
6933 goto err_ice_add_adv_rule;
6936 adv_fltr->lkups_cnt = lkups_cnt;
6937 adv_fltr->rule_info = *rinfo;
6938 adv_fltr->rule_info.fltr_rule_id =
6939 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6940 sw = hw->switch_info;
6941 sw->recp_list[rid].adv_rule = true;
6942 rule_head = &sw->recp_list[rid].filt_rules;
6944 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6945 adv_fltr->vsi_count = 1;
6947 /* Add rule entry to book keeping list */
6948 LIST_ADD(&adv_fltr->list_entry, rule_head);
6950 added_entry->rid = rid;
6951 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
6952 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
6954 err_ice_add_adv_rule:
6955 if (status && adv_fltr) {
6956 ice_free(hw, adv_fltr->lkups);
6957 ice_free(hw, adv_fltr);
6960 ice_free(hw, s_rule);
/* NOTE(review): sparse excerpt — some source lines (braces, a few locals
 * such as vsi_list_id/rem_vsi_handle declarations) are elided here.
 */
6966 * ice_adv_rem_update_vsi_list
6967 * @hw: pointer to the hardware structure
6968 * @vsi_handle: VSI handle of the VSI to remove
6969 * @fm_list: filter management entry for which the VSI list management needs to
6972 static enum ice_status
6973 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
6974 struct ice_adv_fltr_mgmt_list_entry *fm_list)
6976 struct ice_vsi_list_map_info *vsi_list_info;
6977 enum ice_sw_lkup_type lkup_type;
6978 enum ice_status status;
/* Only rules that currently forward to a VSI list can be trimmed. */
6981 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
6982 fm_list->vsi_count == 0)
6983 return ICE_ERR_PARAM;
6985 /* A rule with the VSI being removed does not exist */
6986 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
6987 return ICE_ERR_DOES_NOT_EXIST;
/* Remove this VSI from the HW VSI list, then from the SW bitmap. */
6989 lkup_type = ICE_SW_LKUP_LAST;
6990 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
6991 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
6992 ice_aqc_opc_update_sw_rules,
6997 fm_list->vsi_count--;
6998 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
6999 vsi_list_info = fm_list->vsi_list_info;
/* When exactly one VSI remains, collapse the list back into a plain
 * FWD_TO_VSI rule and delete the now-unneeded VSI list.
 */
7000 if (fm_list->vsi_count == 1) {
7001 struct ice_fltr_info tmp_fltr;
7004 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
7006 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
7007 return ICE_ERR_OUT_OF_RANGE;
7009 /* Make sure VSI list is empty before removing it below */
7010 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
7012 ice_aqc_opc_update_sw_rules,
7017 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
7018 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
7019 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
7020 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
7021 tmp_fltr.fwd_id.hw_vsi_id =
7022 ice_get_hw_vsi_num(hw, rem_vsi_handle);
7023 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
7024 ice_get_hw_vsi_num(hw, rem_vsi_handle);
7026 /* Update the previous switch rule of "MAC forward to VSI list" to
7027 * "MAC fwd to VSI" (the code above programs ICE_FWD_TO_VSI)
7029 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
7031 ice_debug(hw, ICE_DBG_SW,
7032 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
7033 tmp_fltr.fwd_id.hw_vsi_id, status);
7037 /* Remove the VSI list since it is no longer used */
7038 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
7040 ice_debug(hw, ICE_DBG_SW,
7041 "Failed to remove VSI list %d, error %d\n",
7042 vsi_list_id, status);
/* Drop the SW map entry for the deleted list. */
7046 LIST_DEL(&vsi_list_info->list_entry);
7047 ice_free(hw, vsi_list_info);
7048 fm_list->vsi_list_info = NULL;
/* NOTE(review): sparse excerpt — braces, `remove_rule = true;` style lines
 * and some locals (count, rule_buf_sz) are elided between the numbered lines.
 */
7055 * ice_rem_adv_rule - removes existing advanced switch rule
7056 * @hw: pointer to the hardware structure
7057 * @lkups: information on the words that needs to be looked up. All words
7058 * together makes one recipe
7059 * @lkups_cnt: num of entries in the lkups array
7060 * @rinfo: Its the pointer to the rule information for the rule
7062 * This function can be used to remove 1 rule at a time. The lkups is
7063 * used to describe all the words that forms the "lookup" portion of the
7064 * rule. These words can span multiple protocols. Callers to this function
7065 * need to pass in a list of protocol headers with lookup information along
7066 * and mask that determines which words are valid from the given protocol
7067 * header. rinfo describes other information related to this rule such as
7068 * forwarding IDs, priority of this rule, etc.
7071 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7072 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
7074 struct ice_adv_fltr_mgmt_list_entry *list_elem;
7075 struct ice_prot_lkup_ext lkup_exts;
7076 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
7077 enum ice_status status = ICE_SUCCESS;
7078 bool remove_rule = false;
7079 u16 i, rid, vsi_handle;
/* Rebuild the protocol/offset extraction words from the lookups so the
 * matching recipe can be located.
 */
7081 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
7082 for (i = 0; i < lkups_cnt; i++) {
7085 if (lkups[i].type >= ICE_PROTOCOL_LAST)
7088 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
7093 /* Create any special protocol/offset pairs, such as looking at tunnel
7094 * bits by extracting metadata
7096 status = ice_add_special_words(rinfo, &lkup_exts);
7100 rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
7101 /* If did not find a recipe that match the existing criteria */
7102 if (rid == ICE_MAX_NUM_RECIPES)
7103 return ICE_ERR_PARAM;
7105 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
7106 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
7107 /* the rule is already removed */
/* Under the rule lock: either the whole rule goes away, or only this
 * VSI is peeled off the rule's VSI list (vsi_count > 1 case).
 */
7110 ice_acquire_lock(rule_lock);
7111 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
7113 } else if (list_elem->vsi_count > 1) {
7114 list_elem->vsi_list_info->ref_cnt--;
7115 remove_rule = false;
7116 vsi_handle = rinfo->sw_act.vsi_handle;
7117 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
/* else-branch (elided lines): last VSI on the list — trim it and then
 * decide below whether the rule itself must be removed.
 */
7119 vsi_handle = rinfo->sw_act.vsi_handle;
7120 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
7122 ice_release_lock(rule_lock);
7125 if (list_elem->vsi_count == 0)
7128 ice_release_lock(rule_lock);
/* remove_rule path: tell HW to delete the rule, then drop the SW entry. */
7130 struct ice_aqc_sw_rules_elem *s_rule;
7133 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
7135 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
7138 return ICE_ERR_NO_MEMORY;
7139 s_rule->pdata.lkup_tx_rx.act = 0;
7140 s_rule->pdata.lkup_tx_rx.index =
7141 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
7142 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
7143 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
7145 ice_aqc_opc_remove_sw_rules, NULL);
/* Only unlink and free the book-keeping entry if HW removal succeeded. */
7146 if (status == ICE_SUCCESS) {
7147 ice_acquire_lock(rule_lock);
7148 LIST_DEL(&list_elem->list_entry);
7149 ice_free(hw, list_elem->lkups);
7150 ice_free(hw, list_elem);
7151 ice_release_lock(rule_lock);
7153 ice_free(hw, s_rule);
/* NOTE(review): sparse excerpt — the return-type line and closing braces
 * of this function are elided between the numbered lines.
 */
7159 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
7160 * @hw: pointer to the hardware structure
7161 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
7163 * This function is used to remove 1 rule at a time. The removal is based on
7164 * the remove_entry parameter. This function will remove rule for a given
7165 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
7168 ice_rem_adv_rule_by_id(struct ice_hw *hw,
7169 struct ice_rule_query_data *remove_entry)
7171 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7172 struct LIST_HEAD_TYPE *list_head;
7173 struct ice_adv_rule_info rinfo;
7174 struct ice_switch_info *sw;
/* The recipe must exist before its filter list can be searched. */
7176 sw = hw->switch_info;
7177 if (!sw->recp_list[remove_entry->rid].recp_created)
7178 return ICE_ERR_PARAM;
7179 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
/* Scan the recipe's rules for the matching rule ID and delegate the
 * actual removal to ice_rem_adv_rule() with the caller's VSI handle.
 */
7180 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
7182 if (list_itr->rule_info.fltr_rule_id ==
7183 remove_entry->rule_id) {
7184 rinfo = list_itr->rule_info;
7185 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
7186 return ice_rem_adv_rule(hw, list_itr->lkups,
7187 list_itr->lkups_cnt, &rinfo);
/* No rule with that ID under this recipe. */
7190 return ICE_ERR_PARAM;
/* NOTE(review): sparse excerpt — return type, `continue` statements and
 * closing braces are elided between the numbered lines.
 */
7194 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
7196 * @hw: pointer to the hardware structure
7197 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
7199 * This function is used to remove all the rules for a given VSI and as soon
7200 * as removing a rule fails, it will return immediately with the error code,
7201 * else it will return ICE_SUCCESS
7204 ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
7206 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7207 struct ice_vsi_list_map_info *map_info;
7208 struct LIST_HEAD_TYPE *list_head;
7209 struct ice_adv_rule_info rinfo;
7210 struct ice_switch_info *sw;
7211 enum ice_status status;
7212 u16 vsi_list_id = 0;
/* Walk every created recipe that holds advanced rules and remove each
 * rule that references vsi_handle.
 */
7215 sw = hw->switch_info;
7216 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
7217 if (!sw->recp_list[rid].recp_created)
7219 if (!sw->recp_list[rid].adv_rule)
7221 list_head = &sw->recp_list[rid].filt_rules;
7223 LIST_FOR_EACH_ENTRY(list_itr, list_head,
7224 ice_adv_fltr_mgmt_list_entry, list_entry) {
7225 map_info = ice_find_vsi_list_entry(&sw->recp_list[rid],
7230 rinfo = list_itr->rule_info;
7231 rinfo.sw_act.vsi_handle = vsi_handle;
7232 status = ice_rem_adv_rule(hw, list_itr->lkups,
7233 list_itr->lkups_cnt, &rinfo);
/* NOTE(review): sparse excerpt — `return` statements, an else-branch and
 * some locals (vsi_handle) are elided between the numbered lines.
 */
7243 * ice_replay_fltr - Replay all the filters stored by a specific list head
7244 * @hw: pointer to the hardware structure
7245 * @list_head: list for which filters needs to be replayed
7246 * @recp_id: Recipe ID for which rules need to be replayed
7248 static enum ice_status
7249 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
7251 struct ice_fltr_mgmt_list_entry *itr;
7252 enum ice_status status = ICE_SUCCESS;
7253 struct ice_sw_recipe *recp_list;
7254 u8 lport = hw->port_info->lport;
7255 struct LIST_HEAD_TYPE l_head;
7257 if (LIST_EMPTY(list_head))
7260 recp_list = &hw->switch_info->recp_list[recp_id];
7261 /* Move entries from the given list_head to a temporary l_head so that
7262 * they can be replayed. Otherwise when trying to re-add the same
7263 * filter, the function will return already exists
7265 LIST_REPLACE_INIT(list_head, &l_head);
7267 /* Mark the given list_head empty by reinitializing it so filters
7268 * could be added again by *handler
7270 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
7272 struct ice_fltr_list_entry f_entry;
7274 f_entry.fltr_info = itr->fltr_info;
/* Single-VSI, non-VLAN filters replay directly as one rule. */
7275 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
7276 status = ice_add_rule_internal(hw, recp_list, lport,
7278 if (status != ICE_SUCCESS)
/* Multi-VSI (VSI-list) filters are re-added one VSI at a time,
 * popping VSIs from the saved bitmap.
 */
7283 /* Add a filter per VSI separately */
7288 ice_find_first_bit(itr->vsi_list_info->vsi_map,
7290 if (!ice_is_vsi_valid(hw, vsi_handle))
7293 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
7294 f_entry.fltr_info.vsi_handle = vsi_handle;
7295 f_entry.fltr_info.fwd_id.hw_vsi_id =
7296 ice_get_hw_vsi_num(hw, vsi_handle);
7297 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
7298 if (recp_id == ICE_SW_LKUP_VLAN)
7299 status = ice_add_vlan_internal(hw, recp_list,
7302 status = ice_add_rule_internal(hw, recp_list,
7305 if (status != ICE_SUCCESS)
7310 /* Clear the filter management list */
7311 ice_rem_sw_rule_info(hw, &l_head);
/* NOTE(review): sparse excerpt — loop-variable declaration, braces and the
 * final return are elided between the numbered lines.
 */
7316 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
7317 * @hw: pointer to the hardware structure
7319 * NOTE: This function does not clean up partially added filters on error.
7320 * It is up to caller of the function to issue a reset or fail early.
7322 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
7324 struct ice_switch_info *sw = hw->switch_info;
7325 enum ice_status status = ICE_SUCCESS;
/* Replay every recipe's filter list; stop on the first failure. */
7328 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7329 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
7331 status = ice_replay_fltr(hw, i, head);
7332 if (status != ICE_SUCCESS)
/* NOTE(review): sparse excerpt — some locals (hw_vsi_id), `continue`
 * statements and braces are elided between the numbered lines.
 */
7339 * ice_replay_vsi_fltr - Replay filters for requested VSI
7340 * @hw: pointer to the hardware structure
7341 * @vsi_handle: driver VSI handle
7342 * @recp_id: Recipe ID for which rules need to be replayed
7343 * @list_head: list for which filters need to be replayed
7345 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
7346 * It is required to pass valid VSI handle.
7348 static enum ice_status
7349 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
7350 struct LIST_HEAD_TYPE *list_head)
7352 struct ice_fltr_mgmt_list_entry *itr;
7353 enum ice_status status = ICE_SUCCESS;
7354 struct ice_sw_recipe *recp_list;
7357 if (LIST_EMPTY(list_head))
7359 recp_list = &hw->switch_info->recp_list[recp_id];
7360 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
7362 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
7364 struct ice_fltr_list_entry f_entry;
7366 f_entry.fltr_info = itr->fltr_info;
/* Case 1: single-VSI, non-VLAN filter that targets this VSI — replay
 * it as-is (after fixing up a VSI-based source).
 */
7367 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
7368 itr->fltr_info.vsi_handle == vsi_handle) {
7369 /* update the src in case it is VSI num */
7370 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
7371 f_entry.fltr_info.src = hw_vsi_id;
7372 status = ice_add_rule_internal(hw, recp_list,
7373 hw->port_info->lport,
7375 if (status != ICE_SUCCESS)
/* Case 2: VSI-list filter — replay only if this VSI is a member. */
7379 if (!itr->vsi_list_info ||
7380 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
7382 /* Clearing it so that the logic can add it back */
7383 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
7384 f_entry.fltr_info.vsi_handle = vsi_handle;
7385 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
7386 /* update the src in case it is VSI num */
7387 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
7388 f_entry.fltr_info.src = hw_vsi_id;
7389 if (recp_id == ICE_SW_LKUP_VLAN)
7390 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
7392 status = ice_add_rule_internal(hw, recp_list,
7393 hw->port_info->lport,
7395 if (status != ICE_SUCCESS)
/* NOTE(review): sparse excerpt — `continue`, the status check after
 * ice_add_adv_rule() and the final return are elided.
 */
7403 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
7404 * @hw: pointer to the hardware structure
7405 * @vsi_handle: driver VSI handle
7406 * @list_head: list for which filters need to be replayed
7408 * Replay the advanced rule for the given VSI.
7410 static enum ice_status
7411 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
7412 struct LIST_HEAD_TYPE *list_head)
7414 struct ice_rule_query_data added_entry = { 0 };
7415 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
7416 enum ice_status status = ICE_SUCCESS;
7418 if (LIST_EMPTY(list_head))
/* Re-add every saved advanced rule whose action targets vsi_handle. */
7420 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
7422 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
7423 u16 lk_cnt = adv_fltr->lkups_cnt;
7425 if (vsi_handle != rinfo->sw_act.vsi_handle)
7427 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
/* NOTE(review): sparse excerpt — loop-variable declaration, `else`,
 * braces and the final return are elided between the numbered lines.
 */
7436 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
7437 * @hw: pointer to the hardware structure
7438 * @vsi_handle: driver VSI handle
7440 * Replays filters for requested VSI via vsi_handle.
7442 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
7444 struct ice_switch_info *sw = hw->switch_info;
7445 enum ice_status status;
7448 /* Update the recipes that were created */
7449 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7450 struct LIST_HEAD_TYPE *head;
/* Each recipe replays either its basic filters or its advanced rules,
 * chosen by the recipe's adv_rule flag; stop on the first failure.
 */
7452 head = &sw->recp_list[i].filt_replay_rules;
7453 if (!sw->recp_list[i].adv_rule)
7454 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
7456 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
7457 if (status != ICE_SUCCESS)
7465 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
7466 * @hw: pointer to the HW struct
7468 * Deletes the filter replay rules.
7470 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
7472 struct ice_switch_info *sw = hw->switch_info;
7478 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7479 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
7480 struct LIST_HEAD_TYPE *l_head;
7482 l_head = &sw->recp_list[i].filt_replay_rules;
7483 if (!sw->recp_list[i].adv_rule)
7484 ice_rem_sw_rule_info(hw, l_head);
7486 ice_rem_adv_rule_info(hw, l_head);