1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
/* Byte offsets into the dummy Ethernet header (dummy_eth_header below) */
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
/* VLAN ID is a 12-bit field; 0xFFF is the largest legal value */
12 #define ICE_MAX_VLAN_ID 0xFFF
/* IPv4 protocol number 0x2F (GRE) — used in the NVGRE dummy packets below */
13 #define ICE_IPV4_NVGRE_PROTO_ID 0x002F
/* PPP protocol field value for IPv6 — see the PPPoE IPv6 dummy packet */
14 #define ICE_PPP_IPV6_PROTO_ID 0x0057
/* Ethertype for IPv6 (0x86DD) — used at ICE_ETYPE_OL in IPv6 dummy packets */
15 #define ICE_IPV6_ETHER_ID 0x86DD
17 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
18 * struct to configure any switch filter rules.
19 * {DA (6 bytes), SA(6 bytes),
20 * Ether type (2 bytes for header without VLAN tag) OR
21 * VLAN tag (4 bytes for header with VLAN tag) }
23 * Word on Hardcoded values
24 * byte 0 = 0x2: to identify it as locally administered DA MAC
25 * byte 6 = 0x2: to identify it as locally administered SA MAC
26 * byte 12 = 0x81 & byte 13 = 0x00:
27 * In case of VLAN filter first two bytes defines ether type (0x8100)
28 * and remaining two bytes are placeholder for programming a given VLAN ID
29 * In case of Ether type filter it is treated as header without VLAN tag
30 * and byte 12 and 13 is used to program a given Ether type instead
32 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
36 struct ice_dummy_pkt_offsets {
37 enum ice_protocol_type type;
38 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
41 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
44 { ICE_IPV4_OFOS, 14 },
49 { ICE_PROTOCOL_LAST, 0 },
52 static const u8 dummy_gre_tcp_packet[] = {
53 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
54 0x00, 0x00, 0x00, 0x00,
55 0x00, 0x00, 0x00, 0x00,
57 0x08, 0x00, /* ICE_ETYPE_OL 12 */
59 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
60 0x00, 0x00, 0x00, 0x00,
61 0x00, 0x2F, 0x00, 0x00,
62 0x00, 0x00, 0x00, 0x00,
63 0x00, 0x00, 0x00, 0x00,
65 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
66 0x00, 0x00, 0x00, 0x00,
68 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
69 0x00, 0x00, 0x00, 0x00,
70 0x00, 0x00, 0x00, 0x00,
73 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
74 0x00, 0x00, 0x00, 0x00,
75 0x00, 0x06, 0x00, 0x00,
76 0x00, 0x00, 0x00, 0x00,
77 0x00, 0x00, 0x00, 0x00,
79 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
80 0x00, 0x00, 0x00, 0x00,
81 0x00, 0x00, 0x00, 0x00,
82 0x50, 0x02, 0x20, 0x00,
83 0x00, 0x00, 0x00, 0x00
86 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
89 { ICE_IPV4_OFOS, 14 },
94 { ICE_PROTOCOL_LAST, 0 },
97 static const u8 dummy_gre_udp_packet[] = {
98 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
99 0x00, 0x00, 0x00, 0x00,
100 0x00, 0x00, 0x00, 0x00,
102 0x08, 0x00, /* ICE_ETYPE_OL 12 */
104 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
105 0x00, 0x00, 0x00, 0x00,
106 0x00, 0x2F, 0x00, 0x00,
107 0x00, 0x00, 0x00, 0x00,
108 0x00, 0x00, 0x00, 0x00,
110 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
111 0x00, 0x00, 0x00, 0x00,
113 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
114 0x00, 0x00, 0x00, 0x00,
115 0x00, 0x00, 0x00, 0x00,
118 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
119 0x00, 0x00, 0x00, 0x00,
120 0x00, 0x11, 0x00, 0x00,
121 0x00, 0x00, 0x00, 0x00,
122 0x00, 0x00, 0x00, 0x00,
124 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
125 0x00, 0x08, 0x00, 0x00,
128 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
130 { ICE_ETYPE_OL, 12 },
131 { ICE_IPV4_OFOS, 14 },
135 { ICE_VXLAN_GPE, 42 },
139 { ICE_PROTOCOL_LAST, 0 },
142 static const u8 dummy_udp_tun_tcp_packet[] = {
143 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
144 0x00, 0x00, 0x00, 0x00,
145 0x00, 0x00, 0x00, 0x00,
147 0x08, 0x00, /* ICE_ETYPE_OL 12 */
149 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
150 0x00, 0x01, 0x00, 0x00,
151 0x40, 0x11, 0x00, 0x00,
152 0x00, 0x00, 0x00, 0x00,
153 0x00, 0x00, 0x00, 0x00,
155 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
156 0x00, 0x46, 0x00, 0x00,
158 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
159 0x00, 0x00, 0x00, 0x00,
161 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
162 0x00, 0x00, 0x00, 0x00,
163 0x00, 0x00, 0x00, 0x00,
166 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
167 0x00, 0x01, 0x00, 0x00,
168 0x40, 0x06, 0x00, 0x00,
169 0x00, 0x00, 0x00, 0x00,
170 0x00, 0x00, 0x00, 0x00,
172 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
173 0x00, 0x00, 0x00, 0x00,
174 0x00, 0x00, 0x00, 0x00,
175 0x50, 0x02, 0x20, 0x00,
176 0x00, 0x00, 0x00, 0x00
179 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
181 { ICE_ETYPE_OL, 12 },
182 { ICE_IPV4_OFOS, 14 },
186 { ICE_VXLAN_GPE, 42 },
189 { ICE_UDP_ILOS, 84 },
190 { ICE_PROTOCOL_LAST, 0 },
193 static const u8 dummy_udp_tun_udp_packet[] = {
194 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
195 0x00, 0x00, 0x00, 0x00,
196 0x00, 0x00, 0x00, 0x00,
198 0x08, 0x00, /* ICE_ETYPE_OL 12 */
200 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
201 0x00, 0x01, 0x00, 0x00,
202 0x00, 0x11, 0x00, 0x00,
203 0x00, 0x00, 0x00, 0x00,
204 0x00, 0x00, 0x00, 0x00,
206 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
207 0x00, 0x3a, 0x00, 0x00,
209 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
210 0x00, 0x00, 0x00, 0x00,
212 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
213 0x00, 0x00, 0x00, 0x00,
214 0x00, 0x00, 0x00, 0x00,
217 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
218 0x00, 0x01, 0x00, 0x00,
219 0x00, 0x11, 0x00, 0x00,
220 0x00, 0x00, 0x00, 0x00,
221 0x00, 0x00, 0x00, 0x00,
223 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
224 0x00, 0x08, 0x00, 0x00,
227 /* offset info for MAC + IPv4 + UDP dummy packet */
228 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
230 { ICE_ETYPE_OL, 12 },
231 { ICE_IPV4_OFOS, 14 },
232 { ICE_UDP_ILOS, 34 },
233 { ICE_PROTOCOL_LAST, 0 },
236 /* Dummy packet for MAC + IPv4 + UDP */
237 static const u8 dummy_udp_packet[] = {
238 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
239 0x00, 0x00, 0x00, 0x00,
240 0x00, 0x00, 0x00, 0x00,
242 0x08, 0x00, /* ICE_ETYPE_OL 12 */
244 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
245 0x00, 0x01, 0x00, 0x00,
246 0x00, 0x11, 0x00, 0x00,
247 0x00, 0x00, 0x00, 0x00,
248 0x00, 0x00, 0x00, 0x00,
250 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
251 0x00, 0x08, 0x00, 0x00,
253 0x00, 0x00, /* 2 bytes for 4 byte alignment */
256 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
257 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
259 { ICE_ETYPE_OL, 12 },
260 { ICE_VLAN_OFOS, 14 },
261 { ICE_IPV4_OFOS, 18 },
262 { ICE_UDP_ILOS, 38 },
263 { ICE_PROTOCOL_LAST, 0 },
266 /* C-tag (801.1Q), IPv4:UDP dummy packet */
267 static const u8 dummy_vlan_udp_packet[] = {
268 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
269 0x00, 0x00, 0x00, 0x00,
270 0x00, 0x00, 0x00, 0x00,
272 0x81, 0x00, /* ICE_ETYPE_OL 12 */
274 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
276 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
277 0x00, 0x01, 0x00, 0x00,
278 0x00, 0x11, 0x00, 0x00,
279 0x00, 0x00, 0x00, 0x00,
280 0x00, 0x00, 0x00, 0x00,
282 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
283 0x00, 0x08, 0x00, 0x00,
285 0x00, 0x00, /* 2 bytes for 4 byte alignment */
288 /* offset info for MAC + IPv4 + TCP dummy packet */
289 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
291 { ICE_ETYPE_OL, 12 },
292 { ICE_IPV4_OFOS, 14 },
294 { ICE_PROTOCOL_LAST, 0 },
297 /* Dummy packet for MAC + IPv4 + TCP */
298 static const u8 dummy_tcp_packet[] = {
299 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
300 0x00, 0x00, 0x00, 0x00,
301 0x00, 0x00, 0x00, 0x00,
303 0x08, 0x00, /* ICE_ETYPE_OL 12 */
305 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
306 0x00, 0x01, 0x00, 0x00,
307 0x00, 0x06, 0x00, 0x00,
308 0x00, 0x00, 0x00, 0x00,
309 0x00, 0x00, 0x00, 0x00,
311 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
312 0x00, 0x00, 0x00, 0x00,
313 0x00, 0x00, 0x00, 0x00,
314 0x50, 0x00, 0x00, 0x00,
315 0x00, 0x00, 0x00, 0x00,
317 0x00, 0x00, /* 2 bytes for 4 byte alignment */
320 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
321 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
323 { ICE_ETYPE_OL, 12 },
324 { ICE_VLAN_OFOS, 14 },
325 { ICE_IPV4_OFOS, 18 },
327 { ICE_PROTOCOL_LAST, 0 },
330 /* C-tag (801.1Q), IPv4:TCP dummy packet */
331 static const u8 dummy_vlan_tcp_packet[] = {
332 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
333 0x00, 0x00, 0x00, 0x00,
334 0x00, 0x00, 0x00, 0x00,
336 0x81, 0x00, /* ICE_ETYPE_OL 12 */
338 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
340 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
341 0x00, 0x01, 0x00, 0x00,
342 0x00, 0x06, 0x00, 0x00,
343 0x00, 0x00, 0x00, 0x00,
344 0x00, 0x00, 0x00, 0x00,
346 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
347 0x00, 0x00, 0x00, 0x00,
348 0x00, 0x00, 0x00, 0x00,
349 0x50, 0x00, 0x00, 0x00,
350 0x00, 0x00, 0x00, 0x00,
352 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv6 + TCP dummy packet */
355 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
357 { ICE_ETYPE_OL, 12 },
358 { ICE_IPV6_OFOS, 14 },
360 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv6 + TCP */
363 static const u8 dummy_tcp_ipv6_packet[] = {
364 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
365 0x00, 0x00, 0x00, 0x00,
366 0x00, 0x00, 0x00, 0x00,
368 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
370 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
371 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
372 0x00, 0x00, 0x00, 0x00,
373 0x00, 0x00, 0x00, 0x00,
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00,
379 0x00, 0x00, 0x00, 0x00,
381 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
382 0x00, 0x00, 0x00, 0x00,
383 0x00, 0x00, 0x00, 0x00,
384 0x50, 0x00, 0x00, 0x00, /* TCP data offset = 5 words, no flags set */
385 0x00, 0x00, 0x00, 0x00,
387 0x00, 0x00, /* 2 bytes for 4 byte alignment */
390 /* C-tag (802.1Q): IPv6 + TCP */
391 static const struct ice_dummy_pkt_offsets
392 dummy_vlan_tcp_ipv6_packet_offsets[] = {
394 { ICE_ETYPE_OL, 12 },
395 { ICE_VLAN_OFOS, 14 },
396 { ICE_IPV6_OFOS, 18 },
398 { ICE_PROTOCOL_LAST, 0 },
401 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
402 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
403 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
404 0x00, 0x00, 0x00, 0x00,
405 0x00, 0x00, 0x00, 0x00,
407 0x81, 0x00, /* ICE_ETYPE_OL 12 */
409 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
411 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
412 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
413 0x00, 0x00, 0x00, 0x00,
414 0x00, 0x00, 0x00, 0x00,
415 0x00, 0x00, 0x00, 0x00,
416 0x00, 0x00, 0x00, 0x00,
417 0x00, 0x00, 0x00, 0x00,
418 0x00, 0x00, 0x00, 0x00,
419 0x00, 0x00, 0x00, 0x00,
420 0x00, 0x00, 0x00, 0x00,
422 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
423 0x00, 0x00, 0x00, 0x00,
424 0x00, 0x00, 0x00, 0x00,
425 0x50, 0x00, 0x00, 0x00,
426 0x00, 0x00, 0x00, 0x00,
428 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv6 + UDP dummy packet */
432 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
434 { ICE_ETYPE_OL, 12 },
435 { ICE_IPV6_OFOS, 14 },
436 { ICE_UDP_ILOS, 54 },
437 { ICE_PROTOCOL_LAST, 0 },
440 /* IPv6 + UDP dummy packet */
441 static const u8 dummy_udp_ipv6_packet[] = {
442 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
443 0x00, 0x00, 0x00, 0x00,
444 0x00, 0x00, 0x00, 0x00,
446 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
448 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
449 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
450 0x00, 0x00, 0x00, 0x00,
451 0x00, 0x00, 0x00, 0x00,
452 0x00, 0x00, 0x00, 0x00,
453 0x00, 0x00, 0x00, 0x00,
454 0x00, 0x00, 0x00, 0x00,
455 0x00, 0x00, 0x00, 0x00,
456 0x00, 0x00, 0x00, 0x00,
457 0x00, 0x00, 0x00, 0x00,
459 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
460 0x00, 0x08, 0x00, 0x00, /* UDP length = 8 (header only) */
462 0x00, 0x00, /* 2 bytes for 4 byte alignment */
465 /* C-tag (802.1Q): IPv6 + UDP */
466 static const struct ice_dummy_pkt_offsets
467 dummy_vlan_udp_ipv6_packet_offsets[] = {
469 { ICE_ETYPE_OL, 12 },
470 { ICE_VLAN_OFOS, 14 },
471 { ICE_IPV6_OFOS, 18 },
472 { ICE_UDP_ILOS, 58 },
473 { ICE_PROTOCOL_LAST, 0 },
476 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
477 static const u8 dummy_vlan_udp_ipv6_packet[] = {
478 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
479 0x00, 0x00, 0x00, 0x00,
480 0x00, 0x00, 0x00, 0x00,
482 0x81, 0x00, /* ICE_ETYPE_OL 12 */
484 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
486 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
487 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
488 0x00, 0x00, 0x00, 0x00,
489 0x00, 0x00, 0x00, 0x00,
490 0x00, 0x00, 0x00, 0x00,
491 0x00, 0x00, 0x00, 0x00,
492 0x00, 0x00, 0x00, 0x00,
493 0x00, 0x00, 0x00, 0x00,
494 0x00, 0x00, 0x00, 0x00,
495 0x00, 0x00, 0x00, 0x00,
497 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
498 0x00, 0x08, 0x00, 0x00,
500 0x00, 0x00, /* 2 bytes for 4 byte alignment */
503 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
505 { ICE_IPV4_OFOS, 14 },
508 { ICE_PROTOCOL_LAST, 0 },
511 static const u8 dummy_udp_gtp_packet[] = {
512 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
513 0x00, 0x00, 0x00, 0x00,
514 0x00, 0x00, 0x00, 0x00,
517 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
518 0x00, 0x00, 0x00, 0x00,
519 0x00, 0x11, 0x00, 0x00,
520 0x00, 0x00, 0x00, 0x00,
521 0x00, 0x00, 0x00, 0x00,
523 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
524 0x00, 0x1c, 0x00, 0x00,
526 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
527 0x00, 0x00, 0x00, 0x00,
528 0x00, 0x00, 0x00, 0x85,
530 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
531 0x00, 0x00, 0x00, 0x00,
534 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
536 { ICE_ETYPE_OL, 12 },
537 { ICE_VLAN_OFOS, 14},
539 { ICE_PROTOCOL_LAST, 0 },
542 static const u8 dummy_pppoe_ipv4_packet[] = {
543 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
544 0x00, 0x00, 0x00, 0x00,
545 0x00, 0x00, 0x00, 0x00,
547 0x81, 0x00, /* ICE_ETYPE_OL 12 */
549 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
551 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
554 0x00, 0x21, /* PPP Link Layer 24 */
556 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
557 0x00, 0x00, 0x00, 0x00,
558 0x00, 0x00, 0x00, 0x00,
559 0x00, 0x00, 0x00, 0x00,
560 0x00, 0x00, 0x00, 0x00,
562 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
565 static const u8 dummy_pppoe_ipv6_packet[] = {
566 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
567 0x00, 0x00, 0x00, 0x00,
568 0x00, 0x00, 0x00, 0x00,
570 0x81, 0x00, /* ICE_ETYPE_OL 12 */
572 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
574 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
577 0x00, 0x57, /* PPP Link Layer 24 */
579 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
580 0x00, 0x00, 0x00, 0x00,
581 0x00, 0x00, 0x00, 0x00,
582 0x00, 0x00, 0x00, 0x00,
583 0x00, 0x00, 0x00, 0x00,
584 0x00, 0x00, 0x00, 0x00,
585 0x00, 0x00, 0x00, 0x00,
586 0x00, 0x00, 0x00, 0x00,
587 0x00, 0x00, 0x00, 0x00,
588 0x00, 0x00, 0x00, 0x00,
590 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
593 static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
595 { ICE_IPV4_OFOS, 14 },
597 { ICE_PROTOCOL_LAST, 0 },
600 static const u8 dummy_ipv4_esp_pkt[] = {
601 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
602 0x00, 0x00, 0x00, 0x00,
603 0x00, 0x00, 0x00, 0x00,
606 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 14 */
607 0x00, 0x00, 0x40, 0x00,
608 0x40, 0x32, 0x00, 0x00,
609 0x00, 0x00, 0x00, 0x00,
610 0x00, 0x00, 0x00, 0x00,
612 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
613 0x00, 0x00, 0x00, 0x00,
614 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
617 static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
619 { ICE_IPV6_OFOS, 14 },
621 { ICE_PROTOCOL_LAST, 0 },
624 static const u8 dummy_ipv6_esp_pkt[] = {
625 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
626 0x00, 0x00, 0x00, 0x00,
627 0x00, 0x00, 0x00, 0x00,
630 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
631 0x00, 0x08, 0x32, 0x00, /* Next header ESP */
632 0x00, 0x00, 0x00, 0x00,
633 0x00, 0x00, 0x00, 0x00,
634 0x00, 0x00, 0x00, 0x00,
635 0x00, 0x00, 0x00, 0x00,
636 0x00, 0x00, 0x00, 0x00,
637 0x00, 0x00, 0x00, 0x00,
638 0x00, 0x00, 0x00, 0x00,
639 0x00, 0x00, 0x00, 0x00,
641 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
642 0x00, 0x00, 0x00, 0x00,
643 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
646 static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
648 { ICE_IPV4_OFOS, 14 },
650 { ICE_PROTOCOL_LAST, 0 },
653 static const u8 dummy_ipv4_ah_pkt[] = {
654 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
655 0x00, 0x00, 0x00, 0x00,
656 0x00, 0x00, 0x00, 0x00,
659 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
660 0x00, 0x00, 0x40, 0x00,
661 0x40, 0x33, 0x00, 0x00,
662 0x00, 0x00, 0x00, 0x00,
663 0x00, 0x00, 0x00, 0x00,
665 0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
666 0x00, 0x00, 0x00, 0x00,
667 0x00, 0x00, 0x00, 0x00,
668 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
671 static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
673 { ICE_IPV6_OFOS, 14 },
675 { ICE_PROTOCOL_LAST, 0 },
678 static const u8 dummy_ipv6_ah_pkt[] = {
679 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
680 0x00, 0x00, 0x00, 0x00,
681 0x00, 0x00, 0x00, 0x00,
684 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
685 0x00, 0x0c, 0x33, 0x00, /* Next header AH */
686 0x00, 0x00, 0x00, 0x00,
687 0x00, 0x00, 0x00, 0x00,
688 0x00, 0x00, 0x00, 0x00,
689 0x00, 0x00, 0x00, 0x00,
690 0x00, 0x00, 0x00, 0x00,
691 0x00, 0x00, 0x00, 0x00,
692 0x00, 0x00, 0x00, 0x00,
693 0x00, 0x00, 0x00, 0x00,
695 0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
696 0x00, 0x00, 0x00, 0x00,
697 0x00, 0x00, 0x00, 0x00,
698 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
701 static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
703 { ICE_IPV4_OFOS, 14 },
704 { ICE_UDP_ILOS, 34 },
706 { ICE_PROTOCOL_LAST, 0 },
709 static const u8 dummy_ipv4_nat_pkt[] = {
710 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
711 0x00, 0x00, 0x00, 0x00,
712 0x00, 0x00, 0x00, 0x00,
715 0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_IL 14 */
716 0x00, 0x00, 0x40, 0x00,
717 0x40, 0x11, 0x00, 0x00,
718 0x00, 0x00, 0x00, 0x00,
719 0x00, 0x00, 0x00, 0x00,
721 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34 */
722 0x00, 0x00, 0x00, 0x00,
724 0x00, 0x00, 0x00, 0x00,
725 0x00, 0x00, 0x00, 0x00,
726 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
729 static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
731 { ICE_IPV6_OFOS, 14 },
732 { ICE_UDP_ILOS, 54 },
734 { ICE_PROTOCOL_LAST, 0 },
737 static const u8 dummy_ipv6_nat_pkt[] = {
738 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
739 0x00, 0x00, 0x00, 0x00,
740 0x00, 0x00, 0x00, 0x00,
743 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
744 0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */
745 0x00, 0x00, 0x00, 0x00,
746 0x00, 0x00, 0x00, 0x00,
747 0x00, 0x00, 0x00, 0x00,
748 0x00, 0x00, 0x00, 0x00,
749 0x00, 0x00, 0x00, 0x00,
750 0x00, 0x00, 0x00, 0x00,
751 0x00, 0x00, 0x00, 0x00,
752 0x00, 0x00, 0x00, 0x00,
754 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54 */
755 0x00, 0x00, 0x00, 0x00,
757 0x00, 0x00, 0x00, 0x00,
758 0x00, 0x00, 0x00, 0x00,
759 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
763 static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
765 { ICE_IPV4_OFOS, 14 },
767 { ICE_PROTOCOL_LAST, 0 },
770 static const u8 dummy_ipv4_l2tpv3_pkt[] = {
771 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
772 0x00, 0x00, 0x00, 0x00,
773 0x00, 0x00, 0x00, 0x00,
776 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
777 0x00, 0x00, 0x40, 0x00,
778 0x40, 0x73, 0x00, 0x00,
779 0x00, 0x00, 0x00, 0x00,
780 0x00, 0x00, 0x00, 0x00,
782 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
783 0x00, 0x00, 0x00, 0x00,
784 0x00, 0x00, 0x00, 0x00,
785 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
788 static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
790 { ICE_IPV6_OFOS, 14 },
792 { ICE_PROTOCOL_LAST, 0 },
795 static const u8 dummy_ipv6_l2tpv3_pkt[] = {
796 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
797 0x00, 0x00, 0x00, 0x00,
798 0x00, 0x00, 0x00, 0x00,
801 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
802 0x00, 0x0c, 0x73, 0x40,
803 0x00, 0x00, 0x00, 0x00,
804 0x00, 0x00, 0x00, 0x00,
805 0x00, 0x00, 0x00, 0x00,
806 0x00, 0x00, 0x00, 0x00,
807 0x00, 0x00, 0x00, 0x00,
808 0x00, 0x00, 0x00, 0x00,
809 0x00, 0x00, 0x00, 0x00,
810 0x00, 0x00, 0x00, 0x00,
812 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
813 0x00, 0x00, 0x00, 0x00,
814 0x00, 0x00, 0x00, 0x00,
815 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
818 /* this is a recipe to profile association bitmap */
819 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
820 ICE_MAX_NUM_PROFILES);
822 /* this is a profile to recipe association bitmap */
823 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
824 ICE_MAX_NUM_RECIPES);
/* forward declaration; defined further down in this file */
826 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
829 * ice_collect_result_idx - copy result index values
830 * @buf: buffer that contains the result index
831 * @recp: the recipe struct to copy data into
/* If the result-enable flag is set, record the result slot number
 * (result_indx with the ICE_AQ_RECIPE_RESULT_EN flag masked off) in
 * the recipe's res_idxs bitmap.
 */
833 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
834 struct ice_sw_recipe *recp)
836 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
837 ice_set_bit(buf->content.result_indx &
838 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
842 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
843 * @hw: pointer to hardware structure
844 * @recps: struct that we need to populate
845 * @rid: recipe ID that we are populating
846 * @refresh_required: true if we should get recipe to profile mapping from FW
848 * This function is used to populate all the necessary entries into our
849 * bookkeeping so that we have a current list of all the recipes that are
850 * programmed in the firmware.
852 static enum ice_status
853 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
854 bool *refresh_required)
856 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
857 struct ice_aqc_recipe_data_elem *tmp;
858 u16 num_recps = ICE_MAX_NUM_RECIPES;
859 struct ice_prot_lkup_ext *lkup_exts;
860 enum ice_status status;
864 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
866 /* we need a buffer big enough to accommodate all the recipes */
867 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
868 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
870 return ICE_ERR_NO_MEMORY;
/* ice_aq_get_recipe reads tmp[0].recipe_indx to know which recipe chain
 * to fetch, and updates num_recps to the number of entries returned.
 */
872 tmp[0].recipe_indx = rid;
873 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
874 /* non-zero status meaning recipe doesn't exist */
878 /* Get recipe to profile map so that we can get the fv from lkups that
879 * we read for a recipe from FW. Since we want to minimize the number of
880 * times we make this FW call, just make one call and cache the copy
881 * until a new recipe is added. This operation is only required the
882 * first time to get the changes from FW. Then to search existing
883 * entries we don't need to update the cache again until another recipe
886 if (*refresh_required) {
887 ice_get_recp_to_prof_map(hw);
888 *refresh_required = false;
891 /* Start populating all the entries for recps[rid] based on lkups from
892 * firmware. Note that we are only creating the root recipe in our
895 lkup_exts = &recps[rid].lkup_exts;
/* Walk every sub-recipe returned by FW; each iteration allocates one
 * rg_list entry and merges its lookup words into lkup_exts.
 */
897 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
898 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
899 struct ice_recp_grp_entry *rg_entry;
900 u8 i, prof, idx, prot = 0;
904 rg_entry = (struct ice_recp_grp_entry *)
905 ice_malloc(hw, sizeof(*rg_entry));
907 status = ICE_ERR_NO_MEMORY;
/* idx is this sub-recipe's own index; rid is the root being populated */
911 idx = root_bufs.recipe_indx;
912 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
914 /* Mark all result indices in this chain */
915 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
916 ice_set_bit(root_bufs.content.result_indx &
917 ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
919 /* get the first profile that is associated with rid */
920 prof = ice_find_first_bit(recipe_to_profile[idx],
921 ICE_MAX_NUM_PROFILES);
/* lookup words are read from slot 1 onward ([i + 1]); slot 0 is skipped */
922 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
923 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
925 rg_entry->fv_idx[i] = lkup_indx;
926 rg_entry->fv_mask[i] =
927 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
929 /* If the recipe is a chained recipe then all its
930 * child recipe's result will have a result index.
931 * To fill fv_words we should not use those result
932 * index, we only need the protocol ids and offsets.
933 * We will skip all the fv_idx which stores result
934 * index in them. We also need to skip any fv_idx which
935 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
936 * valid offset value.
938 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
939 rg_entry->fv_idx[i]) ||
940 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
941 rg_entry->fv_idx[i] == 0)
/* translate the field-vector index into protocol ID + offset */
944 ice_find_prot_off(hw, ICE_BLK_SW, prof,
945 rg_entry->fv_idx[i], &prot, &off);
946 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
947 lkup_exts->fv_words[fv_word_idx].off = off;
950 /* populate rg_list with the data from the child entry of this
953 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
955 /* Propagate some data to the recipe database */
956 recps[idx].is_root = !!is_root;
957 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
958 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
959 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
960 recps[idx].chain_idx = root_bufs.content.result_indx &
961 ~ICE_AQ_RECIPE_RESULT_EN;
962 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
964 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
970 /* Only do the following for root recipes entries */
971 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
972 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
973 recps[idx].root_rid = root_bufs.content.rid &
974 ~ICE_AQ_RECIPE_ID_IS_ROOT;
975 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
978 /* Complete initialization of the root recipe entry */
979 lkup_exts->n_val_words = fv_word_idx;
980 recps[rid].big_recp = (num_recps > 1);
981 recps[rid].n_grp_count = (u8)num_recps;
/* keep a private copy of the raw FW buffer for this recipe */
982 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
983 ice_memdup(hw, tmp, recps[rid].n_grp_count *
984 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
985 if (!recps[rid].root_buf)
988 /* Copy result indexes */
989 ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
990 recps[rid].recp_created = true;
998 * ice_get_recp_to_prof_map - updates recipe to profile mapping
999 * @hw: pointer to hardware structure
1001 * This function is used to populate recipe_to_profile matrix where index to
1002 * this array is the recipe ID and the element is the mapping of which profiles
1003 * is this recipe mapped to.
1006 ice_get_recp_to_prof_map(struct ice_hw *hw)
1008 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Fill both directions of the association: profile_to_recipe[i] gets the
 * bitmap returned by FW for profile i, and the transpose bit i is set in
 * recipe_to_profile[j] for every recipe j present in that bitmap.
 */
1011 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
1014 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
1015 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
1016 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
1018 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
1019 ICE_MAX_NUM_RECIPES);
1020 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
1021 if (ice_is_bit_set(r_bitmap, j))
1022 ice_set_bit(i, recipe_to_profile[j]);
1027 * ice_init_def_sw_recp - initialize the recipe book keeping tables
1028 * @hw: pointer to the HW struct
1029 * @recp_list: pointer to sw recipe list
1031 * Allocate memory for the entire recipe table and initialize the structures/
1032 * entries corresponding to basic recipes.
1035 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
1037 struct ice_sw_recipe *recps;
/* one zeroed entry per possible recipe */
1040 recps = (struct ice_sw_recipe *)
1041 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
1043 return ICE_ERR_NO_MEMORY;
/* each entry starts as its own root with empty rule lists and a fresh lock */
1045 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
1046 recps[i].root_rid = i;
1047 INIT_LIST_HEAD(&recps[i].filt_rules);
1048 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
1049 INIT_LIST_HEAD(&recps[i].rg_list);
1050 ice_init_lock(&recps[i].filt_rule_lock);
1059 * ice_aq_get_sw_cfg - get switch configuration
1060 * @hw: pointer to the hardware structure
1061 * @buf: pointer to the result buffer
1062 * @buf_size: length of the buffer available for response
1063 * @req_desc: pointer to requested descriptor
1064 * @num_elems: pointer to number of elements
1065 * @cd: pointer to command details structure or NULL
1067 * Get switch configuration (0x0200) to be placed in 'buff'.
1068 * This admin command returns information such as initial VSI/port number
1069 * and switch ID it belongs to.
1071 * NOTE: *req_desc is both an input/output parameter.
1072 * The caller of this function first calls this function with *request_desc set
1073 * to 0. If the response from f/w has *req_desc set to 0, all the switch
1074 * configuration information has been returned; if non-zero (meaning not all
1075 * the information was returned), the caller should call this function again
1076 * with *req_desc set to the previous value returned by f/w to get the
1077 * next block of switch configuration information.
1079 * *num_elems is output only parameter. This reflects the number of elements
1080 * in response buffer. The caller of this function to use *num_elems while
1081 * parsing the response buffer.
1083 static enum ice_status
1084 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
1085 u16 buf_size, u16 *req_desc, u16 *num_elems,
1086 struct ice_sq_cd *cd)
1088 struct ice_aqc_get_sw_cfg *cmd;
1089 enum ice_status status;
1090 struct ice_aq_desc desc;
1092 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
1093 cmd = &desc.params.get_sw_conf;
/* tell FW which descriptor block to start from (continuation cookie) */
1094 cmd->element = CPU_TO_LE16(*req_desc);
1096 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* echo back the continuation cookie and element count from the response */
1098 *req_desc = LE16_TO_CPU(cmd->element);
1099 *num_elems = LE16_TO_CPU(cmd->num_elems);
1106 * ice_alloc_sw - allocate resources specific to switch
1107 * @hw: pointer to the HW struct
1108 * @ena_stats: true to turn on VEB stats
1109 * @shared_res: true for shared resource, false for dedicated resource
1110 * @sw_id: switch ID returned
1111 * @counter_id: VEB counter ID returned
1113 * allocates switch resources (SWID and VEB counter) (0x0208)
1116 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
1119 struct ice_aqc_alloc_free_res_elem *sw_buf;
1120 struct ice_aqc_res_elem *sw_ele;
1121 enum ice_status status;
1124 buf_len = sizeof(*sw_buf);
1125 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1126 ice_malloc(hw, buf_len);
1128 return ICE_ERR_NO_MEMORY;
1130 /* Prepare buffer for switch ID.
1131 * The number of resource entries in buffer is passed as 1 since only a
1132 * single switch/VEB instance is allocated, and hence a single sw_id
1135 sw_buf->num_elems = CPU_TO_LE16(1);
/* request a shared or dedicated SWID as asked by the caller */
1137 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
1138 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
1139 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
1141 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1142 ice_aqc_opc_alloc_res, NULL);
1145 goto ice_alloc_sw_exit;
/* return the SWID granted by FW to the caller */
1147 sw_ele = &sw_buf->elem[0];
1148 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
1151 /* Prepare buffer for VEB Counter */
1152 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
1153 struct ice_aqc_alloc_free_res_elem *counter_buf;
1154 struct ice_aqc_res_elem *counter_ele;
1156 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1157 ice_malloc(hw, buf_len);
1159 status = ICE_ERR_NO_MEMORY;
1160 goto ice_alloc_sw_exit;
1163 /* The number of resource entries in buffer is passed as 1 since
1164 * only a single switch/VEB instance is allocated, and hence a
1165 * single VEB counter is requested.
1167 counter_buf->num_elems = CPU_TO_LE16(1);
1168 counter_buf->res_type =
1169 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
1170 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
1171 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
/* on failure the temporary counter buffer is released before bailing out */
1175 ice_free(hw, counter_buf);
1176 goto ice_alloc_sw_exit;
1178 counter_ele = &counter_buf->elem[0];
1179 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
1180 ice_free(hw, counter_buf);
1184 ice_free(hw, sw_buf);
1189 * ice_free_sw - free resources specific to switch
1190 * @hw: pointer to the HW struct
1191 * @sw_id: switch ID returned
1192 * @counter_id: VEB counter ID returned
1194 * free switch resources (SWID and VEB counter) (0x0209)
1196 * NOTE: This function frees multiple resources. It continues
1197 * releasing other resources even after it encounters error.
1198 * The error code returned is the last error it encountered.
1200 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
1202 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
1203 enum ice_status status, ret_status;
1206 buf_len = sizeof(*sw_buf);
1207 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1208 ice_malloc(hw, buf_len);
1210 return ICE_ERR_NO_MEMORY;
1212 /* Prepare buffer to free for switch ID res.
1213 * The number of resource entries in buffer is passed as 1 since only a
1214 * single switch/VEB instance is freed, and hence a single sw_id
1217 sw_buf->num_elems = CPU_TO_LE16(1);
1218 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
1219 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
/* free the SWID first; a failure here does not stop the counter free below */
1221 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1222 ice_aqc_opc_free_res, NULL);
1225 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1227 /* Prepare buffer to free for VEB Counter resource */
1228 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1229 ice_malloc(hw, buf_len);
1231 ice_free(hw, sw_buf);
1232 return ICE_ERR_NO_MEMORY;
1235 /* The number of resource entries in buffer is passed as 1 since only a
1236 * single switch/VEB instance is freed, and hence a single VEB counter
1239 counter_buf->num_elems = CPU_TO_LE16(1);
1240 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
1241 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
1243 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1244 ice_aqc_opc_free_res, NULL);
/* per the NOTE above, remember the last error but keep releasing */
1246 ice_debug(hw, ICE_DBG_SW,
1247 "VEB counter resource could not be freed\n");
1248 ret_status = status;
1251 ice_free(hw, counter_buf);
1252 ice_free(hw, sw_buf);
1258 * @hw: pointer to the HW struct
1259 * @vsi_ctx: pointer to a VSI context struct
1260 * @cd: pointer to command details structure or NULL
1262 * Add a VSI context to the hardware (0x0210)
1265 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1266 struct ice_sq_cd *cd)
1268 struct ice_aqc_add_update_free_vsi_resp *res;
1269 struct ice_aqc_add_get_update_free_vsi *cmd;
1270 struct ice_aq_desc desc;
1271 enum ice_status status;
1273 cmd = &desc.params.vsi_cmd;
1274 res = &desc.params.add_update_free_vsi_res;
1276 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
1278 if (!vsi_ctx->alloc_from_pool)
1279 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
1280 ICE_AQ_VSI_IS_VALID);
1282 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
1284 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1286 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1287 sizeof(vsi_ctx->info), cd);
1290 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1291 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
1292 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
1300 * @hw: pointer to the HW struct
1301 * @vsi_ctx: pointer to a VSI context struct
1302 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1303 * @cd: pointer to command details structure or NULL
1305 * Free VSI context info from hardware (0x0213)
1308 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1309 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1311 struct ice_aqc_add_update_free_vsi_resp *resp;
1312 struct ice_aqc_add_get_update_free_vsi *cmd;
1313 struct ice_aq_desc desc;
1314 enum ice_status status;
1316 cmd = &desc.params.vsi_cmd;
1317 resp = &desc.params.add_update_free_vsi_res;
1319 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1321 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1323 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
1325 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1327 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1328 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1336 * @hw: pointer to the HW struct
1337 * @vsi_ctx: pointer to a VSI context struct
1338 * @cd: pointer to command details structure or NULL
1340 * Update VSI context in the hardware (0x0211)
1343 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1344 struct ice_sq_cd *cd)
1346 struct ice_aqc_add_update_free_vsi_resp *resp;
1347 struct ice_aqc_add_get_update_free_vsi *cmd;
1348 struct ice_aq_desc desc;
1349 enum ice_status status;
1351 cmd = &desc.params.vsi_cmd;
1352 resp = &desc.params.add_update_free_vsi_res;
1354 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1356 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1358 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1360 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1361 sizeof(vsi_ctx->info), cd);
1364 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1365 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1372 * ice_is_vsi_valid - check whether the VSI is valid or not
1373 * @hw: pointer to the HW struct
1374 * @vsi_handle: VSI handle
1376 * check whether the VSI is valid or not
1378 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1380 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1384 * ice_get_hw_vsi_num - return the HW VSI number
1385 * @hw: pointer to the HW struct
1386 * @vsi_handle: VSI handle
1388 * return the HW VSI number
1389 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1391 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1393 return hw->vsi_ctx[vsi_handle]->vsi_num;
1397 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1398 * @hw: pointer to the HW struct
1399 * @vsi_handle: VSI handle
1401 * return the VSI context entry for a given VSI handle
1403 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1405 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1409 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1410 * @hw: pointer to the HW struct
1411 * @vsi_handle: VSI handle
1412 * @vsi: VSI context pointer
1414 * save the VSI context entry for a given VSI handle
1417 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1419 hw->vsi_ctx[vsi_handle] = vsi;
1423 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1424 * @hw: pointer to the HW struct
1425 * @vsi_handle: VSI handle
1427 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1429 struct ice_vsi_ctx *vsi;
1432 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1435 ice_for_each_traffic_class(i) {
1436 if (vsi->lan_q_ctx[i]) {
1437 ice_free(hw, vsi->lan_q_ctx[i]);
1438 vsi->lan_q_ctx[i] = NULL;
1444 * ice_clear_vsi_ctx - clear the VSI context entry
1445 * @hw: pointer to the HW struct
1446 * @vsi_handle: VSI handle
1448 * clear the VSI context entry
1450 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1452 struct ice_vsi_ctx *vsi;
1454 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1456 ice_clear_vsi_q_ctx(hw, vsi_handle);
1458 hw->vsi_ctx[vsi_handle] = NULL;
1463 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1464 * @hw: pointer to the HW struct
1466 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1470 for (i = 0; i < ICE_MAX_VSI; i++)
1471 ice_clear_vsi_ctx(hw, i);
1475 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1476 * @hw: pointer to the HW struct
1477 * @vsi_handle: unique VSI handle provided by drivers
1478 * @vsi_ctx: pointer to a VSI context struct
1479 * @cd: pointer to command details structure or NULL
1481 * Add a VSI context to the hardware also add it into the VSI handle list.
1482 * If this function gets called after reset for existing VSIs then update
1483 * with the new HW VSI number in the corresponding VSI handle list entry.
1486 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1487 struct ice_sq_cd *cd)
1489 struct ice_vsi_ctx *tmp_vsi_ctx;
1490 enum ice_status status;
1492 if (vsi_handle >= ICE_MAX_VSI)
1493 return ICE_ERR_PARAM;
1494 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1497 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1499 /* Create a new VSI context */
1500 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1501 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
1503 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1504 return ICE_ERR_NO_MEMORY;
1506 *tmp_vsi_ctx = *vsi_ctx;
1508 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1510 /* update with new HW VSI num */
1511 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1518 * ice_free_vsi- free VSI context from hardware and VSI handle list
1519 * @hw: pointer to the HW struct
1520 * @vsi_handle: unique VSI handle
1521 * @vsi_ctx: pointer to a VSI context struct
1522 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1523 * @cd: pointer to command details structure or NULL
1525 * Free VSI context info from hardware as well as from VSI handle list
1528 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1529 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1531 enum ice_status status;
1533 if (!ice_is_vsi_valid(hw, vsi_handle))
1534 return ICE_ERR_PARAM;
1535 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1536 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1538 ice_clear_vsi_ctx(hw, vsi_handle);
1544 * @hw: pointer to the HW struct
1545 * @vsi_handle: unique VSI handle
1546 * @vsi_ctx: pointer to a VSI context struct
1547 * @cd: pointer to command details structure or NULL
1549 * Update VSI context in the hardware
1552 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1553 struct ice_sq_cd *cd)
1555 if (!ice_is_vsi_valid(hw, vsi_handle))
1556 return ICE_ERR_PARAM;
1557 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1558 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1562 * ice_aq_get_vsi_params
1563 * @hw: pointer to the HW struct
1564 * @vsi_ctx: pointer to a VSI context struct
1565 * @cd: pointer to command details structure or NULL
1567 * Get VSI context info from hardware (0x0212)
1570 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1571 struct ice_sq_cd *cd)
1573 struct ice_aqc_add_get_update_free_vsi *cmd;
1574 struct ice_aqc_get_vsi_resp *resp;
1575 struct ice_aq_desc desc;
1576 enum ice_status status;
1578 cmd = &desc.params.vsi_cmd;
1579 resp = &desc.params.get_vsi_resp;
1581 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1583 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1585 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1586 sizeof(vsi_ctx->info), cd);
1588 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1590 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1591 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1598 * ice_aq_add_update_mir_rule - add/update a mirror rule
1599 * @hw: pointer to the HW struct
1600 * @rule_type: Rule Type
1601 * @dest_vsi: VSI number to which packets will be mirrored
1602 * @count: length of the list
1603 * @mr_buf: buffer for list of mirrored VSI numbers
1604 * @cd: pointer to command details structure or NULL
1607 * Add/Update Mirror Rule (0x260).
1610 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1611 u16 count, struct ice_mir_rule_buf *mr_buf,
1612 struct ice_sq_cd *cd, u16 *rule_id)
1614 struct ice_aqc_add_update_mir_rule *cmd;
1615 struct ice_aq_desc desc;
1616 enum ice_status status;
1617 __le16 *mr_list = NULL;
1620 switch (rule_type) {
1621 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1622 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1623 /* Make sure count and mr_buf are set for these rule_types */
1624 if (!(count && mr_buf))
1625 return ICE_ERR_PARAM;
1627 buf_size = count * sizeof(__le16);
1628 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1630 return ICE_ERR_NO_MEMORY;
1632 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1633 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1634 /* Make sure count and mr_buf are not set for these
1637 if (count || mr_buf)
1638 return ICE_ERR_PARAM;
1641 ice_debug(hw, ICE_DBG_SW,
1642 "Error due to unsupported rule_type %u\n", rule_type);
1643 return ICE_ERR_OUT_OF_RANGE;
1646 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1648 /* Pre-process 'mr_buf' items for add/update of virtual port
1649 * ingress/egress mirroring (but not physical port ingress/egress
1655 for (i = 0; i < count; i++) {
1658 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1660 /* Validate specified VSI number, make sure it is less
1661 * than ICE_MAX_VSI, if not return with error.
1663 if (id >= ICE_MAX_VSI) {
1664 ice_debug(hw, ICE_DBG_SW,
1665 "Error VSI index (%u) out-of-range\n",
1667 ice_free(hw, mr_list);
1668 return ICE_ERR_OUT_OF_RANGE;
1671 /* add VSI to mirror rule */
1674 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1675 else /* remove VSI from mirror rule */
1676 mr_list[i] = CPU_TO_LE16(id);
1680 cmd = &desc.params.add_update_rule;
1681 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1682 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1683 ICE_AQC_RULE_ID_VALID_M);
1684 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1685 cmd->num_entries = CPU_TO_LE16(count);
1686 cmd->dest = CPU_TO_LE16(dest_vsi);
1688 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
1690 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1692 ice_free(hw, mr_list);
1698 * ice_aq_delete_mir_rule - delete a mirror rule
1699 * @hw: pointer to the HW struct
1700 * @rule_id: Mirror rule ID (to be deleted)
1701 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1702 * otherwise it is returned to the shared pool
1703 * @cd: pointer to command details structure or NULL
1705 * Delete Mirror Rule (0x261).
1708 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1709 struct ice_sq_cd *cd)
1711 struct ice_aqc_delete_mir_rule *cmd;
1712 struct ice_aq_desc desc;
1714 /* rule_id should be in the range 0...63 */
1715 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1716 return ICE_ERR_OUT_OF_RANGE;
1718 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1720 cmd = &desc.params.del_rule;
1721 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1722 cmd->rule_id = CPU_TO_LE16(rule_id);
1725 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1727 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1731 * ice_aq_alloc_free_vsi_list
1732 * @hw: pointer to the HW struct
1733 * @vsi_list_id: VSI list ID returned or used for lookup
1734 * @lkup_type: switch rule filter lookup type
1735 * @opc: switch rules population command type - pass in the command opcode
1737 * allocates or free a VSI list resource
1739 static enum ice_status
1740 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1741 enum ice_sw_lkup_type lkup_type,
1742 enum ice_adminq_opc opc)
1744 struct ice_aqc_alloc_free_res_elem *sw_buf;
1745 struct ice_aqc_res_elem *vsi_ele;
1746 enum ice_status status;
1749 buf_len = sizeof(*sw_buf);
1750 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1751 ice_malloc(hw, buf_len);
1753 return ICE_ERR_NO_MEMORY;
1754 sw_buf->num_elems = CPU_TO_LE16(1);
1756 if (lkup_type == ICE_SW_LKUP_MAC ||
1757 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1758 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1759 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1760 lkup_type == ICE_SW_LKUP_PROMISC ||
1761 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1762 lkup_type == ICE_SW_LKUP_LAST) {
1763 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1764 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1766 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1768 status = ICE_ERR_PARAM;
1769 goto ice_aq_alloc_free_vsi_list_exit;
1772 if (opc == ice_aqc_opc_free_res)
1773 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1775 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1777 goto ice_aq_alloc_free_vsi_list_exit;
1779 if (opc == ice_aqc_opc_alloc_res) {
1780 vsi_ele = &sw_buf->elem[0];
1781 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1784 ice_aq_alloc_free_vsi_list_exit:
1785 ice_free(hw, sw_buf);
1790 * ice_aq_set_storm_ctrl - Sets storm control configuration
1791 * @hw: pointer to the HW struct
1792 * @bcast_thresh: represents the upper threshold for broadcast storm control
1793 * @mcast_thresh: represents the upper threshold for multicast storm control
1794 * @ctl_bitmask: storm control control knobs
1796 * Sets the storm control configuration (0x0280)
1799 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1802 struct ice_aqc_storm_cfg *cmd;
1803 struct ice_aq_desc desc;
1805 cmd = &desc.params.storm_conf;
1807 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
1809 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1810 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1811 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1813 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1817 * ice_aq_get_storm_ctrl - gets storm control configuration
1818 * @hw: pointer to the HW struct
1819 * @bcast_thresh: represents the upper threshold for broadcast storm control
1820 * @mcast_thresh: represents the upper threshold for multicast storm control
1821 * @ctl_bitmask: storm control control knobs
1823 * Gets the storm control configuration (0x0281)
1826 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
1829 enum ice_status status;
1830 struct ice_aq_desc desc;
1832 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1834 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1836 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
1839 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1842 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1845 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1852 * ice_aq_sw_rules - add/update/remove switch rules
1853 * @hw: pointer to the HW struct
1854 * @rule_list: pointer to switch rule population list
1855 * @rule_list_sz: total size of the rule list in bytes
1856 * @num_rules: number of switch rules in the rule_list
1857 * @opc: switch rules population command type - pass in the command opcode
1858 * @cd: pointer to command details structure or NULL
1860 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1862 static enum ice_status
1863 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1864 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1866 struct ice_aq_desc desc;
1868 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1870 if (opc != ice_aqc_opc_add_sw_rules &&
1871 opc != ice_aqc_opc_update_sw_rules &&
1872 opc != ice_aqc_opc_remove_sw_rules)
1873 return ICE_ERR_PARAM;
1875 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1877 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1878 desc.params.sw_rules.num_rules_fltr_entry_index =
1879 CPU_TO_LE16(num_rules);
1880 return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1884 * ice_aq_add_recipe - add switch recipe
1885 * @hw: pointer to the HW struct
1886 * @s_recipe_list: pointer to switch rule population list
1887 * @num_recipes: number of switch recipes in the list
1888 * @cd: pointer to command details structure or NULL
1893 ice_aq_add_recipe(struct ice_hw *hw,
1894 struct ice_aqc_recipe_data_elem *s_recipe_list,
1895 u16 num_recipes, struct ice_sq_cd *cd)
1897 struct ice_aqc_add_get_recipe *cmd;
1898 struct ice_aq_desc desc;
1901 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1902 cmd = &desc.params.add_get_recipe;
1903 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1905 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
1906 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1908 buf_size = num_recipes * sizeof(*s_recipe_list);
1910 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1914 * ice_aq_get_recipe - get switch recipe
1915 * @hw: pointer to the HW struct
1916 * @s_recipe_list: pointer to switch rule population list
1917 * @num_recipes: pointer to the number of recipes (input and output)
1918 * @recipe_root: root recipe number of recipe(s) to retrieve
1919 * @cd: pointer to command details structure or NULL
1923 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1924 * On output, *num_recipes will equal the number of entries returned in
1927 * The caller must supply enough space in s_recipe_list to hold all possible
1928 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1931 ice_aq_get_recipe(struct ice_hw *hw,
1932 struct ice_aqc_recipe_data_elem *s_recipe_list,
1933 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1935 struct ice_aqc_add_get_recipe *cmd;
1936 struct ice_aq_desc desc;
1937 enum ice_status status;
1940 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1941 return ICE_ERR_PARAM;
1943 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1944 cmd = &desc.params.add_get_recipe;
1945 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1947 cmd->return_index = CPU_TO_LE16(recipe_root);
1948 cmd->num_sub_recipes = 0;
1950 buf_size = *num_recipes * sizeof(*s_recipe_list);
1952 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1953 /* cppcheck-suppress constArgument */
1954 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1960 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1961 * @hw: pointer to the HW struct
1962 * @profile_id: package profile ID to associate the recipe with
1963 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1964 * @cd: pointer to command details structure or NULL
1965 * Recipe to profile association (0x0291)
1968 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1969 struct ice_sq_cd *cd)
1971 struct ice_aqc_recipe_to_profile *cmd;
1972 struct ice_aq_desc desc;
1974 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1975 cmd = &desc.params.recipe_to_profile;
1976 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1977 cmd->profile_id = CPU_TO_LE16(profile_id);
1978 /* Set the recipe ID bit in the bitmask to let the device know which
1979 * profile we are associating the recipe to
1981 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1982 ICE_NONDMA_TO_NONDMA);
1984 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1988 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1989 * @hw: pointer to the HW struct
1990 * @profile_id: package profile ID to associate the recipe with
1991 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1992 * @cd: pointer to command details structure or NULL
1993 * Associate profile ID with given recipe (0x0293)
1996 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1997 struct ice_sq_cd *cd)
1999 struct ice_aqc_recipe_to_profile *cmd;
2000 struct ice_aq_desc desc;
2001 enum ice_status status;
2003 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2004 cmd = &desc.params.recipe_to_profile;
2005 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
2006 cmd->profile_id = CPU_TO_LE16(profile_id);
2008 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2010 ice_memcpy(r_bitmap, cmd->recipe_assoc,
2011 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
2017 * ice_alloc_recipe - add recipe resource
2018 * @hw: pointer to the hardware structure
2019 * @rid: recipe ID returned as response to AQ call
2021 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
2023 struct ice_aqc_alloc_free_res_elem *sw_buf;
2024 enum ice_status status;
2027 buf_len = sizeof(*sw_buf);
2028 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2030 return ICE_ERR_NO_MEMORY;
2032 sw_buf->num_elems = CPU_TO_LE16(1);
2033 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
2034 ICE_AQC_RES_TYPE_S) |
2035 ICE_AQC_RES_TYPE_FLAG_SHARED);
2036 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2037 ice_aqc_opc_alloc_res, NULL);
2039 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2040 ice_free(hw, sw_buf);
2045 /* ice_init_port_info - Initialize port_info with switch configuration data
2046 * @pi: pointer to port_info
2047 * @vsi_port_num: VSI number or port number
2048 * @type: Type of switch element (port or VSI)
2049 * @swid: switch ID of the switch the element is attached to
2050 * @pf_vf_num: PF or VF number
2051 * @is_vf: true if the element is a VF, false otherwise
2054 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2055 u16 swid, u16 pf_vf_num, bool is_vf)
2058 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2059 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2061 pi->pf_vf_num = pf_vf_num;
2063 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
2064 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
2067 ice_debug(pi->hw, ICE_DBG_SW,
2068 "incorrect VSI/port type received\n");
2073 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
2074 * @hw: pointer to the hardware structure
2076 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
2078 struct ice_aqc_get_sw_cfg_resp *rbuf;
2079 enum ice_status status;
2086 num_total_ports = 1;
2088 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
2089 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
2092 return ICE_ERR_NO_MEMORY;
2094 /* Multiple calls to ice_aq_get_sw_cfg may be required
2095 * to get all the switch configuration information. The need
2096 * for additional calls is indicated by ice_aq_get_sw_cfg
2097 * writing a non-zero value in req_desc
2100 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
2101 &req_desc, &num_elems, NULL);
2106 for (i = 0; i < num_elems; i++) {
2107 struct ice_aqc_get_sw_cfg_resp_elem *ele;
2108 u16 pf_vf_num, swid, vsi_port_num;
2112 ele = rbuf[i].elements;
2113 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
2114 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
2116 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
2117 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
2119 swid = LE16_TO_CPU(ele->swid);
2121 if (LE16_TO_CPU(ele->pf_vf_num) &
2122 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
2125 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
2126 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
2129 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2130 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
2131 if (j == num_total_ports) {
2132 ice_debug(hw, ICE_DBG_SW,
2133 "more ports than expected\n");
2134 status = ICE_ERR_CFG;
2137 ice_init_port_info(hw->port_info,
2138 vsi_port_num, res_type, swid,
2146 } while (req_desc && !status);
2149 ice_free(hw, (void *)rbuf);
2154 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2155 * @hw: pointer to the hardware structure
2156 * @fi: filter info structure to fill/update
2158 * This helper function populates the lb_en and lan_en elements of the provided
2159 * ice_fltr_info struct using the switch's type and characteristics of the
2160 * switch rule being configured.
2162 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
2166 if ((fi->flag & ICE_FLTR_TX) &&
2167 (fi->fltr_act == ICE_FWD_TO_VSI ||
2168 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2169 fi->fltr_act == ICE_FWD_TO_Q ||
2170 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2171 /* Setting LB for prune actions will result in replicated
2172 * packets to the internal switch that will be dropped.
2174 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2177 /* Set lan_en to TRUE if
2178 * 1. The switch is a VEB AND
2180 * 2.1 The lookup is a directional lookup like ethertype,
2181 * promiscuous, ethertype-MAC, promiscuous-VLAN
2182 * and default-port OR
2183 * 2.2 The lookup is VLAN, OR
2184 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2185 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
2189 * The switch is a VEPA.
2191 * In all other cases, the LAN enable has to be set to false.
2194 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2195 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
2196 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2197 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2198 fi->lkup_type == ICE_SW_LKUP_DFLT ||
2199 fi->lkup_type == ICE_SW_LKUP_VLAN ||
2200 (fi->lkup_type == ICE_SW_LKUP_MAC &&
2201 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
2202 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
2203 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
2212 * ice_fill_sw_rule - Helper function to fill switch rule structure
2213 * @hw: pointer to the hardware structure
2214 * @f_info: entry containing packet forwarding information
2215 * @s_rule: switch rule structure to be filled in based on mac_entry
2216 * @opc: switch rules population command type - pass in the command opcode
2219 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
2220 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
2222 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
2230 if (opc == ice_aqc_opc_remove_sw_rules) {
2231 s_rule->pdata.lkup_tx_rx.act = 0;
2232 s_rule->pdata.lkup_tx_rx.index =
2233 CPU_TO_LE16(f_info->fltr_rule_id);
2234 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
2238 eth_hdr_sz = sizeof(dummy_eth_header);
2239 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
2241 /* initialize the ether header with a dummy header */
2242 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
2243 ice_fill_sw_info(hw, f_info);
2245 switch (f_info->fltr_act) {
2246 case ICE_FWD_TO_VSI:
2247 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
2248 ICE_SINGLE_ACT_VSI_ID_M;
2249 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2250 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2251 ICE_SINGLE_ACT_VALID_BIT;
2253 case ICE_FWD_TO_VSI_LIST:
2254 act |= ICE_SINGLE_ACT_VSI_LIST;
2255 act |= (f_info->fwd_id.vsi_list_id <<
2256 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
2257 ICE_SINGLE_ACT_VSI_LIST_ID_M;
2258 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2259 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2260 ICE_SINGLE_ACT_VALID_BIT;
2263 act |= ICE_SINGLE_ACT_TO_Q;
2264 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2265 ICE_SINGLE_ACT_Q_INDEX_M;
2267 case ICE_DROP_PACKET:
2268 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
2269 ICE_SINGLE_ACT_VALID_BIT;
2271 case ICE_FWD_TO_QGRP:
2272 q_rgn = f_info->qgrp_size > 0 ?
2273 (u8)ice_ilog2(f_info->qgrp_size) : 0;
2274 act |= ICE_SINGLE_ACT_TO_Q;
2275 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2276 ICE_SINGLE_ACT_Q_INDEX_M;
2277 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
2278 ICE_SINGLE_ACT_Q_REGION_M;
2285 act |= ICE_SINGLE_ACT_LB_ENABLE;
2287 act |= ICE_SINGLE_ACT_LAN_ENABLE;
2289 switch (f_info->lkup_type) {
2290 case ICE_SW_LKUP_MAC:
2291 daddr = f_info->l_data.mac.mac_addr;
2293 case ICE_SW_LKUP_VLAN:
2294 vlan_id = f_info->l_data.vlan.vlan_id;
2295 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
2296 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
2297 act |= ICE_SINGLE_ACT_PRUNE;
2298 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
2301 case ICE_SW_LKUP_ETHERTYPE_MAC:
2302 daddr = f_info->l_data.ethertype_mac.mac_addr;
2304 case ICE_SW_LKUP_ETHERTYPE:
2305 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2306 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
2308 case ICE_SW_LKUP_MAC_VLAN:
2309 daddr = f_info->l_data.mac_vlan.mac_addr;
2310 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2312 case ICE_SW_LKUP_PROMISC_VLAN:
2313 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2315 case ICE_SW_LKUP_PROMISC:
2316 daddr = f_info->l_data.mac_vlan.mac_addr;
2322 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
2323 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
2324 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
2326 /* Recipe set depending on lookup type */
2327 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
2328 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
2329 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2332 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
2333 ICE_NONDMA_TO_NONDMA);
2335 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
2336 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
2337 *off = CPU_TO_BE16(vlan_id);
2340 /* Create the switch rule with the final dummy Ethernet header */
2341 if (opc != ice_aqc_opc_update_sw_rules)
2342 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
2346 * ice_add_marker_act
2347 * @hw: pointer to the hardware structure
2348 * @m_ent: the management entry for which sw marker needs to be added
2349 * @sw_marker: sw marker to tag the Rx descriptor with
2350 * @l_id: large action resource ID
2352 * Create a large action to hold software marker and update the switch rule
2353 * entry pointed by m_ent with newly created large action
2355 static enum ice_status
2356 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2357 u16 sw_marker, u16 l_id)
2359 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
2360 /* For software marker we need 3 large actions
2361 * 1. FWD action: FWD TO VSI or VSI LIST
2362 * 2. GENERIC VALUE action to hold the profile ID
2363 * 3. GENERIC VALUE action to hold the software marker ID
2365 const u16 num_lg_acts = 3;
2366 enum ice_status status;
2372 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2373 return ICE_ERR_PARAM;
2375 /* Create two back-to-back switch rules and submit them to the HW using
2376 * one memory buffer:
2380 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2381 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2382 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2384 return ICE_ERR_NO_MEMORY;
2386 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2388 /* Fill in the first switch rule i.e. large action */
2389 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2390 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2391 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2393 /* First action VSI forwarding or VSI list forwarding depending on how
2396 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2397 m_ent->fltr_info.fwd_id.hw_vsi_id;
2399 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2400 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2401 ICE_LG_ACT_VSI_LIST_ID_M;
2402 if (m_ent->vsi_count > 1)
2403 act |= ICE_LG_ACT_VSI_LIST;
2404 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2406 /* Second action descriptor type */
2407 act = ICE_LG_ACT_GENERIC;
2409 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2410 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2412 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2413 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2415 /* Third action Marker value */
2416 act |= ICE_LG_ACT_GENERIC;
2417 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2418 ICE_LG_ACT_GENERIC_VALUE_M;
2420 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
2422 /* call the fill switch rule to fill the lookup Tx Rx structure */
2423 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2424 ice_aqc_opc_update_sw_rules);
2426 /* Update the action to point to the large action ID */
2427 rx_tx->pdata.lkup_tx_rx.act =
2428 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
2429 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2430 ICE_SINGLE_ACT_PTR_VAL_M));
2432 /* Use the filter rule ID of the previously created rule with single
2433 * act. Once the update happens, hardware will treat this as large
2436 rx_tx->pdata.lkup_tx_rx.index =
2437 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
2439 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2440 ice_aqc_opc_update_sw_rules, NULL);
2442 m_ent->lg_act_idx = l_id;
2443 m_ent->sw_marker_id = sw_marker;
2446 ice_free(hw, lg_act);
2451 * ice_add_counter_act - add/update filter rule with counter action
2452 * @hw: pointer to the hardware structure
2453 * @m_ent: the management entry for which counter needs to be added
2454 * @counter_id: VLAN counter ID returned as part of allocate resource
2455 * @l_id: large action resource ID
2457 static enum ice_status
2458 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2459 u16 counter_id, u16 l_id)
2461 struct ice_aqc_sw_rules_elem *lg_act;
2462 struct ice_aqc_sw_rules_elem *rx_tx;
2463 enum ice_status status;
2464 /* 2 actions will be added while adding a large action counter */
2465 const int num_acts = 2;
2472 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2473 return ICE_ERR_PARAM;
2475 /* Create two back-to-back switch rules and submit them to the HW using
2476 * one memory buffer:
2480 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2481 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2482 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
2485 return ICE_ERR_NO_MEMORY;
2487 rx_tx = (struct ice_aqc_sw_rules_elem *)
2488 ((u8 *)lg_act + lg_act_size);
2490 /* Fill in the first switch rule i.e. large action */
2491 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2492 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2493 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2495 /* First action VSI forwarding or VSI list forwarding depending on how
2498 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2499 m_ent->fltr_info.fwd_id.hw_vsi_id;
2501 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2502 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2503 ICE_LG_ACT_VSI_LIST_ID_M;
2504 if (m_ent->vsi_count > 1)
2505 act |= ICE_LG_ACT_VSI_LIST;
2506 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2508 /* Second action counter ID */
2509 act = ICE_LG_ACT_STAT_COUNT;
2510 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2511 ICE_LG_ACT_STAT_COUNT_M;
2512 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2514 /* call the fill switch rule to fill the lookup Tx Rx structure */
2515 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2516 ice_aqc_opc_update_sw_rules);
2518 act = ICE_SINGLE_ACT_PTR;
2519 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2520 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2522 /* Use the filter rule ID of the previously created rule with single
2523 * act. Once the update happens, hardware will treat this as large
2526 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2527 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
2529 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2530 ice_aqc_opc_update_sw_rules, NULL);
2532 m_ent->lg_act_idx = l_id;
2533 m_ent->counter_index = counter_id;
2536 ice_free(hw, lg_act);
2541 * ice_create_vsi_list_map
2542 * @hw: pointer to the hardware structure
2543 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2544 * @num_vsi: number of VSI handles in the array
2545 * @vsi_list_id: VSI list ID generated as part of allocate resource
2547 * Helper function to create a new entry of VSI list ID to VSI mapping
2548 * using the given VSI list ID
2550 static struct ice_vsi_list_map_info *
2551 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2554 struct ice_switch_info *sw = hw->switch_info;
2555 struct ice_vsi_list_map_info *v_map;
2558 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2563 v_map->vsi_list_id = vsi_list_id;
2565 for (i = 0; i < num_vsi; i++)
2566 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
2568 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2573 * ice_update_vsi_list_rule
2574 * @hw: pointer to the hardware structure
2575 * @vsi_handle_arr: array of VSI handles to form a VSI list
2576 * @num_vsi: number of VSI handles in the array
2577 * @vsi_list_id: VSI list ID generated as part of allocate resource
2578 * @remove: Boolean value to indicate if this is a remove action
2579 * @opc: switch rules population command type - pass in the command opcode
2580 * @lkup_type: lookup type of the filter
2582 * Call AQ command to add a new switch rule or update existing switch rule
2583 * using the given VSI list ID
2585 static enum ice_status
2586 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2587 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2588 enum ice_sw_lkup_type lkup_type)
2590 struct ice_aqc_sw_rules_elem *s_rule;
2591 enum ice_status status;
2597 return ICE_ERR_PARAM;
2599 if (lkup_type == ICE_SW_LKUP_MAC ||
2600 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2601 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2602 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2603 lkup_type == ICE_SW_LKUP_PROMISC ||
2604 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2605 lkup_type == ICE_SW_LKUP_LAST)
2606 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2607 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2608 else if (lkup_type == ICE_SW_LKUP_VLAN)
2609 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2610 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2612 return ICE_ERR_PARAM;
2614 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2615 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2617 return ICE_ERR_NO_MEMORY;
2618 for (i = 0; i < num_vsi; i++) {
2619 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2620 status = ICE_ERR_PARAM;
2623 /* AQ call requires hw_vsi_id(s) */
2624 s_rule->pdata.vsi_list.vsi[i] =
2625 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2628 s_rule->type = CPU_TO_LE16(rule_type);
2629 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2630 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2632 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2635 ice_free(hw, s_rule);
2640 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2641 * @hw: pointer to the HW struct
2642 * @vsi_handle_arr: array of VSI handles to form a VSI list
2643 * @num_vsi: number of VSI handles in the array
2644 * @vsi_list_id: stores the ID of the VSI list to be created
2645 * @lkup_type: switch rule filter's lookup type
2647 static enum ice_status
2648 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2649 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2651 enum ice_status status;
2653 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2654 ice_aqc_opc_alloc_res);
2658 /* Update the newly created VSI list to include the specified VSIs */
2659 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2660 *vsi_list_id, false,
2661 ice_aqc_opc_add_sw_rules, lkup_type);
2665 * ice_create_pkt_fwd_rule
2666 * @hw: pointer to the hardware structure
2667 * @recp_list: corresponding filter management list
2668 * @f_entry: entry containing packet forwarding information
2670 * Create switch rule with given filter information and add an entry
2671 * to the corresponding filter management list to track this switch rule
2674 static enum ice_status
2675 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2676 struct ice_fltr_list_entry *f_entry)
2678 struct ice_fltr_mgmt_list_entry *fm_entry;
2679 struct ice_aqc_sw_rules_elem *s_rule;
2680 enum ice_status status;
2682 s_rule = (struct ice_aqc_sw_rules_elem *)
2683 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2685 return ICE_ERR_NO_MEMORY;
2686 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2687 ice_malloc(hw, sizeof(*fm_entry));
2689 status = ICE_ERR_NO_MEMORY;
2690 goto ice_create_pkt_fwd_rule_exit;
2693 fm_entry->fltr_info = f_entry->fltr_info;
2695 /* Initialize all the fields for the management entry */
2696 fm_entry->vsi_count = 1;
2697 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2698 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2699 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2701 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2702 ice_aqc_opc_add_sw_rules);
2704 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2705 ice_aqc_opc_add_sw_rules, NULL);
2707 ice_free(hw, fm_entry);
2708 goto ice_create_pkt_fwd_rule_exit;
2711 f_entry->fltr_info.fltr_rule_id =
2712 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2713 fm_entry->fltr_info.fltr_rule_id =
2714 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2716 /* The book keeping entries will get removed when base driver
2717 * calls remove filter AQ command
2719 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
2721 ice_create_pkt_fwd_rule_exit:
2722 ice_free(hw, s_rule);
2727 * ice_update_pkt_fwd_rule
2728 * @hw: pointer to the hardware structure
2729 * @f_info: filter information for switch rule
2731 * Call AQ command to update a previously created switch rule with a
 * larger action
2734 static enum ice_status
2735 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2737 struct ice_aqc_sw_rules_elem *s_rule;
2738 enum ice_status status;
2740 s_rule = (struct ice_aqc_sw_rules_elem *)
2741 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2743 return ICE_ERR_NO_MEMORY;
2745 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
2747 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2749 /* Update switch rule with new rule set to forward VSI list */
2750 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2751 ice_aqc_opc_update_sw_rules, NULL);
2753 ice_free(hw, s_rule);
2758 * ice_update_sw_rule_bridge_mode
2759 * @hw: pointer to the HW struct
2761 * Updates unicast switch filter rules based on VEB/VEPA mode
2763 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2765 struct ice_switch_info *sw = hw->switch_info;
2766 struct ice_fltr_mgmt_list_entry *fm_entry;
2767 enum ice_status status = ICE_SUCCESS;
2768 struct LIST_HEAD_TYPE *rule_head;
2769 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
/* Only MAC lookup rules are walked; VEB/VEPA affects unicast forwarding. */
2771 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2772 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2774 ice_acquire_lock(rule_lock);
2775 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2777 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2778 u8 *addr = fi->l_data.mac.mac_addr;
2780 /* Update unicast Tx rules to reflect the selected
/* Re-program only Tx unicast rules that forward somewhere (VSI,
 * VSI list, queue, or queue group).
 */
2783 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2784 (fi->fltr_act == ICE_FWD_TO_VSI ||
2785 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2786 fi->fltr_act == ICE_FWD_TO_Q ||
2787 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2788 status = ice_update_pkt_fwd_rule(hw, fi);
2794 ice_release_lock(rule_lock);
2800 * ice_add_update_vsi_list
2801 * @hw: pointer to the hardware structure
2802 * @m_entry: pointer to current filter management list entry
2803 * @cur_fltr: filter information from the book keeping entry
2804 * @new_fltr: filter information with the new VSI to be added
2806 * Call AQ command to add or update previously created VSI list with new VSI.
2808 * Helper function to do book keeping associated with adding filter information
2809 * The algorithm to do the book keeping is described below :
2810 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2811 * if only one VSI has been added till now
2812 * Allocate a new VSI list and add two VSIs
2813 * to this list using switch rule command
2814 * Update the previously created switch rule with the
2815 * newly created VSI list ID
2816 * if a VSI list was previously created
2817 * Add the new VSI to the previously created VSI list set
2818 * using the update switch rule command
2820 static enum ice_status
2821 ice_add_update_vsi_list(struct ice_hw *hw,
2822 struct ice_fltr_mgmt_list_entry *m_entry,
2823 struct ice_fltr_info *cur_fltr,
2824 struct ice_fltr_info *new_fltr)
2826 enum ice_status status = ICE_SUCCESS;
2827 u16 vsi_list_id = 0;
2829 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2830 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2831 return ICE_ERR_NOT_IMPL;
2833 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2834 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2835 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2836 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2837 return ICE_ERR_NOT_IMPL;
2839 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2840 /* Only one entry existed in the mapping and it was not already
2841 * a part of a VSI list. So, create a VSI list with the old and
2844 struct ice_fltr_info tmp_fltr;
2845 u16 vsi_handle_arr[2];
2847 /* A rule already exists with the new VSI being added */
2848 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2849 return ICE_ERR_ALREADY_EXISTS;
2851 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2852 vsi_handle_arr[1] = new_fltr->vsi_handle;
2853 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2855 new_fltr->lkup_type);
2859 tmp_fltr = *new_fltr;
2860 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2861 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2862 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2863 /* Update the previous switch rule of "MAC forward to VSI" to
2864 * "MAC fwd to VSI list"
2866 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
2870 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2871 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2872 m_entry->vsi_list_info =
2873 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2876 /* If this entry was large action then the large action needs
2877 * to be updated to point to FWD to VSI list
2879 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2881 ice_add_marker_act(hw, m_entry,
2882 m_entry->sw_marker_id,
2883 m_entry->lg_act_idx);
2885 u16 vsi_handle = new_fltr->vsi_handle;
2886 enum ice_adminq_opc opcode;
2888 if (!m_entry->vsi_list_info)
2891 /* A rule already exists with the new VSI being added */
2892 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
2895 /* Update the previously created VSI list set with
2896 * the new VSI ID passed in
2898 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2899 opcode = ice_aqc_opc_update_sw_rules;
2901 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2902 vsi_list_id, false, opcode,
2903 new_fltr->lkup_type);
2904 /* update VSI list mapping info with new VSI ID */
2906 ice_set_bit(vsi_handle,
2907 m_entry->vsi_list_info->vsi_map);
2910 m_entry->vsi_count++;
2915 * ice_find_rule_entry - Search a rule entry
2916 * @list_head: head of rule list
2917 * @f_info: rule information
2919 * Helper function to search for a given rule entry
2920 * Returns pointer to entry storing the rule if found
2922 static struct ice_fltr_mgmt_list_entry *
2923 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
2924 struct ice_fltr_info *f_info)
2926 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2928 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
2930 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2931 sizeof(f_info->l_data)) &&
2932 f_info->flag == list_itr->fltr_info.flag) {
2941 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2942 * @recp_list: VSI lists needs to be searched
2943 * @vsi_handle: VSI handle to be found in VSI list
2944 * @vsi_list_id: VSI list ID found containing vsi_handle
2946 * Helper function to search a VSI list with single entry containing given VSI
2947 * handle element. This can be extended further to search VSI list with more
2948 * than 1 vsi_count. Returns pointer to VSI list entry if found.
2950 static struct ice_vsi_list_map_info *
2951 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
2954 struct ice_vsi_list_map_info *map_info = NULL;
2955 struct LIST_HEAD_TYPE *list_head;
2957 list_head = &recp_list->filt_rules;
2958 if (recp_list->adv_rule) {
2959 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2961 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2962 ice_adv_fltr_mgmt_list_entry,
2964 if (list_itr->vsi_list_info) {
2965 map_info = list_itr->vsi_list_info;
2966 if (ice_is_bit_set(map_info->vsi_map,
2968 *vsi_list_id = map_info->vsi_list_id;
2974 struct ice_fltr_mgmt_list_entry *list_itr;
2976 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2977 ice_fltr_mgmt_list_entry,
2979 if (list_itr->vsi_count == 1 &&
2980 list_itr->vsi_list_info) {
2981 map_info = list_itr->vsi_list_info;
2982 if (ice_is_bit_set(map_info->vsi_map,
2984 *vsi_list_id = map_info->vsi_list_id;
2994 * ice_add_rule_internal - add rule for a given lookup type
2995 * @hw: pointer to the hardware structure
2996 * @recp_list: recipe list for which rule has to be added
2997 * @lport: logic port number on which function add rule
2998 * @f_entry: structure containing MAC forwarding information
3000 * Adds or updates the rule lists for a given recipe
3002 static enum ice_status
3003 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3004 u8 lport, struct ice_fltr_list_entry *f_entry)
3006 struct ice_fltr_info *new_fltr, *cur_fltr;
3007 struct ice_fltr_mgmt_list_entry *m_entry;
3008 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3009 enum ice_status status = ICE_SUCCESS;
3011 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3012 return ICE_ERR_PARAM;
3014 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
3015 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
3016 f_entry->fltr_info.fwd_id.hw_vsi_id =
3017 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3019 rule_lock = &recp_list->filt_rule_lock;
3021 ice_acquire_lock(rule_lock);
3022 new_fltr = &f_entry->fltr_info;
3023 if (new_fltr->flag & ICE_FLTR_RX)
3024 new_fltr->src = lport;
3025 else if (new_fltr->flag & ICE_FLTR_TX)
3027 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3029 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
3031 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3032 goto exit_add_rule_internal;
3035 cur_fltr = &m_entry->fltr_info;
3036 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
3038 exit_add_rule_internal:
3039 ice_release_lock(rule_lock);
3044 * ice_remove_vsi_list_rule
3045 * @hw: pointer to the hardware structure
3046 * @vsi_list_id: VSI list ID generated as part of allocate resource
3047 * @lkup_type: switch rule filter lookup type
3049 * The VSI list should be emptied before this function is called to remove the
 * VSI list.
3052 static enum ice_status
3053 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
3054 enum ice_sw_lkup_type lkup_type)
3056 struct ice_aqc_sw_rules_elem *s_rule;
3057 enum ice_status status;
3060 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
3061 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3063 return ICE_ERR_NO_MEMORY;
3065 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
3066 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
3068 /* Free the vsi_list resource that we allocated. It is assumed that the
3069 * list is empty at this point.
3071 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
3072 ice_aqc_opc_free_res);
3074 ice_free(hw, s_rule);
3079 * ice_rem_update_vsi_list
3080 * @hw: pointer to the hardware structure
3081 * @vsi_handle: VSI handle of the VSI to remove
3082 * @fm_list: filter management entry for which the VSI list management needs to
3085 static enum ice_status
3086 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
3087 struct ice_fltr_mgmt_list_entry *fm_list)
3089 enum ice_sw_lkup_type lkup_type;
3090 enum ice_status status = ICE_SUCCESS;
3093 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
3094 fm_list->vsi_count == 0)
3095 return ICE_ERR_PARAM;
3097 /* A rule with the VSI being removed does not exist */
3098 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
3099 return ICE_ERR_DOES_NOT_EXIST;
3101 lkup_type = fm_list->fltr_info.lkup_type;
3102 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
3103 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
3104 ice_aqc_opc_update_sw_rules,
3109 fm_list->vsi_count--;
3110 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
3112 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
3113 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
3114 struct ice_vsi_list_map_info *vsi_list_info =
3115 fm_list->vsi_list_info;
3118 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
3120 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
3121 return ICE_ERR_OUT_OF_RANGE;
3123 /* Make sure VSI list is empty before removing it below */
3124 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
3126 ice_aqc_opc_update_sw_rules,
3131 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
3132 tmp_fltr_info.fwd_id.hw_vsi_id =
3133 ice_get_hw_vsi_num(hw, rem_vsi_handle);
3134 tmp_fltr_info.vsi_handle = rem_vsi_handle;
3135 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
3137 ice_debug(hw, ICE_DBG_SW,
3138 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
3139 tmp_fltr_info.fwd_id.hw_vsi_id, status);
3143 fm_list->fltr_info = tmp_fltr_info;
3146 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
3147 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
3148 struct ice_vsi_list_map_info *vsi_list_info =
3149 fm_list->vsi_list_info;
3151 /* Remove the VSI list since it is no longer used */
3152 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
3154 ice_debug(hw, ICE_DBG_SW,
3155 "Failed to remove VSI list %d, error %d\n",
3156 vsi_list_id, status);
3160 LIST_DEL(&vsi_list_info->list_entry);
3161 ice_free(hw, vsi_list_info);
3162 fm_list->vsi_list_info = NULL;
3169 * ice_remove_rule_internal - Remove a filter rule of a given type
3171 * @hw: pointer to the hardware structure
3172 * @recp_list: recipe list for which the rule needs to removed
3173 * @f_entry: rule entry containing filter information
3175 static enum ice_status
3176 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3177 struct ice_fltr_list_entry *f_entry)
3179 struct ice_fltr_mgmt_list_entry *list_elem;
3180 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3181 enum ice_status status = ICE_SUCCESS;
3182 bool remove_rule = false;
3185 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3186 return ICE_ERR_PARAM;
3187 f_entry->fltr_info.fwd_id.hw_vsi_id =
3188 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3190 rule_lock = &recp_list->filt_rule_lock;
3191 ice_acquire_lock(rule_lock);
3192 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
3193 &f_entry->fltr_info);
3195 status = ICE_ERR_DOES_NOT_EXIST;
3199 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
3201 } else if (!list_elem->vsi_list_info) {
3202 status = ICE_ERR_DOES_NOT_EXIST;
3204 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
3205 /* a ref_cnt > 1 indicates that the vsi_list is being
3206 * shared by multiple rules. Decrement the ref_cnt and
3207 * remove this rule, but do not modify the list, as it
3208 * is in-use by other rules.
3210 list_elem->vsi_list_info->ref_cnt--;
3213 /* a ref_cnt of 1 indicates the vsi_list is only used
3214 * by one rule. However, the original removal request is only
3215 * for a single VSI. Update the vsi_list first, and only
3216 * remove the rule if there are no further VSIs in this list.
3218 vsi_handle = f_entry->fltr_info.vsi_handle;
3219 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
3222 /* if VSI count goes to zero after updating the VSI list */
3223 if (list_elem->vsi_count == 0)
3228 /* Remove the lookup rule */
3229 struct ice_aqc_sw_rules_elem *s_rule;
3231 s_rule = (struct ice_aqc_sw_rules_elem *)
3232 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
3234 status = ICE_ERR_NO_MEMORY;
3238 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3239 ice_aqc_opc_remove_sw_rules);
3241 status = ice_aq_sw_rules(hw, s_rule,
3242 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
3243 ice_aqc_opc_remove_sw_rules, NULL);
3245 /* Remove a book keeping from the list */
3246 ice_free(hw, s_rule);
3251 LIST_DEL(&list_elem->list_entry);
3252 ice_free(hw, list_elem);
3255 ice_release_lock(rule_lock);
3260 * ice_aq_get_res_alloc - get allocated resources
3261 * @hw: pointer to the HW struct
3262 * @num_entries: pointer to u16 to store the number of resource entries returned
3263 * @buf: pointer to user-supplied buffer
3264 * @buf_size: size of buff
3265 * @cd: pointer to command details structure or NULL
3267 * The user-supplied buffer must be large enough to store the resource
3268 * information for all resource types. Each resource type is an
3269 * ice_aqc_get_res_resp_data_elem structure.
3272 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
3273 u16 buf_size, struct ice_sq_cd *cd)
3275 struct ice_aqc_get_res_alloc *resp;
3276 enum ice_status status;
3277 struct ice_aq_desc desc;
3280 return ICE_ERR_BAD_PTR;
/* Buffer must hold response data for every resource type. */
3282 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
3283 return ICE_ERR_INVAL_SIZE;
3285 resp = &desc.params.get_res;
3287 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
3288 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* num_entries is optional; report the count only when requested. */
3290 if (!status && num_entries)
3291 *num_entries = LE16_TO_CPU(resp->resp_elem_num)
3297 * ice_aq_get_res_descs - get allocated resource descriptors
3298 * @hw: pointer to the hardware structure
3299 * @num_entries: number of resource entries in buffer
3300 * @buf: Indirect buffer to hold data parameters and response
3301 * @buf_size: size of buffer for indirect commands
3302 * @res_type: resource type
3303 * @res_shared: is resource shared
3304 * @desc_id: input - first desc ID to start; output - next desc ID
3305 * @cd: pointer to command details structure or NULL
3308 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
3309 struct ice_aqc_get_allocd_res_desc_resp *buf,
3310 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
3311 struct ice_sq_cd *cd)
3313 struct ice_aqc_get_allocd_res_desc *cmd;
3314 struct ice_aq_desc desc;
3315 enum ice_status status;
3317 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3319 cmd = &desc.params.get_res_desc;
3322 return ICE_ERR_PARAM;
/* Buffer must be exactly num_entries response elements. */
3324 if (buf_size != (num_entries * sizeof(*buf)))
3325 return ICE_ERR_PARAM;
3327 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
/* Encode resource type plus the shared/dedicated flag in one field. */
3329 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
3330 ICE_AQC_RES_TYPE_M) | (res_shared ?
3331 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
3332 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
3334 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* Hand back the continuation cursor for the next paged query. */
3336 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc)
3342 * ice_add_mac_rule - Add a MAC address based filter rule
3343 * @hw: pointer to the hardware structure
3344 * @m_list: list of MAC addresses and forwarding information
3345 * @sw: pointer to switch info struct for which function add rule
3346 * @lport: logic port number on which function add rule
3348 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
3349 * multiple unicast addresses, the function assumes that all the
3350 * addresses are unique in a given add_mac call. It doesn't
3351 * check for duplicates in this case, removing duplicates from a given
3352 * list should be taken care of in the caller of this function.
/* Add MAC-based switch filter rules on a logical port.
 * Multicast entries (and unicast when hw->ucast_shared) are programmed one
 * at a time via ice_add_rule_internal(); exclusive-unicast entries are
 * batched into one buffer and pushed with bulk ice_aq_sw_rules() calls.
 * NOTE(review): this is a sampled listing — loop braces, the num_unicast
 * accounting, the !s_rule check, and the elem_sent declaration are not
 * visible here; comments below describe only what the visible lines do.
 */
3354 static enum ice_status
3355 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
3356 struct ice_switch_info *sw, u8 lport)
3358 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
3359 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3360 struct ice_fltr_list_entry *m_list_itr;
3361 struct LIST_HEAD_TYPE *rule_head;
3362 u16 total_elem_left, s_rule_size;
3363 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3364 enum ice_status status = ICE_SUCCESS;
3365 u16 num_unicast = 0;
3369 rule_lock = &recp_list->filt_rule_lock;
3370 rule_head = &recp_list->filt_rules;
/* Pass 1: validate every entry and handle the non-bulk (multicast /
 * shared-unicast) cases immediately.
 */
3372 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3374 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3378 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3379 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3380 if (!ice_is_vsi_valid(hw, vsi_handle))
3381 return ICE_ERR_PARAM;
3382 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3383 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3384 /* update the src in case it is VSI num */
3385 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3386 return ICE_ERR_PARAM;
3387 m_list_itr->fltr_info.src = hw_vsi_id;
3388 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3389 IS_ZERO_ETHER_ADDR(add))
3390 return ICE_ERR_PARAM;
3391 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3392 /* Don't overwrite the unicast address */
3393 ice_acquire_lock(rule_lock);
3394 if (ice_find_rule_entry(rule_head,
3395 &m_list_itr->fltr_info)) {
3396 ice_release_lock(rule_lock);
3397 return ICE_ERR_ALREADY_EXISTS;
3399 ice_release_lock(rule_lock);
3401 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3402 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3403 m_list_itr->status =
3404 ice_add_rule_internal(hw, recp_list, lport,
3406 if (m_list_itr->status)
3407 return m_list_itr->status;
/* rule_lock is held from here until the exit label so the bulk add and the
 * bookkeeping-list insertions are atomic w.r.t. other rule updates.
 */
3411 ice_acquire_lock(rule_lock);
3412 /* Exit if no suitable entries were found for adding bulk switch rule */
3414 status = ICE_SUCCESS;
3415 goto ice_add_mac_exit;
3418 /* Allocate switch rule buffer for the bulk update for unicast */
3419 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3420 s_rule = (struct ice_aqc_sw_rules_elem *)
3421 ice_calloc(hw, num_unicast, s_rule_size);
3423 status = ICE_ERR_NO_MEMORY;
3424 goto ice_add_mac_exit;
/* Pass 2: serialize one rule element per exclusive-unicast entry into the
 * contiguous s_rule buffer (r_iter walks it in s_rule_size steps).
 */
3428 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3430 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3431 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3433 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3434 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3435 ice_aqc_opc_add_sw_rules);
3436 r_iter = (struct ice_aqc_sw_rules_elem *)
3437 ((u8 *)r_iter + s_rule_size);
3441 /* Call AQ bulk switch rule update for all unicast addresses */
3443 /* Call AQ switch rule in AQ_MAX chunk */
/* NOTE(review): elem_sent is computed with MIN_T(u8, ...) while
 * total_elem_left is u16 — if ICE_AQ_MAX_BUF_LEN / s_rule_size ever
 * exceeds 255 this truncates; confirm against current upstream which
 * uses a wider type here.
 */
3444 for (total_elem_left = num_unicast; total_elem_left > 0;
3445 total_elem_left -= elem_sent) {
3446 struct ice_aqc_sw_rules_elem *entry = r_iter;
3448 elem_sent = MIN_T(u8, total_elem_left,
3449 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
3450 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3451 elem_sent, ice_aqc_opc_add_sw_rules,
3454 goto ice_add_mac_exit;
3455 r_iter = (struct ice_aqc_sw_rules_elem *)
3456 ((u8 *)r_iter + (elem_sent * s_rule_size));
3459 /* Fill up rule ID based on the value returned from FW */
/* Pass 3: re-walk the list in the same order as pass 2 (r_iter is
 * presumably reset to s_rule in an elided line) to harvest the FW-assigned
 * rule indices and create one bookkeeping entry per unicast rule.
 */
3461 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3463 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3464 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3465 struct ice_fltr_mgmt_list_entry *fm_entry;
3467 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3468 f_info->fltr_rule_id =
3469 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3470 f_info->fltr_act = ICE_FWD_TO_VSI;
3471 /* Create an entry to track this MAC address */
3472 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3473 ice_malloc(hw, sizeof(*fm_entry));
3475 status = ICE_ERR_NO_MEMORY;
3476 goto ice_add_mac_exit;
3478 fm_entry->fltr_info = *f_info;
3479 fm_entry->vsi_count = 1;
3480 /* The book keeping entries will get removed when
3481 * base driver calls remove filter AQ command
3484 LIST_ADD(&fm_entry->list_entry, rule_head);
3485 r_iter = (struct ice_aqc_sw_rules_elem *)
3486 ((u8 *)r_iter + s_rule_size);
/* Exit path (label elided): drop the lock and free the bulk buffer. */
3491 ice_release_lock(rule_lock);
3493 ice_free(hw, s_rule);
3498 * ice_add_mac - Add a MAC address based filter rule
3499 * @hw: pointer to the hardware structure
3500 * @m_list: list of MAC addresses and forwarding information
3502 * Function add MAC rule for logical port from HW struct
/* Public wrapper: add MAC rules on this HW's own switch info / lport.
 * NOTE(review): the NULL-argument guard condition preceding the
 * ICE_ERR_PARAM return is elided in this excerpt.
 */
3505 ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3508 return ICE_ERR_PARAM;
3510 return ice_add_mac_rule(hw, m_list, hw->switch_info,
3511 hw->port_info->lport);
3515 * ice_add_vlan_internal - Add one VLAN based filter rule
3516 * @hw: pointer to the hardware structure
3517 * @recp_list: recipe list for which rule has to be added
3518 * @f_entry: filter entry containing one VLAN information
/* Add one VLAN filter rule, managing the VSI list it forwards to.
 * Three cases: (1) no existing rule -> create (possibly reusing an existing
 * single-VSI list); (2) rule exists and its VSI list has ref_cnt == 1 ->
 * add VSI to that list in place; (3) rule exists but the list is shared ->
 * clone into a new two-VSI list and repoint the rule.
 * NOTE(review): sampled listing — several braces/guards are elided.
 */
3520 static enum ice_status
3521 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3522 struct ice_fltr_list_entry *f_entry)
3524 struct ice_fltr_mgmt_list_entry *v_list_itr;
3525 struct ice_fltr_info *new_fltr, *cur_fltr;
3526 enum ice_sw_lkup_type lkup_type;
3527 u16 vsi_list_id = 0, vsi_handle;
3528 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3529 enum ice_status status = ICE_SUCCESS;
3531 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3532 return ICE_ERR_PARAM;
3534 f_entry->fltr_info.fwd_id.hw_vsi_id =
3535 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3536 new_fltr = &f_entry->fltr_info;
3538 /* VLAN ID should only be 12 bits */
3539 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3540 return ICE_ERR_PARAM;
3542 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3543 return ICE_ERR_PARAM;
3545 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3546 lkup_type = new_fltr->lkup_type;
3547 vsi_handle = new_fltr->vsi_handle;
3548 rule_lock = &recp_list->filt_rule_lock;
3549 ice_acquire_lock(rule_lock);
3550 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
/* Case 1: no rule for this VLAN yet (the "if (!v_list_itr)" guard
 * is elided here).
 */
3552 struct ice_vsi_list_map_info *map_info = NULL;
3554 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3555 /* All VLAN pruning rules use a VSI list. Check if
3556 * there is already a VSI list containing VSI that we
3557 * want to add. If found, use the same vsi_list_id for
3558 * this new VLAN rule or else create a new list.
3560 map_info = ice_find_vsi_list_entry(recp_list,
3564 status = ice_create_vsi_list_rule(hw,
3572 /* Convert the action to forwarding to a VSI list. */
3573 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3574 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3577 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3579 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
3582 status = ICE_ERR_DOES_NOT_EXIST;
3585 /* reuse VSI list for new rule and increment ref_cnt */
3587 v_list_itr->vsi_list_info = map_info;
3588 map_info->ref_cnt++;
3590 v_list_itr->vsi_list_info =
3591 ice_create_vsi_list_map(hw, &vsi_handle,
/* Case 2: rule exists and its VSI list is not shared — append in place. */
3595 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3596 /* Update existing VSI list to add new VSI ID only if it used
3599 cur_fltr = &v_list_itr->fltr_info;
3600 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3603 /* If VLAN rule exists and VSI list being used by this rule is
3604 * referenced by more than 1 VLAN rule. Then create a new VSI
3605 * list appending previous VSI with new VSI and update existing
3606 * VLAN rule to point to new VSI list ID
3608 struct ice_fltr_info tmp_fltr;
3609 u16 vsi_handle_arr[2];
3612 /* Current implementation only supports reusing VSI list with
3613 * one VSI count. We should never hit below condition
3615 if (v_list_itr->vsi_count > 1 &&
3616 v_list_itr->vsi_list_info->ref_cnt > 1) {
3617 ice_debug(hw, ICE_DBG_SW,
3618 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3619 status = ICE_ERR_CFG;
/* cur_handle is the single VSI already on the shared list (its
 * declaration/assignment line is partially elided).
 */
3624 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3627 /* A rule already exists with the new VSI being added */
3628 if (cur_handle == vsi_handle) {
3629 status = ICE_ERR_ALREADY_EXISTS;
3633 vsi_handle_arr[0] = cur_handle;
3634 vsi_handle_arr[1] = vsi_handle;
3635 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3636 &vsi_list_id, lkup_type);
3640 tmp_fltr = v_list_itr->fltr_info;
3641 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3642 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3643 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3644 /* Update the previous switch rule to a new VSI list which
3645 * includes current VSI that is requested
3647 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3651 /* before overriding VSI list map info. decrement ref_cnt of
3654 v_list_itr->vsi_list_info->ref_cnt--;
3656 /* now update to newly created list */
3657 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3658 v_list_itr->vsi_list_info =
3659 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3661 v_list_itr->vsi_count++;
/* Single unlock on the common exit path (label elided). */
3665 ice_release_lock(rule_lock);
3670 * ice_add_vlan_rule - Add VLAN based filter rule
3671 * @hw: pointer to the hardware structure
3672 * @v_list: list of VLAN entries and forwarding information
3673 * @sw: pointer to switch info struct for which function add rule
/* Iterate a caller-supplied VLAN filter list, adding each entry via
 * ice_add_vlan_internal(); stops at (and returns) the first failure.
 * Per-entry status is also recorded in v_list_itr->status.
 */
3675 static enum ice_status
3676 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
3677 struct ice_switch_info *sw)
3679 struct ice_fltr_list_entry *v_list_itr;
3680 struct ice_sw_recipe *recp_list;
3682 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
3683 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
3685 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3686 return ICE_ERR_PARAM;
3687 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3688 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
3690 if (v_list_itr->status)
3691 return v_list_itr->status;
3697 * ice_add_vlan - Add a VLAN based filter rule
3698 * @hw: pointer to the hardware structure
3699 * @v_list: list of VLAN and forwarding information
3701 * Function add VLAN rule for logical port from HW struct
/* Public wrapper: add VLAN rules against this HW's switch info.
 * NOTE(review): the NULL-argument guard condition preceding the
 * ICE_ERR_PARAM return is elided in this excerpt.
 */
3704 ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3707 return ICE_ERR_PARAM;
3709 return ice_add_vlan_rule(hw, v_list, hw->switch_info)...
3713 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3714 * @hw: pointer to the hardware structure
3715 * @mv_list: list of MAC and VLAN filters
3716 * @sw: pointer to switch info struct for which function add rule
3717 * @lport: logic port number on which function add rule
3719 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3720 * pruning bits enabled, then it is the responsibility of the caller to make
3721 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3722 * VLAN won't be received on that VSI otherwise.
/* Add MAC+VLAN pair filter rules; each entry is validated to be of
 * lookup type ICE_SW_LKUP_MAC_VLAN, forced to the Tx direction, and
 * programmed via ice_add_rule_internal(). First failure aborts the walk.
 */
3724 static enum ice_status
3725 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
3726 struct ice_switch_info *sw, u8 lport)
3728 struct ice_fltr_list_entry *mv_list_itr;
3729 struct ice_sw_recipe *recp_list;
3731 if (!mv_list || !hw)
3732 return ICE_ERR_PARAM;
3734 recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
3735 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3737 enum ice_sw_lkup_type l_type =
3738 mv_list_itr->fltr_info.lkup_type;
3740 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3741 return ICE_ERR_PARAM;
3742 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3743 mv_list_itr->status =
3744 ice_add_rule_internal(hw, recp_list, lport,
3746 if (mv_list_itr->status)
3747 return mv_list_itr->status;
3753 * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
3754 * @hw: pointer to the hardware structure
3755 * @mv_list: list of MAC VLAN addresses and forwarding information
3757 * Function add MAC VLAN rule for logical port from HW struct
/* Public wrapper: add MAC+VLAN rules on this HW's own switch info / lport. */
3760 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3762 if (!mv_list || !hw)
3763 return ICE_ERR_PARAM;
3765 return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
3766 hw->port_info->lport);
3770 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
3771 * @hw: pointer to the hardware structure
3772 * @em_list: list of ether type MAC filter, MAC is optional
3773 * @sw: pointer to switch info struct for which function add rule
3774 * @lport: logic port number on which function add rule
3776 * This function requires the caller to populate the entries in
3777 * the filter list with the necessary fields (including flags to
3778 * indicate Tx or Rx rules).
/* Add ethertype (optionally ethertype+MAC) filter rules. Unlike the
 * MAC/VLAN adders, the caller pre-populates direction flags, so this
 * only validates the lookup type before handing each entry to
 * ice_add_rule_internal(). First failure aborts the walk.
 */
3780 static enum ice_status
3781 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
3782 struct ice_switch_info *sw, u8 lport)
3784 struct ice_fltr_list_entry *em_list_itr;
3786 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3788 struct ice_sw_recipe *recp_list;
3789 enum ice_sw_lkup_type l_type;
3791 l_type = em_list_itr->fltr_info.lkup_type;
/* recp_list is indexed by the entry's own lookup type before the
 * type is validated; the index is only used after the check below.
 */
3792 recp_list = &sw->recp_list[l_type];
3794 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3795 l_type != ICE_SW_LKUP_ETHERTYPE)
3796 return ICE_ERR_PARAM;
3798 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
3801 if (em_list_itr->status)
3802 return em_list_itr->status;
3809 * ice_add_eth_mac - Add a ethertype based filter rule
3810 * @hw: pointer to the hardware structure
3811 * @em_list: list of ethertype and forwarding information
3813 * Function add ethertype rule for logical port from HW struct
/* Public wrapper: add ethertype/ethertype-MAC rules on this HW's lport. */
3815 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3817 if (!em_list || !hw)
3818 return ICE_ERR_PARAM;
3820 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
3821 hw->port_info->lport);
3825 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
3826 * @hw: pointer to the hardware structure
3827 * @em_list: list of ethertype or ethertype MAC entries
3828 * @sw: pointer to switch info struct for which function add rule
/* Remove ethertype (or ethertype+MAC) filter rules. Uses the SAFE list
 * walker because ice_remove_rule_internal() may unlink entries.
 * First failure aborts the walk.
 */
3830 static enum ice_status
3831 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
3832 struct ice_switch_info *sw)
3834 struct ice_fltr_list_entry *em_list_itr, *tmp;
3836 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3838 struct ice_sw_recipe *recp_list;
3839 enum ice_sw_lkup_type l_type;
3841 l_type = em_list_itr->fltr_info.lkup_type;
3843 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3844 l_type != ICE_SW_LKUP_ETHERTYPE)
3845 return ICE_ERR_PARAM;
3847 recp_list = &sw->recp_list[l_type];
3848 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
3850 if (em_list_itr->status)
3851 return em_list_itr->status;
3857 * ice_remove_eth_mac - remove a ethertype based filter rule
3858 * @hw: pointer to the hardware structure
3859 * @em_list: list of ethertype and forwarding information
/* Public wrapper: remove ethertype rules from this HW's switch info. */
3863 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3865 if (!em_list || !hw)
3866 return ICE_ERR_PARAM;
3868 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info)...
3872 * ice_rem_sw_rule_info
3873 * @hw: pointer to the hardware structure
3874 * @rule_head: pointer to the switch list structure that we want to delete
/* Free every bookkeeping entry on a standard (non-advanced) rule list.
 * Purely host-side cleanup: no AQ commands are issued here.
 */
3877 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3879 if (!LIST_EMPTY(rule_head)) {
3880 struct ice_fltr_mgmt_list_entry *entry;
3881 struct ice_fltr_mgmt_list_entry *tmp;
3883 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3884 ice_fltr_mgmt_list_entry, list_entry) {
3885 LIST_DEL(&entry->list_entry);
3886 ice_free(hw, entry);
3892 * ice_rem_adv_rule_info
3893 * @hw: pointer to the hardware structure
3894 * @rule_head: pointer to the switch list structure that we want to delete
/* Free every bookkeeping entry on an advanced rule list; each entry owns
 * a separately-allocated lkups array that must be freed first.
 */
3897 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3899 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3900 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3902 if (LIST_EMPTY(rule_head))
3905 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3906 ice_adv_fltr_mgmt_list_entry, list_entry) {
3907 LIST_DEL(&lst_itr->list_entry);
3908 ice_free(hw, lst_itr->lkups);
3909 ice_free(hw, lst_itr);
3914 * ice_rem_all_sw_rules_info
3915 * @hw: pointer to the hardware structure
/* Tear down the bookkeeping lists of every recipe, dispatching to the
 * standard or advanced cleanup helper based on the recipe's adv_rule flag.
 */
3917 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3919 struct ice_switch_info *sw = hw->switch_info;
3922 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3923 struct LIST_HEAD_TYPE *rule_head;
3925 rule_head = &sw->recp_list[i].filt_rules;
3926 if (!sw->recp_list[i].adv_rule)
3927 ice_rem_sw_rule_info(hw, rule_head);
3929 ice_rem_adv_rule_info(hw, rule_head);
3934 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3935 * @pi: pointer to the port_info structure
3936 * @vsi_handle: VSI handle to set as default
3937 * @set: true to add the above mentioned switch rule, false to remove it
3938 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3940 * add filter rule to set/unset given VSI as default VSI for the switch
3941 * (represented by swid)
/* Set or clear the given VSI as the default VSI for the switch in the
 * requested direction. Builds one ICE_SW_LKUP_DFLT rule and issues a
 * single add/remove AQ command, then caches the resulting rule ID and
 * VSI number in the port_info.
 * NOTE(review): sampled listing — the !s_rule check and the set/clear
 * branch structure around the bookkeeping are partially elided.
 */
3944 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3947 struct ice_aqc_sw_rules_elem *s_rule;
3948 struct ice_fltr_info f_info;
3949 struct ice_hw *hw = pi->hw;
3950 enum ice_adminq_opc opcode;
3951 enum ice_status status;
3955 if (!ice_is_vsi_valid(hw, vsi_handle))
3956 return ICE_ERR_PARAM;
3957 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
/* Adds need the full dummy-ethernet-header rule; removes only need the
 * header-less element.
 */
3959 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3960 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3961 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3963 return ICE_ERR_NO_MEMORY;
3965 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3967 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3968 f_info.flag = direction;
3969 f_info.fltr_act = ICE_FWD_TO_VSI;
3970 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
/* Rx default rules are sourced from the logical port; Tx default rules
 * from the VSI itself. On clear, reuse the rule ID cached at set time.
 */
3972 if (f_info.flag & ICE_FLTR_RX) {
3973 f_info.src = pi->lport;
3974 f_info.src_id = ICE_SRC_ID_LPORT;
3976 f_info.fltr_rule_id =
3977 pi->dflt_rx_vsi_rule_id;
3978 } else if (f_info.flag & ICE_FLTR_TX) {
3979 f_info.src_id = ICE_SRC_ID_VSI;
3980 f_info.src = hw_vsi_id;
3982 f_info.fltr_rule_id =
3983 pi->dflt_tx_vsi_rule_id;
3987 opcode = ice_aqc_opc_add_sw_rules;
3989 opcode = ice_aqc_opc_remove_sw_rules;
3991 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3993 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3994 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
/* On a successful "set", record the FW-assigned rule index. */
3997 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3999 if (f_info.flag & ICE_FLTR_TX) {
4000 pi->dflt_tx_vsi_num = hw_vsi_id;
4001 pi->dflt_tx_vsi_rule_id = index;
4002 } else if (f_info.flag & ICE_FLTR_RX) {
4003 pi->dflt_rx_vsi_num = hw_vsi_id;
4004 pi->dflt_rx_vsi_rule_id = index;
/* On "clear", invalidate the cached defaults. */
4007 if (f_info.flag & ICE_FLTR_TX) {
4008 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
4009 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
4010 } else if (f_info.flag & ICE_FLTR_RX) {
4011 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
4012 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
4017 ice_free(hw, s_rule);
4022 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
4023 * @list_head: head of rule list
4024 * @f_info: rule information
4026 * Helper function to search for a unicast rule entry - this is to be used
4027 * to remove unicast MAC filter that is not shared with other VSIs on the
4030 * Returns pointer to entry storing the rule if found
/* Linear search of a rule list for a unicast entry matching the given
 * filter's lookup data, HW VSI ID, and direction flag. Used when removing
 * exclusive (non-shared) unicast filters. Returns NULL if not found
 * (return statements elided in this excerpt).
 */
4032 static struct ice_fltr_mgmt_list_entry *
4033 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
4034 struct ice_fltr_info *f_info)
4036 struct ice_fltr_mgmt_list_entry *list_itr;
4038 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4040 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4041 sizeof(f_info->l_data)) &&
4042 f_info->fwd_id.hw_vsi_id ==
4043 list_itr->fltr_info.fwd_id.hw_vsi_id &&
4044 f_info->flag == list_itr->fltr_info.flag)
4051 * ice_remove_mac_rule - remove a MAC based filter rule
4052 * @hw: pointer to the hardware structure
4053 * @m_list: list of MAC addresses and forwarding information
4054 * @recp_list: list from which function remove MAC address
4056 * This function removes either a MAC filter rule or a specific VSI from a
4057 * VSI list for a multicast MAC address.
4059 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
4060 * ice_add_mac. Caller should be aware that this call will only work if all
4061 * the entries passed into m_list were added previously. It will not attempt to
4062 * do a partial remove of entries that were found.
/* Remove MAC filter rules (or detach a VSI from a multicast rule's VSI
 * list). For exclusive unicast filters, first verifies under the rule
 * lock that the filter actually belongs to this VSI so another VSI's
 * rule is never torn down. First failure aborts the walk.
 */
4064 static enum ice_status
4065 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4066 struct ice_sw_recipe *recp_list)
4068 struct ice_fltr_list_entry *list_itr, *tmp;
4069 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4072 return ICE_ERR_PARAM;
4074 rule_lock = &recp_list->filt_rule_lock;
4075 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
4077 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
4078 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
4081 if (l_type != ICE_SW_LKUP_MAC)
4082 return ICE_ERR_PARAM;
4084 vsi_handle = list_itr->fltr_info.vsi_handle;
4085 if (!ice_is_vsi_valid(hw, vsi_handle))
4086 return ICE_ERR_PARAM;
4088 list_itr->fltr_info.fwd_id.hw_vsi_id =
4089 ice_get_hw_vsi_num(hw, vsi_handle);
4090 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4091 /* Don't remove the unicast address that belongs to
4092 * another VSI on the switch, since it is not being
4095 ice_acquire_lock(rule_lock);
4096 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
4097 &list_itr->fltr_info)) {
4098 ice_release_lock(rule_lock);
4099 return ICE_ERR_DOES_NOT_EXIST;
4101 ice_release_lock(rule_lock);
4103 list_itr->status = ice_remove_rule_internal(hw, recp_list,
4105 if (list_itr->status)
4106 return list_itr->status;
4112 * ice_remove_mac - remove a MAC address based filter rule
4113 * @hw: pointer to the hardware structure
4114 * @m_list: list of MAC addresses and forwarding information
/* Public wrapper: remove MAC rules using this HW's MAC recipe list. */
4118 ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4120 struct ice_sw_recipe *recp_list;
4122 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4123 return ice_remove_mac_rule(hw, m_list, recp_list);
4127 * ice_remove_vlan_rule - Remove VLAN based filter rule
4128 * @hw: pointer to the hardware structure
4129 * @v_list: list of VLAN entries and forwarding information
4130 * @recp_list: list from which function remove VLAN
/* Remove VLAN filter rules; validates lookup type then delegates each
 * entry to ice_remove_rule_internal(). First failure aborts the walk.
 */
4132 static enum ice_status
4133 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4134 struct ice_sw_recipe *recp_list)
4136 struct ice_fltr_list_entry *v_list_itr, *tmp;
4138 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4140 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4142 if (l_type != ICE_SW_LKUP_VLAN)
4143 return ICE_ERR_PARAM;
4144 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4146 if (v_list_itr->status)
4147 return v_list_itr->status;
4153 * ice_remove_vlan - remove a VLAN address based filter rule
4154 * @hw: pointer to the hardware structure
4155 * @v_list: list of VLAN and forwarding information
/* Public wrapper: remove VLAN rules using this HW's VLAN recipe list.
 * NOTE(review): the NULL-argument guard condition preceding the
 * ICE_ERR_PARAM return is elided in this excerpt.
 */
4159 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4161 struct ice_sw_recipe *recp_list;
4164 return ICE_ERR_PARAM;
4166 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
4167 return ice_remove_vlan_rule(hw, v_list, recp_list);
4171 * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
4172 * @hw: pointer to the hardware structure
4173 * @v_list: list of MAC VLAN entries and forwarding information
4174 * @recp_list: list from which function remove MAC VLAN
/* Remove MAC+VLAN pair filter rules via ice_remove_rule_internal().
 * NOTE(review): the recp_list parameter is immediately overwritten with
 * the MAC_VLAN recipe from hw->switch_info, making the parameter dead —
 * harmless today since the only caller passes the same list, but worth
 * confirming against upstream intent.
 */
4176 static enum ice_status
4177 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4178 struct ice_sw_recipe *recp_list)
4180 struct ice_fltr_list_entry *v_list_itr, *tmp;
4182 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4183 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4185 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4187 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4188 return ICE_ERR_PARAM;
4189 v_list_itr->status =
4190 ice_remove_rule_internal(hw, recp_list,
4192 if (v_list_itr->status)
4193 return v_list_itr->status;
4199 * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
4200 * @hw: pointer to the hardware structure
4201 * @mv_list: list of MAC VLAN and forwarding information
/* Public wrapper: remove MAC+VLAN rules via this HW's MAC_VLAN recipe. */
4204 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4206 struct ice_sw_recipe *recp_list;
4208 if (!mv_list || !hw)
4209 return ICE_ERR_PARAM;
4211 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4212 return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
4216 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
4217 * @fm_entry: filter entry to inspect
4218 * @vsi_handle: VSI handle to compare with filter info
/* True when the filter targets the VSI directly (FWD_TO_VSI with a
 * matching handle) or indirectly via a VSI list whose bitmap contains
 * the handle (FWD_TO_VSI_LIST). The closing operand of the bit test is
 * elided in this excerpt.
 */
4221 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
4223 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
4224 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
4225 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
4226 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
4231 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
4232 * @hw: pointer to the hardware structure
4233 * @vsi_handle: VSI handle to remove filters from
4234 * @vsi_list_head: pointer to the list to add entry to
4235 * @fi: pointer to fltr_info of filter entry to copy & add
4237 * Helper function, used when creating a list of filters to remove from
4238 * a specific VSI. The entry added to vsi_list_head is a COPY of the
4239 * original filter entry, with the exception of fltr_info.fltr_act and
4240 * fltr_info.fwd_id fields. These are set such that later logic can
4241 * extract which VSI to remove the fltr from, and pass on that information.
/* Allocate and append a COPY of a filter entry to vsi_list_head, with
 * fltr_act/fwd_id/vsi_handle rewritten so later removal logic knows
 * exactly which VSI to detach. Caller owns (and must free) the copies.
 */
4243 static enum ice_status
4244 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4245 struct LIST_HEAD_TYPE *vsi_list_head,
4246 struct ice_fltr_info *fi)
4248 struct ice_fltr_list_entry *tmp;
4250 /* this memory is freed up in the caller function
4251 * once filters for this VSI are removed
4253 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
4255 return ICE_ERR_NO_MEMORY;
4257 tmp->fltr_info = *fi;
4259 /* Overwrite these fields to indicate which VSI to remove filter from,
4260 * so find and remove logic can extract the information from the
4261 * list entries. Note that original entries will still have proper
4264 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
4265 tmp->fltr_info.vsi_handle = vsi_handle;
4266 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4268 LIST_ADD(&tmp->list_entry, vsi_list_head);
4274 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4275 * @hw: pointer to the hardware structure
4276 * @vsi_handle: VSI handle to remove filters from
4277 * @lkup_list_head: pointer to the list that has certain lookup type filters
4278 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4280 * Locates all filters in lkup_list_head that are used by the given VSI,
4281 * and adds COPIES of those entries to vsi_list_head (intended to be used
4282 * to remove the listed filters).
4283 * Note that this means all entries in vsi_list_head must be explicitly
4284 * deallocated by the caller when done with list.
/* Scan lkup_list_head and copy (via ice_add_entry_to_vsi_fltr_list) every
 * filter the given VSI uses into vsi_list_head. The copies must be freed
 * by the caller. Entries not referencing the VSI are skipped (the
 * `continue` between the check and the copy is elided in this excerpt).
 */
4286 static enum ice_status
4287 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4288 struct LIST_HEAD_TYPE *lkup_list_head,
4289 struct LIST_HEAD_TYPE *vsi_list_head)
4291 struct ice_fltr_mgmt_list_entry *fm_entry;
4292 enum ice_status status = ICE_SUCCESS;
4294 /* check to make sure VSI ID is valid and within boundary */
4295 if (!ice_is_vsi_valid(hw, vsi_handle))
4296 return ICE_ERR_PARAM;
4298 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
4299 ice_fltr_mgmt_list_entry, list_entry) {
4300 struct ice_fltr_info *fi;
4302 fi = &fm_entry->fltr_info;
4303 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
4306 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4315 * ice_determine_promisc_mask
4316 * @fi: filter info to parse
4318 * Helper function to determine which ICE_PROMISC_ mask corresponds
4319 * to given filter into.
/* Map one filter's MAC class (broadcast / multicast / unicast / VLAN)
 * and direction onto the corresponding ICE_PROMISC_* bit. The final
 * else-branch adds the VLAN bit; the surrounding `vid` condition line
 * is elided in this excerpt.
 */
4321 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4323 u16 vid = fi->l_data.mac_vlan.vlan_id;
4324 u8 *macaddr = fi->l_data.mac.mac_addr;
4325 bool is_tx_fltr = false;
4326 u8 promisc_mask = 0;
4328 if (fi->flag == ICE_FLTR_TX)
4331 if (IS_BROADCAST_ETHER_ADDR(macaddr))
4332 promisc_mask |= is_tx_fltr ?
4333 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4334 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
4335 promisc_mask |= is_tx_fltr ?
4336 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4337 else if (IS_UNICAST_ETHER_ADDR(macaddr))
4338 promisc_mask |= is_tx_fltr ?
4339 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
4341 promisc_mask |= is_tx_fltr ?
4342 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4344 return promisc_mask;
4348 * ice_get_vsi_promisc - get promiscuous mode of given VSI
4349 * @hw: pointer to the hardware structure
4350 * @vsi_handle: VSI handle to retrieve info from
4351 * @promisc_mask: pointer to mask to be filled in
4352 * @vid: VLAN ID of promisc VLAN VSI
/* Accumulate into *promisc_mask the ICE_PROMISC_* bits of every PROMISC
 * recipe rule that applies to the given VSI. Read-only; walks the list
 * under the recipe's rule lock.
 */
4355 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4358 struct ice_switch_info *sw = hw->switch_info;
4359 struct ice_fltr_mgmt_list_entry *itr;
4360 struct LIST_HEAD_TYPE *rule_head;
4361 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4363 if (!ice_is_vsi_valid(hw, vsi_handle))
4364 return ICE_ERR_PARAM;
4368 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
4369 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
4371 ice_acquire_lock(rule_lock);
4372 LIST_FOR_EACH_ENTRY(itr, rule_head,
4373 ice_fltr_mgmt_list_entry, list_entry) {
4374 /* Continue if this filter doesn't apply to this VSI or the
4375 * VSI ID is not in the VSI map for this filter
4377 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4380 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4382 ice_release_lock(rule_lock);
4388 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4389 * @hw: pointer to the hardware structure
4390 * @vsi_handle: VSI handle to retrieve info from
4391 * @promisc_mask: pointer to mask to be filled in
4392 * @vid: VLAN ID of promisc VLAN VSI
/* Same as ice_get_vsi_promisc() but over the PROMISC_VLAN recipe list:
 * accumulates the VLAN-promiscuous ICE_PROMISC_* bits for the VSI.
 */
4395 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4398 struct ice_switch_info *sw = hw->switch_info;
4399 struct ice_fltr_mgmt_list_entry *itr;
4400 struct LIST_HEAD_TYPE *rule_head;
4401 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4403 if (!ice_is_vsi_valid(hw, vsi_handle))
4404 return ICE_ERR_PARAM;
4408 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
4409 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
4411 ice_acquire_lock(rule_lock);
4412 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
4414 /* Continue if this filter doesn't apply to this VSI or the
4415 * VSI ID is not in the VSI map for this filter
4417 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4420 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4422 ice_release_lock(rule_lock);
4428 * ice_remove_promisc - Remove promisc based filter rules
4429 * @hw: pointer to the hardware structure
4430 * @recp_id: recipe ID for which the rule needs to removed
4431 * @v_list: list of promisc entries
/* Remove each promiscuous-mode rule in v_list via the recipe selected by
 * recp_id (PROMISC or PROMISC_VLAN). First failure aborts the walk.
 */
4433 static enum ice_status
4434 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
4435 struct LIST_HEAD_TYPE *v_list)
4437 struct ice_fltr_list_entry *v_list_itr, *tmp;
4438 struct ice_sw_recipe *recp_list;
4440 recp_list = &hw->switch_info->recp_list[recp_id];
4441 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4443 v_list_itr->status =
4444 ice_remove_rule_internal(hw, recp_list, v_list_itr);
4445 if (v_list_itr->status)
4446 return v_list_itr->status;
4452 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
4453 * @hw: pointer to the hardware structure
4454 * @vsi_handle: VSI handle to clear mode
4455 * @promisc_mask: mask of promiscuous config bits to clear
4456 * @vid: VLAN ID to clear VLAN promiscuous
/* Clear the requested promiscuous mode bits for a VSI: collect (under the
 * rule lock) copies of every matching rule into a temporary remove list,
 * then remove them outside the lock and free the copies.
 */
4459 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4462 struct ice_switch_info *sw = hw->switch_info;
4463 struct ice_fltr_list_entry *fm_entry, *tmp;
4464 struct LIST_HEAD_TYPE remove_list_head;
4465 struct ice_fltr_mgmt_list_entry *itr;
4466 struct LIST_HEAD_TYPE *rule_head;
4467 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4468 enum ice_status status = ICE_SUCCESS;
4471 if (!ice_is_vsi_valid(hw, vsi_handle))
4472 return ICE_ERR_PARAM;
/* VLAN-directed bits select the PROMISC_VLAN recipe; all others use the
 * plain PROMISC recipe.
 */
4474 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
4475 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4477 recipe_id = ICE_SW_LKUP_PROMISC;
4479 rule_head = &sw->recp_list[recipe_id].filt_rules;
4480 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4482 INIT_LIST_HEAD(&remove_list_head);
4484 ice_acquire_lock(rule_lock);
4485 LIST_FOR_EACH_ENTRY(itr, rule_head,
4486 ice_fltr_mgmt_list_entry, list_entry) {
4487 struct ice_fltr_info *fltr_info;
4488 u8 fltr_promisc_mask = 0;
4490 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4492 fltr_info = &itr->fltr_info;
4494 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4495 vid != fltr_info->l_data.mac_vlan.vlan_id)
4498 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4500 /* Skip if filter is not completely specified by given mask */
4501 if (fltr_promisc_mask & ~promisc_mask)
4504 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4508 ice_release_lock(rule_lock);
4509 goto free_fltr_list;
4512 ice_release_lock(rule_lock);
/* Lock is dropped before issuing removals; ice_remove_promisc() takes
 * the recipe lock internally as needed.
 */
4514 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
/* Always free the temporary copies, success or failure. */
4517 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4518 ice_fltr_list_entry, list_entry) {
4519 LIST_DEL(&fm_entry->list_entry);
4520 ice_free(hw, fm_entry);
4527 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4528 * @hw: pointer to the hardware structure
4529 * @vsi_handle: VSI handle to configure
4530 * @promisc_mask: mask of promiscuous config bits
4531 * @vid: VLAN ID to set VLAN promiscuous
 *
 * Installs one forwarding rule per direction/packet-type bit set in
 * @promisc_mask (unicast/multicast/broadcast, RX/TX). Returns ICE_SUCCESS or
 * an ice_status error code from the first failed rule add.
4534 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
4536 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4537 struct ice_fltr_list_entry f_list_entry;
4538 struct ice_fltr_info new_fltr;
4539 enum ice_status status = ICE_SUCCESS;
4545 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4547 if (!ice_is_vsi_valid(hw, vsi_handle))
4548 return ICE_ERR_PARAM;
4549 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4551 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
 /* VLAN promiscuous requests use the PROMISC_VLAN recipe and carry @vid */
4553 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4554 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4555 new_fltr.l_data.mac_vlan.vlan_id = vid;
4556 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4558 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4559 recipe_id = ICE_SW_LKUP_PROMISC;
4562 /* Separate filters must be set for each direction/packet type
4563 * combination, so we will loop over the mask value, store the
4564 * individual type, and clear it out in the input mask as it
 */
4567 while (promisc_mask) {
4568 struct ice_sw_recipe *recp_list;
 /* Pick the highest-priority remaining bit and clear it from the mask */
4574 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4575 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4576 pkt_type = UCAST_FLTR;
4577 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4578 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4579 pkt_type = UCAST_FLTR;
4581 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4582 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4583 pkt_type = MCAST_FLTR;
4584 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4585 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4586 pkt_type = MCAST_FLTR;
4588 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4589 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4590 pkt_type = BCAST_FLTR;
4591 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4592 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4593 pkt_type = BCAST_FLTR;
4597 /* Check for VLAN promiscuous flag */
4598 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4599 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4600 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4601 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4605 /* Set filter DA based on packet type */
4606 mac_addr = new_fltr.l_data.mac.mac_addr;
4607 if (pkt_type == BCAST_FLTR) {
4608 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
4609 } else if (pkt_type == MCAST_FLTR ||
4610 pkt_type == UCAST_FLTR) {
4611 /* Use the dummy ether header DA */
4612 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
4613 ICE_NONDMA_TO_NONDMA);
4614 if (pkt_type == MCAST_FLTR)
4615 mac_addr[0] |= 0x1; /* Set multicast bit */
4618 /* Need to reset this to zero for all iterations */
 /* TX rules use the VSI as source; RX rules use the physical port */
4621 new_fltr.flag |= ICE_FLTR_TX;
4622 new_fltr.src = hw_vsi_id;
4624 new_fltr.flag |= ICE_FLTR_RX;
4625 new_fltr.src = hw->port_info->lport;
4628 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4629 new_fltr.vsi_handle = vsi_handle;
4630 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4631 f_list_entry.fltr_info = new_fltr;
4632 recp_list = &hw->switch_info->recp_list[recipe_id];
4634 status = ice_add_rule_internal(hw, recp_list,
4635 hw->port_info->lport,
4637 if (status != ICE_SUCCESS)
4638 goto set_promisc_exit;
4646 * ice_set_vlan_vsi_promisc
4647 * @hw: pointer to the hardware structure
4648 * @vsi_handle: VSI handle to configure
4649 * @promisc_mask: mask of promiscuous config bits
4650 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4652 * Configure VSI with all associated VLANs to given promiscuous mode(s)
 *
 * Builds a snapshot list of the VSI's VLAN filters (under the VLAN rule
 * lock), then sets or clears promiscuous mode per VLAN ID depending on
 * @rm_vlan_promisc, and finally frees the snapshot list.
4655 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4656 bool rm_vlan_promisc)
4658 struct ice_switch_info *sw = hw->switch_info;
4659 struct ice_fltr_list_entry *list_itr, *tmp;
4660 struct LIST_HEAD_TYPE vsi_list_head;
4661 struct LIST_HEAD_TYPE *vlan_head;
4662 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
4663 enum ice_status status;
4666 INIT_LIST_HEAD(&vsi_list_head);
4667 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4668 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
 /* Copy the VSI's VLAN rules out while holding the lock; the per-VLAN
  * promisc calls below take their own locks.
  */
4669 ice_acquire_lock(vlan_lock);
4670 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4672 ice_release_lock(vlan_lock);
4674 goto free_fltr_list;
4676 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
4678 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4679 if (rm_vlan_promisc)
4680 status = ice_clear_vsi_promisc(hw, vsi_handle,
4681 promisc_mask, vlan_id);
4683 status = ice_set_vsi_promisc(hw, vsi_handle,
4684 promisc_mask, vlan_id);
 /* Always release the snapshot list entries */
4690 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
4691 ice_fltr_list_entry, list_entry) {
4692 LIST_DEL(&list_itr->list_entry);
4693 ice_free(hw, list_itr);
4699 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4700 * @hw: pointer to the hardware structure
4701 * @vsi_handle: VSI handle to remove filters from
4702 * @recp_list: recipe list from which function remove fltr
4703 * @lkup: switch rule filter lookup type
 *
 * Collects all rules of type @lkup used by @vsi_handle into a temporary
 * list, dispatches to the per-type removal helper, then frees the list.
4706 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4707 struct ice_sw_recipe *recp_list,
4708 enum ice_sw_lkup_type lkup)
4710 struct ice_fltr_list_entry *fm_entry;
4711 struct LIST_HEAD_TYPE remove_list_head;
4712 struct LIST_HEAD_TYPE *rule_head;
4713 struct ice_fltr_list_entry *tmp;
4714 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4715 enum ice_status status;
4717 INIT_LIST_HEAD(&remove_list_head);
 /* @recp_list is indexed by lookup type here */
4718 rule_lock = &recp_list[lkup].filt_rule_lock;
4719 rule_head = &recp_list[lkup].filt_rules;
4720 ice_acquire_lock(rule_lock);
4721 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4723 ice_release_lock(rule_lock);
 /* Dispatch removal to the helper matching the lookup type */
4728 case ICE_SW_LKUP_MAC:
4729 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
4731 case ICE_SW_LKUP_VLAN:
4732 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
4734 case ICE_SW_LKUP_PROMISC:
4735 case ICE_SW_LKUP_PROMISC_VLAN:
 /* lookup type doubles as the recipe index for promisc removal */
4736 ice_remove_promisc(hw, lkup, &remove_list_head);
4738 case ICE_SW_LKUP_MAC_VLAN:
4739 ice_remove_mac_vlan(hw, &remove_list_head);
4741 case ICE_SW_LKUP_ETHERTYPE:
4742 case ICE_SW_LKUP_ETHERTYPE_MAC:
4743 ice_remove_eth_mac(hw, &remove_list_head);
4745 case ICE_SW_LKUP_DFLT:
4746 ice_debug(hw, ICE_DBG_SW,
4747 "Remove filters for this lookup type hasn't been implemented yet\n");
4749 case ICE_SW_LKUP_LAST:
4750 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
 /* Free the temporary list entries */
4754 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4755 ice_fltr_list_entry, list_entry) {
4756 LIST_DEL(&fm_entry->list_entry);
4757 ice_free(hw, fm_entry);
4762 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
4763 * @hw: pointer to the hardware structure
4764 * @vsi_handle: VSI handle to remove filters from
4765 * @sw: pointer to switch info struct
 *
 * Removes the VSI's filters for every supported lookup type by delegating to
 * ice_remove_vsi_lkup_fltr() once per type.
4768 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
4769 struct ice_switch_info *sw)
4771 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4773 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4774 sw->recp_list, ICE_SW_LKUP_MAC);
4775 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4776 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
4777 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4778 sw->recp_list, ICE_SW_LKUP_PROMISC);
4779 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4780 sw->recp_list, ICE_SW_LKUP_VLAN);
4781 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4782 sw->recp_list, ICE_SW_LKUP_DFLT);
4783 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4784 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
4785 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4786 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
4787 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4788 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
4792 * ice_remove_vsi_fltr - Remove all filters for a VSI
4793 * @hw: pointer to the hardware structure
4794 * @vsi_handle: VSI handle to remove filters from
 *
 * Thin convenience wrapper over ice_remove_vsi_fltr_rule() using the
 * hardware's own switch_info.
4796 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4798 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
4802 * ice_alloc_res_cntr - allocating resource counter
4803 * @hw: pointer to the hardware structure
4804 * @type: type of resource
4805 * @alloc_shared: if set it is shared else dedicated
4806 * @num_items: number of entries requested for FD resource type
4807 * @counter_id: counter index returned by AQ call
 *
 * Allocates a counter resource via the alloc-resources admin queue command
 * and returns the first allocated index through @counter_id. Returns
 * ICE_ERR_NO_MEMORY if the command buffer cannot be allocated, otherwise
 * the AQ call status.
4810 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4813 struct ice_aqc_alloc_free_res_elem *buf;
4814 enum ice_status status;
4817 /* Allocate resource */
4818 buf_len = sizeof(*buf);
4819 buf = (struct ice_aqc_alloc_free_res_elem *)
4820 ice_malloc(hw, buf_len);
4822 return ICE_ERR_NO_MEMORY;
4824 buf->num_elems = CPU_TO_LE16(num_items);
 /* Encode resource type plus shared/dedicated flag for the AQ command */
4825 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4826 ICE_AQC_RES_TYPE_M) | alloc_shared);
4828 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4829 ice_aqc_opc_alloc_res, NULL);
4833 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
4841 * ice_free_res_cntr - free resource counter
4842 * @hw: pointer to the hardware structure
4843 * @type: type of resource
4844 * @alloc_shared: if set it is shared else dedicated
4845 * @num_items: number of entries to be freed for FD resource type
4846 * @counter_id: counter ID resource which needs to be freed
 *
 * Releases a previously allocated counter resource via the free-resources
 * admin queue command. Returns ICE_ERR_NO_MEMORY if the command buffer
 * cannot be allocated, otherwise the AQ call status.
4849 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4852 struct ice_aqc_alloc_free_res_elem *buf;
4853 enum ice_status status;
4857 buf_len = sizeof(*buf);
4858 buf = (struct ice_aqc_alloc_free_res_elem *)
4859 ice_malloc(hw, buf_len);
4861 return ICE_ERR_NO_MEMORY;
4863 buf->num_elems = CPU_TO_LE16(num_items);
 /* Same type/flag encoding as the allocation path */
4864 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4865 ICE_AQC_RES_TYPE_M) | alloc_shared);
4866 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
4868 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4869 ice_aqc_opc_free_res, NULL);
4871 ice_debug(hw, ICE_DBG_SW,
4872 "counter resource could not be freed\n");
4879 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
4880 * @hw: pointer to the hardware structure
4881 * @counter_id: returns counter index
 *
 * Allocates a single dedicated VLAN counter via ice_alloc_res_cntr().
4883 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
4885 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4886 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4891 * ice_free_vlan_res_counter - Free counter resource for VLAN type
4892 * @hw: pointer to the hardware structure
4893 * @counter_id: counter index to be freed
 *
 * Frees a single dedicated VLAN counter via ice_free_res_cntr().
4895 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
4897 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4898 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4903 * ice_alloc_res_lg_act - add large action resource
4904 * @hw: pointer to the hardware structure
4905 * @l_id: large action ID to fill it in
4906 * @num_acts: number of actions to hold with a large action entry
 *
 * Allocates a wide-table entry sized for @num_acts actions and returns its
 * index through @l_id. Returns ICE_ERR_PARAM for num_acts of 0 or greater
 * than ICE_MAX_LG_ACT, ICE_ERR_NO_MEMORY on buffer allocation failure,
 * otherwise the AQ call status.
4908 static enum ice_status
4909 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
4911 struct ice_aqc_alloc_free_res_elem *sw_buf;
4912 enum ice_status status;
4915 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
4916 return ICE_ERR_PARAM;
4918 /* Allocate resource for large action */
4919 buf_len = sizeof(*sw_buf);
4920 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
4921 ice_malloc(hw, buf_len);
4923 return ICE_ERR_NO_MEMORY;
4925 sw_buf->num_elems = CPU_TO_LE16(1);
4927 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
4928 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
4929 * If num_acts is greater than 2, then use
4930 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
4931 * The num_acts cannot exceed 4. This was ensured at the
4932 * beginning of the function.
4935 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
4936 else if (num_acts == 2)
4937 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
4939 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
4941 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
4942 ice_aqc_opc_alloc_res, NULL);
4944 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
4946 ice_free(hw, sw_buf);
4951 * ice_add_mac_with_sw_marker - add filter with sw marker
4952 * @hw: pointer to the hardware structure
4953 * @f_info: filter info structure containing the MAC filter information
4954 * @sw_marker: sw marker to tag the Rx descriptor with
 *
 * Installs (or reuses) a MAC forwarding rule and attaches a software-marker
 * large action to it. The marker and counter large actions are mutually
 * exclusive on a rule. On failure, the rule is removed again only if it did
 * not exist before this call.
4957 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
4960 struct ice_fltr_mgmt_list_entry *m_entry;
4961 struct ice_fltr_list_entry fl_info;
4962 struct ice_sw_recipe *recp_list;
4963 struct LIST_HEAD_TYPE l_head;
4964 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4965 enum ice_status ret;
 /* Only forward-to-VSI MAC filters with a valid marker/VSI are accepted */
4969 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4970 return ICE_ERR_PARAM;
4972 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4973 return ICE_ERR_PARAM;
4975 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
4976 return ICE_ERR_PARAM;
4978 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4979 return ICE_ERR_PARAM;
4980 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4982 /* Add filter if it doesn't exist so then the adding of large
4983 * action always results in update
 */
4986 INIT_LIST_HEAD(&l_head);
4987 fl_info.fltr_info = *f_info;
4988 LIST_ADD(&fl_info.list_entry, &l_head);
4990 entry_exists = false;
4991 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
4992 hw->port_info->lport);
4993 if (ret == ICE_ERR_ALREADY_EXISTS)
4994 entry_exists = true;
4998 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4999 rule_lock = &recp_list->filt_rule_lock;
5000 ice_acquire_lock(rule_lock);
5001 /* Get the book keeping entry for the filter */
5002 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5006 /* If counter action was enabled for this rule then don't enable
5007 * sw marker large action
5009 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5010 ret = ICE_ERR_PARAM;
5014 /* if same marker was added before */
5015 if (m_entry->sw_marker_id == sw_marker) {
5016 ret = ICE_ERR_ALREADY_EXISTS;
5020 /* Allocate a hardware table entry to hold large act. Three actions
5021 * for marker based large action
5023 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
5027 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5030 /* Update the switch rule to add the marker action */
5031 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
5033 ice_release_lock(rule_lock);
5038 ice_release_lock(rule_lock);
5039 /* only remove entry if it did not exist previously */
5041 ret = ice_remove_mac(hw, &l_head);
5047 * ice_add_mac_with_counter - add filter with counter enabled
5048 * @hw: pointer to the hardware structure
5049 * @f_info: pointer to filter info structure containing the MAC filter
 *
 * Installs (or reuses) a MAC forwarding rule, allocates a VLAN counter, and
 * attaches a counter large action to the rule. Counter and software-marker
 * large actions are mutually exclusive on a rule. On failure, the rule is
 * removed again only if it did not exist before this call.
5053 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
5055 struct ice_fltr_mgmt_list_entry *m_entry;
5056 struct ice_fltr_list_entry fl_info;
5057 struct ice_sw_recipe *recp_list;
5058 struct LIST_HEAD_TYPE l_head;
5059 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5060 enum ice_status ret;
 /* Only forward-to-VSI MAC filters with a valid VSI are accepted */
5065 if (f_info->fltr_act != ICE_FWD_TO_VSI)
5066 return ICE_ERR_PARAM;
5068 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5069 return ICE_ERR_PARAM;
5071 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5072 return ICE_ERR_PARAM;
5073 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5074 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5076 entry_exist = false;
5078 rule_lock = &recp_list->filt_rule_lock;
5080 /* Add filter if it doesn't exist so then the adding of large
5081 * action always results in update
5083 INIT_LIST_HEAD(&l_head);
5085 fl_info.fltr_info = *f_info;
5086 LIST_ADD(&fl_info.list_entry, &l_head);
5088 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5089 hw->port_info->lport);
5090 if (ret == ICE_ERR_ALREADY_EXISTS)
5095 ice_acquire_lock(rule_lock);
5096 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5098 ret = ICE_ERR_BAD_PTR;
5102 /* Don't enable counter for a filter for which sw marker was enabled */
5103 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
5104 ret = ICE_ERR_PARAM;
5108 /* If a counter was already enabled then don't need to add again */
5109 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5110 ret = ICE_ERR_ALREADY_EXISTS;
5114 /* Allocate a hardware table entry to VLAN counter */
5115 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
5119 /* Allocate a hardware table entry to hold large act. Two actions for
5120 * counter based large action
5122 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
5126 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5129 /* Update the switch rule to add the counter action */
5130 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
5132 ice_release_lock(rule_lock);
5137 ice_release_lock(rule_lock);
5138 /* only remove entry if it did not exist previously */
5140 ret = ice_remove_mac(hw, &l_head);
5145 /* This is mapping table entry that maps every word within a given protocol
5146 * structure to the real byte offset as per the specification of that
 * protocol header.
5148 * for example dst address is 3 words in ethertype header and corresponding
5149 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
5150 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
5151 * matching entry describing its field. This needs to be updated if new
5152 * structure is added to that union.
 *
 * Each entry lists the byte offset of every 16-bit word of the protocol
 * header, in declaration order.
5154 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
5155 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
5156 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
5157 { ICE_ETYPE_OL, { 0 } },
5158 { ICE_VLAN_OFOS, { 0, 2 } },
5159 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
5160 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
5161 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
5162 26, 28, 30, 32, 34, 36, 38 } },
5163 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
5164 26, 28, 30, 32, 34, 36, 38 } },
5165 { ICE_TCP_IL, { 0, 2 } },
5166 { ICE_UDP_OF, { 0, 2 } },
5167 { ICE_UDP_ILOS, { 0, 2 } },
5168 { ICE_SCTP_IL, { 0, 2 } },
5169 { ICE_VXLAN, { 8, 10, 12, 14 } },
5170 { ICE_GENEVE, { 8, 10, 12, 14 } },
5171 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
5172 { ICE_NVGRE, { 0, 2, 4, 6 } },
5173 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
5174 { ICE_PPPOE, { 0, 2, 4, 6 } },
5175 { ICE_PFCP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
5176 { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
5177 { ICE_ESP, { 0, 2, 4, 6 } },
5178 { ICE_AH, { 0, 2, 4, 6, 8, 10 } },
5179 { ICE_NAT_T, { 8, 10, 12, 14 } },
5182 /* The following table describes preferred grouping of recipes.
5183 * If a recipe that needs to be programmed is a superset or matches one of the
5184 * following combinations, then the recipe needs to be chained as per the
 * preferred grouping.
 *
 * Maps each software protocol type to the hardware protocol ID used when
 * programming recipes. Note that several tunnel types (VXLAN, GENEVE,
 * VXLAN_GPE, GTP) share the outer-UDP hardware ID.
5188 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
5189 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
5190 { ICE_MAC_IL, ICE_MAC_IL_HW },
5191 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
5192 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
5193 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
5194 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
5195 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
5196 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
5197 { ICE_TCP_IL, ICE_TCP_IL_HW },
5198 { ICE_UDP_OF, ICE_UDP_OF_HW },
5199 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
5200 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
5201 { ICE_VXLAN, ICE_UDP_OF_HW },
5202 { ICE_GENEVE, ICE_UDP_OF_HW },
5203 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
5204 { ICE_NVGRE, ICE_GRE_OF_HW },
5205 { ICE_GTP, ICE_UDP_OF_HW },
5206 { ICE_PPPOE, ICE_PPPOE_HW },
5207 { ICE_PFCP, ICE_UDP_ILOS_HW },
5208 { ICE_L2TPV3, ICE_L2TPV3_HW },
5209 { ICE_ESP, ICE_ESP_HW },
5210 { ICE_AH, ICE_AH_HW },
5211 { ICE_NAT_T, ICE_UDP_ILOS_HW },
5215 * ice_find_recp - find a recipe
5216 * @hw: pointer to the hardware structure
5217 * @lkup_exts: extension sequence to match
 * @tun_type: tunnel type the recipe must also match for profile rules
5219 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
 *
 * A recipe matches when it has the same number of valid words and every
 * (protocol, offset) word of @lkup_exts appears among the recipe's words
 * (order-independent).
5221 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
5222 enum ice_sw_tunnel_type tun_type)
5224 bool refresh_required = true;
5225 struct ice_sw_recipe *recp;
5228 /* Walk through existing recipes to find a match */
5229 recp = hw->switch_info->recp_list;
5230 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5231 /* If recipe was not created for this ID, in SW bookkeeping,
5232 * check if FW has an entry for this recipe. If the FW has an
5233 * entry update it in our SW bookkeeping and continue with the
 * search.
5236 if (!recp[i].recp_created)
5237 if (ice_get_recp_frm_fw(hw,
5238 hw->switch_info->recp_list, i,
5242 /* Skip inverse action recipes */
5243 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
5244 ICE_AQ_RECIPE_ACT_INV_ACT)
5247 /* if number of words we are looking for match */
5248 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
5249 struct ice_fv_word *a = lkup_exts->fv_words;
5250 struct ice_fv_word *b = recp[i].lkup_exts.fv_words;
5251 u16 *c = recp[i].lkup_exts.field_mask;
5252 u16 *d = lkup_exts->field_mask;
 /* For each requested word "p", search all recipe words "q" */
5256 for (p = 0; p < lkup_exts->n_val_words; p++) {
5257 for (q = 0; q < recp[i].lkup_exts.n_val_words;
5259 if (a[p].off == b[q].off &&
5260 a[p].prot_id == b[q].prot_id &&
5262 /* Found the "p"th word in the
 * recipe; move to the next word.
 */
5267 /* After walking through all the words in the
5268 * "i"th recipe if "p"th word was not found then
5269 * this recipe is not what we are looking for.
5270 * So break out from this loop and try the next
 * recipe.
5273 if (q >= recp[i].lkup_exts.n_val_words) {
5278 /* If for "i"th recipe the found was never set to false
5279 * then it means we found our match
 * (profile rules must additionally match the tunnel type)
5281 if (ice_is_prof_rule(tun_type) &&
5282 tun_type == recp[i].tun_type && found)
5283 return i; /* Return the recipe ID */
5284 else if (!ice_is_prof_rule(tun_type) && found)
5285 return i; /* Return the recipe ID */
5288 return ICE_MAX_NUM_RECIPES;
5292 * ice_prot_type_to_id - get protocol ID from protocol type
5293 * @type: protocol type
5294 * @id: pointer to variable that will receive the ID
5296 * Returns true if found, false otherwise
 *
 * Linear lookup of @type in ice_prot_id_tbl; on success the hardware
 * protocol ID is written to *@id.
5298 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
5302 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
5303 if (ice_prot_id_tbl[i].type == type) {
5304 *id = ice_prot_id_tbl[i].protocol_id;
5311 * ice_fill_valid_words - count valid words
5312 * @rule: advanced rule with lookup information
5313 * @lkup_exts: byte offset extractions of the words that are valid
5315 * calculate valid words in a lookup rule using mask value
 *
 * Appends every 16-bit word of @rule that has a non-zero mask to
 * @lkup_exts (protocol ID, byte offset, and field mask) and returns the
 * number of words added.
5318 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
5319 struct ice_prot_lkup_ext *lkup_exts)
5321 u8 j, word, prot_id, ret_val;
5323 if (!ice_prot_type_to_id(rule->type, &prot_id))
 /* Continue appending after any previously recorded words */
5326 word = lkup_exts->n_val_words;
5328 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
5329 if (((u16 *)&rule->m_u)[j] &&
5330 rule->type < ARRAY_SIZE(ice_prot_ext)) {
5331 /* No more space to accommodate */
5332 if (word >= ICE_MAX_CHAIN_WORDS)
5334 lkup_exts->fv_words[word].off =
5335 ice_prot_ext[rule->type].offs[j];
5336 lkup_exts->fv_words[word].prot_id =
5337 ice_prot_id_tbl[rule->type].protocol_id;
5338 lkup_exts->field_mask[word] = ((u16 *)&rule->m_u)[j];
 /* Return only the number of words added by this call */
5342 ret_val = word - lkup_exts->n_val_words;
5343 lkup_exts->n_val_words = word;
5349 * ice_create_first_fit_recp_def - Create a recipe grouping
5350 * @hw: pointer to the hardware structure
5351 * @lkup_exts: an array of protocol header extractions
5352 * @rg_list: pointer to a list that stores new recipe groups
5353 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
5355 * Using first fit algorithm, take all the words that are still not done
5356 * and start grouping them in 4-word groups. Each group makes up one
 * recipe.
 *
 * Returns ICE_ERR_NO_MEMORY if a group entry cannot be allocated.
5359 static enum ice_status
5360 ice_create_first_fit_recp_def(struct ice_hw *hw,
5361 struct ice_prot_lkup_ext *lkup_exts,
5362 struct LIST_HEAD_TYPE *rg_list,
5365 struct ice_pref_recipe_group *grp = NULL;
 /* A rule with no valid words still needs one (empty) group entry */
5370 if (!lkup_exts->n_val_words) {
5371 struct ice_recp_grp_entry *entry;
5373 entry = (struct ice_recp_grp_entry *)
5374 ice_malloc(hw, sizeof(*entry));
5376 return ICE_ERR_NO_MEMORY;
5377 LIST_ADD(&entry->l_entry, rg_list);
5378 grp = &entry->r_group;
5380 grp->n_val_pairs = 0;
5383 /* Walk through every word in the rule to check if it is not done. If so
5384 * then this word needs to be part of a new recipe.
5386 for (j = 0; j < lkup_exts->n_val_words; j++)
5387 if (!ice_is_bit_set(lkup_exts->done, j)) {
 /* Start a new group when the current one is full */
5389 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
5390 struct ice_recp_grp_entry *entry;
5392 entry = (struct ice_recp_grp_entry *)
5393 ice_malloc(hw, sizeof(*entry));
5395 return ICE_ERR_NO_MEMORY;
5396 LIST_ADD(&entry->l_entry, rg_list);
5397 grp = &entry->r_group;
 /* Record the word's protocol/offset pair and its mask */
5401 grp->pairs[grp->n_val_pairs].prot_id =
5402 lkup_exts->fv_words[j].prot_id;
5403 grp->pairs[grp->n_val_pairs].off =
5404 lkup_exts->fv_words[j].off;
5405 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
5413 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
5414 * @hw: pointer to the hardware structure
5415 * @fv_list: field vector with the extraction sequence information
5416 * @rg_list: recipe groupings with protocol-offset pairs
5418 * Helper function to fill in the field vector indices for protocol-offset
5419 * pairs. These indexes are then ultimately programmed into a recipe.
 *
 * Returns ICE_ERR_PARAM if any pair cannot be located in the field vector.
5421 static enum ice_status
5422 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
5423 struct LIST_HEAD_TYPE *rg_list)
5425 struct ice_sw_fv_list_entry *fv;
5426 struct ice_recp_grp_entry *rg;
5427 struct ice_fv_word *fv_ext;
5429 if (LIST_EMPTY(fv_list))
 /* Only the first field vector of the list is consulted here */
5432 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
5433 fv_ext = fv->fv_ptr->ew;
5435 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
5438 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
5439 struct ice_fv_word *pr;
5444 pr = &rg->r_group.pairs[i];
5445 mask = rg->r_group.mask[i];
 /* Scan the extraction sequence for this protocol/offset */
5447 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
5448 if (fv_ext[j].prot_id == pr->prot_id &&
5449 fv_ext[j].off == pr->off) {
5452 /* Store index of field vector */
5454 /* Mask is given by caller as big
5455 * endian, but sent to FW as little
 * endian, so byte-swap it here.
5458 rg->fv_mask[i] = mask << 8 | mask >> 8;
5462 /* Protocol/offset could not be found, caller gave an
 * invalid pair.
 */
5466 return ICE_ERR_PARAM;
5474 * ice_find_free_recp_res_idx - find free result indexes for recipe
5475 * @hw: pointer to hardware structure
5476 * @profiles: bitmap of profiles that will be associated with the new recipe
5477 * @free_idx: pointer to variable to receive the free index bitmap
5479 * The algorithm used here is:
5480 * 1. When creating a new recipe, create a set P which contains all
5481 * Profiles that will be associated with our new recipe
5483 * 2. For each Profile p in set P:
5484 * a. Add all recipes associated with Profile p into set R
5485 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
5486 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
5487 * i. Or just assume they all have the same possible indexes:
5489 * i.e., PossibleIndexes = 0x0000F00000000000
5491 * 3. For each Recipe r in set R:
5492 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
5493 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
5495 * FreeIndexes will contain the bits indicating the indexes free for use,
5496 * then the code needs to update the recipe[r].used_result_idx_bits to
5497 * indicate which indexes were selected for use by this recipe.
 *
 * Returns the number of free result indexes found.
5500 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
5501 ice_bitmap_t *free_idx)
5503 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5504 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5505 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
5509 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5510 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5511 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
5512 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
 /* Start from "all indexes possible"; profiles narrow this down below */
5514 for (count = 0; count < ICE_MAX_FV_WORDS; count++)
5515 ice_set_bit(count, possible_idx);
5517 /* For each profile we are going to associate the recipe with, add the
5518 * recipes that are associated with that profile. This will give us
5519 * the set of recipes that our recipe may collide with. Also, determine
5520 * what possible result indexes are usable given this set of profiles.
5523 while (ICE_MAX_NUM_PROFILES >
5524 (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
5525 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
5526 ICE_MAX_NUM_RECIPES);
5527 ice_and_bitmap(possible_idx, possible_idx,
5528 hw->switch_info->prof_res_bm[bit],
5533 /* For each recipe that our new recipe may collide with, determine
5534 * which indexes have been used.
5536 for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
5537 if (ice_is_bit_set(recipes, bit)) {
5538 ice_or_bitmap(used_idx, used_idx,
5539 hw->switch_info->recp_list[bit].res_idxs,
 /* free = possible XOR used (used is a subset of possible) */
5543 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
5545 /* return number of free indexes */
5548 while (ICE_MAX_FV_WORDS >
5549 (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
5558 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
5559 * @hw: pointer to hardware structure
5560 * @rm: recipe management list entry
5561 * @match_tun: if field vector index for tunnel needs to be programmed
5562 * @profiles: bitmap of profiles that will be assocated.
5564 static enum ice_status
5565 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
5566 bool match_tun, ice_bitmap_t *profiles)
5568 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5569 struct ice_aqc_recipe_data_elem *tmp;
5570 struct ice_aqc_recipe_data_elem *buf;
5571 struct ice_recp_grp_entry *entry;
5572 enum ice_status status;
5578 /* When more than one recipe are required, another recipe is needed to
5579 * chain them together. Matching a tunnel metadata ID takes up one of
5580 * the match fields in the chaining recipe reducing the number of
5581 * chained recipes by one.
5583 /* check number of free result indices */
5584 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5585 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
5587 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
5588 free_res_idx, rm->n_grp_count);
5590 if (rm->n_grp_count > 1) {
5591 if (rm->n_grp_count > free_res_idx)
5592 return ICE_ERR_MAX_LIMIT;
5597 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
5598 return ICE_ERR_MAX_LIMIT;
5600 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
5601 ICE_MAX_NUM_RECIPES,
5604 return ICE_ERR_NO_MEMORY;
5606 buf = (struct ice_aqc_recipe_data_elem *)
5607 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
5609 status = ICE_ERR_NO_MEMORY;
5613 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
5614 recipe_count = ICE_MAX_NUM_RECIPES;
5615 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5617 if (status || recipe_count == 0)
5620 /* Allocate the recipe resources, and configure them according to the
5621 * match fields from protocol headers and extracted field vectors.
5623 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
5624 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5627 status = ice_alloc_recipe(hw, &entry->rid);
5631 /* Clear the result index of the located recipe, as this will be
5632 * updated, if needed, later in the recipe creation process.
5634 tmp[0].content.result_indx = 0;
5636 buf[recps] = tmp[0];
5637 buf[recps].recipe_indx = (u8)entry->rid;
5638 /* if the recipe is a non-root recipe RID should be programmed
5639 * as 0 for the rules to be applied correctly.
5641 buf[recps].content.rid = 0;
5642 ice_memset(&buf[recps].content.lkup_indx, 0,
5643 sizeof(buf[recps].content.lkup_indx),
5646 /* All recipes use look-up index 0 to match switch ID. */
5647 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5648 buf[recps].content.mask[0] =
5649 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5650 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5653 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5654 buf[recps].content.lkup_indx[i] = 0x80;
5655 buf[recps].content.mask[i] = 0;
5658 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5659 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5660 buf[recps].content.mask[i + 1] =
5661 CPU_TO_LE16(entry->fv_mask[i]);
5664 if (rm->n_grp_count > 1) {
5665 /* Checks to see if there really is a valid result index
5668 if (chain_idx >= ICE_MAX_FV_WORDS) {
5669 ice_debug(hw, ICE_DBG_SW,
5670 "No chain index available\n");
5671 status = ICE_ERR_MAX_LIMIT;
5675 entry->chain_idx = chain_idx;
5676 buf[recps].content.result_indx =
5677 ICE_AQ_RECIPE_RESULT_EN |
5678 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5679 ICE_AQ_RECIPE_RESULT_DATA_M);
5680 ice_clear_bit(chain_idx, result_idx_bm);
5681 chain_idx = ice_find_first_bit(result_idx_bm,
5685 /* fill recipe dependencies */
5686 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
5687 ICE_MAX_NUM_RECIPES);
5688 ice_set_bit(buf[recps].recipe_indx,
5689 (ice_bitmap_t *)buf[recps].recipe_bitmap);
5690 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5694 if (rm->n_grp_count == 1) {
5695 rm->root_rid = buf[0].recipe_indx;
5696 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
5697 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5698 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5699 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5700 sizeof(buf[0].recipe_bitmap),
5701 ICE_NONDMA_TO_NONDMA);
5703 status = ICE_ERR_BAD_PTR;
5706 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
5707 * the recipe which is getting created if specified
5708 * by user. Usually any advanced switch filter, which results
5709 * into new extraction sequence, ended up creating a new recipe
5710 * of type ROOT and usually recipes are associated with profiles
5711 * Switch rule referreing newly created recipe, needs to have
5712 * either/or 'fwd' or 'join' priority, otherwise switch rule
5713 * evaluation will not happen correctly. In other words, if
5714 * switch rule to be evaluated on priority basis, then recipe
5715 * needs to have priority, otherwise it will be evaluated last.
5717 buf[0].content.act_ctrl_fwd_priority = rm->priority;
5719 struct ice_recp_grp_entry *last_chain_entry;
5722 /* Allocate the last recipe that will chain the outcomes of the
5723 * other recipes together
5725 status = ice_alloc_recipe(hw, &rid);
5729 buf[recps].recipe_indx = (u8)rid;
5730 buf[recps].content.rid = (u8)rid;
5731 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5732 /* the new entry created should also be part of rg_list to
5733 * make sure we have complete recipe
5735 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
5736 sizeof(*last_chain_entry));
5737 if (!last_chain_entry) {
5738 status = ICE_ERR_NO_MEMORY;
5741 last_chain_entry->rid = rid;
5742 ice_memset(&buf[recps].content.lkup_indx, 0,
5743 sizeof(buf[recps].content.lkup_indx),
5745 /* All recipes use look-up index 0 to match switch ID. */
5746 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5747 buf[recps].content.mask[0] =
5748 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5749 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5750 buf[recps].content.lkup_indx[i] =
5751 ICE_AQ_RECIPE_LKUP_IGNORE;
5752 buf[recps].content.mask[i] = 0;
5756 /* update r_bitmap with the recp that is used for chaining */
5757 ice_set_bit(rid, rm->r_bitmap);
5758 /* this is the recipe that chains all the other recipes so it
5759 * should not have a chaining ID to indicate the same
5761 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
5762 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
5764 last_chain_entry->fv_idx[i] = entry->chain_idx;
5765 buf[recps].content.lkup_indx[i] = entry->chain_idx;
5766 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
5767 ice_set_bit(entry->rid, rm->r_bitmap);
5769 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
5770 if (sizeof(buf[recps].recipe_bitmap) >=
5771 sizeof(rm->r_bitmap)) {
5772 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5773 sizeof(buf[recps].recipe_bitmap),
5774 ICE_NONDMA_TO_NONDMA);
5776 status = ICE_ERR_BAD_PTR;
5779 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5781 /* To differentiate among different UDP tunnels, a meta data ID
5785 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
5786 buf[recps].content.mask[i] =
5787 CPU_TO_LE16(ICE_TUN_FLAG_MASK);
5791 rm->root_rid = (u8)rid;
5793 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5797 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5798 ice_release_change_lock(hw);
5802 /* Every recipe that just got created add it to the recipe
5805 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5806 struct ice_switch_info *sw = hw->switch_info;
5807 bool is_root, idx_found = false;
5808 struct ice_sw_recipe *recp;
5809 u16 idx, buf_idx = 0;
5811 /* find buffer index for copying some data */
5812 for (idx = 0; idx < rm->n_grp_count; idx++)
5813 if (buf[idx].recipe_indx == entry->rid) {
5819 status = ICE_ERR_OUT_OF_RANGE;
5823 recp = &sw->recp_list[entry->rid];
5824 is_root = (rm->root_rid == entry->rid);
5825 recp->is_root = is_root;
5827 recp->root_rid = entry->rid;
5828 recp->big_recp = (is_root && rm->n_grp_count > 1);
5830 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
5831 entry->r_group.n_val_pairs *
5832 sizeof(struct ice_fv_word),
5833 ICE_NONDMA_TO_NONDMA);
5835 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5836 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
5838 /* Copy non-result fv index values and masks to recipe. This
5839 * call will also update the result recipe bitmask.
5841 ice_collect_result_idx(&buf[buf_idx], recp);
5843 /* for non-root recipes, also copy to the root, this allows
5844 * easier matching of a complete chained recipe
5847 ice_collect_result_idx(&buf[buf_idx],
5848 &sw->recp_list[rm->root_rid]);
5850 recp->n_ext_words = entry->r_group.n_val_pairs;
5851 recp->chain_idx = entry->chain_idx;
5852 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5853 recp->n_grp_count = rm->n_grp_count;
5854 recp->tun_type = rm->tun_type;
5855 recp->recp_created = true;
5870 * ice_create_recipe_group - creates recipe group
5871 * @hw: pointer to hardware structure
5872 * @rm: recipe management list entry
5873 * @lkup_exts: lookup elements
 *
 * Packs the valid extraction words from @lkup_exts into one or more recipe
 * groups on rm->rg_list (first-fit), and caches the raw word/mask arrays on
 * @rm for later recipe matching.
 *
 * NOTE(review): this view of the file is subsampled; the status check after
 * ice_create_first_fit_recp_def() and the final return statement are not
 * visible here — presumably the copies below only run on success; confirm
 * against the full source.
5875 static enum ice_status
5876 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5877 struct ice_prot_lkup_ext *lkup_exts)
5879 enum ice_status status;
5882 rm->n_grp_count = 0;
5884 /* Create recipes for words that are marked not done by packing them
 * into groups using a first-fit strategy; recp_count receives the number
 * of groups produced.
 */
5887 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5888 &rm->rg_list, &recp_count);
5890 rm->n_grp_count += recp_count;
5891 rm->n_ext_words = lkup_exts->n_val_words;
 /* Cache the extraction words and their masks on the recipe entry so the
  * requested match criteria remain available after grouping.
  */
5892 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
5893 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
5894 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
5895 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
5902 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
5903 * @hw: pointer to hardware structure
5904 * @lkups: lookup elements or match criteria for the advanced recipe, one
5905 * structure per protocol header
5906 * @lkups_cnt: number of protocols
5907 * @bm: bitmap of field vectors to consider
5908 * @fv_list: pointer to a list that holds the returned field vectors
 *
 * Translates each lookup's protocol type to a hardware protocol ID, then
 * collects (into @fv_list) the field vectors from @bm that cover all of the
 * requested protocol IDs. The temporary protocol-ID array is freed before
 * returning.
 *
 * NOTE(review): subsampled view — the early-exit label the loop jumps to on
 * ICE_ERR_CFG and the final return are not visible here.
5910 static enum ice_status
5911 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5912 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
5914 enum ice_status status;
 /* scratch array: one hardware protocol ID per lookup element */
5921 prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
5923 return ICE_ERR_NO_MEMORY;
 /* Any lookup type without a hardware protocol ID mapping is a
  * configuration error.
  */
5925 for (i = 0; i < lkups_cnt; i++)
5926 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
5927 status = ICE_ERR_CFG;
5931 /* Find field vectors that include all specified protocol types */
5932 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
5935 ice_free(hw, prot_ids);
5940 * ice_add_special_words - Add words that are not protocols, such as metadata
5941 * @rinfo: other information regarding the rule e.g. priority and action info
5942 * @lkup_exts: lookup word structure
 *
 * For tunneled rules, appends one extra lookup word that matches the tunnel
 * flag in the packet metadata (prot_id ICE_META_DATA_ID_HW, offset derived
 * from ICE_TUN_FLAG_MDID, mask ICE_TUN_FLAG_MASK). Returns
 * ICE_ERR_MAX_LIMIT when no free lookup word remains.
 *
 * NOTE(review): subsampled view — the multiplier completing the offset
 * expression on the ICE_TUN_FLAG_MDID line and the success return are not
 * visible here.
5944 static enum ice_status
5945 ice_add_special_words(struct ice_adv_rule_info *rinfo,
5946 struct ice_prot_lkup_ext *lkup_exts)
5948 /* If this is a tunneled packet, then add recipe index to match the
5949 * tunnel bit in the packet metadata flags.
5951 if (rinfo->tun_type != ICE_NON_TUN) {
5952 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
 /* consume the next free lookup word slot */
5953 u8 word = lkup_exts->n_val_words++;
5955 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5956 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID *
5958 lkup_exts->field_mask[word] = ICE_TUN_FLAG_MASK;
5960 return ICE_ERR_MAX_LIMIT;
5967 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5968 * @hw: pointer to hardware structure
5969 * @rinfo: other information regarding the rule e.g. priority and action info
5970 * @bm: pointer to memory for returning the bitmap of field vectors
 *
 * Maps the rule's tunnel type to either a profile-type class (resolved via
 * ice_get_sw_fv_bitmap at the end) or, for the ICE_SW_TUN_PROFID_* and
 * related cases, a specific profile ID set directly in @bm.
 *
 * NOTE(review): subsampled view — the break/return statements between the
 * switch cases are not visible; the visible case grouping suggests the
 * profile-ID cases bypass the final ice_get_sw_fv_bitmap() call — confirm
 * against the full source.
5973 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5976 enum ice_prof_type prof_type;
 /* start with an empty bitmap; bits are set per tunnel type below */
5978 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
5980 switch (rinfo->tun_type) {
5982 prof_type = ICE_PROF_NON_TUN;
5984 case ICE_ALL_TUNNELS:
5985 prof_type = ICE_PROF_TUN_ALL;
5987 case ICE_SW_TUN_VXLAN_GPE:
5988 case ICE_SW_TUN_GENEVE:
5989 case ICE_SW_TUN_VXLAN:
5990 case ICE_SW_TUN_UDP:
5991 case ICE_SW_TUN_GTP:
5992 prof_type = ICE_PROF_TUN_UDP;
5994 case ICE_SW_TUN_NVGRE:
5995 prof_type = ICE_PROF_TUN_GRE;
5997 case ICE_SW_TUN_PPPOE:
5998 prof_type = ICE_PROF_TUN_PPPOE;
 /* The cases below select one specific hardware profile ID rather
  * than a profile-type class.
  */
6000 case ICE_SW_TUN_PROFID_IPV6_ESP:
6001 ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
6003 case ICE_SW_TUN_PROFID_IPV6_AH:
6004 ice_set_bit(ICE_PROFID_IPV6_AH, bm);
6006 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6007 case ICE_SW_TUN_IPV6_L2TPV3:
6008 ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
6010 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6011 case ICE_SW_TUN_IPV6_NAT_T:
6012 ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
6014 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6015 ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
6017 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6018 ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
6020 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6021 ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
6023 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6024 ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
6026 case ICE_SW_TUN_IPV4_NAT_T:
6027 ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
6029 case ICE_SW_TUN_IPV4_L2TPV3:
6030 ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
6032 case ICE_SW_TUN_AND_NON_TUN:
 /* default/fallback: consider every profile */
6034 prof_type = ICE_PROF_ALL;
 /* resolve the selected profile-type class into the profile bitmap */
6038 ice_get_sw_fv_bitmap(hw, prof_type, bm);
6042 * ice_is_prof_rule - determine if rule type is a profile rule
6043 * @type: the rule type
6045 * if the rule type is a profile rule, that means that there no field value
6046 * match required, in this case just a profile hit is required.
 *
 * Returns true for the ICE_SW_TUN_PROFID_* tunnel types listed below.
 * NOTE(review): subsampled view — the "return true" for the listed cases
 * and the default "return false" are not visible here; confirm against the
 * full source.
6048 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
6051 case ICE_SW_TUN_PROFID_IPV6_ESP:
6052 case ICE_SW_TUN_PROFID_IPV6_AH:
6053 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6054 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6055 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6056 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6057 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6058 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6068 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
6069 * @hw: pointer to hardware structure
6070 * @lkups: lookup elements or match criteria for the advanced recipe, one
6071 * structure per protocol header
6072 * @lkups_cnt: number of protocols
6073 * @rinfo: other information regarding the rule e.g. priority and action info
6074 * @rid: return the recipe ID of the recipe created
 *
 * Top-level flow: validate the lookups, find compatible field vectors
 * (profiles), group the match words into recipes, reuse an existing recipe
 * if one matches, otherwise create the recipe(s) via ice_add_sw_recipe()
 * and associate them with every matching profile. On success *rid holds the
 * root recipe ID. All temporary lists and allocations are released on exit.
 *
 * NOTE(review): subsampled view — several error checks, goto labels and the
 * final return are not visible here.
6076 static enum ice_status
6077 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6078 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
6080 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
6081 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
6082 struct ice_prot_lkup_ext *lkup_exts;
6083 struct ice_recp_grp_entry *r_entry;
6084 struct ice_sw_fv_list_entry *fvit;
6085 struct ice_recp_grp_entry *r_tmp;
6086 struct ice_sw_fv_list_entry *tmp;
6087 enum ice_status status = ICE_SUCCESS;
6088 struct ice_sw_recipe *rm;
6089 bool match_tun = false;
 /* profile-only rules need no lookups; everything else does */
6092 if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
6093 return ICE_ERR_PARAM;
6095 lkup_exts = (struct ice_prot_lkup_ext *)
6096 ice_malloc(hw, sizeof(*lkup_exts))
6098 return ICE_ERR_NO_MEMORY;
6100 /* Determine the number of words to be matched and if it exceeds a
6101 * recipe's restrictions
6103 for (i = 0; i < lkups_cnt; i++) {
6106 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
6107 status = ICE_ERR_CFG;
6108 goto err_free_lkup_exts;
 /* extract the valid (masked) words for this protocol header */
6111 count = ice_fill_valid_words(&lkups[i], lkup_exts);
6113 status = ICE_ERR_CFG;
6114 goto err_free_lkup_exts;
6118 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
6120 status = ICE_ERR_NO_MEMORY;
6121 goto err_free_lkup_exts;
6124 /* Get field vectors that contain fields extracted from all the protocol
6125 * headers being programmed.
6127 INIT_LIST_HEAD(&rm->fv_list);
6128 INIT_LIST_HEAD(&rm->rg_list);
6130 /* Get bitmap of field vectors (profiles) that are compatible with the
6131 * rule request; only these will be searched in the subsequent call to
6134 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
6136 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
6140 /* Group match words into recipes using preferred recipe grouping
6143 status = ice_create_recipe_group(hw, rm, lkup_exts);
6147 /* There is only profile for UDP tunnels. So, it is necessary to use a
6148 * metadata ID flag to differentiate different tunnel types. A separate
6149 * recipe needs to be used for the metadata.
6151 if ((rinfo->tun_type == ICE_SW_TUN_VXLAN_GPE ||
6152 rinfo->tun_type == ICE_SW_TUN_GENEVE ||
6153 rinfo->tun_type == ICE_SW_TUN_VXLAN) && rm->n_grp_count > 1)
6156 /* set the recipe priority if specified */
6157 rm->priority = (u8)rinfo->priority;
6159 /* Find offsets from the field vector. Pick the first one for all the
6162 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
6166 /* An empty FV list means to use all the profiles returned in the
6169 if (LIST_EMPTY(&rm->fv_list)) {
 /* materialize an fv_list entry for each profile in the bitmap */
6172 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++)
6173 if (ice_is_bit_set(fv_bitmap, j)) {
6174 struct ice_sw_fv_list_entry *fvl;
6176 fvl = (struct ice_sw_fv_list_entry *)
6177 ice_malloc(hw, sizeof(*fvl));
6181 fvl->profile_id = j;
6182 LIST_ADD(&fvl->list_entry, &rm->fv_list);
6186 /* get bitmap of all profiles the recipe will be associated with */
6187 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
6188 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
6190 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
6191 ice_set_bit((u16)fvit->profile_id, profiles);
6194 /* Create any special protocol/offset pairs, such as looking at tunnel
6195 * bits by extracting metadata
6197 status = ice_add_special_words(rinfo, lkup_exts);
6199 goto err_free_lkup_exts;
6201 /* Look for a recipe which matches our requested fv / mask list */
6202 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
6203 if (*rid < ICE_MAX_NUM_RECIPES)
6204 /* Success if found a recipe that match the existing criteria */
6207 rm->tun_type = rinfo->tun_type;
6208 /* Recipe we need does not exist, add a recipe */
6209 status = ice_add_sw_recipe(hw, rm, match_tun, profiles);
6213 /* Associate all the recipes created with all the profiles in the
6214 * common field vector.
6216 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
6218 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
 /* read the profile's current recipe association, OR in ours,
  * and write it back under the change lock
  */
6221 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
6222 (u8 *)r_bitmap, NULL);
6226 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
6227 ICE_MAX_NUM_RECIPES);
6228 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
6232 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
6235 ice_release_change_lock(hw);
6240 /* Update profile to recipe bitmap array */
6241 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
6242 ICE_MAX_NUM_RECIPES);
6244 /* Update recipe to profile bitmap array */
6245 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
6246 if (ice_is_bit_set(r_bitmap, j))
6247 ice_set_bit((u16)fvit->profile_id,
6248 recipe_to_profile[j]);
6251 *rid = rm->root_rid;
 /* remember the match criteria on the recipe for future lookups */
6252 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
6253 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
 /* cleanup: release recipe-group entries and field-vector list */
6255 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
6256 ice_recp_grp_entry, l_entry) {
6257 LIST_DEL(&r_entry->l_entry);
6258 ice_free(hw, r_entry);
6261 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
6263 LIST_DEL(&fvit->list_entry);
6268 ice_free(hw, rm->root_buf);
6273 ice_free(hw, lkup_exts);
6279 * ice_find_dummy_packet - find dummy packet by tunnel type
6281 * @lkups: lookup elements or match criteria for the advanced recipe, one
6282 * structure per protocol header
6283 * @lkups_cnt: number of protocols
6284 * @tun_type: tunnel type from the match criteria
6285 * @pkt: dummy packet to fill according to filter match criteria
6286 * @pkt_len: packet length of dummy packet
6287 * @offsets: pointer to receive the pointer to the offsets for the packet
 *
 * Scans the lookups to classify the rule (tcp/udp/ipv6/vlan, and GRE or
 * PPPoE-IPv6 hints from header field values), then selects the matching
 * pre-built dummy packet template plus its protocol-offset table. The
 * tunnel-type checks are ordered most-specific first; the trailing
 * tcp/udp/ipv6/vlan branches are the non-tunnel fallback.
 *
 * NOTE(review): subsampled view — several assignments (the boolean flag
 * sets in the classification loop, returns between branches, and some
 * if/else tokens) are not visible here.
6290 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6291 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
6293 const struct ice_dummy_pkt_offsets **offsets)
6295 bool tcp = false, udp = false, ipv6 = false, vlan = false;
 /* classify the rule from the requested lookup headers */
6299 for (i = 0; i < lkups_cnt; i++) {
6300 if (lkups[i].type == ICE_UDP_ILOS)
6302 else if (lkups[i].type == ICE_TCP_IL)
6304 else if (lkups[i].type == ICE_IPV6_OFOS)
6306 else if (lkups[i].type == ICE_VLAN_OFOS)
 /* outer IPv4 with protocol 0x2F (GRE) matched exactly */
6308 else if (lkups[i].type == ICE_IPV4_OFOS &&
6309 lkups[i].h_u.ipv4_hdr.protocol ==
6310 ICE_IPV4_NVGRE_PROTO_ID &&
6311 lkups[i].m_u.ipv4_hdr.protocol ==
 /* PPPoE carrying IPv6 (PPP protocol 0x0057) */
6314 else if (lkups[i].type == ICE_PPPOE &&
6315 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
6316 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
6317 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
 /* outer ethertype 0x86DD (IPv6) matched exactly */
6320 else if (lkups[i].type == ICE_ETYPE_OL &&
6321 lkups[i].h_u.ethertype.ethtype_id ==
6322 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
6323 lkups[i].m_u.ethertype.ethtype_id ==
 /* ESP / AH / NAT-T / L2TPv3 templates, IPv4 and IPv6 variants */
6328 if (tun_type == ICE_SW_TUN_IPV4_ESP) {
6329 *pkt = dummy_ipv4_esp_pkt;
6330 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
6331 *offsets = dummy_ipv4_esp_packet_offsets;
6335 if (tun_type == ICE_SW_TUN_IPV6_ESP) {
6336 *pkt = dummy_ipv6_esp_pkt;
6337 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
6338 *offsets = dummy_ipv6_esp_packet_offsets;
6342 if (tun_type == ICE_SW_TUN_IPV4_AH) {
6343 *pkt = dummy_ipv4_ah_pkt;
6344 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
6345 *offsets = dummy_ipv4_ah_packet_offsets;
6349 if (tun_type == ICE_SW_TUN_IPV6_AH) {
6350 *pkt = dummy_ipv6_ah_pkt;
6351 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
6352 *offsets = dummy_ipv6_ah_packet_offsets;
6356 if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
6357 *pkt = dummy_ipv4_nat_pkt;
6358 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
6359 *offsets = dummy_ipv4_nat_packet_offsets;
6363 if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
6364 *pkt = dummy_ipv6_nat_pkt;
6365 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
6366 *offsets = dummy_ipv6_nat_packet_offsets;
6370 if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
6371 *pkt = dummy_ipv4_l2tpv3_pkt;
6372 *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
6373 *offsets = dummy_ipv4_l2tpv3_packet_offsets;
6377 if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
6378 *pkt = dummy_ipv6_l2tpv3_pkt;
6379 *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
6380 *offsets = dummy_ipv6_l2tpv3_packet_offsets;
6384 if (tun_type == ICE_SW_TUN_GTP) {
6385 *pkt = dummy_udp_gtp_packet;
6386 *pkt_len = sizeof(dummy_udp_gtp_packet);
6387 *offsets = dummy_udp_gtp_packet_offsets;
 /* PPPoE: inner payload chosen by the ipv6 flag detected above */
6390 if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
6391 *pkt = dummy_pppoe_ipv6_packet;
6392 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
6393 *offsets = dummy_pppoe_packet_offsets;
6395 } else if (tun_type == ICE_SW_TUN_PPPOE) {
6396 *pkt = dummy_pppoe_ipv4_packet;
6397 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
6398 *offsets = dummy_pppoe_packet_offsets;
6402 if (tun_type == ICE_ALL_TUNNELS) {
6403 *pkt = dummy_gre_udp_packet;
6404 *pkt_len = sizeof(dummy_gre_udp_packet);
6405 *offsets = dummy_gre_udp_packet_offsets;
 /* NVGRE (explicit type or detected via IPv4 proto 0x2F):
  * pick TCP or UDP inner template
  */
6409 if (tun_type == ICE_SW_TUN_NVGRE || gre) {
6411 *pkt = dummy_gre_tcp_packet;
6412 *pkt_len = sizeof(dummy_gre_tcp_packet);
6413 *offsets = dummy_gre_tcp_packet_offsets;
6417 *pkt = dummy_gre_udp_packet;
6418 *pkt_len = sizeof(dummy_gre_udp_packet);
6419 *offsets = dummy_gre_udp_packet_offsets;
 /* UDP-encapsulated tunnels (VXLAN/GENEVE/GPE/generic UDP) */
6423 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
6424 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP) {
6426 *pkt = dummy_udp_tun_tcp_packet;
6427 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
6428 *offsets = dummy_udp_tun_tcp_packet_offsets;
6432 *pkt = dummy_udp_tun_udp_packet;
6433 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
6434 *offsets = dummy_udp_tun_udp_packet_offsets;
 /* non-tunnel fallback: choose by udp/tcp, ipv6 and vlan flags */
6440 *pkt = dummy_vlan_udp_packet;
6441 *pkt_len = sizeof(dummy_vlan_udp_packet);
6442 *offsets = dummy_vlan_udp_packet_offsets;
6445 *pkt = dummy_udp_packet;
6446 *pkt_len = sizeof(dummy_udp_packet);
6447 *offsets = dummy_udp_packet_offsets;
6449 } else if (udp && ipv6) {
6451 *pkt = dummy_vlan_udp_ipv6_packet;
6452 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
6453 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
6456 *pkt = dummy_udp_ipv6_packet;
6457 *pkt_len = sizeof(dummy_udp_ipv6_packet);
6458 *offsets = dummy_udp_ipv6_packet_offsets;
6460 } else if ((tcp && ipv6) || ipv6) {
6462 *pkt = dummy_vlan_tcp_ipv6_packet;
6463 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
6464 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
6467 *pkt = dummy_tcp_ipv6_packet;
6468 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
6469 *offsets = dummy_tcp_ipv6_packet_offsets;
 /* final default: IPv4 TCP, with or without VLAN */
6474 *pkt = dummy_vlan_tcp_packet;
6475 *pkt_len = sizeof(dummy_vlan_tcp_packet);
6476 *offsets = dummy_vlan_tcp_packet_offsets;
6478 *pkt = dummy_tcp_packet;
6479 *pkt_len = sizeof(dummy_tcp_packet);
6480 *offsets = dummy_tcp_packet_offsets;
6485 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
6487 * @lkups: lookup elements or match criteria for the advanced recipe, one
6488 * structure per protocol header
6489 * @lkups_cnt: number of protocols
6490 * @s_rule: stores rule information from the match criteria
6491 * @dummy_pkt: dummy packet to fill according to filter match criteria
6492 * @pkt_len: packet length of dummy packet
6493 * @offsets: offset info for the dummy packet
 *
 * Copies @dummy_pkt into the switch rule's header buffer, then for each
 * lookup locates the layer's byte offset via @offsets and overwrites, one
 * u16 at a time, only the bits selected by the lookup's mask. Finally
 * records the packet length in the rule.
 *
 * NOTE(review): subsampled view — the ICE_ERR_CFG path for a non-word-
 * multiple header length and several switch-case labels are not visible
 * here.
6495 static enum ice_status
6496 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6497 struct ice_aqc_sw_rules_elem *s_rule,
6498 const u8 *dummy_pkt, u16 pkt_len,
6499 const struct ice_dummy_pkt_offsets *offsets)
6504 /* Start with a packet with a pre-defined/dummy content. Then, fill
6505 * in the header values to be looked up or matched.
6507 pkt = s_rule->pdata.lkup_tx_rx.hdr;
6509 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
6511 for (i = 0; i < lkups_cnt; i++) {
6512 enum ice_protocol_type type;
6513 u16 offset = 0, len = 0, j;
6516 /* find the start of this layer; it should be found since this
6517 * was already checked when search for the dummy packet
6519 type = lkups[i].type;
6520 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
6521 if (type == offsets[j].type) {
6522 offset = offsets[j].offset;
6527 /* this should never happen in a correct calling sequence */
6529 return ICE_ERR_PARAM;
 /* header length to copy depends on the protocol type */
6531 switch (lkups[i].type) {
6534 len = sizeof(struct ice_ether_hdr);
6537 len = sizeof(struct ice_ethtype_hdr);
6540 len = sizeof(struct ice_vlan_hdr);
6544 len = sizeof(struct ice_ipv4_hdr);
6548 len = sizeof(struct ice_ipv6_hdr);
6553 len = sizeof(struct ice_l4_hdr);
6556 len = sizeof(struct ice_sctp_hdr);
6559 len = sizeof(struct ice_nvgre);
6564 len = sizeof(struct ice_udp_tnl_hdr);
6568 len = sizeof(struct ice_udp_gtp_hdr);
6571 len = sizeof(struct ice_pppoe_hdr);
6574 len = sizeof(struct ice_esp_hdr);
6577 len = sizeof(struct ice_nat_t_hdr);
6580 len = sizeof(struct ice_ah_hdr);
6583 len = sizeof(struct ice_l2tpv3_sess_hdr);
6586 return ICE_ERR_PARAM;
6589 /* the length should be a word multiple */
6590 if (len % ICE_BYTES_PER_WORD)
6593 /* We have the offset to the header start, the length, the
6594 * caller's header values and mask. Use this information to
6595 * copy the data into the dummy packet appropriately based on
6596 * the mask. Note that we need to only write the bits as
6597 * indicated by the mask to make sure we don't improperly write
6598 * over any significant packet data.
 * Per u16: keep packet bits where the mask is clear, take the
 * lookup header bits where the mask is set.
6600 for (j = 0; j < len / sizeof(u16); j++)
6601 if (((u16 *)&lkups[i].m_u)[j])
6602 ((u16 *)(pkt + offset))[j] =
6603 (((u16 *)(pkt + offset))[j] &
6604 ~((u16 *)&lkups[i].m_u)[j]) |
6605 (((u16 *)&lkups[i].h_u)[j] &
6606 ((u16 *)&lkups[i].m_u)[j]);
6609 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
6615 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
6616 * @hw: pointer to the hardware structure
6617 * @tun_type: tunnel type
6618 * @pkt: dummy packet to fill in
6619 * @offsets: offset info for the dummy packet
 *
 * For UDP-based tunnel types, looks up the currently open tunnel port
 * (VXLAN or GENEVE) and writes it as the destination port of the outer UDP
 * header located via @offsets.
 *
 * NOTE(review): subsampled view — the error/return statements when no open
 * tunnel port exists, and the success/failure returns after the loop, are
 * not visible here.
6621 static enum ice_status
6622 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
6623 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
 /* resolve the UDP port for the tunnel type being programmed */
6628 case ICE_SW_TUN_AND_NON_TUN:
6629 case ICE_SW_TUN_VXLAN_GPE:
6630 case ICE_SW_TUN_VXLAN:
6631 case ICE_SW_TUN_UDP:
6632 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
6636 case ICE_SW_TUN_GENEVE:
6637 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
6642 /* Nothing needs to be done for this tunnel type */
6646 /* Find the outer UDP protocol header and insert the port number */
6647 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
6648 if (offsets[i].type == ICE_UDP_OF) {
6649 struct ice_l4_hdr *hdr;
6652 offset = offsets[i].offset;
6653 hdr = (struct ice_l4_hdr *)&pkt[offset];
6654 hdr->dst_port = CPU_TO_BE16(open_port);
6664 * ice_find_adv_rule_entry - Search a rule entry
6665 * @hw: pointer to the hardware structure
6666 * @lkups: lookup elements or match criteria for the advanced recipe, one
6667 * structure per protocol header
6668 * @lkups_cnt: number of protocols
6669 * @recp_id: recipe ID for which we are finding the rule
6670 * @rinfo: other information regarding the rule e.g. priority and action info
6672 * Helper function to search for a given advance rule entry
6673 * Returns pointer to entry storing the rule if found
 *
 * A rule matches when it has the same lookup count, byte-identical lookup
 * elements (memcmp), and the same sw_act.flag and tun_type as @rinfo.
 *
 * NOTE(review): subsampled view — the return of the matched entry and the
 * trailing NULL return are not visible here.
6675 static struct ice_adv_fltr_mgmt_list_entry *
6676 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6677 u16 lkups_cnt, u16 recp_id,
6678 struct ice_adv_rule_info *rinfo)
6680 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6681 struct ice_switch_info *sw = hw->switch_info;
 /* walk every filter already installed on this recipe */
6684 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
6685 ice_adv_fltr_mgmt_list_entry, list_entry) {
6686 bool lkups_matched = true;
6688 if (lkups_cnt != list_itr->lkups_cnt)
6690 for (i = 0; i < list_itr->lkups_cnt; i++)
6691 if (memcmp(&list_itr->lkups[i], &lkups[i],
6693 lkups_matched = false;
6696 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
6697 rinfo->tun_type == list_itr->rule_info.tun_type &&
6705 * ice_adv_add_update_vsi_list
6706 * @hw: pointer to the hardware structure
6707 * @m_entry: pointer to current adv filter management list entry
6708 * @cur_fltr: filter information from the book keeping entry
6709 * @new_fltr: filter information with the new VSI to be added
6711 * Call AQ command to add or update previously created VSI list with new VSI.
6713 * Helper function to do book keeping associated with adding filter information
6714 * The algorithm to do the booking keeping is described below :
6715 * When a VSI needs to subscribe to a given advanced filter
6716 * if only one VSI has been added till now
6717 * Allocate a new VSI list and add two VSIs
6718 * to this list using switch rule command
6719 * Update the previously created switch rule with the
6720 * newly created VSI list ID
6721 * if a VSI list was previously created
6722 * Add the new VSI to the previously created VSI list set
6723 * using the update switch rule command
 *
 * NOTE(review): subsampled view — several error-status checks and returns
 * between the visible statements are not shown here.
6725 static enum ice_status
6726 ice_adv_add_update_vsi_list(struct ice_hw *hw,
6727 struct ice_adv_fltr_mgmt_list_entry *m_entry,
6728 struct ice_adv_rule_info *cur_fltr,
6729 struct ice_adv_rule_info *new_fltr)
6731 enum ice_status status;
6732 u16 vsi_list_id = 0;
 /* queue/queue-group/drop actions cannot be turned into VSI lists */
6734 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6735 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6736 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
6737 return ICE_ERR_NOT_IMPL;
 /* mixing a queue action with an existing VSI action is unsupported */
6739 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6740 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
6741 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6742 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
6743 return ICE_ERR_NOT_IMPL;
6745 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
6746 /* Only one entry existed in the mapping and it was not already
6747 * a part of a VSI list. So, create a VSI list with the old and
6750 struct ice_fltr_info tmp_fltr;
6751 u16 vsi_handle_arr[2];
6753 /* A rule already exists with the new VSI being added */
6754 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
6755 new_fltr->sw_act.fwd_id.hw_vsi_id)
6756 return ICE_ERR_ALREADY_EXISTS;
 /* build a two-entry VSI list from the old and new VSIs */
6758 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
6759 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
6760 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
6766 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
6767 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
6768 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
6769 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
6770 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
6772 /* Update the previous switch rule of "forward to VSI" to
 * "forward to VSI list" referencing the newly created list ID
6775 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
 /* record the new action/list on the bookkeeping entry */
6779 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
6780 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
6781 m_entry->vsi_list_info =
6782 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
6785 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
6787 if (!m_entry->vsi_list_info)
6790 /* A rule already exists with the new VSI being added */
6791 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
6794 /* Update the previously created VSI list set with
6795 * the new VSI ID passed in
6797 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
6799 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
6801 ice_aqc_opc_update_sw_rules,
6803 /* update VSI list mapping info with new VSI ID */
6805 ice_set_bit(vsi_handle,
6806 m_entry->vsi_list_info->vsi_map);
6809 m_entry->vsi_count++;
6814 * ice_add_adv_rule - helper function to create an advanced switch rule
6815 * @hw: pointer to the hardware structure
6816 * @lkups: information on the words that needs to be looked up. All words
6817 * together makes one recipe
6818 * @lkups_cnt: num of entries in the lkups array
6819 * @rinfo: other information related to the rule that needs to be programmed
6820 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
6821 * ignored is case of error.
6823 * This function can program only 1 rule at a time. The lkups is used to
6824 * describe the all the words that forms the "lookup" portion of the recipe.
6825 * These words can span multiple protocols. Callers to this function need to
6826 * pass in a list of protocol headers with lookup information along and mask
6827 * that determines which words are valid from the given protocol header.
6828 * rinfo describes other information related to this rule such as forwarding
6829 * IDs, priority of this rule, etc.
/* Program one advanced switch rule: validate the requested action, resolve
 * or create the recipe for the lookup words, build the switch-rule AQ buffer
 * around a dummy packet, send it to FW, and book-keep the resulting filter.
 * NOTE(review): this view of the file is sampled — some interior lines
 * (braces, conditions) are not visible here.
 */
6832 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6833 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
6834 struct ice_rule_query_data *added_entry)
6836 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
6837 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
6838 const struct ice_dummy_pkt_offsets *pkt_offsets;
6839 struct ice_aqc_sw_rules_elem *s_rule = NULL;
6840 struct LIST_HEAD_TYPE *rule_head;
6841 struct ice_switch_info *sw;
6842 enum ice_status status;
6843 const u8 *pkt = NULL;
/* One-time build of the profile-to-result-index bitmap consumed by the
 * recipe-creation path below.
 */
6849 /* Initialize profile to result index bitmap */
6850 if (!hw->switch_info->prof_res_bm_init) {
6851 hw->switch_info->prof_res_bm_init = 1;
6852 ice_init_prof_result_bm(hw);
/* Profile (tunnel-type) rules may legitimately carry zero lookups; all
 * other rules must supply at least one.
 */
6855 prof_rule = ice_is_prof_rule(rinfo->tun_type);
6856 if (!prof_rule && !lkups_cnt)
6857 return ICE_ERR_PARAM;
6859 /* get # of words we need to match */
6861 for (i = 0; i < lkups_cnt; i++) {
6864 ptr = (u16 *)&lkups[i].m_u;
6865 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
/* NOTE(review): the two range checks below overlap — the second already
 * rejects word_cnt > ICE_MAX_CHAIN_WORDS; confirm against the unsampled
 * file whether both are intended (one may be guarded by prof_rule).
 */
6871 if (word_cnt > ICE_MAX_CHAIN_WORDS)
6872 return ICE_ERR_PARAM;
6874 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
6875 return ICE_ERR_PARAM;
6878 /* make sure that we can locate a dummy packet */
6879 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
6882 status = ICE_ERR_PARAM;
6883 goto err_ice_add_adv_rule;
/* Only these forwarding actions are supported for advanced rules. */
6886 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6887 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
6888 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6889 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
6892 vsi_handle = rinfo->sw_act.vsi_handle;
6893 if (!ice_is_vsi_valid(hw, vsi_handle))
6894 return ICE_ERR_PARAM;
/* Translate the SW VSI handle into the HW VSI number the rule needs;
 * for TX rules the source is the HW VSI number as well.
 */
6896 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6897 rinfo->sw_act.fwd_id.hw_vsi_id =
6898 ice_get_hw_vsi_num(hw, vsi_handle);
6899 if (rinfo->sw_act.flag & ICE_FLTR_TX)
6900 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
6902 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
/* If an identical rule already exists, fold the new VSI into that rule's
 * VSI list instead of programming a duplicate rule.
 */
6905 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6907 /* we have to add VSI to VSI_LIST and increment vsi_count.
6908 * Also Update VSI list so that we can change forwarding rule
6909 * if the rule already exists, we will check if it exists with
6910 * same vsi_id, if not then add it to the VSI list if it already
6911 * exists if not then create a VSI list and add the existing VSI
6912 * ID and the new VSI ID to the list
6913 * We will add that VSI to the list
6915 status = ice_adv_add_update_vsi_list(hw, m_entry,
6916 &m_entry->rule_info,
6919 added_entry->rid = rid;
6920 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
6921 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* No existing rule matched: allocate an AQ buffer sized for the rule
 * header plus the dummy packet and encode the single action word.
 */
6925 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
6926 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
6928 return ICE_ERR_NO_MEMORY;
6929 act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
6930 switch (rinfo->sw_act.fltr_act) {
6931 case ICE_FWD_TO_VSI:
6932 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
6933 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
6934 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
6937 act |= ICE_SINGLE_ACT_TO_Q;
6938 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6939 ICE_SINGLE_ACT_Q_INDEX_M;
6941 case ICE_FWD_TO_QGRP:
/* Queue-group size is encoded as log2 of the group size. */
6942 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
6943 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
6944 act |= ICE_SINGLE_ACT_TO_Q;
6945 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6946 ICE_SINGLE_ACT_Q_INDEX_M;
6947 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
6948 ICE_SINGLE_ACT_Q_REGION_M;
6950 case ICE_DROP_PACKET:
6951 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
6952 ICE_SINGLE_ACT_VALID_BIT;
6955 status = ICE_ERR_CFG;
6956 goto err_ice_add_adv_rule;
6959 /* set the rule LOOKUP type based on caller specified 'RX'
6960 * instead of hardcoding it to be either LOOKUP_TX/RX
6962 * for 'RX' set the source to be the port number
6963 * for 'TX' set the source to be the source HW VSI number (determined
6967 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
6968 s_rule->pdata.lkup_tx_rx.src =
6969 CPU_TO_LE16(hw->port_info->lport);
6971 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
6972 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
6975 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
6976 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* Copy the dummy packet into the rule and overlay the caller's header
 * values at the recorded protocol offsets.
 */
6978 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
6979 pkt_len, pkt_offsets);
6981 goto err_ice_add_adv_rule;
/* Tunnel rules additionally need tunnel-specific header fixups. */
6983 if (rinfo->tun_type != ICE_NON_TUN &&
6984 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
6985 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
6986 s_rule->pdata.lkup_tx_rx.hdr,
6989 goto err_ice_add_adv_rule;
6992 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6993 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
6996 goto err_ice_add_adv_rule;
/* Rule accepted by FW: create the management entry, remembering the
 * lookups so the rule can be matched/removed/replayed later.
 */
6997 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
6998 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
7000 status = ICE_ERR_NO_MEMORY;
7001 goto err_ice_add_adv_rule;
7004 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
7005 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
7006 ICE_NONDMA_TO_NONDMA);
/* prof_rule entries may have zero lkups, so a NULL copy is only an
 * error for non-profile rules.
 */
7007 if (!adv_fltr->lkups && !prof_rule) {
7008 status = ICE_ERR_NO_MEMORY;
7009 goto err_ice_add_adv_rule;
7012 adv_fltr->lkups_cnt = lkups_cnt;
7013 adv_fltr->rule_info = *rinfo;
/* FW returns the rule index in the same buffer it was given. */
7014 adv_fltr->rule_info.fltr_rule_id =
7015 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
7016 sw = hw->switch_info;
7017 sw->recp_list[rid].adv_rule = true;
7018 rule_head = &sw->recp_list[rid].filt_rules;
7020 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
7021 adv_fltr->vsi_count = 1;
7023 /* Add rule entry to book keeping list */
7024 LIST_ADD(&adv_fltr->list_entry, rule_head);
7026 added_entry->rid = rid;
7027 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
7028 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* Common error exit: on failure, release the partially-built management
 * entry; s_rule is freed on every path.
 */
7030 err_ice_add_adv_rule:
7031 if (status && adv_fltr) {
7032 ice_free(hw, adv_fltr->lkups);
7033 ice_free(hw, adv_fltr);
7036 ice_free(hw, s_rule);
7042 * ice_adv_rem_update_vsi_list
7043 * @hw: pointer to the hardware structure
7044 * @vsi_handle: VSI handle of the VSI to remove
7045 * @fm_list: filter management entry for which the VSI list management needs to
/* Detach one VSI from the VSI list used by an advanced FWD_TO_VSI_LIST
 * rule. When only one VSI remains afterwards, the rule is converted back
 * to a plain FWD_TO_VSI rule and the now-unneeded VSI list is deleted.
 */
7048 static enum ice_status
7049 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
7050 struct ice_adv_fltr_mgmt_list_entry *fm_list)
7052 struct ice_vsi_list_map_info *vsi_list_info;
7053 enum ice_sw_lkup_type lkup_type;
7054 enum ice_status status;
/* Only meaningful for rules that currently forward to a VSI list. */
7057 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
7058 fm_list->vsi_count == 0)
7059 return ICE_ERR_PARAM;
7061 /* A rule with the VSI being removed does not exist */
7062 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
7063 return ICE_ERR_DOES_NOT_EXIST;
7065 lkup_type = ICE_SW_LKUP_LAST;
7066 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
/* remove == true: take this VSI out of the HW VSI list. */
7067 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
7068 ice_aqc_opc_update_sw_rules,
7073 fm_list->vsi_count--;
7074 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
7075 vsi_list_info = fm_list->vsi_list_info;
7076 if (fm_list->vsi_count == 1) {
7077 struct ice_fltr_info tmp_fltr;
/* Exactly one VSI left: find it so the rule can forward to it directly. */
7080 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
7082 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
7083 return ICE_ERR_OUT_OF_RANGE;
7085 /* Make sure VSI list is empty before removing it below */
7086 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
7088 ice_aqc_opc_update_sw_rules,
7093 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
7094 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
7095 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
7096 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
7097 tmp_fltr.fwd_id.hw_vsi_id =
7098 ice_get_hw_vsi_num(hw, rem_vsi_handle);
7099 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
7100 ice_get_hw_vsi_num(hw, rem_vsi_handle);
7102 /* Update the previous switch rule of "fwd to VSI list" to
7103 * "fwd to VSI" since only one VSI remains on the list
7105 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
7107 ice_debug(hw, ICE_DBG_SW,
7108 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
7109 tmp_fltr.fwd_id.hw_vsi_id, status);
7113 /* Remove the VSI list since it is no longer used */
7114 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
7116 ice_debug(hw, ICE_DBG_SW,
7117 "Failed to remove VSI list %d, error %d\n",
7118 vsi_list_id, status);
/* Drop the SW bookkeeping for the deleted VSI list map. */
7122 LIST_DEL(&vsi_list_info->list_entry);
7123 ice_free(hw, vsi_list_info);
7124 fm_list->vsi_list_info = NULL;
7131 * ice_rem_adv_rule - removes existing advanced switch rule
7132 * @hw: pointer to the hardware structure
7133 * @lkups: information on the words that needs to be looked up. All words
7134 * together makes one recipe
7135 * @lkups_cnt: num of entries in the lkups array
7136 * @rinfo: pointer to the rule information for the rule to be removed
7138 * This function can be used to remove 1 rule at a time. The lkups is
7139 * used to describe all the words that forms the "lookup" portion of the
7140 * rule. These words can span multiple protocols. Callers to this function
7141 * need to pass in a list of protocol headers with lookup information along
7142 * and mask that determines which words are valid from the given protocol
7143 * header. rinfo describes other information related to this rule such as
7144 * forwarding IDs, priority of this rule, etc.
/* Remove one advanced switch rule: re-derive the recipe ID from the lookup
 * words, locate the matching management entry, update or dismantle its VSI
 * list as needed, and remove the HW rule via the admin queue when no VSI
 * references remain.
 */
7147 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7148 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
7150 struct ice_adv_fltr_mgmt_list_entry *list_elem;
7151 struct ice_prot_lkup_ext lkup_exts;
7152 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
7153 enum ice_status status = ICE_SUCCESS;
7154 bool remove_rule = false;
7155 u16 i, rid, vsi_handle;
/* Rebuild the protocol/offset extraction words from the caller's lookups
 * so the owning recipe can be found.
 */
7157 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
7158 for (i = 0; i < lkups_cnt; i++) {
7161 if (lkups[i].type >= ICE_PROTOCOL_LAST)
7164 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
7169 /* Create any special protocol/offset pairs, such as looking at tunnel
7170 * bits by extracting metadata
7172 status = ice_add_special_words(rinfo, &lkup_exts);
7176 rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
7177 /* If did not find a recipe that match the existing criteria */
7178 if (rid == ICE_MAX_NUM_RECIPES)
7179 return ICE_ERR_PARAM;
7181 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
7182 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
7183 /* the rule is already removed */
/* Decide under the rule lock whether the HW rule itself must go, or
 * whether only this VSI's membership in the rule's VSI list changes.
 * NOTE(review): sampled view — some branch bodies are not visible here.
 */
7186 ice_acquire_lock(rule_lock);
7187 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
7189 } else if (list_elem->vsi_count > 1) {
/* Shared VSI list: drop one reference and detach just this VSI. */
7190 list_elem->vsi_list_info->ref_cnt--;
7191 remove_rule = false;
7192 vsi_handle = rinfo->sw_act.vsi_handle;
7193 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
7195 vsi_handle = rinfo->sw_act.vsi_handle;
7196 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
7198 ice_release_lock(rule_lock);
7201 if (list_elem->vsi_count == 0)
7204 ice_release_lock(rule_lock);
/* The rule has no remaining users: ask FW to remove it by index. */
7206 struct ice_aqc_sw_rules_elem *s_rule;
7209 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
7211 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
7214 return ICE_ERR_NO_MEMORY;
7215 s_rule->pdata.lkup_tx_rx.act = 0;
7216 s_rule->pdata.lkup_tx_rx.index =
7217 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
7218 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
7219 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
7221 ice_aqc_opc_remove_sw_rules, NULL);
/* Only purge the SW bookkeeping once FW confirmed the removal. */
7222 if (status == ICE_SUCCESS) {
7223 ice_acquire_lock(rule_lock);
7224 LIST_DEL(&list_elem->list_entry);
7225 ice_free(hw, list_elem->lkups);
7226 ice_free(hw, list_elem);
7227 ice_release_lock(rule_lock);
7229 ice_free(hw, s_rule);
7235 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
7236 * @hw: pointer to the hardware structure
7237 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
7239 * This function is used to remove 1 rule at a time. The removal is based on
7240 * the remove_entry parameter. This function will remove rule for a given
7241 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
/* Remove a single advanced rule identified by {rid, rule_id, vsi_handle}:
 * scan the recipe's filter list for the matching rule ID and delegate the
 * actual removal to ice_rem_adv_rule() using the stored lookups.
 */
7244 ice_rem_adv_rule_by_id(struct ice_hw *hw,
7245 struct ice_rule_query_data *remove_entry)
7247 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7248 struct LIST_HEAD_TYPE *list_head;
7249 struct ice_adv_rule_info rinfo;
7250 struct ice_switch_info *sw;
7252 sw = hw->switch_info;
/* The recipe must exist before its rule list can be searched. */
7253 if (!sw->recp_list[remove_entry->rid].recp_created)
7254 return ICE_ERR_PARAM;
7255 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
7256 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
7258 if (list_itr->rule_info.fltr_rule_id ==
7259 remove_entry->rule_id) {
/* Copy rule_info so the caller-specified VSI handle can be
 * substituted without mutating the stored entry.
 */
7260 rinfo = list_itr->rule_info;
7261 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
7262 return ice_rem_adv_rule(hw, list_itr->lkups,
7263 list_itr->lkups_cnt, &rinfo);
/* No rule with the requested ID exists under this recipe. */
7266 return ICE_ERR_PARAM;
7270 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
7272 * @hw: pointer to the hardware structure
7273 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
7275 * This function is used to remove all the rules for a given VSI and as soon
7276 * as removing a rule fails, it will return immediately with the error code,
7277 * else it will return ICE_SUCCESS
/* Remove every advanced rule that references the given VSI: walk all
 * created recipes that carry advanced rules and remove each matching rule
 * via ice_rem_adv_rule(). Stops at the first failure.
 */
7280 ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
7282 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7283 struct ice_vsi_list_map_info *map_info;
7284 struct LIST_HEAD_TYPE *list_head;
7285 struct ice_adv_rule_info rinfo;
7286 struct ice_switch_info *sw;
7287 enum ice_status status;
7288 u16 vsi_list_id = 0;
7291 sw = hw->switch_info;
/* Only recipes that were created and hold advanced rules are relevant. */
7292 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
7293 if (!sw->recp_list[rid].recp_created)
7295 if (!sw->recp_list[rid].adv_rule)
7297 list_head = &sw->recp_list[rid].filt_rules;
7299 LIST_FOR_EACH_ENTRY(list_itr, list_head,
7300 ice_adv_fltr_mgmt_list_entry, list_entry) {
/* Check whether this rule's VSI list contains the target VSI.
 * NOTE(review): sampled view — the lines using map_info and
 * vsi_list_id are not all visible here.
 */
7301 map_info = ice_find_vsi_list_entry(&sw->recp_list[rid],
7306 rinfo = list_itr->rule_info;
7307 rinfo.sw_act.vsi_handle = vsi_handle;
7308 status = ice_rem_adv_rule(hw, list_itr->lkups,
7309 list_itr->lkups_cnt, &rinfo);
7319 * ice_replay_fltr - Replay all the filters stored by a specific list head
7320 * @hw: pointer to the hardware structure
7321 * @list_head: list for which filters needs to be replayed
7322 * @recp_id: Recipe ID for which rules need to be replayed
/* Replay all legacy (non-advanced) filters of one recipe: move the stored
 * entries onto a temporary list, re-add each one through the normal add
 * paths (per-VSI for list-based/VLAN rules), then discard the temp list.
 */
7324 static enum ice_status
7325 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
7327 struct ice_fltr_mgmt_list_entry *itr;
7328 enum ice_status status = ICE_SUCCESS;
7329 struct ice_sw_recipe *recp_list;
7330 u8 lport = hw->port_info->lport;
7331 struct LIST_HEAD_TYPE l_head;
7333 if (LIST_EMPTY(list_head))
7336 recp_list = &hw->switch_info->recp_list[recp_id];
7337 /* Move entries from the given list_head to a temporary l_head so that
7338 * they can be replayed. Otherwise when trying to re-add the same
7339 * filter, the function will return already exists
7341 LIST_REPLACE_INIT(list_head, &l_head);
7343 /* Mark the given list_head empty by reinitializing it so filters
7344 * could be added again by *handler
7346 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
7348 struct ice_fltr_list_entry f_entry;
7350 f_entry.fltr_info = itr->fltr_info;
/* Simple case: single-VSI, non-VLAN rule replays directly. */
7351 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
7352 status = ice_add_rule_internal(hw, recp_list, lport,
7354 if (status != ICE_SUCCESS)
7359 /* Add a filter per VSI separately */
/* Walk the entry's VSI bitmap, clearing each bit so the add path
 * can rebuild the VSI list from scratch.
 */
7364 ice_find_first_bit(itr->vsi_list_info->vsi_map,
7366 if (!ice_is_vsi_valid(hw, vsi_handle))
7369 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
7370 f_entry.fltr_info.vsi_handle = vsi_handle;
7371 f_entry.fltr_info.fwd_id.hw_vsi_id =
7372 ice_get_hw_vsi_num(hw, vsi_handle);
7373 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
/* VLAN rules go through the VLAN-specific add path. */
7374 if (recp_id == ICE_SW_LKUP_VLAN)
7375 status = ice_add_vlan_internal(hw, recp_list,
7378 status = ice_add_rule_internal(hw, recp_list,
7381 if (status != ICE_SUCCESS)
7386 /* Clear the filter management list */
7387 ice_rem_sw_rule_info(hw, &l_head);
7392 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
7393 * @hw: pointer to the hardware structure
7395 * NOTE: This function does not clean up partially added filters on error.
7396 * It is up to caller of the function to issue a reset or fail early.
/* Replay the stored filters of every recipe; stops at the first recipe
 * whose replay fails (partial replays are not rolled back — see the
 * function's kernel-doc note above).
 */
7398 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
7400 struct ice_switch_info *sw = hw->switch_info;
7401 enum ice_status status = ICE_SUCCESS;
7404 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7405 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
7407 status = ice_replay_fltr(hw, i, head);
7408 if (status != ICE_SUCCESS)
7415 * ice_replay_vsi_fltr - Replay filters for requested VSI
7416 * @hw: pointer to the hardware structure
7417 * @vsi_handle: driver VSI handle
7418 * @recp_id: Recipe ID for which rules need to be replayed
7419 * @list_head: list for which filters need to be replayed
7421 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
7422 * It is required to pass valid VSI handle.
/* Replay one recipe's legacy filters for a single VSI: re-add rules owned
 * directly by the VSI, and re-insert the VSI into list-based rules whose
 * VSI bitmap contains it.
 */
7424 static enum ice_status
7425 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
7426 struct LIST_HEAD_TYPE *list_head)
7428 struct ice_fltr_mgmt_list_entry *itr;
7429 enum ice_status status = ICE_SUCCESS;
7430 struct ice_sw_recipe *recp_list;
7433 if (LIST_EMPTY(list_head))
7435 recp_list = &hw->switch_info->recp_list[recp_id];
7436 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
7438 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
7440 struct ice_fltr_list_entry f_entry;
7442 f_entry.fltr_info = itr->fltr_info;
/* Direct rule owned by this exact VSI (non-VLAN, not list-based). */
7443 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
7444 itr->fltr_info.vsi_handle == vsi_handle) {
7445 /* update the src in case it is VSI num */
7446 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
7447 f_entry.fltr_info.src = hw_vsi_id;
7448 status = ice_add_rule_internal(hw, recp_list,
7449 hw->port_info->lport,
7451 if (status != ICE_SUCCESS)
/* List-based rule: only replay if this VSI is in its bitmap. */
7455 if (!itr->vsi_list_info ||
7456 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
7458 /* Clearing it so that the logic can add it back */
7459 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
7460 f_entry.fltr_info.vsi_handle = vsi_handle;
7461 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
7462 /* update the src in case it is VSI num */
7463 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
7464 f_entry.fltr_info.src = hw_vsi_id;
7465 if (recp_id == ICE_SW_LKUP_VLAN)
7466 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
7468 status = ice_add_rule_internal(hw, recp_list,
7469 hw->port_info->lport,
7471 if (status != ICE_SUCCESS)
7479 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
7480 * @hw: pointer to the hardware structure
7481 * @vsi_handle: driver VSI handle
7482 * @list_head: list for which filters need to be replayed
7484 * Replay the advanced rule for the given VSI.
/* Replay the advanced rules of one list for a single VSI: each stored
 * entry whose action targets this VSI is re-programmed with
 * ice_add_adv_rule() using its saved lookups and rule info.
 */
7486 static enum ice_status
7487 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
7488 struct LIST_HEAD_TYPE *list_head)
7490 struct ice_rule_query_data added_entry = { 0 };
7491 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
7492 enum ice_status status = ICE_SUCCESS;
7494 if (LIST_EMPTY(list_head))
7496 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
7498 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
7499 u16 lk_cnt = adv_fltr->lkups_cnt;
/* Skip rules that belong to other VSIs. */
7501 if (vsi_handle != rinfo->sw_act.vsi_handle)
7503 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
7512 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
7513 * @hw: pointer to the hardware structure
7514 * @vsi_handle: driver VSI handle
7516 * Replays filters for requested VSI via vsi_handle.
/* Replay all recipes' replay-lists for one VSI, dispatching to the legacy
 * or advanced replay helper depending on the recipe's adv_rule flag.
 */
7518 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
7520 struct ice_switch_info *sw = hw->switch_info;
7521 enum ice_status status;
7524 /* Update the recipes that were created */
7525 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7526 struct LIST_HEAD_TYPE *head;
7528 head = &sw->recp_list[i].filt_replay_rules;
/* Legacy recipes replay via ice_replay_vsi_fltr; advanced ones
 * via ice_replay_vsi_adv_rule.
 */
7529 if (!sw->recp_list[i].adv_rule)
7530 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
7532 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
7533 if (status != ICE_SUCCESS)
7541 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
7542 * @hw: pointer to the HW struct
7544 * Deletes the filter replay rules.
7546 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
7548 struct ice_switch_info *sw = hw->switch_info;
7554 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7555 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
7556 struct LIST_HEAD_TYPE *l_head;
7558 l_head = &sw->recp_list[i].filt_replay_rules;
7559 if (!sw->recp_list[i].adv_rule)
7560 ice_rem_sw_rule_info(hw, l_head);
7562 ice_rem_adv_rule_info(hw, l_head);