1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020 Intel Corporation
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
12 #define ICE_MAX_VLAN_ID 0xFFF
13 #define ICE_IPV4_NVGRE_PROTO_ID 0x002F
14 #define ICE_PPP_IPV6_PROTO_ID 0x0057
15 #define ICE_IPV6_ETHER_ID 0x86DD
17 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
18 * struct to configure any switch filter rules.
19 * {DA (6 bytes), SA(6 bytes),
20 * Ether type (2 bytes for header without VLAN tag) OR
21 * VLAN tag (4 bytes for header with VLAN tag) }
23 * Word on Hardcoded values
24 * byte 0 = 0x2: to identify it as locally administered DA MAC
25 * byte 6 = 0x2: to identify it as locally administered SA MAC
26 * byte 12 = 0x81 & byte 13 = 0x00:
27 * In case of VLAN filter first two bytes defines ether type (0x8100)
28 * and remaining two bytes are placeholder for programming a given VLAN ID
29 * In case of Ether type filter it is treated as header without VLAN tag
30 * and byte 12 and 13 is used to program a given Ether type instead
32 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
36 struct ice_dummy_pkt_offsets {
37 enum ice_protocol_type type;
38 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
41 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
44 { ICE_IPV4_OFOS, 14 },
49 { ICE_PROTOCOL_LAST, 0 },
52 static const u8 dummy_gre_tcp_packet[] = {
53 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
54 0x00, 0x00, 0x00, 0x00,
55 0x00, 0x00, 0x00, 0x00,
57 0x08, 0x00, /* ICE_ETYPE_OL 12 */
59 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
60 0x00, 0x00, 0x00, 0x00,
61 0x00, 0x2F, 0x00, 0x00,
62 0x00, 0x00, 0x00, 0x00,
63 0x00, 0x00, 0x00, 0x00,
65 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
66 0x00, 0x00, 0x00, 0x00,
68 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
69 0x00, 0x00, 0x00, 0x00,
70 0x00, 0x00, 0x00, 0x00,
73 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
74 0x00, 0x00, 0x00, 0x00,
75 0x00, 0x06, 0x00, 0x00,
76 0x00, 0x00, 0x00, 0x00,
77 0x00, 0x00, 0x00, 0x00,
79 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
80 0x00, 0x00, 0x00, 0x00,
81 0x00, 0x00, 0x00, 0x00,
82 0x50, 0x02, 0x20, 0x00,
83 0x00, 0x00, 0x00, 0x00
86 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
89 { ICE_IPV4_OFOS, 14 },
94 { ICE_PROTOCOL_LAST, 0 },
97 static const u8 dummy_gre_udp_packet[] = {
98 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
99 0x00, 0x00, 0x00, 0x00,
100 0x00, 0x00, 0x00, 0x00,
102 0x08, 0x00, /* ICE_ETYPE_OL 12 */
104 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
105 0x00, 0x00, 0x00, 0x00,
106 0x00, 0x2F, 0x00, 0x00,
107 0x00, 0x00, 0x00, 0x00,
108 0x00, 0x00, 0x00, 0x00,
110 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
111 0x00, 0x00, 0x00, 0x00,
113 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
114 0x00, 0x00, 0x00, 0x00,
115 0x00, 0x00, 0x00, 0x00,
118 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
119 0x00, 0x00, 0x00, 0x00,
120 0x00, 0x11, 0x00, 0x00,
121 0x00, 0x00, 0x00, 0x00,
122 0x00, 0x00, 0x00, 0x00,
124 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
125 0x00, 0x08, 0x00, 0x00,
128 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
130 { ICE_ETYPE_OL, 12 },
131 { ICE_IPV4_OFOS, 14 },
135 { ICE_VXLAN_GPE, 42 },
139 { ICE_PROTOCOL_LAST, 0 },
142 static const u8 dummy_udp_tun_tcp_packet[] = {
143 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
144 0x00, 0x00, 0x00, 0x00,
145 0x00, 0x00, 0x00, 0x00,
147 0x08, 0x00, /* ICE_ETYPE_OL 12 */
149 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
150 0x00, 0x01, 0x00, 0x00,
151 0x40, 0x11, 0x00, 0x00,
152 0x00, 0x00, 0x00, 0x00,
153 0x00, 0x00, 0x00, 0x00,
155 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
156 0x00, 0x46, 0x00, 0x00,
158 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
159 0x00, 0x00, 0x00, 0x00,
161 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
162 0x00, 0x00, 0x00, 0x00,
163 0x00, 0x00, 0x00, 0x00,
166 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
167 0x00, 0x01, 0x00, 0x00,
168 0x40, 0x06, 0x00, 0x00,
169 0x00, 0x00, 0x00, 0x00,
170 0x00, 0x00, 0x00, 0x00,
172 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
173 0x00, 0x00, 0x00, 0x00,
174 0x00, 0x00, 0x00, 0x00,
175 0x50, 0x02, 0x20, 0x00,
176 0x00, 0x00, 0x00, 0x00
179 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
181 { ICE_ETYPE_OL, 12 },
182 { ICE_IPV4_OFOS, 14 },
186 { ICE_VXLAN_GPE, 42 },
189 { ICE_UDP_ILOS, 84 },
190 { ICE_PROTOCOL_LAST, 0 },
193 static const u8 dummy_udp_tun_udp_packet[] = {
194 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
195 0x00, 0x00, 0x00, 0x00,
196 0x00, 0x00, 0x00, 0x00,
198 0x08, 0x00, /* ICE_ETYPE_OL 12 */
200 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
201 0x00, 0x01, 0x00, 0x00,
202 0x00, 0x11, 0x00, 0x00,
203 0x00, 0x00, 0x00, 0x00,
204 0x00, 0x00, 0x00, 0x00,
206 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
207 0x00, 0x3a, 0x00, 0x00,
209 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
210 0x00, 0x00, 0x00, 0x00,
212 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
213 0x00, 0x00, 0x00, 0x00,
214 0x00, 0x00, 0x00, 0x00,
217 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
218 0x00, 0x01, 0x00, 0x00,
219 0x00, 0x11, 0x00, 0x00,
220 0x00, 0x00, 0x00, 0x00,
221 0x00, 0x00, 0x00, 0x00,
223 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
224 0x00, 0x08, 0x00, 0x00,
227 /* offset info for MAC + IPv4 + UDP dummy packet */
228 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
230 { ICE_ETYPE_OL, 12 },
231 { ICE_IPV4_OFOS, 14 },
232 { ICE_UDP_ILOS, 34 },
233 { ICE_PROTOCOL_LAST, 0 },
236 /* Dummy packet for MAC + IPv4 + UDP */
237 static const u8 dummy_udp_packet[] = {
238 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
239 0x00, 0x00, 0x00, 0x00,
240 0x00, 0x00, 0x00, 0x00,
242 0x08, 0x00, /* ICE_ETYPE_OL 12 */
244 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
245 0x00, 0x01, 0x00, 0x00,
246 0x00, 0x11, 0x00, 0x00,
247 0x00, 0x00, 0x00, 0x00,
248 0x00, 0x00, 0x00, 0x00,
250 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
251 0x00, 0x08, 0x00, 0x00,
253 0x00, 0x00, /* 2 bytes for 4 byte alignment */
256 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
257 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
259 { ICE_ETYPE_OL, 12 },
260 { ICE_VLAN_OFOS, 14 },
261 { ICE_IPV4_OFOS, 18 },
262 { ICE_UDP_ILOS, 38 },
263 { ICE_PROTOCOL_LAST, 0 },
266 /* C-tag (801.1Q), IPv4:UDP dummy packet */
267 static const u8 dummy_vlan_udp_packet[] = {
268 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
269 0x00, 0x00, 0x00, 0x00,
270 0x00, 0x00, 0x00, 0x00,
272 0x81, 0x00, /* ICE_ETYPE_OL 12 */
274 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
276 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
277 0x00, 0x01, 0x00, 0x00,
278 0x00, 0x11, 0x00, 0x00,
279 0x00, 0x00, 0x00, 0x00,
280 0x00, 0x00, 0x00, 0x00,
282 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
283 0x00, 0x08, 0x00, 0x00,
285 0x00, 0x00, /* 2 bytes for 4 byte alignment */
288 /* offset info for MAC + IPv4 + TCP dummy packet */
289 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
291 { ICE_ETYPE_OL, 12 },
292 { ICE_IPV4_OFOS, 14 },
294 { ICE_PROTOCOL_LAST, 0 },
297 /* Dummy packet for MAC + IPv4 + TCP */
298 static const u8 dummy_tcp_packet[] = {
299 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
300 0x00, 0x00, 0x00, 0x00,
301 0x00, 0x00, 0x00, 0x00,
303 0x08, 0x00, /* ICE_ETYPE_OL 12 */
305 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
306 0x00, 0x01, 0x00, 0x00,
307 0x00, 0x06, 0x00, 0x00,
308 0x00, 0x00, 0x00, 0x00,
309 0x00, 0x00, 0x00, 0x00,
311 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
312 0x00, 0x00, 0x00, 0x00,
313 0x00, 0x00, 0x00, 0x00,
314 0x50, 0x00, 0x00, 0x00,
315 0x00, 0x00, 0x00, 0x00,
317 0x00, 0x00, /* 2 bytes for 4 byte alignment */
320 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
321 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
323 { ICE_ETYPE_OL, 12 },
324 { ICE_VLAN_OFOS, 14 },
325 { ICE_IPV4_OFOS, 18 },
327 { ICE_PROTOCOL_LAST, 0 },
330 /* C-tag (801.1Q), IPv4:TCP dummy packet */
331 static const u8 dummy_vlan_tcp_packet[] = {
332 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
333 0x00, 0x00, 0x00, 0x00,
334 0x00, 0x00, 0x00, 0x00,
336 0x81, 0x00, /* ICE_ETYPE_OL 12 */
338 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
340 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
341 0x00, 0x01, 0x00, 0x00,
342 0x00, 0x06, 0x00, 0x00,
343 0x00, 0x00, 0x00, 0x00,
344 0x00, 0x00, 0x00, 0x00,
346 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
347 0x00, 0x00, 0x00, 0x00,
348 0x00, 0x00, 0x00, 0x00,
349 0x50, 0x00, 0x00, 0x00,
350 0x00, 0x00, 0x00, 0x00,
352 0x00, 0x00, /* 2 bytes for 4 byte alignment */
355 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
357 { ICE_ETYPE_OL, 12 },
358 { ICE_IPV6_OFOS, 14 },
360 { ICE_PROTOCOL_LAST, 0 },
363 static const u8 dummy_tcp_ipv6_packet[] = {
364 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
365 0x00, 0x00, 0x00, 0x00,
366 0x00, 0x00, 0x00, 0x00,
368 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
370 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
371 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
372 0x00, 0x00, 0x00, 0x00,
373 0x00, 0x00, 0x00, 0x00,
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00,
379 0x00, 0x00, 0x00, 0x00,
381 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
382 0x00, 0x00, 0x00, 0x00,
383 0x00, 0x00, 0x00, 0x00,
384 0x50, 0x00, 0x00, 0x00,
385 0x00, 0x00, 0x00, 0x00,
387 0x00, 0x00, /* 2 bytes for 4 byte alignment */
390 /* C-tag (802.1Q): IPv6 + TCP */
391 static const struct ice_dummy_pkt_offsets
392 dummy_vlan_tcp_ipv6_packet_offsets[] = {
394 { ICE_ETYPE_OL, 12 },
395 { ICE_VLAN_OFOS, 14 },
396 { ICE_IPV6_OFOS, 18 },
398 { ICE_PROTOCOL_LAST, 0 },
401 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
402 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
403 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
404 0x00, 0x00, 0x00, 0x00,
405 0x00, 0x00, 0x00, 0x00,
407 0x81, 0x00, /* ICE_ETYPE_OL 12 */
409 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
411 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
412 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
413 0x00, 0x00, 0x00, 0x00,
414 0x00, 0x00, 0x00, 0x00,
415 0x00, 0x00, 0x00, 0x00,
416 0x00, 0x00, 0x00, 0x00,
417 0x00, 0x00, 0x00, 0x00,
418 0x00, 0x00, 0x00, 0x00,
419 0x00, 0x00, 0x00, 0x00,
420 0x00, 0x00, 0x00, 0x00,
422 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
423 0x00, 0x00, 0x00, 0x00,
424 0x00, 0x00, 0x00, 0x00,
425 0x50, 0x00, 0x00, 0x00,
426 0x00, 0x00, 0x00, 0x00,
428 0x00, 0x00, /* 2 bytes for 4 byte alignment */
432 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
434 { ICE_ETYPE_OL, 12 },
435 { ICE_IPV6_OFOS, 14 },
436 { ICE_UDP_ILOS, 54 },
437 { ICE_PROTOCOL_LAST, 0 },
440 /* IPv6 + UDP dummy packet */
441 static const u8 dummy_udp_ipv6_packet[] = {
442 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
443 0x00, 0x00, 0x00, 0x00,
444 0x00, 0x00, 0x00, 0x00,
446 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
448 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
449 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
450 0x00, 0x00, 0x00, 0x00,
451 0x00, 0x00, 0x00, 0x00,
452 0x00, 0x00, 0x00, 0x00,
453 0x00, 0x00, 0x00, 0x00,
454 0x00, 0x00, 0x00, 0x00,
455 0x00, 0x00, 0x00, 0x00,
456 0x00, 0x00, 0x00, 0x00,
457 0x00, 0x00, 0x00, 0x00,
459 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
460 0x00, 0x10, 0x00, 0x00,
462 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
463 0x00, 0x00, 0x00, 0x00,
465 0x00, 0x00, /* 2 bytes for 4 byte alignment */
468 /* C-tag (802.1Q): IPv6 + UDP */
469 static const struct ice_dummy_pkt_offsets
470 dummy_vlan_udp_ipv6_packet_offsets[] = {
472 { ICE_ETYPE_OL, 12 },
473 { ICE_VLAN_OFOS, 14 },
474 { ICE_IPV6_OFOS, 18 },
475 { ICE_UDP_ILOS, 58 },
476 { ICE_PROTOCOL_LAST, 0 },
479 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
480 static const u8 dummy_vlan_udp_ipv6_packet[] = {
481 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
482 0x00, 0x00, 0x00, 0x00,
483 0x00, 0x00, 0x00, 0x00,
485 0x81, 0x00, /* ICE_ETYPE_OL 12 */
487 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
489 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
490 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
491 0x00, 0x00, 0x00, 0x00,
492 0x00, 0x00, 0x00, 0x00,
493 0x00, 0x00, 0x00, 0x00,
494 0x00, 0x00, 0x00, 0x00,
495 0x00, 0x00, 0x00, 0x00,
496 0x00, 0x00, 0x00, 0x00,
497 0x00, 0x00, 0x00, 0x00,
498 0x00, 0x00, 0x00, 0x00,
500 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
501 0x00, 0x08, 0x00, 0x00,
503 0x00, 0x00, /* 2 bytes for 4 byte alignment */
506 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
508 { ICE_IPV4_OFOS, 14 },
511 { ICE_PROTOCOL_LAST, 0 },
514 static const u8 dummy_udp_gtp_packet[] = {
515 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
516 0x00, 0x00, 0x00, 0x00,
517 0x00, 0x00, 0x00, 0x00,
520 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
521 0x00, 0x00, 0x00, 0x00,
522 0x00, 0x11, 0x00, 0x00,
523 0x00, 0x00, 0x00, 0x00,
524 0x00, 0x00, 0x00, 0x00,
526 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
527 0x00, 0x1c, 0x00, 0x00,
529 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
530 0x00, 0x00, 0x00, 0x00,
531 0x00, 0x00, 0x00, 0x85,
533 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
534 0x00, 0x00, 0x00, 0x00,
537 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
539 { ICE_ETYPE_OL, 12 },
540 { ICE_VLAN_OFOS, 14},
542 { ICE_PROTOCOL_LAST, 0 },
545 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv4_offsets[] = {
547 { ICE_ETYPE_OL, 12 },
548 { ICE_VLAN_OFOS, 14},
550 { ICE_IPV4_OFOS, 26 },
551 { ICE_PROTOCOL_LAST, 0 },
554 static const u8 dummy_pppoe_ipv4_packet[] = {
555 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
556 0x00, 0x00, 0x00, 0x00,
557 0x00, 0x00, 0x00, 0x00,
559 0x81, 0x00, /* ICE_ETYPE_OL 12 */
561 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
563 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
566 0x00, 0x21, /* PPP Link Layer 24 */
568 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
569 0x00, 0x00, 0x00, 0x00,
570 0x00, 0x00, 0x00, 0x00,
571 0x00, 0x00, 0x00, 0x00,
572 0x00, 0x00, 0x00, 0x00,
574 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
578 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_tcp_packet_offsets[] = {
580 { ICE_ETYPE_OL, 12 },
581 { ICE_VLAN_OFOS, 14},
583 { ICE_IPV4_OFOS, 26 },
585 { ICE_PROTOCOL_LAST, 0 },
588 static const u8 dummy_pppoe_ipv4_tcp_packet[] = {
589 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
590 0x00, 0x00, 0x00, 0x00,
591 0x00, 0x00, 0x00, 0x00,
593 0x81, 0x00, /* ICE_ETYPE_OL 12 */
595 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
597 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
600 0x00, 0x21, /* PPP Link Layer 24 */
602 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 26 */
603 0x00, 0x01, 0x00, 0x00,
604 0x00, 0x06, 0x00, 0x00,
605 0x00, 0x00, 0x00, 0x00,
606 0x00, 0x00, 0x00, 0x00,
608 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 46 */
609 0x00, 0x00, 0x00, 0x00,
610 0x00, 0x00, 0x00, 0x00,
611 0x50, 0x00, 0x00, 0x00,
612 0x00, 0x00, 0x00, 0x00,
614 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
618 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_udp_packet_offsets[] = {
620 { ICE_ETYPE_OL, 12 },
621 { ICE_VLAN_OFOS, 14},
623 { ICE_IPV4_OFOS, 26 },
624 { ICE_UDP_ILOS, 46 },
625 { ICE_PROTOCOL_LAST, 0 },
628 static const u8 dummy_pppoe_ipv4_udp_packet[] = {
629 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
630 0x00, 0x00, 0x00, 0x00,
631 0x00, 0x00, 0x00, 0x00,
633 0x81, 0x00, /* ICE_ETYPE_OL 12 */
635 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
637 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
640 0x00, 0x21, /* PPP Link Layer 24 */
642 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 26 */
643 0x00, 0x01, 0x00, 0x00,
644 0x00, 0x11, 0x00, 0x00,
645 0x00, 0x00, 0x00, 0x00,
646 0x00, 0x00, 0x00, 0x00,
648 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 46 */
649 0x00, 0x08, 0x00, 0x00,
651 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
654 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_offsets[] = {
656 { ICE_ETYPE_OL, 12 },
657 { ICE_VLAN_OFOS, 14},
659 { ICE_IPV6_OFOS, 26 },
660 { ICE_PROTOCOL_LAST, 0 },
663 static const u8 dummy_pppoe_ipv6_packet[] = {
664 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
665 0x00, 0x00, 0x00, 0x00,
666 0x00, 0x00, 0x00, 0x00,
668 0x81, 0x00, /* ICE_ETYPE_OL 12 */
670 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
672 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
675 0x00, 0x57, /* PPP Link Layer 24 */
677 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
678 0x00, 0x00, 0x3b, 0x00,
679 0x00, 0x00, 0x00, 0x00,
680 0x00, 0x00, 0x00, 0x00,
681 0x00, 0x00, 0x00, 0x00,
682 0x00, 0x00, 0x00, 0x00,
683 0x00, 0x00, 0x00, 0x00,
684 0x00, 0x00, 0x00, 0x00,
685 0x00, 0x00, 0x00, 0x00,
686 0x00, 0x00, 0x00, 0x00,
688 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
692 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_tcp_offsets[] = {
694 { ICE_ETYPE_OL, 12 },
695 { ICE_VLAN_OFOS, 14},
697 { ICE_IPV6_OFOS, 26 },
699 { ICE_PROTOCOL_LAST, 0 },
702 static const u8 dummy_pppoe_ipv6_tcp_packet[] = {
703 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
704 0x00, 0x00, 0x00, 0x00,
705 0x00, 0x00, 0x00, 0x00,
707 0x81, 0x00, /* ICE_ETYPE_OL 12 */
709 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
711 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
714 0x00, 0x57, /* PPP Link Layer 24 */
716 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
717 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
718 0x00, 0x00, 0x00, 0x00,
719 0x00, 0x00, 0x00, 0x00,
720 0x00, 0x00, 0x00, 0x00,
721 0x00, 0x00, 0x00, 0x00,
722 0x00, 0x00, 0x00, 0x00,
723 0x00, 0x00, 0x00, 0x00,
724 0x00, 0x00, 0x00, 0x00,
725 0x00, 0x00, 0x00, 0x00,
727 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 66 */
728 0x00, 0x00, 0x00, 0x00,
729 0x00, 0x00, 0x00, 0x00,
730 0x50, 0x00, 0x00, 0x00,
731 0x00, 0x00, 0x00, 0x00,
733 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
737 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_udp_offsets[] = {
739 { ICE_ETYPE_OL, 12 },
740 { ICE_VLAN_OFOS, 14},
742 { ICE_IPV6_OFOS, 26 },
743 { ICE_UDP_ILOS, 66 },
744 { ICE_PROTOCOL_LAST, 0 },
747 static const u8 dummy_pppoe_ipv6_udp_packet[] = {
748 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
749 0x00, 0x00, 0x00, 0x00,
750 0x00, 0x00, 0x00, 0x00,
752 0x81, 0x00, /* ICE_ETYPE_OL 12 */
754 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
756 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
759 0x00, 0x57, /* PPP Link Layer 24 */
761 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
762 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
763 0x00, 0x00, 0x00, 0x00,
764 0x00, 0x00, 0x00, 0x00,
765 0x00, 0x00, 0x00, 0x00,
766 0x00, 0x00, 0x00, 0x00,
767 0x00, 0x00, 0x00, 0x00,
768 0x00, 0x00, 0x00, 0x00,
769 0x00, 0x00, 0x00, 0x00,
770 0x00, 0x00, 0x00, 0x00,
772 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 66 */
773 0x00, 0x08, 0x00, 0x00,
775 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
778 static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
780 { ICE_IPV4_OFOS, 14 },
782 { ICE_PROTOCOL_LAST, 0 },
785 static const u8 dummy_ipv4_esp_pkt[] = {
786 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
787 0x00, 0x00, 0x00, 0x00,
788 0x00, 0x00, 0x00, 0x00,
791 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 14 */
792 0x00, 0x00, 0x40, 0x00,
793 0x40, 0x32, 0x00, 0x00,
794 0x00, 0x00, 0x00, 0x00,
795 0x00, 0x00, 0x00, 0x00,
797 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
798 0x00, 0x00, 0x00, 0x00,
799 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
802 static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
804 { ICE_IPV6_OFOS, 14 },
806 { ICE_PROTOCOL_LAST, 0 },
809 static const u8 dummy_ipv6_esp_pkt[] = {
810 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
811 0x00, 0x00, 0x00, 0x00,
812 0x00, 0x00, 0x00, 0x00,
815 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
816 0x00, 0x08, 0x32, 0x00, /* Next header ESP */
817 0x00, 0x00, 0x00, 0x00,
818 0x00, 0x00, 0x00, 0x00,
819 0x00, 0x00, 0x00, 0x00,
820 0x00, 0x00, 0x00, 0x00,
821 0x00, 0x00, 0x00, 0x00,
822 0x00, 0x00, 0x00, 0x00,
823 0x00, 0x00, 0x00, 0x00,
824 0x00, 0x00, 0x00, 0x00,
826 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
827 0x00, 0x00, 0x00, 0x00,
828 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
831 static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
833 { ICE_IPV4_OFOS, 14 },
835 { ICE_PROTOCOL_LAST, 0 },
838 static const u8 dummy_ipv4_ah_pkt[] = {
839 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
840 0x00, 0x00, 0x00, 0x00,
841 0x00, 0x00, 0x00, 0x00,
844 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
845 0x00, 0x00, 0x40, 0x00,
846 0x40, 0x33, 0x00, 0x00,
847 0x00, 0x00, 0x00, 0x00,
848 0x00, 0x00, 0x00, 0x00,
850 0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
851 0x00, 0x00, 0x00, 0x00,
852 0x00, 0x00, 0x00, 0x00,
853 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
856 static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
858 { ICE_IPV6_OFOS, 14 },
860 { ICE_PROTOCOL_LAST, 0 },
863 static const u8 dummy_ipv6_ah_pkt[] = {
864 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
865 0x00, 0x00, 0x00, 0x00,
866 0x00, 0x00, 0x00, 0x00,
869 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
870 0x00, 0x0c, 0x33, 0x00, /* Next header AH */
871 0x00, 0x00, 0x00, 0x00,
872 0x00, 0x00, 0x00, 0x00,
873 0x00, 0x00, 0x00, 0x00,
874 0x00, 0x00, 0x00, 0x00,
875 0x00, 0x00, 0x00, 0x00,
876 0x00, 0x00, 0x00, 0x00,
877 0x00, 0x00, 0x00, 0x00,
878 0x00, 0x00, 0x00, 0x00,
880 0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
881 0x00, 0x00, 0x00, 0x00,
882 0x00, 0x00, 0x00, 0x00,
883 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
886 static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
888 { ICE_IPV4_OFOS, 14 },
889 { ICE_UDP_ILOS, 34 },
891 { ICE_PROTOCOL_LAST, 0 },
894 static const u8 dummy_ipv4_nat_pkt[] = {
895 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
896 0x00, 0x00, 0x00, 0x00,
897 0x00, 0x00, 0x00, 0x00,
900 0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_IL 14 */
901 0x00, 0x00, 0x40, 0x00,
902 0x40, 0x11, 0x00, 0x00,
903 0x00, 0x00, 0x00, 0x00,
904 0x00, 0x00, 0x00, 0x00,
906 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34 */
907 0x00, 0x00, 0x00, 0x00,
909 0x00, 0x00, 0x00, 0x00,
910 0x00, 0x00, 0x00, 0x00,
911 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
914 static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
916 { ICE_IPV6_OFOS, 14 },
917 { ICE_UDP_ILOS, 54 },
919 { ICE_PROTOCOL_LAST, 0 },
922 static const u8 dummy_ipv6_nat_pkt[] = {
923 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
924 0x00, 0x00, 0x00, 0x00,
925 0x00, 0x00, 0x00, 0x00,
928 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
929 0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */
930 0x00, 0x00, 0x00, 0x00,
931 0x00, 0x00, 0x00, 0x00,
932 0x00, 0x00, 0x00, 0x00,
933 0x00, 0x00, 0x00, 0x00,
934 0x00, 0x00, 0x00, 0x00,
935 0x00, 0x00, 0x00, 0x00,
936 0x00, 0x00, 0x00, 0x00,
937 0x00, 0x00, 0x00, 0x00,
939 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54 */
940 0x00, 0x00, 0x00, 0x00,
942 0x00, 0x00, 0x00, 0x00,
943 0x00, 0x00, 0x00, 0x00,
944 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
948 static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
950 { ICE_IPV4_OFOS, 14 },
952 { ICE_PROTOCOL_LAST, 0 },
955 static const u8 dummy_ipv4_l2tpv3_pkt[] = {
956 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
957 0x00, 0x00, 0x00, 0x00,
958 0x00, 0x00, 0x00, 0x00,
961 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
962 0x00, 0x00, 0x40, 0x00,
963 0x40, 0x73, 0x00, 0x00,
964 0x00, 0x00, 0x00, 0x00,
965 0x00, 0x00, 0x00, 0x00,
967 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
968 0x00, 0x00, 0x00, 0x00,
969 0x00, 0x00, 0x00, 0x00,
970 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
973 static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
975 { ICE_IPV6_OFOS, 14 },
977 { ICE_PROTOCOL_LAST, 0 },
980 static const u8 dummy_ipv6_l2tpv3_pkt[] = {
981 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
982 0x00, 0x00, 0x00, 0x00,
983 0x00, 0x00, 0x00, 0x00,
986 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
987 0x00, 0x0c, 0x73, 0x40,
988 0x00, 0x00, 0x00, 0x00,
989 0x00, 0x00, 0x00, 0x00,
990 0x00, 0x00, 0x00, 0x00,
991 0x00, 0x00, 0x00, 0x00,
992 0x00, 0x00, 0x00, 0x00,
993 0x00, 0x00, 0x00, 0x00,
994 0x00, 0x00, 0x00, 0x00,
995 0x00, 0x00, 0x00, 0x00,
997 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
998 0x00, 0x00, 0x00, 0x00,
999 0x00, 0x00, 0x00, 0x00,
1000 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1003 /* this is a recipe to profile association bitmap */
1004 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
1005 ICE_MAX_NUM_PROFILES);
1007 /* this is a profile to recipe association bitmap */
1008 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
1009 ICE_MAX_NUM_RECIPES);
1011 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
1014 * ice_collect_result_idx - copy result index values
1015 * @buf: buffer that contains the result index
1016 * @recp: the recipe struct to copy data into
1018 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
1019 struct ice_sw_recipe *recp)
1021 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1022 ice_set_bit(buf->content.result_indx &
1023 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
1027 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
1028 * @hw: pointer to hardware structure
1029 * @recps: struct that we need to populate
1030 * @rid: recipe ID that we are populating
1031 * @refresh_required: true if we should get recipe to profile mapping from FW
1033 * This function is used to populate all the necessary entries into our
1034 * bookkeeping so that we have a current list of all the recipes that are
1035 * programmed in the firmware.
1037 static enum ice_status
1038 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
1039 bool *refresh_required)
1041 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
1042 struct ice_aqc_recipe_data_elem *tmp;
1043 u16 num_recps = ICE_MAX_NUM_RECIPES;
1044 struct ice_prot_lkup_ext *lkup_exts;
1045 enum ice_status status;
1049 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
1051 /* we need a buffer big enough to accommodate all the recipes */
1052 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
1053 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
1055 return ICE_ERR_NO_MEMORY;
1057 tmp[0].recipe_indx = rid;
1058 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
1059 /* non-zero status meaning recipe doesn't exist */
1063 /* Get recipe to profile map so that we can get the fv from lkups that
1064 * we read for a recipe from FW. Since we want to minimize the number of
1065 * times we make this FW call, just make one call and cache the copy
1066 * until a new recipe is added. This operation is only required the
1067 * first time to get the changes from FW. Then to search existing
1068 * entries we don't need to update the cache again until another recipe
1071 if (*refresh_required) {
1072 ice_get_recp_to_prof_map(hw);
1073 *refresh_required = false;
1076 /* Start populating all the entries for recps[rid] based on lkups from
1077 * firmware. Note that we are only creating the root recipe in our
1080 lkup_exts = &recps[rid].lkup_exts;
1082 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
1083 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
1084 struct ice_recp_grp_entry *rg_entry;
1085 u8 i, prof, idx, prot = 0;
1089 rg_entry = (struct ice_recp_grp_entry *)
1090 ice_malloc(hw, sizeof(*rg_entry));
1092 status = ICE_ERR_NO_MEMORY;
1096 idx = root_bufs.recipe_indx;
1097 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
1099 /* Mark all result indices in this chain */
1100 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1101 ice_set_bit(root_bufs.content.result_indx &
1102 ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
1104 /* get the first profile that is associated with rid */
1105 prof = ice_find_first_bit(recipe_to_profile[idx],
1106 ICE_MAX_NUM_PROFILES);
1107 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
1108 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
1110 rg_entry->fv_idx[i] = lkup_indx;
1111 rg_entry->fv_mask[i] =
1112 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
1114 /* If the recipe is a chained recipe then all its
1115 * child recipe's result will have a result index.
1116 * To fill fv_words we should not use those result
1117 * index, we only need the protocol ids and offsets.
1118 * We will skip all the fv_idx which stores result
1119 * index in them. We also need to skip any fv_idx which
1120 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
1121 * valid offset value.
1123 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
1124 rg_entry->fv_idx[i]) ||
1125 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
1126 rg_entry->fv_idx[i] == 0)
1129 ice_find_prot_off(hw, ICE_BLK_SW, prof,
1130 rg_entry->fv_idx[i], &prot, &off);
1131 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
1132 lkup_exts->fv_words[fv_word_idx].off = off;
1133 lkup_exts->field_mask[fv_word_idx] =
1134 rg_entry->fv_mask[i];
1137 /* populate rg_list with the data from the child entry of this
1140 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
1142 /* Propagate some data to the recipe database */
1143 recps[idx].is_root = !!is_root;
1144 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1145 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
1146 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
1147 recps[idx].chain_idx = root_bufs.content.result_indx &
1148 ~ICE_AQ_RECIPE_RESULT_EN;
1149 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
1151 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
1157 /* Only do the following for root recipes entries */
1158 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
1159 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
1160 recps[idx].root_rid = root_bufs.content.rid &
1161 ~ICE_AQ_RECIPE_ID_IS_ROOT;
1162 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1165 /* Complete initialization of the root recipe entry */
1166 lkup_exts->n_val_words = fv_word_idx;
1167 recps[rid].big_recp = (num_recps > 1);
1168 recps[rid].n_grp_count = (u8)num_recps;
1169 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
1170 ice_memdup(hw, tmp, recps[rid].n_grp_count *
1171 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
1172 if (!recps[rid].root_buf)
1175 /* Copy result indexes */
1176 ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
1177 recps[rid].recp_created = true;
1185 * ice_get_recp_to_prof_map - updates recipe to profile mapping
1186 * @hw: pointer to hardware structure
1188 * This function is used to populate recipe_to_profile matrix where index to
1189 * this array is the recipe ID and the element is the mapping of which profiles
1190 * is this recipe mapped to.
1192 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
1194 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
1197 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
1200 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
1201 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
1202 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
1204 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
1205 ICE_MAX_NUM_RECIPES);
1206 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
1207 if (ice_is_bit_set(r_bitmap, j))
1208 ice_set_bit(i, recipe_to_profile[j]);
1213 * ice_init_def_sw_recp - initialize the recipe book keeping tables
1214 * @hw: pointer to the HW struct
1215 * @recp_list: pointer to sw recipe list
1217 * Allocate memory for the entire recipe table and initialize the structures/
1218 * entries corresponding to basic recipes.
1221 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
1223 struct ice_sw_recipe *recps;
1226 recps = (struct ice_sw_recipe *)
1227 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
1229 return ICE_ERR_NO_MEMORY;
1231 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
1232 recps[i].root_rid = i;
1233 INIT_LIST_HEAD(&recps[i].filt_rules);
1234 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
1235 INIT_LIST_HEAD(&recps[i].rg_list);
1236 ice_init_lock(&recps[i].filt_rule_lock);
1245 * ice_aq_get_sw_cfg - get switch configuration
1246 * @hw: pointer to the hardware structure
1247 * @buf: pointer to the result buffer
1248 * @buf_size: length of the buffer available for response
1249 * @req_desc: pointer to requested descriptor
1250 * @num_elems: pointer to number of elements
1251 * @cd: pointer to command details structure or NULL
1253 * Get switch configuration (0x0200) to be placed in 'buff'.
1254 * This admin command returns information such as initial VSI/port number
1255 * and switch ID it belongs to.
1257 * NOTE: *req_desc is both an input/output parameter.
1258 * The caller of this function first calls this function with *request_desc set
1259 * to 0. If the response from f/w has *req_desc set to 0, all the switch
1260 * configuration information has been returned; if non-zero (meaning not all
1261 * the information was returned), the caller should call this function again
1262 * with *req_desc set to the previous value returned by f/w to get the
1263 * next block of switch configuration information.
1265 * *num_elems is output only parameter. This reflects the number of elements
1266 * in response buffer. The caller of this function to use *num_elems while
1267 * parsing the response buffer.
1269 static enum ice_status
1270 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
1271 u16 buf_size, u16 *req_desc, u16 *num_elems,
1272 struct ice_sq_cd *cd)
1274 struct ice_aqc_get_sw_cfg *cmd;
1275 enum ice_status status;
1276 struct ice_aq_desc desc;
1278 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
1279 cmd = &desc.params.get_sw_conf;
1280 cmd->element = CPU_TO_LE16(*req_desc);
1282 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1284 *req_desc = LE16_TO_CPU(cmd->element);
1285 *num_elems = LE16_TO_CPU(cmd->num_elems);
1292 * ice_alloc_sw - allocate resources specific to switch
1293 * @hw: pointer to the HW struct
1294 * @ena_stats: true to turn on VEB stats
1295 * @shared_res: true for shared resource, false for dedicated resource
1296 * @sw_id: switch ID returned
1297 * @counter_id: VEB counter ID returned
1299 * allocates switch resources (SWID and VEB counter) (0x0208)
1302 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
1305 struct ice_aqc_alloc_free_res_elem *sw_buf;
1306 struct ice_aqc_res_elem *sw_ele;
1307 enum ice_status status;
1310 buf_len = sizeof(*sw_buf);
1311 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1312 ice_malloc(hw, buf_len);
1314 return ICE_ERR_NO_MEMORY;
1316 /* Prepare buffer for switch ID.
1317 * The number of resource entries in buffer is passed as 1 since only a
1318 * single switch/VEB instance is allocated, and hence a single sw_id
1321 sw_buf->num_elems = CPU_TO_LE16(1);
1323 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
1324 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
1325 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
1327 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1328 ice_aqc_opc_alloc_res, NULL);
1331 goto ice_alloc_sw_exit;
1333 sw_ele = &sw_buf->elem[0];
1334 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
1337 /* Prepare buffer for VEB Counter */
1338 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
1339 struct ice_aqc_alloc_free_res_elem *counter_buf;
1340 struct ice_aqc_res_elem *counter_ele;
1342 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1343 ice_malloc(hw, buf_len);
1345 status = ICE_ERR_NO_MEMORY;
1346 goto ice_alloc_sw_exit;
1349 /* The number of resource entries in buffer is passed as 1 since
1350 * only a single switch/VEB instance is allocated, and hence a
1351 * single VEB counter is requested.
1353 counter_buf->num_elems = CPU_TO_LE16(1);
1354 counter_buf->res_type =
1355 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
1356 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
1357 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1361 ice_free(hw, counter_buf);
1362 goto ice_alloc_sw_exit;
1364 counter_ele = &counter_buf->elem[0];
1365 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
1366 ice_free(hw, counter_buf);
1370 ice_free(hw, sw_buf);
1375 * ice_free_sw - free resources specific to switch
1376 * @hw: pointer to the HW struct
1377 * @sw_id: switch ID returned
1378 * @counter_id: VEB counter ID returned
1380 * free switch resources (SWID and VEB counter) (0x0209)
1382 * NOTE: This function frees multiple resources. It continues
1383 * releasing other resources even after it encounters error.
1384 * The error code returned is the last error it encountered.
1386 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
1388 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
1389 enum ice_status status, ret_status;
1392 buf_len = sizeof(*sw_buf);
1393 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1394 ice_malloc(hw, buf_len);
1396 return ICE_ERR_NO_MEMORY;
1398 /* Prepare buffer to free for switch ID res.
1399 * The number of resource entries in buffer is passed as 1 since only a
1400 * single switch/VEB instance is freed, and hence a single sw_id
1403 sw_buf->num_elems = CPU_TO_LE16(1);
1404 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
1405 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
1407 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1408 ice_aqc_opc_free_res, NULL);
1411 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1413 /* Prepare buffer to free for VEB Counter resource */
1414 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1415 ice_malloc(hw, buf_len);
1417 ice_free(hw, sw_buf);
1418 return ICE_ERR_NO_MEMORY;
1421 /* The number of resource entries in buffer is passed as 1 since only a
1422 * single switch/VEB instance is freed, and hence a single VEB counter
1425 counter_buf->num_elems = CPU_TO_LE16(1);
1426 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
1427 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
1429 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1430 ice_aqc_opc_free_res, NULL);
1432 ice_debug(hw, ICE_DBG_SW,
1433 "VEB counter resource could not be freed\n");
1434 ret_status = status;
1437 ice_free(hw, counter_buf);
1438 ice_free(hw, sw_buf);
1444 * @hw: pointer to the HW struct
1445 * @vsi_ctx: pointer to a VSI context struct
1446 * @cd: pointer to command details structure or NULL
1448 * Add a VSI context to the hardware (0x0210)
1451 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1452 struct ice_sq_cd *cd)
1454 struct ice_aqc_add_update_free_vsi_resp *res;
1455 struct ice_aqc_add_get_update_free_vsi *cmd;
1456 struct ice_aq_desc desc;
1457 enum ice_status status;
1459 cmd = &desc.params.vsi_cmd;
1460 res = &desc.params.add_update_free_vsi_res;
1462 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
1464 if (!vsi_ctx->alloc_from_pool)
1465 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
1466 ICE_AQ_VSI_IS_VALID);
1468 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
1470 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1472 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1473 sizeof(vsi_ctx->info), cd);
1476 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1477 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
1478 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
1486 * @hw: pointer to the HW struct
1487 * @vsi_ctx: pointer to a VSI context struct
1488 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1489 * @cd: pointer to command details structure or NULL
1491 * Free VSI context info from hardware (0x0213)
1494 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1495 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1497 struct ice_aqc_add_update_free_vsi_resp *resp;
1498 struct ice_aqc_add_get_update_free_vsi *cmd;
1499 struct ice_aq_desc desc;
1500 enum ice_status status;
1502 cmd = &desc.params.vsi_cmd;
1503 resp = &desc.params.add_update_free_vsi_res;
1505 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1507 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1509 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
1511 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1513 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1514 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1522 * @hw: pointer to the HW struct
1523 * @vsi_ctx: pointer to a VSI context struct
1524 * @cd: pointer to command details structure or NULL
1526 * Update VSI context in the hardware (0x0211)
1529 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1530 struct ice_sq_cd *cd)
1532 struct ice_aqc_add_update_free_vsi_resp *resp;
1533 struct ice_aqc_add_get_update_free_vsi *cmd;
1534 struct ice_aq_desc desc;
1535 enum ice_status status;
1537 cmd = &desc.params.vsi_cmd;
1538 resp = &desc.params.add_update_free_vsi_res;
1540 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1542 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1544 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1546 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1547 sizeof(vsi_ctx->info), cd);
1550 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1551 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1558 * ice_is_vsi_valid - check whether the VSI is valid or not
1559 * @hw: pointer to the HW struct
1560 * @vsi_handle: VSI handle
1562 * check whether the VSI is valid or not
1564 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1566 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1570 * ice_get_hw_vsi_num - return the HW VSI number
1571 * @hw: pointer to the HW struct
1572 * @vsi_handle: VSI handle
1574 * return the HW VSI number
1575 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1577 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1579 return hw->vsi_ctx[vsi_handle]->vsi_num;
1583 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1584 * @hw: pointer to the HW struct
1585 * @vsi_handle: VSI handle
1587 * return the VSI context entry for a given VSI handle
1589 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1591 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1595 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1596 * @hw: pointer to the HW struct
1597 * @vsi_handle: VSI handle
1598 * @vsi: VSI context pointer
1600 * save the VSI context entry for a given VSI handle
1603 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1605 hw->vsi_ctx[vsi_handle] = vsi;
1609 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1610 * @hw: pointer to the HW struct
1611 * @vsi_handle: VSI handle
1613 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1615 struct ice_vsi_ctx *vsi;
1618 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1621 ice_for_each_traffic_class(i) {
1622 if (vsi->lan_q_ctx[i]) {
1623 ice_free(hw, vsi->lan_q_ctx[i]);
1624 vsi->lan_q_ctx[i] = NULL;
1630 * ice_clear_vsi_ctx - clear the VSI context entry
1631 * @hw: pointer to the HW struct
1632 * @vsi_handle: VSI handle
1634 * clear the VSI context entry
1636 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1638 struct ice_vsi_ctx *vsi;
1640 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1642 ice_clear_vsi_q_ctx(hw, vsi_handle);
1644 hw->vsi_ctx[vsi_handle] = NULL;
1649 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1650 * @hw: pointer to the HW struct
1652 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1656 for (i = 0; i < ICE_MAX_VSI; i++)
1657 ice_clear_vsi_ctx(hw, i);
1661 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1662 * @hw: pointer to the HW struct
1663 * @vsi_handle: unique VSI handle provided by drivers
1664 * @vsi_ctx: pointer to a VSI context struct
1665 * @cd: pointer to command details structure or NULL
1667 * Add a VSI context to the hardware also add it into the VSI handle list.
1668 * If this function gets called after reset for existing VSIs then update
1669 * with the new HW VSI number in the corresponding VSI handle list entry.
1672 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1673 struct ice_sq_cd *cd)
1675 struct ice_vsi_ctx *tmp_vsi_ctx;
1676 enum ice_status status;
1678 if (vsi_handle >= ICE_MAX_VSI)
1679 return ICE_ERR_PARAM;
1680 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1683 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1685 /* Create a new VSI context */
1686 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1687 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
1689 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1690 return ICE_ERR_NO_MEMORY;
1692 *tmp_vsi_ctx = *vsi_ctx;
1694 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1696 /* update with new HW VSI num */
1697 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1704 * ice_free_vsi- free VSI context from hardware and VSI handle list
1705 * @hw: pointer to the HW struct
1706 * @vsi_handle: unique VSI handle
1707 * @vsi_ctx: pointer to a VSI context struct
1708 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1709 * @cd: pointer to command details structure or NULL
1711 * Free VSI context info from hardware as well as from VSI handle list
1714 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1715 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1717 enum ice_status status;
1719 if (!ice_is_vsi_valid(hw, vsi_handle))
1720 return ICE_ERR_PARAM;
1721 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1722 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1724 ice_clear_vsi_ctx(hw, vsi_handle);
1730 * @hw: pointer to the HW struct
1731 * @vsi_handle: unique VSI handle
1732 * @vsi_ctx: pointer to a VSI context struct
1733 * @cd: pointer to command details structure or NULL
1735 * Update VSI context in the hardware
1738 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1739 struct ice_sq_cd *cd)
1741 if (!ice_is_vsi_valid(hw, vsi_handle))
1742 return ICE_ERR_PARAM;
1743 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1744 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1748 * ice_aq_get_vsi_params
1749 * @hw: pointer to the HW struct
1750 * @vsi_ctx: pointer to a VSI context struct
1751 * @cd: pointer to command details structure or NULL
1753 * Get VSI context info from hardware (0x0212)
1756 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1757 struct ice_sq_cd *cd)
1759 struct ice_aqc_add_get_update_free_vsi *cmd;
1760 struct ice_aqc_get_vsi_resp *resp;
1761 struct ice_aq_desc desc;
1762 enum ice_status status;
1764 cmd = &desc.params.vsi_cmd;
1765 resp = &desc.params.get_vsi_resp;
1767 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1769 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1771 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1772 sizeof(vsi_ctx->info), cd);
1774 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1776 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1777 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1784 * ice_aq_add_update_mir_rule - add/update a mirror rule
1785 * @hw: pointer to the HW struct
1786 * @rule_type: Rule Type
1787 * @dest_vsi: VSI number to which packets will be mirrored
1788 * @count: length of the list
1789 * @mr_buf: buffer for list of mirrored VSI numbers
1790 * @cd: pointer to command details structure or NULL
1793 * Add/Update Mirror Rule (0x260).
1796 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1797 u16 count, struct ice_mir_rule_buf *mr_buf,
1798 struct ice_sq_cd *cd, u16 *rule_id)
1800 struct ice_aqc_add_update_mir_rule *cmd;
1801 struct ice_aq_desc desc;
1802 enum ice_status status;
1803 __le16 *mr_list = NULL;
1806 switch (rule_type) {
1807 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1808 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1809 /* Make sure count and mr_buf are set for these rule_types */
1810 if (!(count && mr_buf))
1811 return ICE_ERR_PARAM;
1813 buf_size = count * sizeof(__le16);
1814 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1816 return ICE_ERR_NO_MEMORY;
1818 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1819 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1820 /* Make sure count and mr_buf are not set for these
1823 if (count || mr_buf)
1824 return ICE_ERR_PARAM;
1827 ice_debug(hw, ICE_DBG_SW,
1828 "Error due to unsupported rule_type %u\n", rule_type);
1829 return ICE_ERR_OUT_OF_RANGE;
1832 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1834 /* Pre-process 'mr_buf' items for add/update of virtual port
1835 * ingress/egress mirroring (but not physical port ingress/egress
1841 for (i = 0; i < count; i++) {
1844 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1846 /* Validate specified VSI number, make sure it is less
1847 * than ICE_MAX_VSI, if not return with error.
1849 if (id >= ICE_MAX_VSI) {
1850 ice_debug(hw, ICE_DBG_SW,
1851 "Error VSI index (%u) out-of-range\n",
1853 ice_free(hw, mr_list);
1854 return ICE_ERR_OUT_OF_RANGE;
1857 /* add VSI to mirror rule */
1860 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1861 else /* remove VSI from mirror rule */
1862 mr_list[i] = CPU_TO_LE16(id);
1866 cmd = &desc.params.add_update_rule;
1867 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1868 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1869 ICE_AQC_RULE_ID_VALID_M);
1870 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1871 cmd->num_entries = CPU_TO_LE16(count);
1872 cmd->dest = CPU_TO_LE16(dest_vsi);
1874 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
1876 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1878 ice_free(hw, mr_list);
1884 * ice_aq_delete_mir_rule - delete a mirror rule
1885 * @hw: pointer to the HW struct
1886 * @rule_id: Mirror rule ID (to be deleted)
1887 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1888 * otherwise it is returned to the shared pool
1889 * @cd: pointer to command details structure or NULL
1891 * Delete Mirror Rule (0x261).
1894 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1895 struct ice_sq_cd *cd)
1897 struct ice_aqc_delete_mir_rule *cmd;
1898 struct ice_aq_desc desc;
1900 /* rule_id should be in the range 0...63 */
1901 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1902 return ICE_ERR_OUT_OF_RANGE;
1904 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1906 cmd = &desc.params.del_rule;
1907 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1908 cmd->rule_id = CPU_TO_LE16(rule_id);
1911 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1913 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1917 * ice_aq_alloc_free_vsi_list
1918 * @hw: pointer to the HW struct
1919 * @vsi_list_id: VSI list ID returned or used for lookup
1920 * @lkup_type: switch rule filter lookup type
1921 * @opc: switch rules population command type - pass in the command opcode
1923 * allocates or free a VSI list resource
1925 static enum ice_status
1926 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1927 enum ice_sw_lkup_type lkup_type,
1928 enum ice_adminq_opc opc)
1930 struct ice_aqc_alloc_free_res_elem *sw_buf;
1931 struct ice_aqc_res_elem *vsi_ele;
1932 enum ice_status status;
1935 buf_len = sizeof(*sw_buf);
1936 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1937 ice_malloc(hw, buf_len);
1939 return ICE_ERR_NO_MEMORY;
1940 sw_buf->num_elems = CPU_TO_LE16(1);
1942 if (lkup_type == ICE_SW_LKUP_MAC ||
1943 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1944 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1945 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1946 lkup_type == ICE_SW_LKUP_PROMISC ||
1947 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1948 lkup_type == ICE_SW_LKUP_LAST) {
1949 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1950 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1952 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1954 status = ICE_ERR_PARAM;
1955 goto ice_aq_alloc_free_vsi_list_exit;
1958 if (opc == ice_aqc_opc_free_res)
1959 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1961 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1963 goto ice_aq_alloc_free_vsi_list_exit;
1965 if (opc == ice_aqc_opc_alloc_res) {
1966 vsi_ele = &sw_buf->elem[0];
1967 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1970 ice_aq_alloc_free_vsi_list_exit:
1971 ice_free(hw, sw_buf);
1976 * ice_aq_set_storm_ctrl - Sets storm control configuration
1977 * @hw: pointer to the HW struct
1978 * @bcast_thresh: represents the upper threshold for broadcast storm control
1979 * @mcast_thresh: represents the upper threshold for multicast storm control
1980 * @ctl_bitmask: storm control control knobs
1982 * Sets the storm control configuration (0x0280)
1985 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1988 struct ice_aqc_storm_cfg *cmd;
1989 struct ice_aq_desc desc;
1991 cmd = &desc.params.storm_conf;
1993 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
1995 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1996 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1997 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1999 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2003 * ice_aq_get_storm_ctrl - gets storm control configuration
2004 * @hw: pointer to the HW struct
2005 * @bcast_thresh: represents the upper threshold for broadcast storm control
2006 * @mcast_thresh: represents the upper threshold for multicast storm control
2007 * @ctl_bitmask: storm control control knobs
2009 * Gets the storm control configuration (0x0281)
2012 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
2015 enum ice_status status;
2016 struct ice_aq_desc desc;
2018 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
2020 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2022 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
2025 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
2028 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
2031 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
2038 * ice_aq_sw_rules - add/update/remove switch rules
2039 * @hw: pointer to the HW struct
2040 * @rule_list: pointer to switch rule population list
2041 * @rule_list_sz: total size of the rule list in bytes
2042 * @num_rules: number of switch rules in the rule_list
2043 * @opc: switch rules population command type - pass in the command opcode
2044 * @cd: pointer to command details structure or NULL
2046 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
2048 static enum ice_status
2049 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
2050 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2052 struct ice_aq_desc desc;
2053 enum ice_status status;
2055 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2057 if (opc != ice_aqc_opc_add_sw_rules &&
2058 opc != ice_aqc_opc_update_sw_rules &&
2059 opc != ice_aqc_opc_remove_sw_rules)
2060 return ICE_ERR_PARAM;
2062 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2064 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2065 desc.params.sw_rules.num_rules_fltr_entry_index =
2066 CPU_TO_LE16(num_rules);
2067 status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
2068 if (opc != ice_aqc_opc_add_sw_rules &&
2069 hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
2070 status = ICE_ERR_DOES_NOT_EXIST;
2076 * ice_aq_add_recipe - add switch recipe
2077 * @hw: pointer to the HW struct
2078 * @s_recipe_list: pointer to switch rule population list
2079 * @num_recipes: number of switch recipes in the list
2080 * @cd: pointer to command details structure or NULL
2085 ice_aq_add_recipe(struct ice_hw *hw,
2086 struct ice_aqc_recipe_data_elem *s_recipe_list,
2087 u16 num_recipes, struct ice_sq_cd *cd)
2089 struct ice_aqc_add_get_recipe *cmd;
2090 struct ice_aq_desc desc;
2093 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2094 cmd = &desc.params.add_get_recipe;
2095 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
2097 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
2098 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2100 buf_size = num_recipes * sizeof(*s_recipe_list);
2102 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
2106 * ice_aq_get_recipe - get switch recipe
2107 * @hw: pointer to the HW struct
2108 * @s_recipe_list: pointer to switch rule population list
2109 * @num_recipes: pointer to the number of recipes (input and output)
2110 * @recipe_root: root recipe number of recipe(s) to retrieve
2111 * @cd: pointer to command details structure or NULL
2115 * On input, *num_recipes should equal the number of entries in s_recipe_list.
2116 * On output, *num_recipes will equal the number of entries returned in
2119 * The caller must supply enough space in s_recipe_list to hold all possible
2120 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
2123 ice_aq_get_recipe(struct ice_hw *hw,
2124 struct ice_aqc_recipe_data_elem *s_recipe_list,
2125 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
2127 struct ice_aqc_add_get_recipe *cmd;
2128 struct ice_aq_desc desc;
2129 enum ice_status status;
2132 if (*num_recipes != ICE_MAX_NUM_RECIPES)
2133 return ICE_ERR_PARAM;
2135 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2136 cmd = &desc.params.add_get_recipe;
2137 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
2139 cmd->return_index = CPU_TO_LE16(recipe_root);
2140 cmd->num_sub_recipes = 0;
2142 buf_size = *num_recipes * sizeof(*s_recipe_list);
2144 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
2145 /* cppcheck-suppress constArgument */
2146 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
2152 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
2153 * @hw: pointer to the HW struct
2154 * @profile_id: package profile ID to associate the recipe with
2155 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2156 * @cd: pointer to command details structure or NULL
2157 * Recipe to profile association (0x0291)
2160 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2161 struct ice_sq_cd *cd)
2163 struct ice_aqc_recipe_to_profile *cmd;
2164 struct ice_aq_desc desc;
2166 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2167 cmd = &desc.params.recipe_to_profile;
2168 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
2169 cmd->profile_id = CPU_TO_LE16(profile_id);
2170 /* Set the recipe ID bit in the bitmask to let the device know which
2171 * profile we are associating the recipe to
2173 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
2174 ICE_NONDMA_TO_NONDMA);
2176 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2180 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
2181 * @hw: pointer to the HW struct
2182 * @profile_id: package profile ID to associate the recipe with
2183 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2184 * @cd: pointer to command details structure or NULL
2185 * Associate profile ID with given recipe (0x0293)
2188 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2189 struct ice_sq_cd *cd)
2191 struct ice_aqc_recipe_to_profile *cmd;
2192 struct ice_aq_desc desc;
2193 enum ice_status status;
2195 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2196 cmd = &desc.params.recipe_to_profile;
2197 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
2198 cmd->profile_id = CPU_TO_LE16(profile_id);
2200 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2202 ice_memcpy(r_bitmap, cmd->recipe_assoc,
2203 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
2209 * ice_alloc_recipe - add recipe resource
2210 * @hw: pointer to the hardware structure
2211 * @rid: recipe ID returned as response to AQ call
2213 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
2215 struct ice_aqc_alloc_free_res_elem *sw_buf;
2216 enum ice_status status;
2219 buf_len = sizeof(*sw_buf);
2220 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2222 return ICE_ERR_NO_MEMORY;
2224 sw_buf->num_elems = CPU_TO_LE16(1);
2225 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
2226 ICE_AQC_RES_TYPE_S) |
2227 ICE_AQC_RES_TYPE_FLAG_SHARED);
2228 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2229 ice_aqc_opc_alloc_res, NULL);
2231 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2232 ice_free(hw, sw_buf);
2237 /* ice_init_port_info - Initialize port_info with switch configuration data
2238 * @pi: pointer to port_info
2239 * @vsi_port_num: VSI number or port number
2240 * @type: Type of switch element (port or VSI)
2241 * @swid: switch ID of the switch the element is attached to
2242 * @pf_vf_num: PF or VF number
2243 * @is_vf: true if the element is a VF, false otherwise
2246 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2247 u16 swid, u16 pf_vf_num, bool is_vf)
2250 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2251 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2253 pi->pf_vf_num = pf_vf_num;
2255 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
2256 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
2259 ice_debug(pi->hw, ICE_DBG_SW,
2260 "incorrect VSI/port type received\n");
2265 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
2266 * @hw: pointer to the hardware structure
2268 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
2270 struct ice_aqc_get_sw_cfg_resp *rbuf;
2271 enum ice_status status;
2278 num_total_ports = 1;
2280 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
2281 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
2284 return ICE_ERR_NO_MEMORY;
2286 /* Multiple calls to ice_aq_get_sw_cfg may be required
2287 * to get all the switch configuration information. The need
2288 * for additional calls is indicated by ice_aq_get_sw_cfg
2289 * writing a non-zero value in req_desc
2292 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
2293 &req_desc, &num_elems, NULL);
2298 for (i = 0; i < num_elems; i++) {
2299 struct ice_aqc_get_sw_cfg_resp_elem *ele;
2300 u16 pf_vf_num, swid, vsi_port_num;
2304 ele = rbuf[i].elements;
2305 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
2306 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
2308 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
2309 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
2311 swid = LE16_TO_CPU(ele->swid);
2313 if (LE16_TO_CPU(ele->pf_vf_num) &
2314 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
2317 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
2318 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
2321 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2322 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
2323 if (j == num_total_ports) {
2324 ice_debug(hw, ICE_DBG_SW,
2325 "more ports than expected\n");
2326 status = ICE_ERR_CFG;
2329 ice_init_port_info(hw->port_info,
2330 vsi_port_num, res_type, swid,
2338 } while (req_desc && !status);
2341 ice_free(hw, (void *)rbuf);
2346 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2347 * @hw: pointer to the hardware structure
2348 * @fi: filter info structure to fill/update
2350 * This helper function populates the lb_en and lan_en elements of the provided
2351 * ice_fltr_info struct using the switch's type and characteristics of the
2352 * switch rule being configured.
2354 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
2356 if ((fi->flag & ICE_FLTR_RX) &&
2357 (fi->fltr_act == ICE_FWD_TO_VSI ||
2358 fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
2359 fi->lkup_type == ICE_SW_LKUP_LAST)
2363 if ((fi->flag & ICE_FLTR_TX) &&
2364 (fi->fltr_act == ICE_FWD_TO_VSI ||
2365 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2366 fi->fltr_act == ICE_FWD_TO_Q ||
2367 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2368 /* Setting LB for prune actions will result in replicated
2369 * packets to the internal switch that will be dropped.
2371 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2374 /* Set lan_en to TRUE if
2375 * 1. The switch is a VEB AND
2377 * 2.1 The lookup is a directional lookup like ethertype,
2378 * promiscuous, ethertype-MAC, promiscuous-VLAN
2379 * and default-port OR
2380 * 2.2 The lookup is VLAN, OR
2381 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2382 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
2386 * The switch is a VEPA.
2388 * In all other cases, the LAN enable has to be set to false.
2391 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2392 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
2393 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2394 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2395 fi->lkup_type == ICE_SW_LKUP_DFLT ||
2396 fi->lkup_type == ICE_SW_LKUP_VLAN ||
2397 (fi->lkup_type == ICE_SW_LKUP_MAC &&
2398 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
2399 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
2400 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
2409 * ice_fill_sw_rule - Helper function to fill switch rule structure
2410 * @hw: pointer to the hardware structure
2411 * @f_info: entry containing packet forwarding information
2412 * @s_rule: switch rule structure to be filled in based on mac_entry
2413 * @opc: switch rules population command type - pass in the command opcode
2416 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
2417 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
2419 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
2427 if (opc == ice_aqc_opc_remove_sw_rules) {
2428 s_rule->pdata.lkup_tx_rx.act = 0;
2429 s_rule->pdata.lkup_tx_rx.index =
2430 CPU_TO_LE16(f_info->fltr_rule_id);
2431 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
2435 eth_hdr_sz = sizeof(dummy_eth_header);
2436 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
2438 /* initialize the ether header with a dummy header */
2439 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
2440 ice_fill_sw_info(hw, f_info);
2442 switch (f_info->fltr_act) {
2443 case ICE_FWD_TO_VSI:
2444 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
2445 ICE_SINGLE_ACT_VSI_ID_M;
2446 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2447 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2448 ICE_SINGLE_ACT_VALID_BIT;
2450 case ICE_FWD_TO_VSI_LIST:
2451 act |= ICE_SINGLE_ACT_VSI_LIST;
2452 act |= (f_info->fwd_id.vsi_list_id <<
2453 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
2454 ICE_SINGLE_ACT_VSI_LIST_ID_M;
2455 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2456 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2457 ICE_SINGLE_ACT_VALID_BIT;
2460 act |= ICE_SINGLE_ACT_TO_Q;
2461 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2462 ICE_SINGLE_ACT_Q_INDEX_M;
2464 case ICE_DROP_PACKET:
2465 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
2466 ICE_SINGLE_ACT_VALID_BIT;
2468 case ICE_FWD_TO_QGRP:
2469 q_rgn = f_info->qgrp_size > 0 ?
2470 (u8)ice_ilog2(f_info->qgrp_size) : 0;
2471 act |= ICE_SINGLE_ACT_TO_Q;
2472 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2473 ICE_SINGLE_ACT_Q_INDEX_M;
2474 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
2475 ICE_SINGLE_ACT_Q_REGION_M;
2482 act |= ICE_SINGLE_ACT_LB_ENABLE;
2484 act |= ICE_SINGLE_ACT_LAN_ENABLE;
2486 switch (f_info->lkup_type) {
2487 case ICE_SW_LKUP_MAC:
2488 daddr = f_info->l_data.mac.mac_addr;
2490 case ICE_SW_LKUP_VLAN:
2491 vlan_id = f_info->l_data.vlan.vlan_id;
2492 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
2493 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
2494 act |= ICE_SINGLE_ACT_PRUNE;
2495 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
2498 case ICE_SW_LKUP_ETHERTYPE_MAC:
2499 daddr = f_info->l_data.ethertype_mac.mac_addr;
2501 case ICE_SW_LKUP_ETHERTYPE:
2502 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2503 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
2505 case ICE_SW_LKUP_MAC_VLAN:
2506 daddr = f_info->l_data.mac_vlan.mac_addr;
2507 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2509 case ICE_SW_LKUP_PROMISC_VLAN:
2510 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2512 case ICE_SW_LKUP_PROMISC:
2513 daddr = f_info->l_data.mac_vlan.mac_addr;
2519 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
2520 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
2521 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
2523 /* Recipe set depending on lookup type */
2524 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
2525 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
2526 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2529 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
2530 ICE_NONDMA_TO_NONDMA);
2532 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
2533 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
2534 *off = CPU_TO_BE16(vlan_id);
2537 /* Create the switch rule with the final dummy Ethernet header */
2538 if (opc != ice_aqc_opc_update_sw_rules)
2539 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
2543 * ice_add_marker_act
2544 * @hw: pointer to the hardware structure
2545 * @m_ent: the management entry for which sw marker needs to be added
2546 * @sw_marker: sw marker to tag the Rx descriptor with
2547 * @l_id: large action resource ID
2549 * Create a large action to hold software marker and update the switch rule
2550 * entry pointed by m_ent with newly created large action
2552 static enum ice_status
2553 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2554 u16 sw_marker, u16 l_id)
2556 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
2557 /* For software marker we need 3 large actions
2558 * 1. FWD action: FWD TO VSI or VSI LIST
2559 * 2. GENERIC VALUE action to hold the profile ID
2560 * 3. GENERIC VALUE action to hold the software marker ID
2562 const u16 num_lg_acts = 3;
2563 enum ice_status status;
2569 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2570 return ICE_ERR_PARAM;
2572 /* Create two back-to-back switch rules and submit them to the HW using
2573 * one memory buffer:
2577 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2578 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2579 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2581 return ICE_ERR_NO_MEMORY;
2583 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2585 /* Fill in the first switch rule i.e. large action */
2586 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2587 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2588 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2590 /* First action VSI forwarding or VSI list forwarding depending on how
2593 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2594 m_ent->fltr_info.fwd_id.hw_vsi_id;
2596 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2597 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2598 ICE_LG_ACT_VSI_LIST_ID_M;
2599 if (m_ent->vsi_count > 1)
2600 act |= ICE_LG_ACT_VSI_LIST;
2601 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2603 /* Second action descriptor type */
2604 act = ICE_LG_ACT_GENERIC;
2606 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2607 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2609 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2610 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2612 /* Third action Marker value */
2613 act |= ICE_LG_ACT_GENERIC;
2614 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2615 ICE_LG_ACT_GENERIC_VALUE_M;
2617 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
2619 /* call the fill switch rule to fill the lookup Tx Rx structure */
2620 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2621 ice_aqc_opc_update_sw_rules);
2623 /* Update the action to point to the large action ID */
2624 rx_tx->pdata.lkup_tx_rx.act =
2625 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
2626 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2627 ICE_SINGLE_ACT_PTR_VAL_M));
2629 /* Use the filter rule ID of the previously created rule with single
2630 * act. Once the update happens, hardware will treat this as large
2633 rx_tx->pdata.lkup_tx_rx.index =
2634 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
2636 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2637 ice_aqc_opc_update_sw_rules, NULL);
2639 m_ent->lg_act_idx = l_id;
2640 m_ent->sw_marker_id = sw_marker;
2643 ice_free(hw, lg_act);
2648 * ice_add_counter_act - add/update filter rule with counter action
2649 * @hw: pointer to the hardware structure
2650 * @m_ent: the management entry for which counter needs to be added
2651 * @counter_id: VLAN counter ID returned as part of allocate resource
2652 * @l_id: large action resource ID
2654 static enum ice_status
2655 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2656 u16 counter_id, u16 l_id)
2658 struct ice_aqc_sw_rules_elem *lg_act;
2659 struct ice_aqc_sw_rules_elem *rx_tx;
2660 enum ice_status status;
2661 /* 2 actions will be added while adding a large action counter */
2662 const int num_acts = 2;
2669 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2670 return ICE_ERR_PARAM;
2672 /* Create two back-to-back switch rules and submit them to the HW using
2673 * one memory buffer:
2677 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2678 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2679 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
2682 return ICE_ERR_NO_MEMORY;
2684 rx_tx = (struct ice_aqc_sw_rules_elem *)
2685 ((u8 *)lg_act + lg_act_size);
2687 /* Fill in the first switch rule i.e. large action */
2688 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2689 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2690 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2692 /* First action VSI forwarding or VSI list forwarding depending on how
2695 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2696 m_ent->fltr_info.fwd_id.hw_vsi_id;
2698 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2699 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2700 ICE_LG_ACT_VSI_LIST_ID_M;
2701 if (m_ent->vsi_count > 1)
2702 act |= ICE_LG_ACT_VSI_LIST;
2703 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2705 /* Second action counter ID */
2706 act = ICE_LG_ACT_STAT_COUNT;
2707 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2708 ICE_LG_ACT_STAT_COUNT_M;
2709 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2711 /* call the fill switch rule to fill the lookup Tx Rx structure */
2712 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2713 ice_aqc_opc_update_sw_rules);
2715 act = ICE_SINGLE_ACT_PTR;
2716 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2717 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2719 /* Use the filter rule ID of the previously created rule with single
2720 * act. Once the update happens, hardware will treat this as large
2723 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2724 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
2726 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2727 ice_aqc_opc_update_sw_rules, NULL);
2729 m_ent->lg_act_idx = l_id;
2730 m_ent->counter_index = counter_id;
2733 ice_free(hw, lg_act);
2738 * ice_create_vsi_list_map
2739 * @hw: pointer to the hardware structure
2740 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2741 * @num_vsi: number of VSI handles in the array
2742 * @vsi_list_id: VSI list ID generated as part of allocate resource
2744 * Helper function to create a new entry of VSI list ID to VSI mapping
2745 * using the given VSI list ID
2747 static struct ice_vsi_list_map_info *
2748 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2751 struct ice_switch_info *sw = hw->switch_info;
2752 struct ice_vsi_list_map_info *v_map;
2755 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2760 v_map->vsi_list_id = vsi_list_id;
2762 for (i = 0; i < num_vsi; i++)
2763 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
2765 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2770 * ice_update_vsi_list_rule
2771 * @hw: pointer to the hardware structure
2772 * @vsi_handle_arr: array of VSI handles to form a VSI list
2773 * @num_vsi: number of VSI handles in the array
2774 * @vsi_list_id: VSI list ID generated as part of allocate resource
2775 * @remove: Boolean value to indicate if this is a remove action
2776 * @opc: switch rules population command type - pass in the command opcode
2777 * @lkup_type: lookup type of the filter
2779 * Call AQ command to add a new switch rule or update existing switch rule
2780 * using the given VSI list ID
2782 static enum ice_status
2783 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2784 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2785 enum ice_sw_lkup_type lkup_type)
2787 struct ice_aqc_sw_rules_elem *s_rule;
2788 enum ice_status status;
2794 return ICE_ERR_PARAM;
2796 if (lkup_type == ICE_SW_LKUP_MAC ||
2797 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2798 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2799 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2800 lkup_type == ICE_SW_LKUP_PROMISC ||
2801 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2802 lkup_type == ICE_SW_LKUP_LAST)
2803 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2804 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2805 else if (lkup_type == ICE_SW_LKUP_VLAN)
2806 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2807 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2809 return ICE_ERR_PARAM;
2811 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2812 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2814 return ICE_ERR_NO_MEMORY;
2815 for (i = 0; i < num_vsi; i++) {
2816 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2817 status = ICE_ERR_PARAM;
2820 /* AQ call requires hw_vsi_id(s) */
2821 s_rule->pdata.vsi_list.vsi[i] =
2822 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2825 s_rule->type = CPU_TO_LE16(rule_type);
2826 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2827 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2829 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2832 ice_free(hw, s_rule);
2837 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2838 * @hw: pointer to the HW struct
2839 * @vsi_handle_arr: array of VSI handles to form a VSI list
2840 * @num_vsi: number of VSI handles in the array
2841 * @vsi_list_id: stores the ID of the VSI list to be created
2842 * @lkup_type: switch rule filter's lookup type
2844 static enum ice_status
2845 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2846 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2848 enum ice_status status;
2850 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2851 ice_aqc_opc_alloc_res);
2855 /* Update the newly created VSI list to include the specified VSIs */
2856 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2857 *vsi_list_id, false,
2858 ice_aqc_opc_add_sw_rules, lkup_type);
2862 * ice_create_pkt_fwd_rule
2863 * @hw: pointer to the hardware structure
2864 * @recp_list: corresponding filter management list
2865 * @f_entry: entry containing packet forwarding information
2867 * Create switch rule with given filter information and add an entry
2868 * to the corresponding filter management list to track this switch rule
2871 static enum ice_status
2872 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2873 struct ice_fltr_list_entry *f_entry)
2875 struct ice_fltr_mgmt_list_entry *fm_entry;
2876 struct ice_aqc_sw_rules_elem *s_rule;
2877 enum ice_status status;
2879 s_rule = (struct ice_aqc_sw_rules_elem *)
2880 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2882 return ICE_ERR_NO_MEMORY;
2883 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2884 ice_malloc(hw, sizeof(*fm_entry));
2886 status = ICE_ERR_NO_MEMORY;
2887 goto ice_create_pkt_fwd_rule_exit;
2890 fm_entry->fltr_info = f_entry->fltr_info;
2892 /* Initialize all the fields for the management entry */
2893 fm_entry->vsi_count = 1;
2894 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2895 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2896 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2898 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2899 ice_aqc_opc_add_sw_rules);
2901 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2902 ice_aqc_opc_add_sw_rules, NULL);
2904 ice_free(hw, fm_entry);
2905 goto ice_create_pkt_fwd_rule_exit;
2908 f_entry->fltr_info.fltr_rule_id =
2909 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2910 fm_entry->fltr_info.fltr_rule_id =
2911 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2913 /* The book keeping entries will get removed when base driver
2914 * calls remove filter AQ command
2916 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
2918 ice_create_pkt_fwd_rule_exit:
2919 ice_free(hw, s_rule);
2924 * ice_update_pkt_fwd_rule
2925 * @hw: pointer to the hardware structure
2926 * @f_info: filter information for switch rule
2928 * Call AQ command to update a previously created switch rule with a
2931 static enum ice_status
2932 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2934 struct ice_aqc_sw_rules_elem *s_rule;
2935 enum ice_status status;
2937 s_rule = (struct ice_aqc_sw_rules_elem *)
2938 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2940 return ICE_ERR_NO_MEMORY;
2942 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
2944 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2946 /* Update switch rule with new rule set to forward VSI list */
2947 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2948 ice_aqc_opc_update_sw_rules, NULL);
2950 ice_free(hw, s_rule);
2955 * ice_update_sw_rule_bridge_mode
2956 * @hw: pointer to the HW struct
2958 * Updates unicast switch filter rules based on VEB/VEPA mode
2960 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2962 struct ice_switch_info *sw = hw->switch_info;
2963 struct ice_fltr_mgmt_list_entry *fm_entry;
2964 enum ice_status status = ICE_SUCCESS;
2965 struct LIST_HEAD_TYPE *rule_head;
2966 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2968 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2969 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2971 ice_acquire_lock(rule_lock);
2972 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2974 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2975 u8 *addr = fi->l_data.mac.mac_addr;
2977 /* Update unicast Tx rules to reflect the selected
2980 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2981 (fi->fltr_act == ICE_FWD_TO_VSI ||
2982 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2983 fi->fltr_act == ICE_FWD_TO_Q ||
2984 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2985 status = ice_update_pkt_fwd_rule(hw, fi);
2991 ice_release_lock(rule_lock);
2997 * ice_add_update_vsi_list
2998 * @hw: pointer to the hardware structure
2999 * @m_entry: pointer to current filter management list entry
3000 * @cur_fltr: filter information from the book keeping entry
3001 * @new_fltr: filter information with the new VSI to be added
3003 * Call AQ command to add or update previously created VSI list with new VSI.
3005 * Helper function to do book keeping associated with adding filter information
3006 * The algorithm to do the book keeping is described below :
3007 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
3008 * if only one VSI has been added till now
3009 * Allocate a new VSI list and add two VSIs
3010 * to this list using switch rule command
3011 * Update the previously created switch rule with the
3012 * newly created VSI list ID
3013 * if a VSI list was previously created
3014 * Add the new VSI to the previously created VSI list set
3015 * using the update switch rule command
3017 static enum ice_status
3018 ice_add_update_vsi_list(struct ice_hw *hw,
3019 struct ice_fltr_mgmt_list_entry *m_entry,
3020 struct ice_fltr_info *cur_fltr,
3021 struct ice_fltr_info *new_fltr)
3023 enum ice_status status = ICE_SUCCESS;
3024 u16 vsi_list_id = 0;
3026 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
3027 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
3028 return ICE_ERR_NOT_IMPL;
3030 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
3031 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
3032 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
3033 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
3034 return ICE_ERR_NOT_IMPL;
3036 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
3037 /* Only one entry existed in the mapping and it was not already
3038 * a part of a VSI list. So, create a VSI list with the old and
3041 struct ice_fltr_info tmp_fltr;
3042 u16 vsi_handle_arr[2];
3044 /* A rule already exists with the new VSI being added */
3045 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
3046 return ICE_ERR_ALREADY_EXISTS;
3048 vsi_handle_arr[0] = cur_fltr->vsi_handle;
3049 vsi_handle_arr[1] = new_fltr->vsi_handle;
3050 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3052 new_fltr->lkup_type);
3056 tmp_fltr = *new_fltr;
3057 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
3058 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3059 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3060 /* Update the previous switch rule of "MAC forward to VSI" to
3061 * "MAC fwd to VSI list"
3063 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3067 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
3068 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3069 m_entry->vsi_list_info =
3070 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3073 /* If this entry was large action then the large action needs
3074 * to be updated to point to FWD to VSI list
3076 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
3078 ice_add_marker_act(hw, m_entry,
3079 m_entry->sw_marker_id,
3080 m_entry->lg_act_idx);
3082 u16 vsi_handle = new_fltr->vsi_handle;
3083 enum ice_adminq_opc opcode;
3085 if (!m_entry->vsi_list_info)
3088 /* A rule already exists with the new VSI being added */
3089 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
3092 /* Update the previously created VSI list set with
3093 * the new VSI ID passed in
3095 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
3096 opcode = ice_aqc_opc_update_sw_rules;
3098 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
3099 vsi_list_id, false, opcode,
3100 new_fltr->lkup_type);
3101 /* update VSI list mapping info with new VSI ID */
3103 ice_set_bit(vsi_handle,
3104 m_entry->vsi_list_info->vsi_map);
3107 m_entry->vsi_count++;
3112 * ice_find_rule_entry - Search a rule entry
3113 * @list_head: head of rule list
3114 * @f_info: rule information
3116 * Helper function to search for a given rule entry
3117 * Returns pointer to entry storing the rule if found
3119 static struct ice_fltr_mgmt_list_entry *
3120 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
3121 struct ice_fltr_info *f_info)
3123 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
3125 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3127 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3128 sizeof(f_info->l_data)) &&
3129 f_info->flag == list_itr->fltr_info.flag) {
3138 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
3139 * @recp_list: VSI lists needs to be searched
3140 * @vsi_handle: VSI handle to be found in VSI list
3141 * @vsi_list_id: VSI list ID found containing vsi_handle
3143 * Helper function to search a VSI list with single entry containing given VSI
3144 * handle element. This can be extended further to search VSI list with more
3145 * than 1 vsi_count. Returns pointer to VSI list entry if found.
3147 static struct ice_vsi_list_map_info *
3148 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
3151 struct ice_vsi_list_map_info *map_info = NULL;
3152 struct LIST_HEAD_TYPE *list_head;
3154 list_head = &recp_list->filt_rules;
3155 if (recp_list->adv_rule) {
3156 struct ice_adv_fltr_mgmt_list_entry *list_itr;
3158 LIST_FOR_EACH_ENTRY(list_itr, list_head,
3159 ice_adv_fltr_mgmt_list_entry,
3161 if (list_itr->vsi_list_info) {
3162 map_info = list_itr->vsi_list_info;
3163 if (ice_is_bit_set(map_info->vsi_map,
3165 *vsi_list_id = map_info->vsi_list_id;
3171 struct ice_fltr_mgmt_list_entry *list_itr;
3173 LIST_FOR_EACH_ENTRY(list_itr, list_head,
3174 ice_fltr_mgmt_list_entry,
3176 if (list_itr->vsi_count == 1 &&
3177 list_itr->vsi_list_info) {
3178 map_info = list_itr->vsi_list_info;
3179 if (ice_is_bit_set(map_info->vsi_map,
3181 *vsi_list_id = map_info->vsi_list_id;
3191 * ice_add_rule_internal - add rule for a given lookup type
3192 * @hw: pointer to the hardware structure
3193 * @recp_list: recipe list for which rule has to be added
3194 * @lport: logic port number on which function add rule
3195 * @f_entry: structure containing MAC forwarding information
3197 * Adds or updates the rule lists for a given recipe
3199 static enum ice_status
3200 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3201 u8 lport, struct ice_fltr_list_entry *f_entry)
3203 struct ice_fltr_info *new_fltr, *cur_fltr;
3204 struct ice_fltr_mgmt_list_entry *m_entry;
3205 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3206 enum ice_status status = ICE_SUCCESS;
3208 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3209 return ICE_ERR_PARAM;
3211 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
3212 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
3213 f_entry->fltr_info.fwd_id.hw_vsi_id =
3214 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3216 rule_lock = &recp_list->filt_rule_lock;
3218 ice_acquire_lock(rule_lock);
3219 new_fltr = &f_entry->fltr_info;
3220 if (new_fltr->flag & ICE_FLTR_RX)
3221 new_fltr->src = lport;
3222 else if (new_fltr->flag & ICE_FLTR_TX)
3224 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3226 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
3228 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3229 goto exit_add_rule_internal;
3232 cur_fltr = &m_entry->fltr_info;
3233 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
3235 exit_add_rule_internal:
3236 ice_release_lock(rule_lock);
3241 * ice_remove_vsi_list_rule
3242 * @hw: pointer to the hardware structure
3243 * @vsi_list_id: VSI list ID generated as part of allocate resource
3244 * @lkup_type: switch rule filter lookup type
3246 * The VSI list should be emptied before this function is called to remove the
3249 static enum ice_status
3250 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
3251 enum ice_sw_lkup_type lkup_type)
3253 /* Free the vsi_list resource that we allocated. It is assumed that the
3254 * list is empty at this point.
3256 return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
3257 ice_aqc_opc_free_res);
3261 * ice_rem_update_vsi_list
3262 * @hw: pointer to the hardware structure
3263 * @vsi_handle: VSI handle of the VSI to remove
3264 * @fm_list: filter management entry for which the VSI list management needs to
3267 static enum ice_status
3268 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
3269 struct ice_fltr_mgmt_list_entry *fm_list)
3271 enum ice_sw_lkup_type lkup_type;
3272 enum ice_status status = ICE_SUCCESS;
3275 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
3276 fm_list->vsi_count == 0)
3277 return ICE_ERR_PARAM;
3279 /* A rule with the VSI being removed does not exist */
3280 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
3281 return ICE_ERR_DOES_NOT_EXIST;
3283 lkup_type = fm_list->fltr_info.lkup_type;
3284 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
3285 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
3286 ice_aqc_opc_update_sw_rules,
3291 fm_list->vsi_count--;
3292 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
3294 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
3295 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
3296 struct ice_vsi_list_map_info *vsi_list_info =
3297 fm_list->vsi_list_info;
3300 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
3302 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
3303 return ICE_ERR_OUT_OF_RANGE;
3305 /* Make sure VSI list is empty before removing it below */
3306 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
3308 ice_aqc_opc_update_sw_rules,
3313 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
3314 tmp_fltr_info.fwd_id.hw_vsi_id =
3315 ice_get_hw_vsi_num(hw, rem_vsi_handle);
3316 tmp_fltr_info.vsi_handle = rem_vsi_handle;
3317 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
3319 ice_debug(hw, ICE_DBG_SW,
3320 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
3321 tmp_fltr_info.fwd_id.hw_vsi_id, status);
3325 fm_list->fltr_info = tmp_fltr_info;
3328 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
3329 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
3330 struct ice_vsi_list_map_info *vsi_list_info =
3331 fm_list->vsi_list_info;
3333 /* Remove the VSI list since it is no longer used */
3334 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
3336 ice_debug(hw, ICE_DBG_SW,
3337 "Failed to remove VSI list %d, error %d\n",
3338 vsi_list_id, status);
3342 LIST_DEL(&vsi_list_info->list_entry);
3343 ice_free(hw, vsi_list_info);
3344 fm_list->vsi_list_info = NULL;
3351 * ice_remove_rule_internal - Remove a filter rule of a given type
3353 * @hw: pointer to the hardware structure
3354 * @recp_list: recipe list for which the rule needs to removed
3355 * @f_entry: rule entry containing filter information
3357 static enum ice_status
3358 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3359 struct ice_fltr_list_entry *f_entry)
3361 struct ice_fltr_mgmt_list_entry *list_elem;
3362 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3363 enum ice_status status = ICE_SUCCESS;
3364 bool remove_rule = false;
3367 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3368 return ICE_ERR_PARAM;
3369 f_entry->fltr_info.fwd_id.hw_vsi_id =
3370 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3372 rule_lock = &recp_list->filt_rule_lock;
3373 ice_acquire_lock(rule_lock);
3374 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
3375 &f_entry->fltr_info);
3377 status = ICE_ERR_DOES_NOT_EXIST;
3381 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
3383 } else if (!list_elem->vsi_list_info) {
3384 status = ICE_ERR_DOES_NOT_EXIST;
3386 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
3387 /* a ref_cnt > 1 indicates that the vsi_list is being
3388 * shared by multiple rules. Decrement the ref_cnt and
3389 * remove this rule, but do not modify the list, as it
3390 * is in-use by other rules.
3392 list_elem->vsi_list_info->ref_cnt--;
3395 /* a ref_cnt of 1 indicates the vsi_list is only used
3396 * by one rule. However, the original removal request is only
3397 * for a single VSI. Update the vsi_list first, and only
3398 * remove the rule if there are no further VSIs in this list.
3400 vsi_handle = f_entry->fltr_info.vsi_handle;
3401 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
3404 /* if VSI count goes to zero after updating the VSI list */
3405 if (list_elem->vsi_count == 0)
3410 /* Remove the lookup rule */
3411 struct ice_aqc_sw_rules_elem *s_rule;
3413 s_rule = (struct ice_aqc_sw_rules_elem *)
3414 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
3416 status = ICE_ERR_NO_MEMORY;
3420 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3421 ice_aqc_opc_remove_sw_rules);
3423 status = ice_aq_sw_rules(hw, s_rule,
3424 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
3425 ice_aqc_opc_remove_sw_rules, NULL);
3427 /* Remove a book keeping from the list */
3428 ice_free(hw, s_rule);
3433 LIST_DEL(&list_elem->list_entry);
3434 ice_free(hw, list_elem);
3437 ice_release_lock(rule_lock);
3442 * ice_aq_get_res_alloc - get allocated resources
3443 * @hw: pointer to the HW struct
3444 * @num_entries: pointer to u16 to store the number of resource entries returned
3445 * @buf: pointer to user-supplied buffer
3446 * @buf_size: size of buff
3447 * @cd: pointer to command details structure or NULL
3449 * The user-supplied buffer must be large enough to store the resource
3450 * information for all resource types. Each resource type is an
3451 * ice_aqc_get_res_resp_data_elem structure.
3454 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
3455 u16 buf_size, struct ice_sq_cd *cd)
3457 struct ice_aqc_get_res_alloc *resp;
3458 enum ice_status status;
3459 struct ice_aq_desc desc;
3462 return ICE_ERR_BAD_PTR;
3464 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
3465 return ICE_ERR_INVAL_SIZE;
3467 resp = &desc.params.get_res;
3469 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
3470 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3472 if (!status && num_entries)
3473 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
3479 * ice_aq_get_res_descs - get allocated resource descriptors
3480 * @hw: pointer to the hardware structure
3481 * @num_entries: number of resource entries in buffer
3482 * @buf: Indirect buffer to hold data parameters and response
3483 * @buf_size: size of buffer for indirect commands
3484 * @res_type: resource type
3485 * @res_shared: is resource shared
3486 * @desc_id: input - first desc ID to start; output - next desc ID
3487 * @cd: pointer to command details structure or NULL
3490 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
3491 struct ice_aqc_get_allocd_res_desc_resp *buf,
3492 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
3493 struct ice_sq_cd *cd)
3495 struct ice_aqc_get_allocd_res_desc *cmd;
3496 struct ice_aq_desc desc;
3497 enum ice_status status;
3499 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3501 cmd = &desc.params.get_res_desc;
3504 return ICE_ERR_PARAM;
3506 if (buf_size != (num_entries * sizeof(*buf)))
3507 return ICE_ERR_PARAM;
3509 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
3511 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
3512 ICE_AQC_RES_TYPE_M) | (res_shared ?
3513 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
3514 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
3516 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3518 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
3524 * ice_add_mac_rule - Add a MAC address based filter rule
3525 * @hw: pointer to the hardware structure
3526 * @m_list: list of MAC addresses and forwarding information
3527 * @sw: pointer to switch info struct for which function add rule
3528 * @lport: logic port number on which function add rule
3530 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
3531 * multiple unicast addresses, the function assumes that all the
3532 * addresses are unique in a given add_mac call. It doesn't
3533 * check for duplicates in this case, removing duplicates from a given
3534 * list should be taken care of in the caller of this function.
3536 static enum ice_status
3537 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
3538 struct ice_switch_info *sw, u8 lport)
3540 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
3541 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3542 struct ice_fltr_list_entry *m_list_itr;
3543 struct LIST_HEAD_TYPE *rule_head;
3544 u16 total_elem_left, s_rule_size;
3545 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3546 enum ice_status status = ICE_SUCCESS;
3547 u16 num_unicast = 0;
3551 rule_lock = &recp_list->filt_rule_lock;
3552 rule_head = &recp_list->filt_rules;
3554 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3556 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3560 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3561 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3562 if (!ice_is_vsi_valid(hw, vsi_handle))
3563 return ICE_ERR_PARAM;
3564 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3565 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3566 /* update the src in case it is VSI num */
3567 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3568 return ICE_ERR_PARAM;
3569 m_list_itr->fltr_info.src = hw_vsi_id;
3570 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3571 IS_ZERO_ETHER_ADDR(add))
3572 return ICE_ERR_PARAM;
3573 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3574 /* Don't overwrite the unicast address */
3575 ice_acquire_lock(rule_lock);
3576 if (ice_find_rule_entry(rule_head,
3577 &m_list_itr->fltr_info)) {
3578 ice_release_lock(rule_lock);
3579 return ICE_ERR_ALREADY_EXISTS;
3581 ice_release_lock(rule_lock);
3583 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3584 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3585 m_list_itr->status =
3586 ice_add_rule_internal(hw, recp_list, lport,
3588 if (m_list_itr->status)
3589 return m_list_itr->status;
3593 ice_acquire_lock(rule_lock);
3594 /* Exit if no suitable entries were found for adding bulk switch rule */
3596 status = ICE_SUCCESS;
3597 goto ice_add_mac_exit;
3600 /* Allocate switch rule buffer for the bulk update for unicast */
3601 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3602 s_rule = (struct ice_aqc_sw_rules_elem *)
3603 ice_calloc(hw, num_unicast, s_rule_size);
3605 status = ICE_ERR_NO_MEMORY;
3606 goto ice_add_mac_exit;
3610 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3612 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3613 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3615 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3616 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3617 ice_aqc_opc_add_sw_rules);
3618 r_iter = (struct ice_aqc_sw_rules_elem *)
3619 ((u8 *)r_iter + s_rule_size);
3623 /* Call AQ bulk switch rule update for all unicast addresses */
3625 /* Call AQ switch rule in AQ_MAX chunk */
3626 for (total_elem_left = num_unicast; total_elem_left > 0;
3627 total_elem_left -= elem_sent) {
3628 struct ice_aqc_sw_rules_elem *entry = r_iter;
3630 elem_sent = MIN_T(u8, total_elem_left,
3631 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
3632 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3633 elem_sent, ice_aqc_opc_add_sw_rules,
3636 goto ice_add_mac_exit;
3637 r_iter = (struct ice_aqc_sw_rules_elem *)
3638 ((u8 *)r_iter + (elem_sent * s_rule_size));
3641 /* Fill up rule ID based on the value returned from FW */
3643 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3645 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3646 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3647 struct ice_fltr_mgmt_list_entry *fm_entry;
3649 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3650 f_info->fltr_rule_id =
3651 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3652 f_info->fltr_act = ICE_FWD_TO_VSI;
3653 /* Create an entry to track this MAC address */
3654 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3655 ice_malloc(hw, sizeof(*fm_entry));
3657 status = ICE_ERR_NO_MEMORY;
3658 goto ice_add_mac_exit;
3660 fm_entry->fltr_info = *f_info;
3661 fm_entry->vsi_count = 1;
3662 /* The book keeping entries will get removed when
3663 * base driver calls remove filter AQ command
3666 LIST_ADD(&fm_entry->list_entry, rule_head);
3667 r_iter = (struct ice_aqc_sw_rules_elem *)
3668 ((u8 *)r_iter + s_rule_size);
3673 ice_release_lock(rule_lock);
3675 ice_free(hw, s_rule);
3680 * ice_add_mac - Add a MAC address based filter rule
3681 * @hw: pointer to the hardware structure
3682 * @m_list: list of MAC addresses and forwarding information
3684 * Function add MAC rule for logical port from HW struct
3686 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3689 return ICE_ERR_PARAM;
3691 return ice_add_mac_rule(hw, m_list, hw->switch_info,
3692 hw->port_info->lport);
3696 * ice_add_vlan_internal - Add one VLAN based filter rule
3697 * @hw: pointer to the hardware structure
3698 * @recp_list: recipe list for which rule has to be added
3699 * @f_entry: filter entry containing one VLAN information
3701 static enum ice_status
3702 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3703 struct ice_fltr_list_entry *f_entry)
3705 struct ice_fltr_mgmt_list_entry *v_list_itr;
3706 struct ice_fltr_info *new_fltr, *cur_fltr;
3707 enum ice_sw_lkup_type lkup_type;
3708 u16 vsi_list_id = 0, vsi_handle;
3709 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3710 enum ice_status status = ICE_SUCCESS;
3712 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3713 return ICE_ERR_PARAM;
3715 f_entry->fltr_info.fwd_id.hw_vsi_id =
3716 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3717 new_fltr = &f_entry->fltr_info;
3719 /* VLAN ID should only be 12 bits */
3720 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3721 return ICE_ERR_PARAM;
3723 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3724 return ICE_ERR_PARAM;
3726 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3727 lkup_type = new_fltr->lkup_type;
3728 vsi_handle = new_fltr->vsi_handle;
3729 rule_lock = &recp_list->filt_rule_lock;
3730 ice_acquire_lock(rule_lock);
3731 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
3733 struct ice_vsi_list_map_info *map_info = NULL;
3735 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3736 /* All VLAN pruning rules use a VSI list. Check if
3737 * there is already a VSI list containing VSI that we
3738 * want to add. If found, use the same vsi_list_id for
3739 * this new VLAN rule or else create a new list.
3741 map_info = ice_find_vsi_list_entry(recp_list,
3745 status = ice_create_vsi_list_rule(hw,
3753 /* Convert the action to forwarding to a VSI list. */
3754 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3755 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3758 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3760 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
3763 status = ICE_ERR_DOES_NOT_EXIST;
3766 /* reuse VSI list for new rule and increment ref_cnt */
3768 v_list_itr->vsi_list_info = map_info;
3769 map_info->ref_cnt++;
3771 v_list_itr->vsi_list_info =
3772 ice_create_vsi_list_map(hw, &vsi_handle,
3776 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3777 /* Update existing VSI list to add new VSI ID only if it used
3780 cur_fltr = &v_list_itr->fltr_info;
3781 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3784 /* If VLAN rule exists and VSI list being used by this rule is
3785 * referenced by more than 1 VLAN rule. Then create a new VSI
3786 * list appending previous VSI with new VSI and update existing
3787 * VLAN rule to point to new VSI list ID
3789 struct ice_fltr_info tmp_fltr;
3790 u16 vsi_handle_arr[2];
3793 /* Current implementation only supports reusing VSI list with
3794 * one VSI count. We should never hit below condition
3796 if (v_list_itr->vsi_count > 1 &&
3797 v_list_itr->vsi_list_info->ref_cnt > 1) {
3798 ice_debug(hw, ICE_DBG_SW,
3799 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3800 status = ICE_ERR_CFG;
3805 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3808 /* A rule already exists with the new VSI being added */
3809 if (cur_handle == vsi_handle) {
3810 status = ICE_ERR_ALREADY_EXISTS;
3814 vsi_handle_arr[0] = cur_handle;
3815 vsi_handle_arr[1] = vsi_handle;
3816 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3817 &vsi_list_id, lkup_type);
3821 tmp_fltr = v_list_itr->fltr_info;
3822 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3823 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3824 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3825 /* Update the previous switch rule to a new VSI list which
3826 * includes current VSI that is requested
3828 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3832 /* before overriding VSI list map info. decrement ref_cnt of
3835 v_list_itr->vsi_list_info->ref_cnt--;
3837 /* now update to newly created list */
3838 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3839 v_list_itr->vsi_list_info =
3840 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3842 v_list_itr->vsi_count++;
3846 ice_release_lock(rule_lock);
3851 * ice_add_vlan_rule - Add VLAN based filter rule
3852 * @hw: pointer to the hardware structure
3853 * @v_list: list of VLAN entries and forwarding information
3854 * @sw: pointer to switch info struct for which function add rule
3856 static enum ice_status
3857 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
3858 struct ice_switch_info *sw)
3860 struct ice_fltr_list_entry *v_list_itr;
3861 struct ice_sw_recipe *recp_list;
3863 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
3864 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
3866 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3867 return ICE_ERR_PARAM;
3868 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3869 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
3871 if (v_list_itr->status)
3872 return v_list_itr->status;
3878 * ice_add_vlan - Add a VLAN based filter rule
3879 * @hw: pointer to the hardware structure
3880 * @v_list: list of VLAN and forwarding information
3882 * Function add VLAN rule for logical port from HW struct
3884 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3887 return ICE_ERR_PARAM;
3889 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
3893 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3894 * @hw: pointer to the hardware structure
3895 * @mv_list: list of MAC and VLAN filters
3896 * @sw: pointer to switch info struct for which function add rule
3897 * @lport: logic port number on which function add rule
3899 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3900 * pruning bits enabled, then it is the responsibility of the caller to make
3901 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3902 * VLAN won't be received on that VSI otherwise.
3904 static enum ice_status
3905 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
3906 struct ice_switch_info *sw, u8 lport)
3908 struct ice_fltr_list_entry *mv_list_itr;
3909 struct ice_sw_recipe *recp_list;
3911 if (!mv_list || !hw)
3912 return ICE_ERR_PARAM;
3914 recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
3915 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3917 enum ice_sw_lkup_type l_type =
3918 mv_list_itr->fltr_info.lkup_type;
3920 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3921 return ICE_ERR_PARAM;
3922 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3923 mv_list_itr->status =
3924 ice_add_rule_internal(hw, recp_list, lport,
3926 if (mv_list_itr->status)
3927 return mv_list_itr->status;
3933 * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
3934 * @hw: pointer to the hardware structure
3935 * @mv_list: list of MAC VLAN addresses and forwarding information
3937 * Function add MAC VLAN rule for logical port from HW struct
3940 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3942 if (!mv_list || !hw)
3943 return ICE_ERR_PARAM;
3945 return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
3946 hw->port_info->lport);
3950 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
3951 * @hw: pointer to the hardware structure
3952 * @em_list: list of ether type MAC filter, MAC is optional
3953 * @sw: pointer to switch info struct for which function add rule
3954 * @lport: logic port number on which function add rule
3956 * This function requires the caller to populate the entries in
3957 * the filter list with the necessary fields (including flags to
3958 * indicate Tx or Rx rules).
3960 static enum ice_status
3961 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
3962 struct ice_switch_info *sw, u8 lport)
3964 struct ice_fltr_list_entry *em_list_itr;
3966 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3968 struct ice_sw_recipe *recp_list;
3969 enum ice_sw_lkup_type l_type;
3971 l_type = em_list_itr->fltr_info.lkup_type;
3972 recp_list = &sw->recp_list[l_type];
3974 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3975 l_type != ICE_SW_LKUP_ETHERTYPE)
3976 return ICE_ERR_PARAM;
3978 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
3981 if (em_list_itr->status)
3982 return em_list_itr->status;
3988 * ice_add_eth_mac - Add a ethertype based filter rule
3989 * @hw: pointer to the hardware structure
3990 * @em_list: list of ethertype and forwarding information
3992 * Function add ethertype rule for logical port from HW struct
3995 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3997 if (!em_list || !hw)
3998 return ICE_ERR_PARAM;
4000 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
4001 hw->port_info->lport);
4005 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
4006 * @hw: pointer to the hardware structure
4007 * @em_list: list of ethertype or ethertype MAC entries
4008 * @sw: pointer to switch info struct for which function add rule
4010 static enum ice_status
4011 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
4012 struct ice_switch_info *sw)
4014 struct ice_fltr_list_entry *em_list_itr, *tmp;
4016 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
4018 struct ice_sw_recipe *recp_list;
4019 enum ice_sw_lkup_type l_type;
4021 l_type = em_list_itr->fltr_info.lkup_type;
4023 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
4024 l_type != ICE_SW_LKUP_ETHERTYPE)
4025 return ICE_ERR_PARAM;
4027 recp_list = &sw->recp_list[l_type];
4028 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4030 if (em_list_itr->status)
4031 return em_list_itr->status;
4037 * ice_remove_eth_mac - remove a ethertype based filter rule
4038 * @hw: pointer to the hardware structure
4039 * @em_list: list of ethertype and forwarding information
4043 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
4045 if (!em_list || !hw)
4046 return ICE_ERR_PARAM;
4048 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
4052 * ice_rem_sw_rule_info
4053 * @hw: pointer to the hardware structure
4054 * @rule_head: pointer to the switch list structure that we want to delete
4057 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
4059 if (!LIST_EMPTY(rule_head)) {
4060 struct ice_fltr_mgmt_list_entry *entry;
4061 struct ice_fltr_mgmt_list_entry *tmp;
4063 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
4064 ice_fltr_mgmt_list_entry, list_entry) {
4065 LIST_DEL(&entry->list_entry);
4066 ice_free(hw, entry);
4072 * ice_rem_adv_rule_info
4073 * @hw: pointer to the hardware structure
4074 * @rule_head: pointer to the switch list structure that we want to delete
4077 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
4079 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
4080 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
4082 if (LIST_EMPTY(rule_head))
4085 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
4086 ice_adv_fltr_mgmt_list_entry, list_entry) {
4087 LIST_DEL(&lst_itr->list_entry);
4088 ice_free(hw, lst_itr->lkups);
4089 ice_free(hw, lst_itr);
4094 * ice_rem_all_sw_rules_info
4095 * @hw: pointer to the hardware structure
4097 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
4099 struct ice_switch_info *sw = hw->switch_info;
4102 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4103 struct LIST_HEAD_TYPE *rule_head;
4105 rule_head = &sw->recp_list[i].filt_rules;
4106 if (!sw->recp_list[i].adv_rule)
4107 ice_rem_sw_rule_info(hw, rule_head);
4109 ice_rem_adv_rule_info(hw, rule_head);
4110 if (sw->recp_list[i].adv_rule &&
4111 LIST_EMPTY(&sw->recp_list[i].filt_rules))
4112 sw->recp_list[i].adv_rule = false;
4117 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
4118 * @pi: pointer to the port_info structure
4119 * @vsi_handle: VSI handle to set as default
4120 * @set: true to add the above mentioned switch rule, false to remove it
4121 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
4123 * add filter rule to set/unset given VSI as default VSI for the switch
4124 * (represented by swid)
4127 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
4130 struct ice_aqc_sw_rules_elem *s_rule;
4131 struct ice_fltr_info f_info;
4132 struct ice_hw *hw = pi->hw;
4133 enum ice_adminq_opc opcode;
4134 enum ice_status status;
4138 if (!ice_is_vsi_valid(hw, vsi_handle))
4139 return ICE_ERR_PARAM;
4140 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4142 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
4143 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
4144 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
4146 return ICE_ERR_NO_MEMORY;
4148 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
4150 f_info.lkup_type = ICE_SW_LKUP_DFLT;
4151 f_info.flag = direction;
4152 f_info.fltr_act = ICE_FWD_TO_VSI;
4153 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
4155 if (f_info.flag & ICE_FLTR_RX) {
4156 f_info.src = pi->lport;
4157 f_info.src_id = ICE_SRC_ID_LPORT;
4159 f_info.fltr_rule_id =
4160 pi->dflt_rx_vsi_rule_id;
4161 } else if (f_info.flag & ICE_FLTR_TX) {
4162 f_info.src_id = ICE_SRC_ID_VSI;
4163 f_info.src = hw_vsi_id;
4165 f_info.fltr_rule_id =
4166 pi->dflt_tx_vsi_rule_id;
4170 opcode = ice_aqc_opc_add_sw_rules;
4172 opcode = ice_aqc_opc_remove_sw_rules;
4174 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
4176 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
4177 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
4180 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4182 if (f_info.flag & ICE_FLTR_TX) {
4183 pi->dflt_tx_vsi_num = hw_vsi_id;
4184 pi->dflt_tx_vsi_rule_id = index;
4185 } else if (f_info.flag & ICE_FLTR_RX) {
4186 pi->dflt_rx_vsi_num = hw_vsi_id;
4187 pi->dflt_rx_vsi_rule_id = index;
4190 if (f_info.flag & ICE_FLTR_TX) {
4191 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
4192 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
4193 } else if (f_info.flag & ICE_FLTR_RX) {
4194 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
4195 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
4200 ice_free(hw, s_rule);
4205 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
4206 * @list_head: head of rule list
4207 * @f_info: rule information
4209 * Helper function to search for a unicast rule entry - this is to be used
4210 * to remove unicast MAC filter that is not shared with other VSIs on the
4213 * Returns pointer to entry storing the rule if found
4215 static struct ice_fltr_mgmt_list_entry *
4216 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
4217 struct ice_fltr_info *f_info)
4219 struct ice_fltr_mgmt_list_entry *list_itr;
4221 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4223 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4224 sizeof(f_info->l_data)) &&
4225 f_info->fwd_id.hw_vsi_id ==
4226 list_itr->fltr_info.fwd_id.hw_vsi_id &&
4227 f_info->flag == list_itr->fltr_info.flag)
4234 * ice_remove_mac_rule - remove a MAC based filter rule
4235 * @hw: pointer to the hardware structure
4236 * @m_list: list of MAC addresses and forwarding information
4237 * @recp_list: list from which function remove MAC address
4239 * This function removes either a MAC filter rule or a specific VSI from a
4240 * VSI list for a multicast MAC address.
4242 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
4243 * ice_add_mac. Caller should be aware that this call will only work if all
4244 * the entries passed into m_list were added previously. It will not attempt to
4245 * do a partial remove of entries that were found.
4247 static enum ice_status
4248 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4249 struct ice_sw_recipe *recp_list)
4251 struct ice_fltr_list_entry *list_itr, *tmp;
4252 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4255 return ICE_ERR_PARAM;
4257 rule_lock = &recp_list->filt_rule_lock;
4258 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
4260 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
4261 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
4264 if (l_type != ICE_SW_LKUP_MAC)
4265 return ICE_ERR_PARAM;
4267 vsi_handle = list_itr->fltr_info.vsi_handle;
4268 if (!ice_is_vsi_valid(hw, vsi_handle))
4269 return ICE_ERR_PARAM;
4271 list_itr->fltr_info.fwd_id.hw_vsi_id =
4272 ice_get_hw_vsi_num(hw, vsi_handle);
4273 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4274 /* Don't remove the unicast address that belongs to
4275 * another VSI on the switch, since it is not being
4278 ice_acquire_lock(rule_lock);
4279 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
4280 &list_itr->fltr_info)) {
4281 ice_release_lock(rule_lock);
4282 return ICE_ERR_DOES_NOT_EXIST;
4284 ice_release_lock(rule_lock);
4286 list_itr->status = ice_remove_rule_internal(hw, recp_list,
4288 if (list_itr->status)
4289 return list_itr->status;
4295 * ice_remove_mac - remove a MAC address based filter rule
4296 * @hw: pointer to the hardware structure
4297 * @m_list: list of MAC addresses and forwarding information
4300 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4302 struct ice_sw_recipe *recp_list;
4304 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4305 return ice_remove_mac_rule(hw, m_list, recp_list);
4309 * ice_remove_vlan_rule - Remove VLAN based filter rule
4310 * @hw: pointer to the hardware structure
4311 * @v_list: list of VLAN entries and forwarding information
4312 * @recp_list: list from which function remove VLAN
4314 static enum ice_status
4315 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4316 struct ice_sw_recipe *recp_list)
4318 struct ice_fltr_list_entry *v_list_itr, *tmp;
4320 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4322 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4324 if (l_type != ICE_SW_LKUP_VLAN)
4325 return ICE_ERR_PARAM;
4326 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4328 if (v_list_itr->status)
4329 return v_list_itr->status;
4335 * ice_remove_vlan - remove a VLAN address based filter rule
4336 * @hw: pointer to the hardware structure
4337 * @v_list: list of VLAN and forwarding information
4341 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4343 struct ice_sw_recipe *recp_list;
4346 return ICE_ERR_PARAM;
4348 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
4349 return ice_remove_vlan_rule(hw, v_list, recp_list);
4353 * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
4354 * @hw: pointer to the hardware structure
4355 * @v_list: list of MAC VLAN entries and forwarding information
4356 * @recp_list: list from which function remove MAC VLAN
4358 static enum ice_status
4359 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4360 struct ice_sw_recipe *recp_list)
4362 struct ice_fltr_list_entry *v_list_itr, *tmp;
4364 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4365 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4367 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4369 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4370 return ICE_ERR_PARAM;
4371 v_list_itr->status =
4372 ice_remove_rule_internal(hw, recp_list,
4374 if (v_list_itr->status)
4375 return v_list_itr->status;
4381 * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
4382 * @hw: pointer to the hardware structure
4383 * @mv_list: list of MAC VLAN and forwarding information
4386 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4388 struct ice_sw_recipe *recp_list;
4390 if (!mv_list || !hw)
4391 return ICE_ERR_PARAM;
4393 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4394 return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
4398 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
4399 * @fm_entry: filter entry to inspect
4400 * @vsi_handle: VSI handle to compare with filter info
4403 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
4405 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
4406 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
4407 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
4408 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
4413 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
4414 * @hw: pointer to the hardware structure
4415 * @vsi_handle: VSI handle to remove filters from
4416 * @vsi_list_head: pointer to the list to add entry to
4417 * @fi: pointer to fltr_info of filter entry to copy & add
4419 * Helper function, used when creating a list of filters to remove from
4420 * a specific VSI. The entry added to vsi_list_head is a COPY of the
4421 * original filter entry, with the exception of fltr_info.fltr_act and
4422 * fltr_info.fwd_id fields. These are set such that later logic can
4423 * extract which VSI to remove the fltr from, and pass on that information.
4425 static enum ice_status
4426 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4427 struct LIST_HEAD_TYPE *vsi_list_head,
4428 struct ice_fltr_info *fi)
4430 struct ice_fltr_list_entry *tmp;
4432 /* this memory is freed up in the caller function
4433 * once filters for this VSI are removed
4435 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
4437 return ICE_ERR_NO_MEMORY;
4439 tmp->fltr_info = *fi;
4441 /* Overwrite these fields to indicate which VSI to remove filter from,
4442 * so find and remove logic can extract the information from the
4443 * list entries. Note that original entries will still have proper
4446 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
4447 tmp->fltr_info.vsi_handle = vsi_handle;
4448 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4450 LIST_ADD(&tmp->list_entry, vsi_list_head);
4456 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4457 * @hw: pointer to the hardware structure
4458 * @vsi_handle: VSI handle to remove filters from
4459 * @lkup_list_head: pointer to the list that has certain lookup type filters
4460 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4462 * Locates all filters in lkup_list_head that are used by the given VSI,
4463 * and adds COPIES of those entries to vsi_list_head (intended to be used
4464 * to remove the listed filters).
4465 * Note that this means all entries in vsi_list_head must be explicitly
4466 * deallocated by the caller when done with list.
4468 static enum ice_status
4469 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4470 struct LIST_HEAD_TYPE *lkup_list_head,
4471 struct LIST_HEAD_TYPE *vsi_list_head)
4473 struct ice_fltr_mgmt_list_entry *fm_entry;
4474 enum ice_status status = ICE_SUCCESS;
4476 /* check to make sure VSI ID is valid and within boundary */
4477 if (!ice_is_vsi_valid(hw, vsi_handle))
4478 return ICE_ERR_PARAM;
4480 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
4481 ice_fltr_mgmt_list_entry, list_entry) {
4482 struct ice_fltr_info *fi;
4484 fi = &fm_entry->fltr_info;
4485 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
4488 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4497 * ice_determine_promisc_mask
4498 * @fi: filter info to parse
4500 * Helper function to determine which ICE_PROMISC_ mask corresponds
4501 * to given filter into.
4503 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4505 u16 vid = fi->l_data.mac_vlan.vlan_id;
4506 u8 *macaddr = fi->l_data.mac.mac_addr;
4507 bool is_tx_fltr = false;
4508 u8 promisc_mask = 0;
4510 if (fi->flag == ICE_FLTR_TX)
4513 if (IS_BROADCAST_ETHER_ADDR(macaddr))
4514 promisc_mask |= is_tx_fltr ?
4515 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4516 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
4517 promisc_mask |= is_tx_fltr ?
4518 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4519 else if (IS_UNICAST_ETHER_ADDR(macaddr))
4520 promisc_mask |= is_tx_fltr ?
4521 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
4523 promisc_mask |= is_tx_fltr ?
4524 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4526 return promisc_mask;
4530 * _ice_get_vsi_promisc - get promiscuous mode of given VSI
4531 * @hw: pointer to the hardware structure
4532 * @vsi_handle: VSI handle to retrieve info from
4533 * @promisc_mask: pointer to mask to be filled in
4534 * @vid: VLAN ID of promisc VLAN VSI
4535 * @sw: pointer to switch info struct for which function add rule
4537 static enum ice_status
4538 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4539 u16 *vid, struct ice_switch_info *sw)
4541 struct ice_fltr_mgmt_list_entry *itr;
4542 struct LIST_HEAD_TYPE *rule_head;
4543 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4545 if (!ice_is_vsi_valid(hw, vsi_handle))
4546 return ICE_ERR_PARAM;
4550 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
4551 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
4553 ice_acquire_lock(rule_lock);
4554 LIST_FOR_EACH_ENTRY(itr, rule_head,
4555 ice_fltr_mgmt_list_entry, list_entry) {
4556 /* Continue if this filter doesn't apply to this VSI or the
4557 * VSI ID is not in the VSI map for this filter
4559 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4562 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4564 ice_release_lock(rule_lock);
4570 * ice_get_vsi_promisc - get promiscuous mode of given VSI
4571 * @hw: pointer to the hardware structure
4572 * @vsi_handle: VSI handle to retrieve info from
4573 * @promisc_mask: pointer to mask to be filled in
4574 * @vid: VLAN ID of promisc VLAN VSI
4577 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4580 return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
4581 vid, hw->switch_info);
4585 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4586 * @hw: pointer to the hardware structure
4587 * @vsi_handle: VSI handle to retrieve info from
4588 * @promisc_mask: pointer to mask to be filled in
4589 * @vid: VLAN ID of promisc VLAN VSI
4590 * @sw: pointer to switch info struct for which function add rule
4592 static enum ice_status
4593 _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4594 u16 *vid, struct ice_switch_info *sw)
4596 struct ice_fltr_mgmt_list_entry *itr;
4597 struct LIST_HEAD_TYPE *rule_head;
4598 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4600 if (!ice_is_vsi_valid(hw, vsi_handle))
4601 return ICE_ERR_PARAM;
4605 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
4606 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
4608 ice_acquire_lock(rule_lock);
4609 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
4611 /* Continue if this filter doesn't apply to this VSI or the
4612 * VSI ID is not in the VSI map for this filter
4614 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4617 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4619 ice_release_lock(rule_lock);
4625 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4626 * @hw: pointer to the hardware structure
4627 * @vsi_handle: VSI handle to retrieve info from
4628 * @promisc_mask: pointer to mask to be filled in
4629 * @vid: VLAN ID of promisc VLAN VSI
4632 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4635 return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
4636 vid, hw->switch_info);
4640 * ice_remove_promisc - Remove promisc based filter rules
4641 * @hw: pointer to the hardware structure
4642 * @recp_id: recipe ID for which the rule needs to removed
4643 * @v_list: list of promisc entries
4645 static enum ice_status
4646 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
4647 struct LIST_HEAD_TYPE *v_list)
4649 struct ice_fltr_list_entry *v_list_itr, *tmp;
4650 struct ice_sw_recipe *recp_list;
4652 recp_list = &hw->switch_info->recp_list[recp_id];
4653 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4655 v_list_itr->status =
4656 ice_remove_rule_internal(hw, recp_list, v_list_itr);
4657 if (v_list_itr->status)
4658 return v_list_itr->status;
4664 * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
4665 * @hw: pointer to the hardware structure
4666 * @vsi_handle: VSI handle to clear mode
4667 * @promisc_mask: mask of promiscuous config bits to clear
4668 * @vid: VLAN ID to clear VLAN promiscuous
4669 * @sw: pointer to switch info struct for which function add rule
4671 static enum ice_status
4672 _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4673 u16 vid, struct ice_switch_info *sw)
4675 struct ice_fltr_list_entry *fm_entry, *tmp;
4676 struct LIST_HEAD_TYPE remove_list_head;
4677 struct ice_fltr_mgmt_list_entry *itr;
4678 struct LIST_HEAD_TYPE *rule_head;
4679 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4680 enum ice_status status = ICE_SUCCESS;
4683 if (!ice_is_vsi_valid(hw, vsi_handle))
4684 return ICE_ERR_PARAM;
4686 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
4687 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4689 recipe_id = ICE_SW_LKUP_PROMISC;
4691 rule_head = &sw->recp_list[recipe_id].filt_rules;
4692 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4694 INIT_LIST_HEAD(&remove_list_head);
4696 ice_acquire_lock(rule_lock);
4697 LIST_FOR_EACH_ENTRY(itr, rule_head,
4698 ice_fltr_mgmt_list_entry, list_entry) {
4699 struct ice_fltr_info *fltr_info;
4700 u8 fltr_promisc_mask = 0;
4702 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4704 fltr_info = &itr->fltr_info;
4706 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4707 vid != fltr_info->l_data.mac_vlan.vlan_id)
4710 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4712 /* Skip if filter is not completely specified by given mask */
4713 if (fltr_promisc_mask & ~promisc_mask)
4716 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4720 ice_release_lock(rule_lock);
4721 goto free_fltr_list;
4724 ice_release_lock(rule_lock);
4726 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
4729 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4730 ice_fltr_list_entry, list_entry) {
4731 LIST_DEL(&fm_entry->list_entry);
4732 ice_free(hw, fm_entry);
4739 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
4740 * @hw: pointer to the hardware structure
4741 * @vsi_handle: VSI handle to clear mode
4742 * @promisc_mask: mask of promiscuous config bits to clear
4743 * @vid: VLAN ID to clear VLAN promiscuous
4746 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
4747 u8 promisc_mask, u16 vid)
4749 return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
4750 vid, hw->switch_info);
4754 * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4755 * @hw: pointer to the hardware structure
4756 * @vsi_handle: VSI handle to configure
4757 * @promisc_mask: mask of promiscuous config bits
4758 * @vid: VLAN ID to set VLAN promiscuous
4759 * @lport: logical port number to configure promisc mode
4760 * @sw: pointer to switch info struct for which function add rule
4762 static enum ice_status
4763 _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4764 u16 vid, u8 lport, struct ice_switch_info *sw)
4766 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4767 struct ice_fltr_list_entry f_list_entry;
4768 struct ice_fltr_info new_fltr;
4769 enum ice_status status = ICE_SUCCESS;
4775 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4777 if (!ice_is_vsi_valid(hw, vsi_handle))
4778 return ICE_ERR_PARAM;
4779 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4781 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
4783 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4784 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4785 new_fltr.l_data.mac_vlan.vlan_id = vid;
4786 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4788 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4789 recipe_id = ICE_SW_LKUP_PROMISC;
4792 /* Separate filters must be set for each direction/packet type
4793 * combination, so we will loop over the mask value, store the
4794 * individual type, and clear it out in the input mask as it
4797 while (promisc_mask) {
4798 struct ice_sw_recipe *recp_list;
4804 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4805 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4806 pkt_type = UCAST_FLTR;
4807 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4808 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4809 pkt_type = UCAST_FLTR;
4811 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4812 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4813 pkt_type = MCAST_FLTR;
4814 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4815 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4816 pkt_type = MCAST_FLTR;
4818 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4819 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4820 pkt_type = BCAST_FLTR;
4821 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4822 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4823 pkt_type = BCAST_FLTR;
4827 /* Check for VLAN promiscuous flag */
4828 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4829 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4830 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4831 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4835 /* Set filter DA based on packet type */
4836 mac_addr = new_fltr.l_data.mac.mac_addr;
4837 if (pkt_type == BCAST_FLTR) {
4838 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
4839 } else if (pkt_type == MCAST_FLTR ||
4840 pkt_type == UCAST_FLTR) {
4841 /* Use the dummy ether header DA */
4842 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
4843 ICE_NONDMA_TO_NONDMA);
4844 if (pkt_type == MCAST_FLTR)
4845 mac_addr[0] |= 0x1; /* Set multicast bit */
4848 /* Need to reset this to zero for all iterations */
4851 new_fltr.flag |= ICE_FLTR_TX;
4852 new_fltr.src = hw_vsi_id;
4854 new_fltr.flag |= ICE_FLTR_RX;
4855 new_fltr.src = lport;
4858 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4859 new_fltr.vsi_handle = vsi_handle;
4860 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4861 f_list_entry.fltr_info = new_fltr;
4862 recp_list = &sw->recp_list[recipe_id];
4864 status = ice_add_rule_internal(hw, recp_list, lport,
4866 if (status != ICE_SUCCESS)
4867 goto set_promisc_exit;
4875 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4876 * @hw: pointer to the hardware structure
4877 * @vsi_handle: VSI handle to configure
4878 * @promisc_mask: mask of promiscuous config bits
4879 * @vid: VLAN ID to set VLAN promiscuous
4882 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4885 return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
4886 hw->port_info->lport,
4891 * _ice_set_vlan_vsi_promisc
4892 * @hw: pointer to the hardware structure
4893 * @vsi_handle: VSI handle to configure
4894 * @promisc_mask: mask of promiscuous config bits
4895 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4896 * @lport: logical port number to configure promisc mode
4897 * @sw: pointer to switch info struct for which function add rule
4899 * Configure VSI with all associated VLANs to given promiscuous mode(s)
4901 static enum ice_status
4902 _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4903 bool rm_vlan_promisc, u8 lport,
4904 struct ice_switch_info *sw)
4906 struct ice_fltr_list_entry *list_itr, *tmp;
4907 struct LIST_HEAD_TYPE vsi_list_head;
4908 struct LIST_HEAD_TYPE *vlan_head;
4909 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
4910 enum ice_status status;
4913 INIT_LIST_HEAD(&vsi_list_head);
4914 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4915 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4916 ice_acquire_lock(vlan_lock);
4917 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4919 ice_release_lock(vlan_lock);
4921 goto free_fltr_list;
4923 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
4925 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4926 if (rm_vlan_promisc)
4927 status = _ice_clear_vsi_promisc(hw, vsi_handle,
4931 status = _ice_set_vsi_promisc(hw, vsi_handle,
4932 promisc_mask, vlan_id,
4939 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
4940 ice_fltr_list_entry, list_entry) {
4941 LIST_DEL(&list_itr->list_entry);
4942 ice_free(hw, list_itr);
4948 * ice_set_vlan_vsi_promisc
4949 * @hw: pointer to the hardware structure
4950 * @vsi_handle: VSI handle to configure
4951 * @promisc_mask: mask of promiscuous config bits
4952 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4954 * Configure VSI with all associated VLANs to given promiscuous mode(s)
4957 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4958 bool rm_vlan_promisc)
4960 return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
4961 rm_vlan_promisc, hw->port_info->lport,
4966 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4967 * @hw: pointer to the hardware structure
4968 * @vsi_handle: VSI handle to remove filters from
4969 * @recp_list: recipe list from which function remove fltr
4970 * @lkup: switch rule filter lookup type
4973 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4974 struct ice_sw_recipe *recp_list,
4975 enum ice_sw_lkup_type lkup)
4977 struct ice_fltr_list_entry *fm_entry;
4978 struct LIST_HEAD_TYPE remove_list_head;
4979 struct LIST_HEAD_TYPE *rule_head;
4980 struct ice_fltr_list_entry *tmp;
4981 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4982 enum ice_status status;
4984 INIT_LIST_HEAD(&remove_list_head);
4985 rule_lock = &recp_list[lkup].filt_rule_lock;
4986 rule_head = &recp_list[lkup].filt_rules;
4987 ice_acquire_lock(rule_lock);
4988 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4990 ice_release_lock(rule_lock);
4995 case ICE_SW_LKUP_MAC:
4996 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
4998 case ICE_SW_LKUP_VLAN:
4999 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
5001 case ICE_SW_LKUP_PROMISC:
5002 case ICE_SW_LKUP_PROMISC_VLAN:
5003 ice_remove_promisc(hw, lkup, &remove_list_head);
5005 case ICE_SW_LKUP_MAC_VLAN:
5006 ice_remove_mac_vlan(hw, &remove_list_head);
5008 case ICE_SW_LKUP_ETHERTYPE:
5009 case ICE_SW_LKUP_ETHERTYPE_MAC:
5010 ice_remove_eth_mac(hw, &remove_list_head);
5012 case ICE_SW_LKUP_DFLT:
5013 ice_debug(hw, ICE_DBG_SW,
5014 "Remove filters for this lookup type hasn't been implemented yet\n");
5016 case ICE_SW_LKUP_LAST:
5017 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
5021 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
5022 ice_fltr_list_entry, list_entry) {
5023 LIST_DEL(&fm_entry->list_entry);
5024 ice_free(hw, fm_entry);
5029 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
5030 * @hw: pointer to the hardware structure
5031 * @vsi_handle: VSI handle to remove filters from
5032 * @sw: pointer to switch info struct
5035 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
5036 struct ice_switch_info *sw)
5038 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
5040 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5041 sw->recp_list, ICE_SW_LKUP_MAC);
5042 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5043 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
5044 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5045 sw->recp_list, ICE_SW_LKUP_PROMISC);
5046 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5047 sw->recp_list, ICE_SW_LKUP_VLAN);
5048 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5049 sw->recp_list, ICE_SW_LKUP_DFLT);
5050 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5051 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
5052 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5053 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
5054 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5055 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
5059 * ice_remove_vsi_fltr - Remove all filters for a VSI
5060 * @hw: pointer to the hardware structure
5061 * @vsi_handle: VSI handle to remove filters from
5063 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
5065 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
5069 * ice_alloc_res_cntr - allocating resource counter
5070 * @hw: pointer to the hardware structure
5071 * @type: type of resource
5072 * @alloc_shared: if set it is shared else dedicated
5073 * @num_items: number of entries requested for FD resource type
5074 * @counter_id: counter index returned by AQ call
5077 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
5080 struct ice_aqc_alloc_free_res_elem *buf;
5081 enum ice_status status;
5084 /* Allocate resource */
5085 buf_len = sizeof(*buf);
5086 buf = (struct ice_aqc_alloc_free_res_elem *)
5087 ice_malloc(hw, buf_len);
5089 return ICE_ERR_NO_MEMORY;
5091 buf->num_elems = CPU_TO_LE16(num_items);
5092 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
5093 ICE_AQC_RES_TYPE_M) | alloc_shared);
5095 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
5096 ice_aqc_opc_alloc_res, NULL);
5100 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
5108 * ice_free_res_cntr - free resource counter
5109 * @hw: pointer to the hardware structure
5110 * @type: type of resource
5111 * @alloc_shared: if set it is shared else dedicated
5112 * @num_items: number of entries to be freed for FD resource type
5113 * @counter_id: counter ID resource which needs to be freed
5116 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
5119 struct ice_aqc_alloc_free_res_elem *buf;
5120 enum ice_status status;
5124 buf_len = sizeof(*buf);
5125 buf = (struct ice_aqc_alloc_free_res_elem *)
5126 ice_malloc(hw, buf_len);
5128 return ICE_ERR_NO_MEMORY;
5130 buf->num_elems = CPU_TO_LE16(num_items);
5131 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
5132 ICE_AQC_RES_TYPE_M) | alloc_shared);
5133 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
5135 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
5136 ice_aqc_opc_free_res, NULL);
5138 ice_debug(hw, ICE_DBG_SW,
5139 "counter resource could not be freed\n");
5146 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
5147 * @hw: pointer to the hardware structure
5148 * @counter_id: returns counter index
5150 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
5152 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
5153 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
5158 * ice_free_vlan_res_counter - Free counter resource for VLAN type
5159 * @hw: pointer to the hardware structure
5160 * @counter_id: counter index to be freed
5162 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
5164 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
5165 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
5170 * ice_alloc_res_lg_act - add large action resource
5171 * @hw: pointer to the hardware structure
5172 * @l_id: large action ID to fill it in
5173 * @num_acts: number of actions to hold with a large action entry
5175 static enum ice_status
5176 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
5178 struct ice_aqc_alloc_free_res_elem *sw_buf;
5179 enum ice_status status;
5182 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
5183 return ICE_ERR_PARAM;
5185 /* Allocate resource for large action */
5186 buf_len = sizeof(*sw_buf);
5187 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
5188 ice_malloc(hw, buf_len);
5190 return ICE_ERR_NO_MEMORY;
5192 sw_buf->num_elems = CPU_TO_LE16(1);
5194 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
5195 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
5196 * If num_acts is greater than 2, then use
5197 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
5198 * The num_acts cannot exceed 4. This was ensured at the
5199 * beginning of the function.
5202 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
5203 else if (num_acts == 2)
5204 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
5206 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
5208 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
5209 ice_aqc_opc_alloc_res, NULL);
5211 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
5213 ice_free(hw, sw_buf);
5218 * ice_add_mac_with_sw_marker - add filter with sw marker
5219 * @hw: pointer to the hardware structure
5220 * @f_info: filter info structure containing the MAC filter information
5221 * @sw_marker: sw marker to tag the Rx descriptor with
5224 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
5227 struct ice_fltr_mgmt_list_entry *m_entry;
5228 struct ice_fltr_list_entry fl_info;
5229 struct ice_sw_recipe *recp_list;
5230 struct LIST_HEAD_TYPE l_head;
5231 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5232 enum ice_status ret;
5236 if (f_info->fltr_act != ICE_FWD_TO_VSI)
5237 return ICE_ERR_PARAM;
5239 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5240 return ICE_ERR_PARAM;
5242 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
5243 return ICE_ERR_PARAM;
5245 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5246 return ICE_ERR_PARAM;
5247 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5249 /* Add filter if it doesn't exist so then the adding of large
5250 * action always results in update
5253 INIT_LIST_HEAD(&l_head);
5254 fl_info.fltr_info = *f_info;
5255 LIST_ADD(&fl_info.list_entry, &l_head);
5257 entry_exists = false;
5258 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5259 hw->port_info->lport);
5260 if (ret == ICE_ERR_ALREADY_EXISTS)
5261 entry_exists = true;
5265 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5266 rule_lock = &recp_list->filt_rule_lock;
5267 ice_acquire_lock(rule_lock);
5268 /* Get the book keeping entry for the filter */
5269 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5273 /* If counter action was enabled for this rule then don't enable
5274 * sw marker large action
5276 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5277 ret = ICE_ERR_PARAM;
5281 /* if same marker was added before */
5282 if (m_entry->sw_marker_id == sw_marker) {
5283 ret = ICE_ERR_ALREADY_EXISTS;
5287 /* Allocate a hardware table entry to hold large act. Three actions
5288 * for marker based large action
5290 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
5294 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5297 /* Update the switch rule to add the marker action */
5298 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
5300 ice_release_lock(rule_lock);
5305 ice_release_lock(rule_lock);
5306 /* only remove entry if it did not exist previously */
5308 ret = ice_remove_mac(hw, &l_head);
5314 * ice_add_mac_with_counter - add filter with counter enabled
5315 * @hw: pointer to the hardware structure
5316 * @f_info: pointer to filter info structure containing the MAC filter
5320 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
5322 struct ice_fltr_mgmt_list_entry *m_entry;
5323 struct ice_fltr_list_entry fl_info;
5324 struct ice_sw_recipe *recp_list;
5325 struct LIST_HEAD_TYPE l_head;
5326 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5327 enum ice_status ret;
5332 if (f_info->fltr_act != ICE_FWD_TO_VSI)
5333 return ICE_ERR_PARAM;
5335 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5336 return ICE_ERR_PARAM;
5338 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5339 return ICE_ERR_PARAM;
5340 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5341 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5343 entry_exist = false;
5345 rule_lock = &recp_list->filt_rule_lock;
5347 /* Add filter if it doesn't exist so then the adding of large
5348 * action always results in update
5350 INIT_LIST_HEAD(&l_head);
5352 fl_info.fltr_info = *f_info;
5353 LIST_ADD(&fl_info.list_entry, &l_head);
5355 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5356 hw->port_info->lport);
5357 if (ret == ICE_ERR_ALREADY_EXISTS)
5362 ice_acquire_lock(rule_lock);
5363 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5365 ret = ICE_ERR_BAD_PTR;
5369 /* Don't enable counter for a filter for which sw marker was enabled */
5370 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
5371 ret = ICE_ERR_PARAM;
5375 /* If a counter was already enabled then don't need to add again */
5376 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5377 ret = ICE_ERR_ALREADY_EXISTS;
5381 /* Allocate a hardware table entry to VLAN counter */
5382 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
5386 /* Allocate a hardware table entry to hold large act. Two actions for
5387 * counter based large action
5389 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
5393 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5396 /* Update the switch rule to add the counter action */
5397 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
5399 ice_release_lock(rule_lock);
5404 ice_release_lock(rule_lock);
5405 /* only remove entry if it did not exist previously */
5407 ret = ice_remove_mac(hw, &l_head);
5412 /* This is mapping table entry that maps every word within a given protocol
5413 * structure to the real byte offset as per the specification of that
5415 * for example dst address is 3 words in ethertype header and corresponding
5416 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
5417 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
5418 * matching entry describing its field. This needs to be updated if new
5419 * structure is added to that union.
5421 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
5422 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
5423 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
5424 { ICE_ETYPE_OL, { 0 } },
5425 { ICE_VLAN_OFOS, { 0, 2 } },
5426 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
5427 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
5428 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
5429 26, 28, 30, 32, 34, 36, 38 } },
5430 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
5431 26, 28, 30, 32, 34, 36, 38 } },
5432 { ICE_TCP_IL, { 0, 2 } },
5433 { ICE_UDP_OF, { 0, 2 } },
5434 { ICE_UDP_ILOS, { 0, 2 } },
5435 { ICE_SCTP_IL, { 0, 2 } },
5436 { ICE_VXLAN, { 8, 10, 12, 14 } },
5437 { ICE_GENEVE, { 8, 10, 12, 14 } },
5438 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
5439 { ICE_NVGRE, { 0, 2, 4, 6 } },
5440 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
5441 { ICE_PPPOE, { 0, 2, 4, 6 } },
5442 { ICE_PFCP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
5443 { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
5444 { ICE_ESP, { 0, 2, 4, 6 } },
5445 { ICE_AH, { 0, 2, 4, 6, 8, 10 } },
5446 { ICE_NAT_T, { 8, 10, 12, 14 } },
5449 /* The following table describes preferred grouping of recipes.
5450 * If a recipe that needs to be programmed is a superset or matches one of the
5451 * following combinations, then the recipe needs to be chained as per the
5455 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
5456 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
5457 { ICE_MAC_IL, ICE_MAC_IL_HW },
5458 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
5459 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
5460 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
5461 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
5462 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
5463 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
5464 { ICE_TCP_IL, ICE_TCP_IL_HW },
5465 { ICE_UDP_OF, ICE_UDP_OF_HW },
5466 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
5467 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
5468 { ICE_VXLAN, ICE_UDP_OF_HW },
5469 { ICE_GENEVE, ICE_UDP_OF_HW },
5470 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
5471 { ICE_NVGRE, ICE_GRE_OF_HW },
5472 { ICE_GTP, ICE_UDP_OF_HW },
5473 { ICE_PPPOE, ICE_PPPOE_HW },
5474 { ICE_PFCP, ICE_UDP_ILOS_HW },
5475 { ICE_L2TPV3, ICE_L2TPV3_HW },
5476 { ICE_ESP, ICE_ESP_HW },
5477 { ICE_AH, ICE_AH_HW },
5478 { ICE_NAT_T, ICE_UDP_ILOS_HW },
5482 * ice_find_recp - find a recipe
5483 * @hw: pointer to the hardware structure
5484 * @lkup_exts: extension sequence to match
5486 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
5488 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
5489 enum ice_sw_tunnel_type tun_type)
5491 bool refresh_required = true;
5492 struct ice_sw_recipe *recp;
5495 /* Walk through existing recipes to find a match */
5496 recp = hw->switch_info->recp_list;
5497 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5498 /* If recipe was not created for this ID, in SW bookkeeping,
5499 * check if FW has an entry for this recipe. If the FW has an
5500 * entry update it in our SW bookkeeping and continue with the
5503 if (!recp[i].recp_created)
5504 if (ice_get_recp_frm_fw(hw,
5505 hw->switch_info->recp_list, i,
5509 /* Skip inverse action recipes */
5510 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
5511 ICE_AQ_RECIPE_ACT_INV_ACT)
5514 /* if number of words we are looking for match */
5515 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
5516 struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
5517 struct ice_fv_word *be = lkup_exts->fv_words;
5518 u16 *cr = recp[i].lkup_exts.field_mask;
5519 u16 *de = lkup_exts->field_mask;
5523 /* ar, cr, and qr are related to the recipe words, while
5524 * be, de, and pe are related to the lookup words
5526 for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
5527 for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
5529 if (ar[qr].off == be[pe].off &&
5530 ar[qr].prot_id == be[pe].prot_id &&
5532 /* Found the "pe"th word in the
5537 /* After walking through all the words in the
5538 * "i"th recipe if "p"th word was not found then
5539 * this recipe is not what we are looking for.
5540 * So break out from this loop and try the next
5543 if (qr >= recp[i].lkup_exts.n_val_words) {
5548 /* If for "i"th recipe the found was never set to false
5549 * then it means we found our match
5551 if ((tun_type == recp[i].tun_type ||
5552 tun_type == ICE_SW_TUN_AND_NON_TUN) && found)
5553 return i; /* Return the recipe ID */
5556 return ICE_MAX_NUM_RECIPES;
5560 * ice_prot_type_to_id - get protocol ID from protocol type
5561 * @type: protocol type
5562 * @id: pointer to variable that will receive the ID
5564 * Returns true if found, false otherwise
5566 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
5570 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
5571 if (ice_prot_id_tbl[i].type == type) {
5572 *id = ice_prot_id_tbl[i].protocol_id;
5579 * ice_find_valid_words - count valid words
5580 * @rule: advanced rule with lookup information
5581 * @lkup_exts: byte offset extractions of the words that are valid
5583 * calculate valid words in a lookup rule using mask value
5586 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
5587 struct ice_prot_lkup_ext *lkup_exts)
5589 u8 j, word, prot_id, ret_val;
5591 if (!ice_prot_type_to_id(rule->type, &prot_id))
5594 word = lkup_exts->n_val_words;
5596 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
5597 if (((u16 *)&rule->m_u)[j] &&
5598 rule->type < ARRAY_SIZE(ice_prot_ext)) {
5599 /* No more space to accommodate */
5600 if (word >= ICE_MAX_CHAIN_WORDS)
5602 lkup_exts->fv_words[word].off =
5603 ice_prot_ext[rule->type].offs[j];
5604 lkup_exts->fv_words[word].prot_id =
5605 ice_prot_id_tbl[rule->type].protocol_id;
5606 lkup_exts->field_mask[word] =
5607 BE16_TO_CPU(((__be16 *)&rule->m_u)[j]);
5611 ret_val = word - lkup_exts->n_val_words;
5612 lkup_exts->n_val_words = word;
5618 * ice_create_first_fit_recp_def - Create a recipe grouping
5619 * @hw: pointer to the hardware structure
5620 * @lkup_exts: an array of protocol header extractions
5621 * @rg_list: pointer to a list that stores new recipe groups
5622 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
5624 * Using first fit algorithm, take all the words that are still not done
5625 * and start grouping them in 4-word groups. Each group makes up one
5628 static enum ice_status
5629 ice_create_first_fit_recp_def(struct ice_hw *hw,
5630 struct ice_prot_lkup_ext *lkup_exts,
5631 struct LIST_HEAD_TYPE *rg_list,
5634 struct ice_pref_recipe_group *grp = NULL;
5639 if (!lkup_exts->n_val_words) {
5640 struct ice_recp_grp_entry *entry;
5642 entry = (struct ice_recp_grp_entry *)
5643 ice_malloc(hw, sizeof(*entry));
5645 return ICE_ERR_NO_MEMORY;
5646 LIST_ADD(&entry->l_entry, rg_list);
5647 grp = &entry->r_group;
5649 grp->n_val_pairs = 0;
5652 /* Walk through every word in the rule to check if it is not done. If so
5653 * then this word needs to be part of a new recipe.
5655 for (j = 0; j < lkup_exts->n_val_words; j++)
5656 if (!ice_is_bit_set(lkup_exts->done, j)) {
5658 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
5659 struct ice_recp_grp_entry *entry;
5661 entry = (struct ice_recp_grp_entry *)
5662 ice_malloc(hw, sizeof(*entry));
5664 return ICE_ERR_NO_MEMORY;
5665 LIST_ADD(&entry->l_entry, rg_list);
5666 grp = &entry->r_group;
5670 grp->pairs[grp->n_val_pairs].prot_id =
5671 lkup_exts->fv_words[j].prot_id;
5672 grp->pairs[grp->n_val_pairs].off =
5673 lkup_exts->fv_words[j].off;
5674 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
5682 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
5683 * @hw: pointer to the hardware structure
5684 * @fv_list: field vector with the extraction sequence information
5685 * @rg_list: recipe groupings with protocol-offset pairs
5687 * Helper function to fill in the field vector indices for protocol-offset
5688 * pairs. These indexes are then ultimately programmed into a recipe.
5690 static enum ice_status
5691 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
5692 struct LIST_HEAD_TYPE *rg_list)
5694 struct ice_sw_fv_list_entry *fv;
5695 struct ice_recp_grp_entry *rg;
5696 struct ice_fv_word *fv_ext;
5698 if (LIST_EMPTY(fv_list))
5701 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
5702 fv_ext = fv->fv_ptr->ew;
5704 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
5707 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
5708 struct ice_fv_word *pr;
5713 pr = &rg->r_group.pairs[i];
5714 mask = rg->r_group.mask[i];
5716 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
5717 if (fv_ext[j].prot_id == pr->prot_id &&
5718 fv_ext[j].off == pr->off) {
5721 /* Store index of field vector */
5723 rg->fv_mask[i] = mask;
5727 /* Protocol/offset could not be found, caller gave an
5731 return ICE_ERR_PARAM;
5739 * ice_find_free_recp_res_idx - find free result indexes for recipe
5740 * @hw: pointer to hardware structure
5741 * @profiles: bitmap of profiles that will be associated with the new recipe
5742 * @free_idx: pointer to variable to receive the free index bitmap
5744 * The algorithm used here is:
5745 * 1. When creating a new recipe, create a set P which contains all
5746 * Profiles that will be associated with our new recipe
5748 * 2. For each Profile p in set P:
5749 * a. Add all recipes associated with Profile p into set R
5750 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
5751 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
5752 * i. Or just assume they all have the same possible indexes:
5754 * i.e., PossibleIndexes = 0x0000F00000000000
5756 * 3. For each Recipe r in set R:
5757 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
5758 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
5760 * FreeIndexes will contain the bits indicating the indexes free for use,
5761 * then the code needs to update the recipe[r].used_result_idx_bits to
5762 * indicate which indexes were selected for use by this recipe.
5765 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
5766 ice_bitmap_t *free_idx)
5768 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5769 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5770 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
5774 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5775 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5776 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
5777 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
5779 for (count = 0; count < ICE_MAX_FV_WORDS; count++)
5780 ice_set_bit(count, possible_idx);
5782 /* For each profile we are going to associate the recipe with, add the
5783 * recipes that are associated with that profile. This will give us
5784 * the set of recipes that our recipe may collide with. Also, determine
5785 * what possible result indexes are usable given this set of profiles.
5788 while (ICE_MAX_NUM_PROFILES >
5789 (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
5790 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
5791 ICE_MAX_NUM_RECIPES);
5792 ice_and_bitmap(possible_idx, possible_idx,
5793 hw->switch_info->prof_res_bm[bit],
5798 /* For each recipe that our new recipe may collide with, determine
5799 * which indexes have been used.
5801 for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
5802 if (ice_is_bit_set(recipes, bit)) {
5803 ice_or_bitmap(used_idx, used_idx,
5804 hw->switch_info->recp_list[bit].res_idxs,
5808 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
5810 /* return number of free indexes */
5813 while (ICE_MAX_FV_WORDS >
5814 (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
5823 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
5824 * @hw: pointer to hardware structure
5825 * @rm: recipe management list entry
5826 * @match_tun_mask: tunnel mask that needs to be programmed
5827 * @profiles: bitmap of profiles that will be associated.
5829 static enum ice_status
5830 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
5831 u16 match_tun_mask, ice_bitmap_t *profiles)
5833 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5834 struct ice_aqc_recipe_data_elem *tmp;
5835 struct ice_aqc_recipe_data_elem *buf;
5836 struct ice_recp_grp_entry *entry;
5837 enum ice_status status;
5843 /* When more than one recipe are required, another recipe is needed to
5844 * chain them together. Matching a tunnel metadata ID takes up one of
5845 * the match fields in the chaining recipe reducing the number of
5846 * chained recipes by one.
5848 /* check number of free result indices */
5849 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5850 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
5852 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
5853 free_res_idx, rm->n_grp_count);
5855 if (rm->n_grp_count > 1) {
5856 if (rm->n_grp_count > free_res_idx)
5857 return ICE_ERR_MAX_LIMIT;
5862 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
5863 return ICE_ERR_MAX_LIMIT;
5865 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
5866 ICE_MAX_NUM_RECIPES,
5869 return ICE_ERR_NO_MEMORY;
5871 buf = (struct ice_aqc_recipe_data_elem *)
5872 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
5874 status = ICE_ERR_NO_MEMORY;
5878 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
5879 recipe_count = ICE_MAX_NUM_RECIPES;
5880 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5882 if (status || recipe_count == 0)
5885 /* Allocate the recipe resources, and configure them according to the
5886 * match fields from protocol headers and extracted field vectors.
5888 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
5889 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5892 status = ice_alloc_recipe(hw, &entry->rid);
5896 /* Clear the result index of the located recipe, as this will be
5897 * updated, if needed, later in the recipe creation process.
5899 tmp[0].content.result_indx = 0;
5901 buf[recps] = tmp[0];
5902 buf[recps].recipe_indx = (u8)entry->rid;
5903 /* if the recipe is a non-root recipe RID should be programmed
5904 * as 0 for the rules to be applied correctly.
5906 buf[recps].content.rid = 0;
5907 ice_memset(&buf[recps].content.lkup_indx, 0,
5908 sizeof(buf[recps].content.lkup_indx),
5911 /* All recipes use look-up index 0 to match switch ID. */
5912 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5913 buf[recps].content.mask[0] =
5914 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5915 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5918 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5919 buf[recps].content.lkup_indx[i] = 0x80;
5920 buf[recps].content.mask[i] = 0;
5923 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5924 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5925 buf[recps].content.mask[i + 1] =
5926 CPU_TO_LE16(entry->fv_mask[i]);
5929 if (rm->n_grp_count > 1) {
5930 /* Checks to see if there really is a valid result index
5933 if (chain_idx >= ICE_MAX_FV_WORDS) {
5934 ice_debug(hw, ICE_DBG_SW,
5935 "No chain index available\n");
5936 status = ICE_ERR_MAX_LIMIT;
5940 entry->chain_idx = chain_idx;
5941 buf[recps].content.result_indx =
5942 ICE_AQ_RECIPE_RESULT_EN |
5943 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5944 ICE_AQ_RECIPE_RESULT_DATA_M);
5945 ice_clear_bit(chain_idx, result_idx_bm);
5946 chain_idx = ice_find_first_bit(result_idx_bm,
5950 /* fill recipe dependencies */
5951 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
5952 ICE_MAX_NUM_RECIPES);
5953 ice_set_bit(buf[recps].recipe_indx,
5954 (ice_bitmap_t *)buf[recps].recipe_bitmap);
5955 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5959 if (rm->n_grp_count == 1) {
5960 rm->root_rid = buf[0].recipe_indx;
5961 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
5962 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5963 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5964 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5965 sizeof(buf[0].recipe_bitmap),
5966 ICE_NONDMA_TO_NONDMA);
5968 status = ICE_ERR_BAD_PTR;
5971 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
5972 * the recipe which is getting created if specified
5973 * by user. Usually any advanced switch filter, which results
5974 * into new extraction sequence, ended up creating a new recipe
5975 * of type ROOT and usually recipes are associated with profiles
5976 * Switch rule referreing newly created recipe, needs to have
5977 * either/or 'fwd' or 'join' priority, otherwise switch rule
5978 * evaluation will not happen correctly. In other words, if
5979 * switch rule to be evaluated on priority basis, then recipe
5980 * needs to have priority, otherwise it will be evaluated last.
5982 buf[0].content.act_ctrl_fwd_priority = rm->priority;
5984 struct ice_recp_grp_entry *last_chain_entry;
5987 /* Allocate the last recipe that will chain the outcomes of the
5988 * other recipes together
5990 status = ice_alloc_recipe(hw, &rid);
5994 buf[recps].recipe_indx = (u8)rid;
5995 buf[recps].content.rid = (u8)rid;
5996 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5997 /* the new entry created should also be part of rg_list to
5998 * make sure we have complete recipe
6000 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
6001 sizeof(*last_chain_entry));
6002 if (!last_chain_entry) {
6003 status = ICE_ERR_NO_MEMORY;
6006 last_chain_entry->rid = rid;
6007 ice_memset(&buf[recps].content.lkup_indx, 0,
6008 sizeof(buf[recps].content.lkup_indx),
6010 /* All recipes use look-up index 0 to match switch ID. */
6011 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
6012 buf[recps].content.mask[0] =
6013 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
6014 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
6015 buf[recps].content.lkup_indx[i] =
6016 ICE_AQ_RECIPE_LKUP_IGNORE;
6017 buf[recps].content.mask[i] = 0;
6021 /* update r_bitmap with the recp that is used for chaining */
6022 ice_set_bit(rid, rm->r_bitmap);
6023 /* this is the recipe that chains all the other recipes so it
6024 * should not have a chaining ID to indicate the same
6026 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
6027 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
6029 last_chain_entry->fv_idx[i] = entry->chain_idx;
6030 buf[recps].content.lkup_indx[i] = entry->chain_idx;
6031 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
6032 ice_set_bit(entry->rid, rm->r_bitmap);
6034 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
6035 if (sizeof(buf[recps].recipe_bitmap) >=
6036 sizeof(rm->r_bitmap)) {
6037 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
6038 sizeof(buf[recps].recipe_bitmap),
6039 ICE_NONDMA_TO_NONDMA);
6041 status = ICE_ERR_BAD_PTR;
6044 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
6046 /* To differentiate among different UDP tunnels, a meta data ID
6049 if (match_tun_mask) {
6050 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
6051 buf[recps].content.mask[i] =
6052 CPU_TO_LE16(match_tun_mask);
6056 rm->root_rid = (u8)rid;
6058 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
6062 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
6063 ice_release_change_lock(hw);
6067 /* Every recipe that just got created add it to the recipe
6070 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
6071 struct ice_switch_info *sw = hw->switch_info;
6072 bool is_root, idx_found = false;
6073 struct ice_sw_recipe *recp;
6074 u16 idx, buf_idx = 0;
6076 /* find buffer index for copying some data */
6077 for (idx = 0; idx < rm->n_grp_count; idx++)
6078 if (buf[idx].recipe_indx == entry->rid) {
6084 status = ICE_ERR_OUT_OF_RANGE;
6088 recp = &sw->recp_list[entry->rid];
6089 is_root = (rm->root_rid == entry->rid);
6090 recp->is_root = is_root;
6092 recp->root_rid = entry->rid;
6093 recp->big_recp = (is_root && rm->n_grp_count > 1);
6095 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
6096 entry->r_group.n_val_pairs *
6097 sizeof(struct ice_fv_word),
6098 ICE_NONDMA_TO_NONDMA);
6100 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
6101 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
6103 /* Copy non-result fv index values and masks to recipe. This
6104 * call will also update the result recipe bitmask.
6106 ice_collect_result_idx(&buf[buf_idx], recp);
6108 /* for non-root recipes, also copy to the root, this allows
6109 * easier matching of a complete chained recipe
6112 ice_collect_result_idx(&buf[buf_idx],
6113 &sw->recp_list[rm->root_rid]);
6115 recp->n_ext_words = entry->r_group.n_val_pairs;
6116 recp->chain_idx = entry->chain_idx;
6117 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
6118 recp->n_grp_count = rm->n_grp_count;
6119 recp->tun_type = rm->tun_type;
6120 recp->recp_created = true;
6134 * ice_create_recipe_group - creates recipe group
6135 * @hw: pointer to hardware structure
6136 * @rm: recipe management list entry
6137 * @lkup_exts: lookup elements
6139 static enum ice_status
6140 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
6141 struct ice_prot_lkup_ext *lkup_exts)
6143 enum ice_status status;
6146 rm->n_grp_count = 0;
6148 /* Create recipes for words that are marked not done by packing them
6151 status = ice_create_first_fit_recp_def(hw, lkup_exts,
6152 &rm->rg_list, &recp_count);
6154 rm->n_grp_count += recp_count;
6155 rm->n_ext_words = lkup_exts->n_val_words;
6156 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
6157 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
6158 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
6159 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
6166 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
6167 * @hw: pointer to hardware structure
6168 * @lkups: lookup elements or match criteria for the advanced recipe, one
6169 * structure per protocol header
6170 * @lkups_cnt: number of protocols
6171 * @bm: bitmap of field vectors to consider
6172 * @fv_list: pointer to a list that holds the returned field vectors
6174 static enum ice_status
6175 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6176 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
6178 enum ice_status status;
6185 prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
6187 return ICE_ERR_NO_MEMORY;
6189 for (i = 0; i < lkups_cnt; i++)
6190 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
6191 status = ICE_ERR_CFG;
6195 /* Find field vectors that include all specified protocol types */
6196 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
6199 ice_free(hw, prot_ids);
6204 * ice_tun_type_match_mask - determine if tun type needs a match mask
6205 * @tun_type: tunnel type
6206 * @mask: mask to be used for the tunnel
6208 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
6211 case ICE_SW_TUN_VXLAN_GPE:
6212 case ICE_SW_TUN_GENEVE:
6213 case ICE_SW_TUN_VXLAN:
6214 case ICE_SW_TUN_NVGRE:
6215 case ICE_SW_TUN_UDP:
6216 case ICE_ALL_TUNNELS:
6217 *mask = ICE_TUN_FLAG_MASK;
6220 case ICE_SW_TUN_GENEVE_VLAN:
6221 case ICE_SW_TUN_VXLAN_VLAN:
6222 *mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
6232 * ice_add_special_words - Add words that are not protocols, such as metadata
6233 * @rinfo: other information regarding the rule e.g. priority and action info
6234 * @lkup_exts: lookup word structure
6236 static enum ice_status
6237 ice_add_special_words(struct ice_adv_rule_info *rinfo,
6238 struct ice_prot_lkup_ext *lkup_exts)
6242 /* If this is a tunneled packet, then add recipe index to match the
6243 * tunnel bit in the packet metadata flags.
6245 if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
6246 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
6247 u8 word = lkup_exts->n_val_words++;
6249 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
6250 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
6251 lkup_exts->field_mask[word] = mask;
6253 return ICE_ERR_MAX_LIMIT;
6260 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
6261 * @hw: pointer to hardware structure
6262 * @rinfo: other information regarding the rule e.g. priority and action info
6263 * @bm: pointer to memory for returning the bitmap of field vectors
6266 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
6269 enum ice_prof_type prof_type;
6271 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
6273 switch (rinfo->tun_type) {
6275 prof_type = ICE_PROF_NON_TUN;
6277 case ICE_ALL_TUNNELS:
6278 prof_type = ICE_PROF_TUN_ALL;
6280 case ICE_SW_TUN_VXLAN_GPE:
6281 case ICE_SW_TUN_GENEVE:
6282 case ICE_SW_TUN_GENEVE_VLAN:
6283 case ICE_SW_TUN_VXLAN:
6284 case ICE_SW_TUN_VXLAN_VLAN:
6285 case ICE_SW_TUN_UDP:
6286 case ICE_SW_TUN_GTP:
6287 prof_type = ICE_PROF_TUN_UDP;
6289 case ICE_SW_TUN_NVGRE:
6290 prof_type = ICE_PROF_TUN_GRE;
6292 case ICE_SW_TUN_PPPOE:
6293 prof_type = ICE_PROF_TUN_PPPOE;
6295 case ICE_SW_TUN_PPPOE_PAY:
6296 ice_set_bit(ICE_PROFID_PPPOE_PAY, bm);
6298 case ICE_SW_TUN_PPPOE_IPV4:
6299 ice_set_bit(ICE_PROFID_PPPOE_IPV4_OTHER, bm);
6300 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
6301 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
6303 case ICE_SW_TUN_PPPOE_IPV4_TCP:
6304 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
6306 case ICE_SW_TUN_PPPOE_IPV4_UDP:
6307 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
6309 case ICE_SW_TUN_PPPOE_IPV6:
6310 ice_set_bit(ICE_PROFID_PPPOE_IPV6_OTHER, bm);
6311 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
6312 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
6314 case ICE_SW_TUN_PPPOE_IPV6_TCP:
6315 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
6317 case ICE_SW_TUN_PPPOE_IPV6_UDP:
6318 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
6320 case ICE_SW_TUN_PROFID_IPV6_ESP:
6321 case ICE_SW_TUN_IPV6_ESP:
6322 ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
6324 case ICE_SW_TUN_PROFID_IPV6_AH:
6325 case ICE_SW_TUN_IPV6_AH:
6326 ice_set_bit(ICE_PROFID_IPV6_AH, bm);
6328 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6329 case ICE_SW_TUN_IPV6_L2TPV3:
6330 ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
6332 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6333 case ICE_SW_TUN_IPV6_NAT_T:
6334 ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
6336 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6337 ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
6339 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6340 ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
6342 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6343 ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
6345 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6346 ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
6348 case ICE_SW_TUN_IPV4_NAT_T:
6349 ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
6351 case ICE_SW_TUN_IPV4_L2TPV3:
6352 ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
6354 case ICE_SW_TUN_IPV4_ESP:
6355 ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
6357 case ICE_SW_TUN_IPV4_AH:
6358 ice_set_bit(ICE_PROFID_IPV4_AH, bm);
6360 case ICE_SW_IPV4_TCP:
6361 ice_set_bit(ICE_PROFID_IPV4_TCP, bm);
6363 case ICE_SW_IPV4_UDP:
6364 ice_set_bit(ICE_PROFID_IPV4_UDP, bm);
6366 case ICE_SW_IPV6_TCP:
6367 ice_set_bit(ICE_PROFID_IPV6_TCP, bm);
6369 case ICE_SW_IPV6_UDP:
6370 ice_set_bit(ICE_PROFID_IPV6_UDP, bm);
6372 case ICE_SW_TUN_AND_NON_TUN:
6374 prof_type = ICE_PROF_ALL;
6378 ice_get_sw_fv_bitmap(hw, prof_type, bm);
6382 * ice_is_prof_rule - determine if rule type is a profile rule
6383 * @type: the rule type
6385 * if the rule type is a profile rule, that means that there no field value
6386 * match required, in this case just a profile hit is required.
6388 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
6391 case ICE_SW_TUN_PROFID_IPV6_ESP:
6392 case ICE_SW_TUN_PROFID_IPV6_AH:
6393 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6394 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6395 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6396 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6397 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6398 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6408 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
6409 * @hw: pointer to hardware structure
6410 * @lkups: lookup elements or match criteria for the advanced recipe, one
6411 * structure per protocol header
6412 * @lkups_cnt: number of protocols
6413 * @rinfo: other information regarding the rule e.g. priority and action info
6414 * @rid: return the recipe ID of the recipe created
6416 static enum ice_status
6417 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6418 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
6420 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
6421 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
6422 struct ice_prot_lkup_ext *lkup_exts;
6423 struct ice_recp_grp_entry *r_entry;
6424 struct ice_sw_fv_list_entry *fvit;
6425 struct ice_recp_grp_entry *r_tmp;
6426 struct ice_sw_fv_list_entry *tmp;
6427 enum ice_status status = ICE_SUCCESS;
6428 struct ice_sw_recipe *rm;
6429 u16 match_tun_mask = 0;
6433 if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
6434 return ICE_ERR_PARAM;
6436 lkup_exts = (struct ice_prot_lkup_ext *)
6437 ice_malloc(hw, sizeof(*lkup_exts));
6439 return ICE_ERR_NO_MEMORY;
6441 /* Determine the number of words to be matched and if it exceeds a
6442 * recipe's restrictions
6444 for (i = 0; i < lkups_cnt; i++) {
6447 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
6448 status = ICE_ERR_CFG;
6449 goto err_free_lkup_exts;
6452 count = ice_fill_valid_words(&lkups[i], lkup_exts);
6454 status = ICE_ERR_CFG;
6455 goto err_free_lkup_exts;
6459 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
6461 status = ICE_ERR_NO_MEMORY;
6462 goto err_free_lkup_exts;
6465 /* Get field vectors that contain fields extracted from all the protocol
6466 * headers being programmed.
6468 INIT_LIST_HEAD(&rm->fv_list);
6469 INIT_LIST_HEAD(&rm->rg_list);
6471 /* Get bitmap of field vectors (profiles) that are compatible with the
6472 * rule request; only these will be searched in the subsequent call to
6475 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
6477 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
6481 /* Group match words into recipes using preferred recipe grouping
6484 status = ice_create_recipe_group(hw, rm, lkup_exts);
6488 /* For certain tunnel types it is necessary to use a metadata ID flag to
6489 * differentiate different tunnel types. A separate recipe needs to be
6490 * used for the metadata.
6492 if (ice_tun_type_match_word(rinfo->tun_type, &mask) &&
6493 rm->n_grp_count > 1)
6494 match_tun_mask = mask;
6496 /* set the recipe priority if specified */
6497 rm->priority = (u8)rinfo->priority;
6499 /* Find offsets from the field vector. Pick the first one for all the
6502 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
6506 /* An empty FV list means to use all the profiles returned in the
6509 if (LIST_EMPTY(&rm->fv_list)) {
6512 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++)
6513 if (ice_is_bit_set(fv_bitmap, j)) {
6514 struct ice_sw_fv_list_entry *fvl;
6516 fvl = (struct ice_sw_fv_list_entry *)
6517 ice_malloc(hw, sizeof(*fvl));
6521 fvl->profile_id = j;
6522 LIST_ADD(&fvl->list_entry, &rm->fv_list);
6526 /* get bitmap of all profiles the recipe will be associated with */
6527 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
6528 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
6530 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
6531 ice_set_bit((u16)fvit->profile_id, profiles);
6534 /* Create any special protocol/offset pairs, such as looking at tunnel
6535 * bits by extracting metadata
6537 status = ice_add_special_words(rinfo, lkup_exts);
6539 goto err_free_lkup_exts;
6541 /* Look for a recipe which matches our requested fv / mask list */
6542 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
6543 if (*rid < ICE_MAX_NUM_RECIPES)
6544 /* Success if found a recipe that match the existing criteria */
6547 rm->tun_type = rinfo->tun_type;
6548 /* Recipe we need does not exist, add a recipe */
6549 status = ice_add_sw_recipe(hw, rm, match_tun_mask, profiles);
6553 /* Associate all the recipes created with all the profiles in the
6554 * common field vector.
6556 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
6558 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
6561 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
6562 (u8 *)r_bitmap, NULL);
6566 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
6567 ICE_MAX_NUM_RECIPES);
6568 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
6572 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
6575 ice_release_change_lock(hw);
6580 /* Update profile to recipe bitmap array */
6581 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
6582 ICE_MAX_NUM_RECIPES);
6584 /* Update recipe to profile bitmap array */
6585 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
6586 if (ice_is_bit_set(r_bitmap, j))
6587 ice_set_bit((u16)fvit->profile_id,
6588 recipe_to_profile[j]);
6591 *rid = rm->root_rid;
6592 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
6593 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
6595 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
6596 ice_recp_grp_entry, l_entry) {
6597 LIST_DEL(&r_entry->l_entry);
6598 ice_free(hw, r_entry);
6601 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
6603 LIST_DEL(&fvit->list_entry);
6608 ice_free(hw, rm->root_buf);
6613 ice_free(hw, lkup_exts);
6619 * ice_find_dummy_packet - find dummy packet by tunnel type
6621 * @lkups: lookup elements or match criteria for the advanced recipe, one
6622 * structure per protocol header
6623 * @lkups_cnt: number of protocols
6624 * @tun_type: tunnel type from the match criteria
6625 * @pkt: dummy packet to fill according to filter match criteria
6626 * @pkt_len: packet length of dummy packet
6627 * @offsets: pointer to receive the pointer to the offsets for the packet
6630 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6631 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
6633 const struct ice_dummy_pkt_offsets **offsets)
6635 bool tcp = false, udp = false, ipv6 = false, vlan = false;
6639 for (i = 0; i < lkups_cnt; i++) {
6640 if (lkups[i].type == ICE_UDP_ILOS)
6642 else if (lkups[i].type == ICE_TCP_IL)
6644 else if (lkups[i].type == ICE_IPV6_OFOS)
6646 else if (lkups[i].type == ICE_VLAN_OFOS)
6648 else if (lkups[i].type == ICE_IPV4_OFOS &&
6649 lkups[i].h_u.ipv4_hdr.protocol ==
6650 ICE_IPV4_NVGRE_PROTO_ID &&
6651 lkups[i].m_u.ipv4_hdr.protocol ==
6654 else if (lkups[i].type == ICE_PPPOE &&
6655 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
6656 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
6657 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
6660 else if (lkups[i].type == ICE_ETYPE_OL &&
6661 lkups[i].h_u.ethertype.ethtype_id ==
6662 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
6663 lkups[i].m_u.ethertype.ethtype_id ==
6668 if (tun_type == ICE_SW_TUN_IPV4_ESP) {
6669 *pkt = dummy_ipv4_esp_pkt;
6670 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
6671 *offsets = dummy_ipv4_esp_packet_offsets;
6675 if (tun_type == ICE_SW_TUN_IPV6_ESP) {
6676 *pkt = dummy_ipv6_esp_pkt;
6677 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
6678 *offsets = dummy_ipv6_esp_packet_offsets;
6682 if (tun_type == ICE_SW_TUN_IPV4_AH) {
6683 *pkt = dummy_ipv4_ah_pkt;
6684 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
6685 *offsets = dummy_ipv4_ah_packet_offsets;
6689 if (tun_type == ICE_SW_TUN_IPV6_AH) {
6690 *pkt = dummy_ipv6_ah_pkt;
6691 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
6692 *offsets = dummy_ipv6_ah_packet_offsets;
6696 if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
6697 *pkt = dummy_ipv4_nat_pkt;
6698 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
6699 *offsets = dummy_ipv4_nat_packet_offsets;
6703 if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
6704 *pkt = dummy_ipv6_nat_pkt;
6705 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
6706 *offsets = dummy_ipv6_nat_packet_offsets;
6710 if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
6711 *pkt = dummy_ipv4_l2tpv3_pkt;
6712 *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
6713 *offsets = dummy_ipv4_l2tpv3_packet_offsets;
6717 if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
6718 *pkt = dummy_ipv6_l2tpv3_pkt;
6719 *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
6720 *offsets = dummy_ipv6_l2tpv3_packet_offsets;
6724 if (tun_type == ICE_SW_TUN_GTP) {
6725 *pkt = dummy_udp_gtp_packet;
6726 *pkt_len = sizeof(dummy_udp_gtp_packet);
6727 *offsets = dummy_udp_gtp_packet_offsets;
6731 if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
6732 *pkt = dummy_pppoe_ipv6_packet;
6733 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
6734 *offsets = dummy_pppoe_packet_offsets;
6736 } else if (tun_type == ICE_SW_TUN_PPPOE ||
6737 tun_type == ICE_SW_TUN_PPPOE_PAY) {
6738 *pkt = dummy_pppoe_ipv4_packet;
6739 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
6740 *offsets = dummy_pppoe_packet_offsets;
6744 if (tun_type == ICE_SW_TUN_PPPOE_IPV4) {
6745 *pkt = dummy_pppoe_ipv4_packet;
6746 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
6747 *offsets = dummy_pppoe_packet_ipv4_offsets;
6751 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_TCP) {
6752 *pkt = dummy_pppoe_ipv4_tcp_packet;
6753 *pkt_len = sizeof(dummy_pppoe_ipv4_tcp_packet);
6754 *offsets = dummy_pppoe_ipv4_tcp_packet_offsets;
6758 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_UDP) {
6759 *pkt = dummy_pppoe_ipv4_udp_packet;
6760 *pkt_len = sizeof(dummy_pppoe_ipv4_udp_packet);
6761 *offsets = dummy_pppoe_ipv4_udp_packet_offsets;
6765 if (tun_type == ICE_SW_TUN_PPPOE_IPV6) {
6766 *pkt = dummy_pppoe_ipv6_packet;
6767 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
6768 *offsets = dummy_pppoe_packet_ipv6_offsets;
6772 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_TCP) {
6773 *pkt = dummy_pppoe_ipv6_tcp_packet;
6774 *pkt_len = sizeof(dummy_pppoe_ipv6_tcp_packet);
6775 *offsets = dummy_pppoe_packet_ipv6_tcp_offsets;
6779 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_UDP) {
6780 *pkt = dummy_pppoe_ipv6_udp_packet;
6781 *pkt_len = sizeof(dummy_pppoe_ipv6_udp_packet);
6782 *offsets = dummy_pppoe_packet_ipv6_udp_offsets;
6786 if (tun_type == ICE_SW_IPV4_TCP) {
6787 *pkt = dummy_tcp_packet;
6788 *pkt_len = sizeof(dummy_tcp_packet);
6789 *offsets = dummy_tcp_packet_offsets;
6793 if (tun_type == ICE_SW_IPV4_UDP) {
6794 *pkt = dummy_udp_packet;
6795 *pkt_len = sizeof(dummy_udp_packet);
6796 *offsets = dummy_udp_packet_offsets;
6800 if (tun_type == ICE_SW_IPV6_TCP) {
6801 *pkt = dummy_tcp_ipv6_packet;
6802 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
6803 *offsets = dummy_tcp_ipv6_packet_offsets;
6807 if (tun_type == ICE_SW_IPV6_UDP) {
6808 *pkt = dummy_udp_ipv6_packet;
6809 *pkt_len = sizeof(dummy_udp_ipv6_packet);
6810 *offsets = dummy_udp_ipv6_packet_offsets;
6814 if (tun_type == ICE_ALL_TUNNELS) {
6815 *pkt = dummy_gre_udp_packet;
6816 *pkt_len = sizeof(dummy_gre_udp_packet);
6817 *offsets = dummy_gre_udp_packet_offsets;
6821 if (tun_type == ICE_SW_TUN_NVGRE || gre) {
6823 *pkt = dummy_gre_tcp_packet;
6824 *pkt_len = sizeof(dummy_gre_tcp_packet);
6825 *offsets = dummy_gre_tcp_packet_offsets;
6829 *pkt = dummy_gre_udp_packet;
6830 *pkt_len = sizeof(dummy_gre_udp_packet);
6831 *offsets = dummy_gre_udp_packet_offsets;
6835 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
6836 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
6837 tun_type == ICE_SW_TUN_GENEVE_VLAN ||
6838 tun_type == ICE_SW_TUN_VXLAN_VLAN) {
6840 *pkt = dummy_udp_tun_tcp_packet;
6841 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
6842 *offsets = dummy_udp_tun_tcp_packet_offsets;
6846 *pkt = dummy_udp_tun_udp_packet;
6847 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
6848 *offsets = dummy_udp_tun_udp_packet_offsets;
6854 *pkt = dummy_vlan_udp_packet;
6855 *pkt_len = sizeof(dummy_vlan_udp_packet);
6856 *offsets = dummy_vlan_udp_packet_offsets;
6859 *pkt = dummy_udp_packet;
6860 *pkt_len = sizeof(dummy_udp_packet);
6861 *offsets = dummy_udp_packet_offsets;
6863 } else if (udp && ipv6) {
6865 *pkt = dummy_vlan_udp_ipv6_packet;
6866 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
6867 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
6870 *pkt = dummy_udp_ipv6_packet;
6871 *pkt_len = sizeof(dummy_udp_ipv6_packet);
6872 *offsets = dummy_udp_ipv6_packet_offsets;
6874 } else if ((tcp && ipv6) || ipv6) {
6876 *pkt = dummy_vlan_tcp_ipv6_packet;
6877 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
6878 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
6881 *pkt = dummy_tcp_ipv6_packet;
6882 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
6883 *offsets = dummy_tcp_ipv6_packet_offsets;
6888 *pkt = dummy_vlan_tcp_packet;
6889 *pkt_len = sizeof(dummy_vlan_tcp_packet);
6890 *offsets = dummy_vlan_tcp_packet_offsets;
6892 *pkt = dummy_tcp_packet;
6893 *pkt_len = sizeof(dummy_tcp_packet);
6894 *offsets = dummy_tcp_packet_offsets;
6899 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
6901 * @lkups: lookup elements or match criteria for the advanced recipe, one
6902 * structure per protocol header
6903 * @lkups_cnt: number of protocols
6904 * @s_rule: stores rule information from the match criteria
6905 * @dummy_pkt: dummy packet to fill according to filter match criteria
6906 * @pkt_len: packet length of dummy packet
6907 * @offsets: offset info for the dummy packet
6909 static enum ice_status
6910 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6911 struct ice_aqc_sw_rules_elem *s_rule,
6912 const u8 *dummy_pkt, u16 pkt_len,
6913 const struct ice_dummy_pkt_offsets *offsets)
6918 /* Start with a packet with a pre-defined/dummy content. Then, fill
6919 * in the header values to be looked up or matched.
6921 pkt = s_rule->pdata.lkup_tx_rx.hdr;
6923 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
6925 for (i = 0; i < lkups_cnt; i++) {
6926 enum ice_protocol_type type;
6927 u16 offset = 0, len = 0, j;
6930 /* find the start of this layer; it should be found since this
6931 * was already checked when search for the dummy packet
6933 type = lkups[i].type;
6934 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
6935 if (type == offsets[j].type) {
6936 offset = offsets[j].offset;
6941 /* this should never happen in a correct calling sequence */
6943 return ICE_ERR_PARAM;
6945 switch (lkups[i].type) {
6948 len = sizeof(struct ice_ether_hdr);
6951 len = sizeof(struct ice_ethtype_hdr);
6954 len = sizeof(struct ice_vlan_hdr);
6958 len = sizeof(struct ice_ipv4_hdr);
6962 len = sizeof(struct ice_ipv6_hdr);
6967 len = sizeof(struct ice_l4_hdr);
6970 len = sizeof(struct ice_sctp_hdr);
6973 len = sizeof(struct ice_nvgre);
6978 len = sizeof(struct ice_udp_tnl_hdr);
6982 len = sizeof(struct ice_udp_gtp_hdr);
6985 len = sizeof(struct ice_pppoe_hdr);
6988 len = sizeof(struct ice_esp_hdr);
6991 len = sizeof(struct ice_nat_t_hdr);
6994 len = sizeof(struct ice_ah_hdr);
6997 len = sizeof(struct ice_l2tpv3_sess_hdr);
7000 return ICE_ERR_PARAM;
7003 /* the length should be a word multiple */
7004 if (len % ICE_BYTES_PER_WORD)
7007 /* We have the offset to the header start, the length, the
7008 * caller's header values and mask. Use this information to
7009 * copy the data into the dummy packet appropriately based on
7010 * the mask. Note that we need to only write the bits as
7011 * indicated by the mask to make sure we don't improperly write
7012 * over any significant packet data.
7014 for (j = 0; j < len / sizeof(u16); j++)
7015 if (((u16 *)&lkups[i].m_u)[j])
7016 ((u16 *)(pkt + offset))[j] =
7017 (((u16 *)(pkt + offset))[j] &
7018 ~((u16 *)&lkups[i].m_u)[j]) |
7019 (((u16 *)&lkups[i].h_u)[j] &
7020 ((u16 *)&lkups[i].m_u)[j]);
7023 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
7029 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
7030 * @hw: pointer to the hardware structure
7031 * @tun_type: tunnel type
7032 * @pkt: dummy packet to fill in
7033 * @offsets: offset info for the dummy packet
7035 static enum ice_status
7036 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
7037 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
7042 case ICE_SW_TUN_AND_NON_TUN:
7043 case ICE_SW_TUN_VXLAN_GPE:
7044 case ICE_SW_TUN_VXLAN:
7045 case ICE_SW_TUN_VXLAN_VLAN:
7046 case ICE_SW_TUN_UDP:
7047 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
7051 case ICE_SW_TUN_GENEVE:
7052 case ICE_SW_TUN_GENEVE_VLAN:
7053 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
7058 /* Nothing needs to be done for this tunnel type */
7062 /* Find the outer UDP protocol header and insert the port number */
7063 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
7064 if (offsets[i].type == ICE_UDP_OF) {
7065 struct ice_l4_hdr *hdr;
7068 offset = offsets[i].offset;
7069 hdr = (struct ice_l4_hdr *)&pkt[offset];
7070 hdr->dst_port = CPU_TO_BE16(open_port);
7080 * ice_find_adv_rule_entry - Search a rule entry
7081 * @hw: pointer to the hardware structure
7082 * @lkups: lookup elements or match criteria for the advanced recipe, one
7083 * structure per protocol header
7084 * @lkups_cnt: number of protocols
7085 * @recp_id: recipe ID for which we are finding the rule
7086 * @rinfo: other information regarding the rule e.g. priority and action info
7088 * Helper function to search for a given advance rule entry
7089 * Returns pointer to entry storing the rule if found
7091 static struct ice_adv_fltr_mgmt_list_entry *
7092 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7093 u16 lkups_cnt, u16 recp_id,
7094 struct ice_adv_rule_info *rinfo)
7096 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7097 struct ice_switch_info *sw = hw->switch_info;
7100 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
7101 ice_adv_fltr_mgmt_list_entry, list_entry) {
7102 bool lkups_matched = true;
7104 if (lkups_cnt != list_itr->lkups_cnt)
7106 for (i = 0; i < list_itr->lkups_cnt; i++)
7107 if (memcmp(&list_itr->lkups[i], &lkups[i],
7109 lkups_matched = false;
7112 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
7113 rinfo->tun_type == list_itr->rule_info.tun_type &&
7121 * ice_adv_add_update_vsi_list
7122 * @hw: pointer to the hardware structure
7123 * @m_entry: pointer to current adv filter management list entry
7124 * @cur_fltr: filter information from the book keeping entry
7125 * @new_fltr: filter information with the new VSI to be added
7127 * Call AQ command to add or update previously created VSI list with new VSI.
7129 * Helper function to do book keeping associated with adding filter information
7130 * The algorithm to do the booking keeping is described below :
7131 * When a VSI needs to subscribe to a given advanced filter
7132 * if only one VSI has been added till now
7133 * Allocate a new VSI list and add two VSIs
7134 * to this list using switch rule command
7135 * Update the previously created switch rule with the
7136 * newly created VSI list ID
7137 * if a VSI list was previously created
7138 * Add the new VSI to the previously created VSI list set
7139 * using the update switch rule command
7141 static enum ice_status
7142 ice_adv_add_update_vsi_list(struct ice_hw *hw,
7143 struct ice_adv_fltr_mgmt_list_entry *m_entry,
7144 struct ice_adv_rule_info *cur_fltr,
7145 struct ice_adv_rule_info *new_fltr)
7147 enum ice_status status;
7148 u16 vsi_list_id = 0;
7150 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
7151 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
7152 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
7153 return ICE_ERR_NOT_IMPL;
7155 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
7156 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
7157 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
7158 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
7159 return ICE_ERR_NOT_IMPL;
7161 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
7162 /* Only one entry existed in the mapping and it was not already
7163 * a part of a VSI list. So, create a VSI list with the old and
7166 struct ice_fltr_info tmp_fltr;
7167 u16 vsi_handle_arr[2];
7169 /* A rule already exists with the new VSI being added */
7170 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
7171 new_fltr->sw_act.fwd_id.hw_vsi_id)
7172 return ICE_ERR_ALREADY_EXISTS;
7174 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
7175 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
7176 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
7182 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
7183 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
7184 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
7185 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
7186 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
7187 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
7189 /* Update the previous switch rule of "forward to VSI" to
7192 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
7196 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
7197 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
7198 m_entry->vsi_list_info =
7199 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
7202 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
7204 if (!m_entry->vsi_list_info)
7207 /* A rule already exists with the new VSI being added */
7208 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
7211 /* Update the previously created VSI list set with
7212 * the new VSI ID passed in
7214 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
7216 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
7218 ice_aqc_opc_update_sw_rules,
7220 /* update VSI list mapping info with new VSI ID */
7222 ice_set_bit(vsi_handle,
7223 m_entry->vsi_list_info->vsi_map);
7226 m_entry->vsi_count++;
7231 * ice_add_adv_rule - helper function to create an advanced switch rule
7232 * @hw: pointer to the hardware structure
7233 * @lkups: information on the words that needs to be looked up. All words
7234 * together makes one recipe
7235 * @lkups_cnt: num of entries in the lkups array
7236 * @rinfo: other information related to the rule that needs to be programmed
7237 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
7238 * ignored is case of error.
7240 * This function can program only 1 rule at a time. The lkups is used to
7241 * describe the all the words that forms the "lookup" portion of the recipe.
7242 * These words can span multiple protocols. Callers to this function need to
7243 * pass in a list of protocol headers with lookup information along and mask
7244 * that determines which words are valid from the given protocol header.
7245 * rinfo describes other information related to this rule such as forwarding
7246 * IDs, priority of this rule, etc.
7249 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7250 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
7251 struct ice_rule_query_data *added_entry)
7253 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
7254 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
7255 const struct ice_dummy_pkt_offsets *pkt_offsets;
7256 struct ice_aqc_sw_rules_elem *s_rule = NULL;
7257 struct LIST_HEAD_TYPE *rule_head;
7258 struct ice_switch_info *sw;
7259 enum ice_status status;
7260 const u8 *pkt = NULL;
7266 /* Initialize profile to result index bitmap */
7267 if (!hw->switch_info->prof_res_bm_init) {
7268 hw->switch_info->prof_res_bm_init = 1;
7269 ice_init_prof_result_bm(hw);
7272 prof_rule = ice_is_prof_rule(rinfo->tun_type);
7273 if (!prof_rule && !lkups_cnt)
7274 return ICE_ERR_PARAM;
7276 /* get # of words we need to match */
7278 for (i = 0; i < lkups_cnt; i++) {
7281 ptr = (u16 *)&lkups[i].m_u;
7282 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
7288 if (word_cnt > ICE_MAX_CHAIN_WORDS)
7289 return ICE_ERR_PARAM;
7291 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
7292 return ICE_ERR_PARAM;
7295 /* make sure that we can locate a dummy packet */
7296 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
7299 status = ICE_ERR_PARAM;
7300 goto err_ice_add_adv_rule;
7303 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
7304 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
7305 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
7306 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
7309 vsi_handle = rinfo->sw_act.vsi_handle;
7310 if (!ice_is_vsi_valid(hw, vsi_handle))
7311 return ICE_ERR_PARAM;
7313 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
7314 rinfo->sw_act.fwd_id.hw_vsi_id =
7315 ice_get_hw_vsi_num(hw, vsi_handle);
7316 if (rinfo->sw_act.flag & ICE_FLTR_TX)
7317 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
7319 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
7322 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
7324 /* we have to add VSI to VSI_LIST and increment vsi_count.
7325 * Also Update VSI list so that we can change forwarding rule
7326 * if the rule already exists, we will check if it exists with
7327 * same vsi_id, if not then add it to the VSI list if it already
7328 * exists if not then create a VSI list and add the existing VSI
7329 * ID and the new VSI ID to the list
7330 * We will add that VSI to the list
7332 status = ice_adv_add_update_vsi_list(hw, m_entry,
7333 &m_entry->rule_info,
7336 added_entry->rid = rid;
7337 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
7338 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
7342 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
7343 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
7345 return ICE_ERR_NO_MEMORY;
7346 act |= ICE_SINGLE_ACT_LAN_ENABLE;
7347 switch (rinfo->sw_act.fltr_act) {
7348 case ICE_FWD_TO_VSI:
7349 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
7350 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
7351 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
7354 act |= ICE_SINGLE_ACT_TO_Q;
7355 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
7356 ICE_SINGLE_ACT_Q_INDEX_M;
7358 case ICE_FWD_TO_QGRP:
7359 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
7360 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
7361 act |= ICE_SINGLE_ACT_TO_Q;
7362 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
7363 ICE_SINGLE_ACT_Q_INDEX_M;
7364 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
7365 ICE_SINGLE_ACT_Q_REGION_M;
7367 case ICE_DROP_PACKET:
7368 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
7369 ICE_SINGLE_ACT_VALID_BIT;
7372 status = ICE_ERR_CFG;
7373 goto err_ice_add_adv_rule;
7376 /* set the rule LOOKUP type based on caller specified 'RX'
7377 * instead of hardcoding it to be either LOOKUP_TX/RX
7379 * for 'RX' set the source to be the port number
7380 * for 'TX' set the source to be the source HW VSI number (determined
7384 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
7385 s_rule->pdata.lkup_tx_rx.src =
7386 CPU_TO_LE16(hw->port_info->lport);
7388 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
7389 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
7392 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
7393 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
7395 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
7396 pkt_len, pkt_offsets);
7398 goto err_ice_add_adv_rule;
7400 if (rinfo->tun_type != ICE_NON_TUN &&
7401 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
7402 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
7403 s_rule->pdata.lkup_tx_rx.hdr,
7406 goto err_ice_add_adv_rule;
7409 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
7410 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
7413 goto err_ice_add_adv_rule;
7414 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
7415 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
7417 status = ICE_ERR_NO_MEMORY;
7418 goto err_ice_add_adv_rule;
7421 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
7422 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
7423 ICE_NONDMA_TO_NONDMA);
7424 if (!adv_fltr->lkups && !prof_rule) {
7425 status = ICE_ERR_NO_MEMORY;
7426 goto err_ice_add_adv_rule;
7429 adv_fltr->lkups_cnt = lkups_cnt;
7430 adv_fltr->rule_info = *rinfo;
7431 adv_fltr->rule_info.fltr_rule_id =
7432 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
7433 sw = hw->switch_info;
7434 sw->recp_list[rid].adv_rule = true;
7435 rule_head = &sw->recp_list[rid].filt_rules;
7437 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
7438 adv_fltr->vsi_count = 1;
7440 /* Add rule entry to book keeping list */
7441 LIST_ADD(&adv_fltr->list_entry, rule_head);
7443 added_entry->rid = rid;
7444 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
7445 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
7447 err_ice_add_adv_rule:
7448 if (status && adv_fltr) {
7449 ice_free(hw, adv_fltr->lkups);
7450 ice_free(hw, adv_fltr);
7453 ice_free(hw, s_rule);
7459 * ice_adv_rem_update_vsi_list
7460 * @hw: pointer to the hardware structure
7461 * @vsi_handle: VSI handle of the VSI to remove
7462 * @fm_list: filter management entry for which the VSI list management needs to
7465 static enum ice_status
7466 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
7467 struct ice_adv_fltr_mgmt_list_entry *fm_list)
7469 struct ice_vsi_list_map_info *vsi_list_info;
7470 enum ice_sw_lkup_type lkup_type;
7471 enum ice_status status;
7474 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
7475 fm_list->vsi_count == 0)
7476 return ICE_ERR_PARAM;
7478 /* A rule with the VSI being removed does not exist */
7479 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
7480 return ICE_ERR_DOES_NOT_EXIST;
7482 lkup_type = ICE_SW_LKUP_LAST;
7483 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
7484 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
7485 ice_aqc_opc_update_sw_rules,
7490 fm_list->vsi_count--;
7491 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
7492 vsi_list_info = fm_list->vsi_list_info;
7493 if (fm_list->vsi_count == 1) {
7494 struct ice_fltr_info tmp_fltr;
7497 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
7499 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
7500 return ICE_ERR_OUT_OF_RANGE;
7502 /* Make sure VSI list is empty before removing it below */
7503 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
7505 ice_aqc_opc_update_sw_rules,
7510 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
7511 tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
7512 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
7513 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
7514 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
7515 tmp_fltr.fwd_id.hw_vsi_id =
7516 ice_get_hw_vsi_num(hw, rem_vsi_handle);
7517 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
7518 ice_get_hw_vsi_num(hw, rem_vsi_handle);
7519 fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
7521 /* Update the previous switch rule of "MAC forward to VSI" to
7522 * "MAC fwd to VSI list"
7524 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
7526 ice_debug(hw, ICE_DBG_SW,
7527 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
7528 tmp_fltr.fwd_id.hw_vsi_id, status);
7531 fm_list->vsi_list_info->ref_cnt--;
7533 /* Remove the VSI list since it is no longer used */
7534 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
7536 ice_debug(hw, ICE_DBG_SW,
7537 "Failed to remove VSI list %d, error %d\n",
7538 vsi_list_id, status);
7542 LIST_DEL(&vsi_list_info->list_entry);
7543 ice_free(hw, vsi_list_info);
7544 fm_list->vsi_list_info = NULL;
7551 * ice_rem_adv_rule - removes existing advanced switch rule
7552 * @hw: pointer to the hardware structure
7553 * @lkups: information on the words that needs to be looked up. All words
7554 * together makes one recipe
7555 * @lkups_cnt: num of entries in the lkups array
7556 * @rinfo: Its the pointer to the rule information for the rule
7558 * This function can be used to remove 1 rule at a time. The lkups is
7559 * used to describe all the words that forms the "lookup" portion of the
7560 * rule. These words can span multiple protocols. Callers to this function
7561 * need to pass in a list of protocol headers with lookup information along
7562 * and mask that determines which words are valid from the given protocol
7563 * header. rinfo describes other information related to this rule such as
7564 * forwarding IDs, priority of this rule, etc.
7567 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7568 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
7570 struct ice_adv_fltr_mgmt_list_entry *list_elem;
7571 struct ice_prot_lkup_ext lkup_exts;
7572 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
7573 enum ice_status status = ICE_SUCCESS;
7574 bool remove_rule = false;
7575 u16 i, rid, vsi_handle;
7577 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
7578 for (i = 0; i < lkups_cnt; i++) {
7581 if (lkups[i].type >= ICE_PROTOCOL_LAST)
7584 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
7589 /* Create any special protocol/offset pairs, such as looking at tunnel
7590 * bits by extracting metadata
7592 status = ice_add_special_words(rinfo, &lkup_exts);
7596 rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
7597 /* If did not find a recipe that match the existing criteria */
7598 if (rid == ICE_MAX_NUM_RECIPES)
7599 return ICE_ERR_PARAM;
7601 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
7602 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
7603 /* the rule is already removed */
7606 ice_acquire_lock(rule_lock);
7607 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
7609 } else if (list_elem->vsi_count > 1) {
7610 remove_rule = false;
7611 vsi_handle = rinfo->sw_act.vsi_handle;
7612 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
7614 vsi_handle = rinfo->sw_act.vsi_handle;
7615 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
7617 ice_release_lock(rule_lock);
7620 if (list_elem->vsi_count == 0)
7623 ice_release_lock(rule_lock);
7625 struct ice_aqc_sw_rules_elem *s_rule;
7628 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
7630 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
7633 return ICE_ERR_NO_MEMORY;
7634 s_rule->pdata.lkup_tx_rx.act = 0;
7635 s_rule->pdata.lkup_tx_rx.index =
7636 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
7637 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
7638 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
7640 ice_aqc_opc_remove_sw_rules, NULL);
7641 if (status == ICE_SUCCESS || status == ICE_ERR_DOES_NOT_EXIST) {
7642 struct ice_switch_info *sw = hw->switch_info;
7644 ice_acquire_lock(rule_lock);
7645 LIST_DEL(&list_elem->list_entry);
7646 ice_free(hw, list_elem->lkups);
7647 ice_free(hw, list_elem);
7648 ice_release_lock(rule_lock);
7649 if (LIST_EMPTY(&sw->recp_list[rid].filt_rules))
7650 sw->recp_list[rid].adv_rule = false;
7652 ice_free(hw, s_rule);
7658 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
7659 * @hw: pointer to the hardware structure
7660 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
7662 * This function is used to remove 1 rule at a time. The removal is based on
7663 * the remove_entry parameter. This function will remove rule for a given
7664 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
7667 ice_rem_adv_rule_by_id(struct ice_hw *hw,
7668 struct ice_rule_query_data *remove_entry)
7670 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7671 struct LIST_HEAD_TYPE *list_head;
7672 struct ice_adv_rule_info rinfo;
7673 struct ice_switch_info *sw;
7675 sw = hw->switch_info;
7676 if (!sw->recp_list[remove_entry->rid].recp_created)
7677 return ICE_ERR_PARAM;
7678 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
7679 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
7681 if (list_itr->rule_info.fltr_rule_id ==
7682 remove_entry->rule_id) {
7683 rinfo = list_itr->rule_info;
7684 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
7685 return ice_rem_adv_rule(hw, list_itr->lkups,
7686 list_itr->lkups_cnt, &rinfo);
7689 /* either list is empty or unable to find rule */
7690 return ICE_ERR_DOES_NOT_EXIST;
7694 * ice_rem_adv_for_vsi - removes existing advanced switch rules for a
7696 * @hw: pointer to the hardware structure
7697 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
7699 * This function is used to remove all the rules for a given VSI and as soon
7700 * as removing a rule fails, it will return immediately with the error code,
7701 * else it will return ICE_SUCCESS
7703 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
7705 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7706 struct ice_vsi_list_map_info *map_info;
7707 struct LIST_HEAD_TYPE *list_head;
7708 struct ice_adv_rule_info rinfo;
7709 struct ice_switch_info *sw;
7710 enum ice_status status;
7711 u16 vsi_list_id = 0;
7714 sw = hw->switch_info;
7715 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
7716 if (!sw->recp_list[rid].recp_created)
7718 if (!sw->recp_list[rid].adv_rule)
7720 list_head = &sw->recp_list[rid].filt_rules;
7722 LIST_FOR_EACH_ENTRY(list_itr, list_head,
7723 ice_adv_fltr_mgmt_list_entry, list_entry) {
7724 map_info = ice_find_vsi_list_entry(&sw->recp_list[rid],
7729 rinfo = list_itr->rule_info;
7730 rinfo.sw_act.vsi_handle = vsi_handle;
7731 status = ice_rem_adv_rule(hw, list_itr->lkups,
7732 list_itr->lkups_cnt, &rinfo);
7742 * ice_replay_fltr - Replay all the filters stored by a specific list head
7743 * @hw: pointer to the hardware structure
7744 * @list_head: list for which filters needs to be replayed
7745 * @recp_id: Recipe ID for which rules need to be replayed
7747 static enum ice_status
7748 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
7750 struct ice_fltr_mgmt_list_entry *itr;
7751 enum ice_status status = ICE_SUCCESS;
7752 struct ice_sw_recipe *recp_list;
7753 u8 lport = hw->port_info->lport;
7754 struct LIST_HEAD_TYPE l_head;
7756 if (LIST_EMPTY(list_head))
7759 recp_list = &hw->switch_info->recp_list[recp_id];
7760 /* Move entries from the given list_head to a temporary l_head so that
7761 * they can be replayed. Otherwise when trying to re-add the same
7762 * filter, the function will return already exists
7764 LIST_REPLACE_INIT(list_head, &l_head);
7766 /* Mark the given list_head empty by reinitializing it so filters
7767 * could be added again by *handler
7769 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
7771 struct ice_fltr_list_entry f_entry;
7773 f_entry.fltr_info = itr->fltr_info;
7774 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
7775 status = ice_add_rule_internal(hw, recp_list, lport,
7777 if (status != ICE_SUCCESS)
7782 /* Add a filter per VSI separately */
7787 ice_find_first_bit(itr->vsi_list_info->vsi_map,
7789 if (!ice_is_vsi_valid(hw, vsi_handle))
7792 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
7793 f_entry.fltr_info.vsi_handle = vsi_handle;
7794 f_entry.fltr_info.fwd_id.hw_vsi_id =
7795 ice_get_hw_vsi_num(hw, vsi_handle);
7796 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
7797 if (recp_id == ICE_SW_LKUP_VLAN)
7798 status = ice_add_vlan_internal(hw, recp_list,
7801 status = ice_add_rule_internal(hw, recp_list,
7804 if (status != ICE_SUCCESS)
7809 /* Clear the filter management list */
7810 ice_rem_sw_rule_info(hw, &l_head);
7815 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
7816 * @hw: pointer to the hardware structure
7818 * NOTE: This function does not clean up partially added filters on error.
7819 * It is up to caller of the function to issue a reset or fail early.
7821 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
7823 struct ice_switch_info *sw = hw->switch_info;
7824 enum ice_status status = ICE_SUCCESS;
7827 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7828 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
7830 status = ice_replay_fltr(hw, i, head);
7831 if (status != ICE_SUCCESS)
7838 * ice_replay_vsi_fltr - Replay filters for requested VSI
7839 * @hw: pointer to the hardware structure
7840 * @pi: pointer to port information structure
7841 * @sw: pointer to switch info struct for which function replays filters
7842 * @vsi_handle: driver VSI handle
7843 * @recp_id: Recipe ID for which rules need to be replayed
7844 * @list_head: list for which filters need to be replayed
7846 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
7847 * It is required to pass valid VSI handle.
7849 static enum ice_status
7850 ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
7851 struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
7852 struct LIST_HEAD_TYPE *list_head)
7854 struct ice_fltr_mgmt_list_entry *itr;
7855 enum ice_status status = ICE_SUCCESS;
7856 struct ice_sw_recipe *recp_list;
7859 if (LIST_EMPTY(list_head))
7861 recp_list = &sw->recp_list[recp_id];
7862 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
7864 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
7866 struct ice_fltr_list_entry f_entry;
7868 f_entry.fltr_info = itr->fltr_info;
7869 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
7870 itr->fltr_info.vsi_handle == vsi_handle) {
7871 /* update the src in case it is VSI num */
7872 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
7873 f_entry.fltr_info.src = hw_vsi_id;
7874 status = ice_add_rule_internal(hw, recp_list,
7877 if (status != ICE_SUCCESS)
7881 if (!itr->vsi_list_info ||
7882 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
7884 /* Clearing it so that the logic can add it back */
7885 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
7886 f_entry.fltr_info.vsi_handle = vsi_handle;
7887 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
7888 /* update the src in case it is VSI num */
7889 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
7890 f_entry.fltr_info.src = hw_vsi_id;
7891 if (recp_id == ICE_SW_LKUP_VLAN)
7892 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
7894 status = ice_add_rule_internal(hw, recp_list,
7897 if (status != ICE_SUCCESS)
7905 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
7906 * @hw: pointer to the hardware structure
7907 * @vsi_handle: driver VSI handle
7908 * @list_head: list for which filters need to be replayed
7910 * Replay the advanced rule for the given VSI.
7912 static enum ice_status
7913 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
7914 struct LIST_HEAD_TYPE *list_head)
7916 struct ice_rule_query_data added_entry = { 0 };
7917 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
7918 enum ice_status status = ICE_SUCCESS;
7920 if (LIST_EMPTY(list_head))
7922 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
7924 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
7925 u16 lk_cnt = adv_fltr->lkups_cnt;
7927 if (vsi_handle != rinfo->sw_act.vsi_handle)
7929 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
7938 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
7939 * @hw: pointer to the hardware structure
7940 * @pi: pointer to port information structure
7941 * @vsi_handle: driver VSI handle
7943 * Replays filters for requested VSI via vsi_handle.
7946 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
7949 struct ice_switch_info *sw = hw->switch_info;
7950 enum ice_status status;
7953 /* Update the recipes that were created */
7954 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7955 struct LIST_HEAD_TYPE *head;
7957 head = &sw->recp_list[i].filt_replay_rules;
7958 if (!sw->recp_list[i].adv_rule)
7959 status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
7962 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
7963 if (status != ICE_SUCCESS)
7971 * ice_rm_all_sw_replay_rule - helper function to delete filter replay rules
7972 * @hw: pointer to the HW struct
7973 * @sw: pointer to switch info struct for which function removes filters
7975 * Deletes the filter replay rules for given switch
7977 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
7984 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7985 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
7986 struct LIST_HEAD_TYPE *l_head;
7988 l_head = &sw->recp_list[i].filt_replay_rules;
7989 if (!sw->recp_list[i].adv_rule)
7990 ice_rem_sw_rule_info(hw, l_head);
7992 ice_rem_adv_rule_info(hw, l_head);
7998 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
7999 * @hw: pointer to the HW struct
8001 * Deletes the filter replay rules.
8003 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
8005 ice_rm_sw_replay_rule_info(hw, hw->switch_info);