1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020 Intel Corporation
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
12 #define ICE_MAX_VLAN_ID 0xFFF
13 #define ICE_IPV4_NVGRE_PROTO_ID 0x002F
14 #define ICE_PPP_IPV6_PROTO_ID 0x0057
15 #define ICE_IPV6_ETHER_ID 0x86DD
17 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
18 * struct to configure any switch filter rules.
19 * {DA (6 bytes), SA(6 bytes),
20 * Ether type (2 bytes for header without VLAN tag) OR
21 * VLAN tag (4 bytes for header with VLAN tag) }
23 * Word on Hardcoded values
24 * byte 0 = 0x2: to identify it as locally administered DA MAC
25 * byte 6 = 0x2: to identify it as locally administered SA MAC
26 * byte 12 = 0x81 & byte 13 = 0x00:
27 * In case of VLAN filter first two bytes defines ether type (0x8100)
28 * and remaining two bytes are placeholder for programming a given VLAN ID
29 * In case of Ether type filter it is treated as header without VLAN tag
30 * and byte 12 and 13 is used to program a given Ether type instead
/* Template header; real DA/SA/type bytes are patched in before the rule is
 * sent to firmware (see comment block above for the hardcoded byte meanings).
 */
32 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* One (protocol header, byte offset) pair inside a dummy training packet.
 * Arrays of these are terminated by an entry whose type is ICE_PROTOCOL_LAST.
 */
36 struct ice_dummy_pkt_offsets {
37 enum ice_protocol_type type;
38 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
/* Header offsets for the NVGRE + inner TCP dummy packet below */
41 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
44 { ICE_IPV4_OFOS, 14 },
49 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet: outer MAC + IPv4 (proto 0x2F = GRE, see
 * ICE_IPV4_NVGRE_PROTO_ID) + NVGRE + inner MAC/IPv4/TCP
 */
52 static const u8 dummy_gre_tcp_packet[] = {
53 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
54 0x00, 0x00, 0x00, 0x00,
55 0x00, 0x00, 0x00, 0x00,
57 0x08, 0x00, /* ICE_ETYPE_OL 12 */
59 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
60 0x00, 0x00, 0x00, 0x00,
61 0x00, 0x2F, 0x00, 0x00, /* protocol 0x2F = GRE */
62 0x00, 0x00, 0x00, 0x00,
63 0x00, 0x00, 0x00, 0x00,
65 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
66 0x00, 0x00, 0x00, 0x00,
68 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
69 0x00, 0x00, 0x00, 0x00,
70 0x00, 0x00, 0x00, 0x00,
73 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
74 0x00, 0x00, 0x00, 0x00,
75 0x00, 0x06, 0x00, 0x00, /* inner protocol 6 = TCP */
76 0x00, 0x00, 0x00, 0x00,
77 0x00, 0x00, 0x00, 0x00,
79 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
80 0x00, 0x00, 0x00, 0x00,
81 0x00, 0x00, 0x00, 0x00,
82 0x50, 0x02, 0x20, 0x00,
83 0x00, 0x00, 0x00, 0x00
/* Header offsets for the NVGRE + inner UDP dummy packet below */
86 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
89 { ICE_IPV4_OFOS, 14 },
94 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet: outer MAC + IPv4 (GRE) + NVGRE + inner MAC/IPv4/UDP */
97 static const u8 dummy_gre_udp_packet[] = {
98 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
99 0x00, 0x00, 0x00, 0x00,
100 0x00, 0x00, 0x00, 0x00,
102 0x08, 0x00, /* ICE_ETYPE_OL 12 */
104 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
105 0x00, 0x00, 0x00, 0x00,
106 0x00, 0x2F, 0x00, 0x00, /* protocol 0x2F = GRE */
107 0x00, 0x00, 0x00, 0x00,
108 0x00, 0x00, 0x00, 0x00,
110 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
111 0x00, 0x00, 0x00, 0x00,
113 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
114 0x00, 0x00, 0x00, 0x00,
115 0x00, 0x00, 0x00, 0x00,
118 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
119 0x00, 0x00, 0x00, 0x00,
120 0x00, 0x11, 0x00, 0x00, /* inner protocol 0x11 = UDP */
121 0x00, 0x00, 0x00, 0x00,
122 0x00, 0x00, 0x00, 0x00,
124 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
125 0x00, 0x08, 0x00, 0x00,
/* Header offsets for UDP tunnel (VXLAN/VXLAN-GPE/GENEVE) + inner TCP packet */
128 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
130 { ICE_ETYPE_OL, 12 },
131 { ICE_IPV4_OFOS, 14 },
135 { ICE_VXLAN_GPE, 42 },
139 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet: MAC + IPv4 + UDP (dst 0x12b5 = 4789, VXLAN) + VXLAN +
 * inner MAC/IPv4/TCP
 */
142 static const u8 dummy_udp_tun_tcp_packet[] = {
143 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
144 0x00, 0x00, 0x00, 0x00,
145 0x00, 0x00, 0x00, 0x00,
147 0x08, 0x00, /* ICE_ETYPE_OL 12 */
149 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
150 0x00, 0x01, 0x00, 0x00,
151 0x40, 0x11, 0x00, 0x00,
152 0x00, 0x00, 0x00, 0x00,
153 0x00, 0x00, 0x00, 0x00,
155 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
156 0x00, 0x46, 0x00, 0x00,
158 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
159 0x00, 0x00, 0x00, 0x00,
161 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
162 0x00, 0x00, 0x00, 0x00,
163 0x00, 0x00, 0x00, 0x00,
166 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
167 0x00, 0x01, 0x00, 0x00,
168 0x40, 0x06, 0x00, 0x00, /* inner protocol 6 = TCP */
169 0x00, 0x00, 0x00, 0x00,
170 0x00, 0x00, 0x00, 0x00,
172 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
173 0x00, 0x00, 0x00, 0x00,
174 0x00, 0x00, 0x00, 0x00,
175 0x50, 0x02, 0x20, 0x00,
176 0x00, 0x00, 0x00, 0x00
/* Header offsets for UDP tunnel + inner UDP packet */
179 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
181 { ICE_ETYPE_OL, 12 },
182 { ICE_IPV4_OFOS, 14 },
186 { ICE_VXLAN_GPE, 42 },
189 { ICE_UDP_ILOS, 84 },
190 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet: MAC + IPv4 + UDP (VXLAN port) + VXLAN + inner MAC/IPv4/UDP */
193 static const u8 dummy_udp_tun_udp_packet[] = {
194 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
195 0x00, 0x00, 0x00, 0x00,
196 0x00, 0x00, 0x00, 0x00,
198 0x08, 0x00, /* ICE_ETYPE_OL 12 */
200 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
201 0x00, 0x01, 0x00, 0x00,
202 0x00, 0x11, 0x00, 0x00,
203 0x00, 0x00, 0x00, 0x00,
204 0x00, 0x00, 0x00, 0x00,
206 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
207 0x00, 0x3a, 0x00, 0x00,
209 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
210 0x00, 0x00, 0x00, 0x00,
212 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
213 0x00, 0x00, 0x00, 0x00,
214 0x00, 0x00, 0x00, 0x00,
217 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
218 0x00, 0x01, 0x00, 0x00,
219 0x00, 0x11, 0x00, 0x00, /* inner protocol 0x11 = UDP */
220 0x00, 0x00, 0x00, 0x00,
221 0x00, 0x00, 0x00, 0x00,
223 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
224 0x00, 0x08, 0x00, 0x00,
227 /* offset info for MAC + IPv4 + UDP dummy packet */
228 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
230 { ICE_ETYPE_OL, 12 },
231 { ICE_IPV4_OFOS, 14 },
232 { ICE_UDP_ILOS, 34 },
233 { ICE_PROTOCOL_LAST, 0 },
236 /* Dummy packet for MAC + IPv4 + UDP */
237 static const u8 dummy_udp_packet[] = {
238 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
239 0x00, 0x00, 0x00, 0x00,
240 0x00, 0x00, 0x00, 0x00,
242 0x08, 0x00, /* ICE_ETYPE_OL 12 */
244 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
245 0x00, 0x01, 0x00, 0x00,
246 0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
247 0x00, 0x00, 0x00, 0x00,
248 0x00, 0x00, 0x00, 0x00,
250 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
251 0x00, 0x08, 0x00, 0x00,
253 0x00, 0x00, /* 2 bytes for 4 byte alignment */
256 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
257 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
259 { ICE_ETYPE_OL, 12 },
260 { ICE_VLAN_OFOS, 14 },
261 { ICE_IPV4_OFOS, 18 },
262 { ICE_UDP_ILOS, 38 },
263 { ICE_PROTOCOL_LAST, 0 },
266 /* C-tag (802.1Q), IPv4:UDP dummy packet */
267 static const u8 dummy_vlan_udp_packet[] = {
268 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
269 0x00, 0x00, 0x00, 0x00,
270 0x00, 0x00, 0x00, 0x00,
272 0x81, 0x00, /* ICE_ETYPE_OL 12 */
274 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
276 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
277 0x00, 0x01, 0x00, 0x00,
278 0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
279 0x00, 0x00, 0x00, 0x00,
280 0x00, 0x00, 0x00, 0x00,
282 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
283 0x00, 0x08, 0x00, 0x00,
285 0x00, 0x00, /* 2 bytes for 4 byte alignment */
288 /* offset info for MAC + IPv4 + TCP dummy packet */
289 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
291 { ICE_ETYPE_OL, 12 },
292 { ICE_IPV4_OFOS, 14 },
294 { ICE_PROTOCOL_LAST, 0 },
297 /* Dummy packet for MAC + IPv4 + TCP */
298 static const u8 dummy_tcp_packet[] = {
299 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
300 0x00, 0x00, 0x00, 0x00,
301 0x00, 0x00, 0x00, 0x00,
303 0x08, 0x00, /* ICE_ETYPE_OL 12 */
305 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
306 0x00, 0x01, 0x00, 0x00,
307 0x00, 0x06, 0x00, 0x00, /* protocol 6 = TCP */
308 0x00, 0x00, 0x00, 0x00,
309 0x00, 0x00, 0x00, 0x00,
311 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
312 0x00, 0x00, 0x00, 0x00,
313 0x00, 0x00, 0x00, 0x00,
314 0x50, 0x00, 0x00, 0x00,
315 0x00, 0x00, 0x00, 0x00,
317 0x00, 0x00, /* 2 bytes for 4 byte alignment */
320 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
321 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
323 { ICE_ETYPE_OL, 12 },
324 { ICE_VLAN_OFOS, 14 },
325 { ICE_IPV4_OFOS, 18 },
327 { ICE_PROTOCOL_LAST, 0 },
330 /* C-tag (802.1Q), IPv4:TCP dummy packet */
331 static const u8 dummy_vlan_tcp_packet[] = {
332 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
333 0x00, 0x00, 0x00, 0x00,
334 0x00, 0x00, 0x00, 0x00,
336 0x81, 0x00, /* ICE_ETYPE_OL 12 */
338 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
340 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
341 0x00, 0x01, 0x00, 0x00,
342 0x00, 0x06, 0x00, 0x00, /* protocol 6 = TCP */
343 0x00, 0x00, 0x00, 0x00,
344 0x00, 0x00, 0x00, 0x00,
346 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
347 0x00, 0x00, 0x00, 0x00,
348 0x00, 0x00, 0x00, 0x00,
349 0x50, 0x00, 0x00, 0x00,
350 0x00, 0x00, 0x00, 0x00,
352 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Header offsets for the MAC + IPv6 + TCP dummy packet below */
355 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
357 { ICE_ETYPE_OL, 12 },
358 { ICE_IPV6_OFOS, 14 },
360 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv6 + TCP */
363 static const u8 dummy_tcp_ipv6_packet[] = {
364 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
365 0x00, 0x00, 0x00, 0x00,
366 0x00, 0x00, 0x00, 0x00,
368 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
370 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
371 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
372 0x00, 0x00, 0x00, 0x00,
373 0x00, 0x00, 0x00, 0x00,
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00,
379 0x00, 0x00, 0x00, 0x00,
381 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
382 0x00, 0x00, 0x00, 0x00,
383 0x00, 0x00, 0x00, 0x00,
384 0x50, 0x00, 0x00, 0x00,
385 0x00, 0x00, 0x00, 0x00,
387 0x00, 0x00, /* 2 bytes for 4 byte alignment */
390 /* C-tag (802.1Q): IPv6 + TCP */
391 static const struct ice_dummy_pkt_offsets
392 dummy_vlan_tcp_ipv6_packet_offsets[] = {
394 { ICE_ETYPE_OL, 12 },
395 { ICE_VLAN_OFOS, 14 },
396 { ICE_IPV6_OFOS, 18 },
398 { ICE_PROTOCOL_LAST, 0 },
401 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
402 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
403 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
404 0x00, 0x00, 0x00, 0x00,
405 0x00, 0x00, 0x00, 0x00,
407 0x81, 0x00, /* ICE_ETYPE_OL 12 */
409 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
411 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
412 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
413 0x00, 0x00, 0x00, 0x00,
414 0x00, 0x00, 0x00, 0x00,
415 0x00, 0x00, 0x00, 0x00,
416 0x00, 0x00, 0x00, 0x00,
417 0x00, 0x00, 0x00, 0x00,
418 0x00, 0x00, 0x00, 0x00,
419 0x00, 0x00, 0x00, 0x00,
420 0x00, 0x00, 0x00, 0x00,
422 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
423 0x00, 0x00, 0x00, 0x00,
424 0x00, 0x00, 0x00, 0x00,
425 0x50, 0x00, 0x00, 0x00,
426 0x00, 0x00, 0x00, 0x00,
428 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Header offsets for the MAC + IPv6 + UDP dummy packet below */
432 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
434 { ICE_ETYPE_OL, 12 },
435 { ICE_IPV6_OFOS, 14 },
436 { ICE_UDP_ILOS, 54 },
437 { ICE_PROTOCOL_LAST, 0 },
440 /* IPv6 + UDP dummy packet */
441 static const u8 dummy_udp_ipv6_packet[] = {
442 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
443 0x00, 0x00, 0x00, 0x00,
444 0x00, 0x00, 0x00, 0x00,
446 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
448 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
449 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
450 0x00, 0x00, 0x00, 0x00,
451 0x00, 0x00, 0x00, 0x00,
452 0x00, 0x00, 0x00, 0x00,
453 0x00, 0x00, 0x00, 0x00,
454 0x00, 0x00, 0x00, 0x00,
455 0x00, 0x00, 0x00, 0x00,
456 0x00, 0x00, 0x00, 0x00,
457 0x00, 0x00, 0x00, 0x00,
459 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
460 0x00, 0x10, 0x00, 0x00,
462 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
463 0x00, 0x00, 0x00, 0x00,
465 0x00, 0x00, /* 2 bytes for 4 byte alignment */
468 /* C-tag (802.1Q): IPv6 + UDP */
469 static const struct ice_dummy_pkt_offsets
470 dummy_vlan_udp_ipv6_packet_offsets[] = {
472 { ICE_ETYPE_OL, 12 },
473 { ICE_VLAN_OFOS, 14 },
474 { ICE_IPV6_OFOS, 18 },
475 { ICE_UDP_ILOS, 58 },
476 { ICE_PROTOCOL_LAST, 0 },
479 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
480 static const u8 dummy_vlan_udp_ipv6_packet[] = {
481 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
482 0x00, 0x00, 0x00, 0x00,
483 0x00, 0x00, 0x00, 0x00,
485 0x81, 0x00, /* ICE_ETYPE_OL 12 */
487 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
489 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
490 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
491 0x00, 0x00, 0x00, 0x00,
492 0x00, 0x00, 0x00, 0x00,
493 0x00, 0x00, 0x00, 0x00,
494 0x00, 0x00, 0x00, 0x00,
495 0x00, 0x00, 0x00, 0x00,
496 0x00, 0x00, 0x00, 0x00,
497 0x00, 0x00, 0x00, 0x00,
498 0x00, 0x00, 0x00, 0x00,
500 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
501 0x00, 0x08, 0x00, 0x00,
503 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Header offsets for the GTP-U dummy packet below */
506 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
508 { ICE_IPV4_OFOS, 14 },
511 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet: MAC + IPv4 + UDP (dst 0x0868 = 2152, GTP-U) + GTP header
 * with a PDU session container extension (next-ext type 0x85)
 */
514 static const u8 dummy_udp_gtp_packet[] = {
515 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
516 0x00, 0x00, 0x00, 0x00,
517 0x00, 0x00, 0x00, 0x00,
520 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
521 0x00, 0x00, 0x00, 0x00,
522 0x00, 0x11, 0x00, 0x00,
523 0x00, 0x00, 0x00, 0x00,
524 0x00, 0x00, 0x00, 0x00,
526 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
527 0x00, 0x1c, 0x00, 0x00,
529 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
530 0x00, 0x00, 0x00, 0x00,
531 0x00, 0x00, 0x00, 0x85,
533 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
534 0x00, 0x00, 0x00, 0x00,
/* Header offsets for a bare PPPoE session dummy packet (no IP payload) */
537 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
539 { ICE_ETYPE_OL, 12 },
540 { ICE_VLAN_OFOS, 14},
542 { ICE_PROTOCOL_LAST, 0 },
/* Header offsets for PPPoE carrying IPv4 (PPP protocol 0x0021) */
545 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv4_offsets[] = {
547 { ICE_ETYPE_OL, 12 },
548 { ICE_VLAN_OFOS, 14},
550 { ICE_IPV4_OFOS, 26 },
551 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet: MAC + VLAN + PPPoE session (ethertype 0x8864) + IPv4 */
554 static const u8 dummy_pppoe_ipv4_packet[] = {
555 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
556 0x00, 0x00, 0x00, 0x00,
557 0x00, 0x00, 0x00, 0x00,
559 0x81, 0x00, /* ICE_ETYPE_OL 12 */
561 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
563 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
566 0x00, 0x21, /* PPP Link Layer 24 */
568 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
569 0x00, 0x00, 0x00, 0x00,
570 0x00, 0x00, 0x00, 0x00,
571 0x00, 0x00, 0x00, 0x00,
572 0x00, 0x00, 0x00, 0x00,
574 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Header offsets for PPPoE + IPv4 + TCP */
578 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_tcp_packet_offsets[] = {
580 { ICE_ETYPE_OL, 12 },
581 { ICE_VLAN_OFOS, 14},
583 { ICE_IPV4_OFOS, 26 },
585 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet: MAC + VLAN + PPPoE session + IPv4 + TCP */
588 static const u8 dummy_pppoe_ipv4_tcp_packet[] = {
589 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
590 0x00, 0x00, 0x00, 0x00,
591 0x00, 0x00, 0x00, 0x00,
593 0x81, 0x00, /* ICE_ETYPE_OL 12 */
595 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
597 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
600 0x00, 0x21, /* PPP Link Layer 24 */
602 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 26 */
603 0x00, 0x01, 0x00, 0x00,
604 0x00, 0x06, 0x00, 0x00, /* protocol 6 = TCP */
605 0x00, 0x00, 0x00, 0x00,
606 0x00, 0x00, 0x00, 0x00,
608 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 46 */
609 0x00, 0x00, 0x00, 0x00,
610 0x00, 0x00, 0x00, 0x00,
611 0x50, 0x00, 0x00, 0x00,
612 0x00, 0x00, 0x00, 0x00,
614 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Header offsets for PPPoE + IPv4 + UDP */
618 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_udp_packet_offsets[] = {
620 { ICE_ETYPE_OL, 12 },
621 { ICE_VLAN_OFOS, 14},
623 { ICE_IPV4_OFOS, 26 },
624 { ICE_UDP_ILOS, 46 },
625 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet: MAC + VLAN + PPPoE session + IPv4 + UDP */
628 static const u8 dummy_pppoe_ipv4_udp_packet[] = {
629 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
630 0x00, 0x00, 0x00, 0x00,
631 0x00, 0x00, 0x00, 0x00,
633 0x81, 0x00, /* ICE_ETYPE_OL 12 */
635 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
637 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
640 0x00, 0x21, /* PPP Link Layer 24 */
642 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 26 */
643 0x00, 0x01, 0x00, 0x00,
644 0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
645 0x00, 0x00, 0x00, 0x00,
646 0x00, 0x00, 0x00, 0x00,
648 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 46 */
649 0x00, 0x08, 0x00, 0x00,
651 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Header offsets for PPPoE carrying IPv6 (PPP protocol 0x0057) */
654 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_offsets[] = {
656 { ICE_ETYPE_OL, 12 },
657 { ICE_VLAN_OFOS, 14},
659 { ICE_IPV6_OFOS, 26 },
660 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet: MAC + VLAN + PPPoE session + IPv6 (no next header) */
663 static const u8 dummy_pppoe_ipv6_packet[] = {
664 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
665 0x00, 0x00, 0x00, 0x00,
666 0x00, 0x00, 0x00, 0x00,
668 0x81, 0x00, /* ICE_ETYPE_OL 12 */
670 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
672 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
675 0x00, 0x57, /* PPP Link Layer 24 */
677 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
678 0x00, 0x00, 0x3b, 0x00, /* next header 0x3b = No Next Header */
679 0x00, 0x00, 0x00, 0x00,
680 0x00, 0x00, 0x00, 0x00,
681 0x00, 0x00, 0x00, 0x00,
682 0x00, 0x00, 0x00, 0x00,
683 0x00, 0x00, 0x00, 0x00,
684 0x00, 0x00, 0x00, 0x00,
685 0x00, 0x00, 0x00, 0x00,
686 0x00, 0x00, 0x00, 0x00,
688 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Header offsets for PPPoE + IPv6 + TCP */
692 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_tcp_offsets[] = {
694 { ICE_ETYPE_OL, 12 },
695 { ICE_VLAN_OFOS, 14},
697 { ICE_IPV6_OFOS, 26 },
699 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet: MAC + VLAN + PPPoE session + IPv6 + TCP */
702 static const u8 dummy_pppoe_ipv6_tcp_packet[] = {
703 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
704 0x00, 0x00, 0x00, 0x00,
705 0x00, 0x00, 0x00, 0x00,
707 0x81, 0x00, /* ICE_ETYPE_OL 12 */
709 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
711 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
714 0x00, 0x57, /* PPP Link Layer 24 */
716 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
717 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
718 0x00, 0x00, 0x00, 0x00,
719 0x00, 0x00, 0x00, 0x00,
720 0x00, 0x00, 0x00, 0x00,
721 0x00, 0x00, 0x00, 0x00,
722 0x00, 0x00, 0x00, 0x00,
723 0x00, 0x00, 0x00, 0x00,
724 0x00, 0x00, 0x00, 0x00,
725 0x00, 0x00, 0x00, 0x00,
727 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 66 */
728 0x00, 0x00, 0x00, 0x00,
729 0x00, 0x00, 0x00, 0x00,
730 0x50, 0x00, 0x00, 0x00,
731 0x00, 0x00, 0x00, 0x00,
733 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Header offsets for PPPoE + IPv6 + UDP */
737 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_udp_offsets[] = {
739 { ICE_ETYPE_OL, 12 },
740 { ICE_VLAN_OFOS, 14},
742 { ICE_IPV6_OFOS, 26 },
743 { ICE_UDP_ILOS, 66 },
744 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet: MAC + VLAN + PPPoE session + IPv6 + UDP */
747 static const u8 dummy_pppoe_ipv6_udp_packet[] = {
748 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
749 0x00, 0x00, 0x00, 0x00,
750 0x00, 0x00, 0x00, 0x00,
752 0x81, 0x00, /* ICE_ETYPE_OL 12 */
754 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
756 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
759 0x00, 0x57, /* PPP Link Layer 24 */
761 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
762 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
763 0x00, 0x00, 0x00, 0x00,
764 0x00, 0x00, 0x00, 0x00,
765 0x00, 0x00, 0x00, 0x00,
766 0x00, 0x00, 0x00, 0x00,
767 0x00, 0x00, 0x00, 0x00,
768 0x00, 0x00, 0x00, 0x00,
769 0x00, 0x00, 0x00, 0x00,
770 0x00, 0x00, 0x00, 0x00,
772 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 66 */
773 0x00, 0x08, 0x00, 0x00,
775 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Header offsets for the MAC + IPv4 + ESP dummy packet below */
778 static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
780 { ICE_IPV4_OFOS, 14 },
782 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet: MAC + IPv4 (protocol 0x32 = ESP) + ESP header */
785 static const u8 dummy_ipv4_esp_pkt[] = {
786 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
787 0x00, 0x00, 0x00, 0x00,
788 0x00, 0x00, 0x00, 0x00,
791 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 14 */
792 0x00, 0x00, 0x40, 0x00,
793 0x40, 0x32, 0x00, 0x00, /* protocol 0x32 = ESP */
794 0x00, 0x00, 0x00, 0x00,
795 0x00, 0x00, 0x00, 0x00,
797 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
798 0x00, 0x00, 0x00, 0x00,
799 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Header offsets for the MAC + IPv6 + ESP dummy packet below */
802 static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
804 { ICE_IPV6_OFOS, 14 },
806 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet: MAC + IPv6 (next header 0x32 = ESP) + ESP header */
809 static const u8 dummy_ipv6_esp_pkt[] = {
810 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
811 0x00, 0x00, 0x00, 0x00,
812 0x00, 0x00, 0x00, 0x00,
815 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
816 0x00, 0x08, 0x32, 0x00, /* Next header ESP */
817 0x00, 0x00, 0x00, 0x00,
818 0x00, 0x00, 0x00, 0x00,
819 0x00, 0x00, 0x00, 0x00,
820 0x00, 0x00, 0x00, 0x00,
821 0x00, 0x00, 0x00, 0x00,
822 0x00, 0x00, 0x00, 0x00,
823 0x00, 0x00, 0x00, 0x00,
824 0x00, 0x00, 0x00, 0x00,
826 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
827 0x00, 0x00, 0x00, 0x00,
828 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Header offsets for the MAC + IPv4 + AH dummy packet below */
831 static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
833 { ICE_IPV4_OFOS, 14 },
835 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet: MAC + IPv4 (protocol 0x33 = AH) + AH header */
838 static const u8 dummy_ipv4_ah_pkt[] = {
839 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
840 0x00, 0x00, 0x00, 0x00,
841 0x00, 0x00, 0x00, 0x00,
844 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
845 0x00, 0x00, 0x40, 0x00,
846 0x40, 0x33, 0x00, 0x00, /* protocol 0x33 = AH */
847 0x00, 0x00, 0x00, 0x00,
848 0x00, 0x00, 0x00, 0x00,
850 0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
851 0x00, 0x00, 0x00, 0x00,
852 0x00, 0x00, 0x00, 0x00,
853 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Header offsets for the MAC + IPv6 + AH dummy packet below */
856 static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
858 { ICE_IPV6_OFOS, 14 },
860 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet: MAC + IPv6 (next header 0x33 = AH) + AH header */
863 static const u8 dummy_ipv6_ah_pkt[] = {
864 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
865 0x00, 0x00, 0x00, 0x00,
866 0x00, 0x00, 0x00, 0x00,
869 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
870 0x00, 0x0c, 0x33, 0x00, /* Next header AH */
871 0x00, 0x00, 0x00, 0x00,
872 0x00, 0x00, 0x00, 0x00,
873 0x00, 0x00, 0x00, 0x00,
874 0x00, 0x00, 0x00, 0x00,
875 0x00, 0x00, 0x00, 0x00,
876 0x00, 0x00, 0x00, 0x00,
877 0x00, 0x00, 0x00, 0x00,
878 0x00, 0x00, 0x00, 0x00,
880 0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
881 0x00, 0x00, 0x00, 0x00,
882 0x00, 0x00, 0x00, 0x00,
883 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Header offsets for the IPv4 NAT-T (UDP-encapsulated ESP) dummy packet */
886 static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
888 { ICE_IPV4_OFOS, 14 },
889 { ICE_UDP_ILOS, 34 },
891 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet: MAC + IPv4 + UDP (dst 0x1194 = 4500, IPsec NAT-T) */
894 static const u8 dummy_ipv4_nat_pkt[] = {
895 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
896 0x00, 0x00, 0x00, 0x00,
897 0x00, 0x00, 0x00, 0x00,
900 0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_IL 14 */
901 0x00, 0x00, 0x40, 0x00,
902 0x40, 0x11, 0x00, 0x00,
903 0x00, 0x00, 0x00, 0x00,
904 0x00, 0x00, 0x00, 0x00,
906 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34 */
907 0x00, 0x00, 0x00, 0x00,
909 0x00, 0x00, 0x00, 0x00,
910 0x00, 0x00, 0x00, 0x00,
911 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Header offsets for the IPv6 NAT-T dummy packet */
914 static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
916 { ICE_IPV6_OFOS, 14 },
917 { ICE_UDP_ILOS, 54 },
919 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet: MAC + IPv6 + UDP (dst 0x1194 = 4500, IPsec NAT-T) */
922 static const u8 dummy_ipv6_nat_pkt[] = {
923 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
924 0x00, 0x00, 0x00, 0x00,
925 0x00, 0x00, 0x00, 0x00,
928 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
929 0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */
930 0x00, 0x00, 0x00, 0x00,
931 0x00, 0x00, 0x00, 0x00,
932 0x00, 0x00, 0x00, 0x00,
933 0x00, 0x00, 0x00, 0x00,
934 0x00, 0x00, 0x00, 0x00,
935 0x00, 0x00, 0x00, 0x00,
936 0x00, 0x00, 0x00, 0x00,
937 0x00, 0x00, 0x00, 0x00,
939 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54 */
940 0x00, 0x00, 0x00, 0x00,
942 0x00, 0x00, 0x00, 0x00,
943 0x00, 0x00, 0x00, 0x00,
944 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Header offsets for the MAC + IPv4 + L2TPv3 dummy packet below */
948 static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
950 { ICE_IPV4_OFOS, 14 },
952 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet: MAC + IPv4 (protocol 0x73 = L2TPv3) + L2TPv3 header */
955 static const u8 dummy_ipv4_l2tpv3_pkt[] = {
956 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
957 0x00, 0x00, 0x00, 0x00,
958 0x00, 0x00, 0x00, 0x00,
961 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
962 0x00, 0x00, 0x40, 0x00,
963 0x40, 0x73, 0x00, 0x00, /* protocol 0x73 = L2TPv3 */
964 0x00, 0x00, 0x00, 0x00,
965 0x00, 0x00, 0x00, 0x00,
967 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
968 0x00, 0x00, 0x00, 0x00,
969 0x00, 0x00, 0x00, 0x00,
970 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Header offsets for the MAC + IPv6 + L2TPv3 dummy packet below */
973 static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
975 { ICE_IPV6_OFOS, 14 },
977 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet: MAC + IPv6 (next header 0x73 = L2TPv3) + L2TPv3 header */
980 static const u8 dummy_ipv6_l2tpv3_pkt[] = {
981 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
982 0x00, 0x00, 0x00, 0x00,
983 0x00, 0x00, 0x00, 0x00,
986 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
987 0x00, 0x0c, 0x73, 0x40, /* next header 0x73 = L2TPv3 */
988 0x00, 0x00, 0x00, 0x00,
989 0x00, 0x00, 0x00, 0x00,
990 0x00, 0x00, 0x00, 0x00,
991 0x00, 0x00, 0x00, 0x00,
992 0x00, 0x00, 0x00, 0x00,
993 0x00, 0x00, 0x00, 0x00,
994 0x00, 0x00, 0x00, 0x00,
995 0x00, 0x00, 0x00, 0x00,
997 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
998 0x00, 0x00, 0x00, 0x00,
999 0x00, 0x00, 0x00, 0x00,
1000 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1003 /* this is a recipe to profile association bitmap */
1004 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
1005 ICE_MAX_NUM_PROFILES);
1007 /* this is a profile to recipe association bitmap */
1008 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
1009 ICE_MAX_NUM_RECIPES);
/* Forward declaration: refreshes both bitmaps above from firmware */
1011 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
1014 * ice_collect_result_idx - copy result index values
1015 * @buf: buffer that contains the result index
1016 * @recp: the recipe struct to copy data into
1018 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
1019 struct ice_sw_recipe *recp)
/* If the recipe uses a result index, record it (with the enable flag
 * masked off) in the recipe's res_idxs bitmap.
 */
1021 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1022 ice_set_bit(buf->content.result_indx &
1023 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
1027 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
1028 * @hw: pointer to hardware structure
1029 * @recps: struct that we need to populate
1030 * @rid: recipe ID that we are populating
1031 * @refresh_required: true if we should get recipe to profile mapping from FW
1033 * This function is used to populate all the necessary entries into our
1034 * bookkeeping so that we have a current list of all the recipes that are
1035 * programmed in the firmware.
1037 static enum ice_status
1038 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
1039 bool *refresh_required)
1041 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
1042 struct ice_aqc_recipe_data_elem *tmp;
1043 u16 num_recps = ICE_MAX_NUM_RECIPES;
1044 struct ice_prot_lkup_ext *lkup_exts;
1045 enum ice_status status;
1049 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
1051 /* we need a buffer big enough to accommodate all the recipes */
1052 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
1053 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
1055 return ICE_ERR_NO_MEMORY;
1057 tmp[0].recipe_indx = rid;
1058 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
1059 /* non-zero status meaning recipe doesn't exist */
1063 /* Get recipe to profile map so that we can get the fv from lkups that
1064 * we read for a recipe from FW. Since we want to minimize the number of
1065 * times we make this FW call, just make one call and cache the copy
1066 * until a new recipe is added. This operation is only required the
1067 * first time to get the changes from FW. Then to search existing
1068 * entries we don't need to update the cache again until another recipe
1071 if (*refresh_required) {
1072 ice_get_recp_to_prof_map(hw);
1073 *refresh_required = false;
1076 /* Start populating all the entries for recps[rid] based on lkups from
1077 * firmware. Note that we are only creating the root recipe in our
1080 lkup_exts = &recps[rid].lkup_exts;
1082 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
1083 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
1084 struct ice_recp_grp_entry *rg_entry;
1085 u8 i, prof, idx, prot = 0;
1089 rg_entry = (struct ice_recp_grp_entry *)
1090 ice_malloc(hw, sizeof(*rg_entry));
1092 status = ICE_ERR_NO_MEMORY;
1096 idx = root_bufs.recipe_indx;
1097 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
1099 /* Mark all result indices in this chain */
1100 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1101 ice_set_bit(root_bufs.content.result_indx &
1102 ~ICE_AQ_RECIPE_RESULT_EN, result_bm)
1104 /* get the first profile that is associated with rid */
1105 prof = ice_find_first_bit(recipe_to_profile[idx],
1106 ICE_MAX_NUM_PROFILES);
1107 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
/* lkup_indx[0] is skipped: index i + 1 below */
1108 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
1110 rg_entry->fv_idx[i] = lkup_indx;
1111 rg_entry->fv_mask[i] =
1112 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
1114 /* If the recipe is a chained recipe then all its
1115 * child recipe's result will have a result index.
1116 * To fill fv_words we should not use those result
1117 * index, we only need the protocol ids and offsets.
1118 * We will skip all the fv_idx which stores result
1119 * index in them. We also need to skip any fv_idx which
1120 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
1121 * valid offset value.
1123 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
1124 rg_entry->fv_idx[i]) ||
1125 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
1126 rg_entry->fv_idx[i] == 0)
1129 ice_find_prot_off(hw, ICE_BLK_SW, prof,
1130 rg_entry->fv_idx[i], &prot, &off);
1131 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
1132 lkup_exts->fv_words[fv_word_idx].off = off;
1133 lkup_exts->field_mask[fv_word_idx] =
1134 rg_entry->fv_mask[i];
1137 /* populate rg_list with the data from the child entry of this
1140 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
1142 /* Propagate some data to the recipe database */
1143 recps[idx].is_root = !!is_root;
1144 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1145 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
1146 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
1147 recps[idx].chain_idx = root_bufs.content.result_indx &
1148 ~ICE_AQ_RECIPE_RESULT_EN;
1149 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
1151 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
1157 /* Only do the following for root recipes entries */
1158 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
1159 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
1160 recps[idx].root_rid = root_bufs.content.rid &
1161 ~ICE_AQ_RECIPE_ID_IS_ROOT;
1162 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1165 /* Complete initialization of the root recipe entry */
1166 lkup_exts->n_val_words = fv_word_idx;
1167 recps[rid].big_recp = (num_recps > 1);
1168 recps[rid].n_grp_count = (u8)num_recps;
/* root_buf keeps a private copy of the raw FW recipe elements */
1169 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
1170 ice_memdup(hw, tmp, recps[rid].n_grp_count *
1171 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
1172 if (!recps[rid].root_buf)
1175 /* Copy result indexes */
1176 ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
1177 recps[rid].recp_created = true;
1185 * ice_get_recp_to_prof_map - updates recipe to profile mapping
1186 * @hw: pointer to hardware structure
1188 * This function is used to populate recipe_to_profile matrix where index to
1189 * this array is the recipe ID and the element is the mapping of which profiles
1190 * is this recipe mapped to.
1192 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
1194 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Query FW once per profile; on failure the profile's entries stay zero */
1197 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
1200 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
1201 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
1202 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
1204 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
1205 ICE_MAX_NUM_RECIPES);
/* Mirror the association into the inverse (recipe -> profile) map */
1206 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
1207 if (ice_is_bit_set(r_bitmap, j))
1208 ice_set_bit(i, recipe_to_profile[j]);
1213 * ice_init_def_sw_recp - initialize the recipe bookkeeping tables
1214 * @hw: pointer to the HW struct
1215 * @recp_list: pointer to sw recipe list
1217 * Allocate memory for the entire recipe table and initialize the structures/
1218 * entries corresponding to basic recipes.
1221 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
1223 struct ice_sw_recipe *recps;
/* One zeroed entry per possible recipe */
1226 recps = (struct ice_sw_recipe *)
1227 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
1229 return ICE_ERR_NO_MEMORY;
1231 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
1232 recps[i].root_rid = i;
1233 INIT_LIST_HEAD(&recps[i].filt_rules);
1234 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
1235 INIT_LIST_HEAD(&recps[i].rg_list);
1236 ice_init_lock(&recps[i].filt_rule_lock);
1245 * ice_aq_get_sw_cfg - get switch configuration
1246 * @hw: pointer to the hardware structure
1247 * @buf: pointer to the result buffer
1248 * @buf_size: length of the buffer available for response
1249 * @req_desc: pointer to requested descriptor
1250 * @num_elems: pointer to number of elements
1251 * @cd: pointer to command details structure or NULL
1253 * Get switch configuration (0x0200) to be placed in 'buff'.
1254 * This admin command returns information such as initial VSI/port number
1255 * and switch ID it belongs to.
1257 * NOTE: *req_desc is both an input/output parameter.
1258 * The caller of this function first calls this function with *request_desc set
1259 * to 0. If the response from f/w has *req_desc set to 0, all the switch
1260 * configuration information has been returned; if non-zero (meaning not all
1261 * the information was returned), the caller should call this function again
1262 * with *req_desc set to the previous value returned by f/w to get the
1263 * next block of switch configuration information.
1265 * *num_elems is output only parameter. This reflects the number of elements
1266 * in response buffer. The caller of this function to use *num_elems while
1267 * parsing the response buffer.
1269 static enum ice_status
1270 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
1271 u16 buf_size, u16 *req_desc, u16 *num_elems,
1272 struct ice_sq_cd *cd)
1274 struct ice_aqc_get_sw_cfg *cmd;
1275 enum ice_status status;
1276 struct ice_aq_desc desc;
/* Build the Get Switch Configuration (0x0200) descriptor. */
1278 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
1279 cmd = &desc.params.get_sw_conf;
/* Resume token from the previous call; 0 on the first call. */
1280 cmd->element = CPU_TO_LE16(*req_desc);
1282 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* FW echoes the next resume token and the element count back in the
 * descriptor. NOTE(review): upstream gates these writebacks on
 * !status; that guard looks elided here — verify in the full file.
 */
1284 *req_desc = LE16_TO_CPU(cmd->element);
1285 *num_elems = LE16_TO_CPU(cmd->num_elems);
1292 * ice_alloc_sw - allocate resources specific to switch
1293 * @hw: pointer to the HW struct
1294 * @ena_stats: true to turn on VEB stats
1295 * @shared_res: true for shared resource, false for dedicated resource
1296 * @sw_id: switch ID returned
1297 * @counter_id: VEB counter ID returned
1299 * allocates switch resources (SWID and VEB counter) (0x0208)
1302 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
1305 struct ice_aqc_alloc_free_res_elem *sw_buf;
1306 struct ice_aqc_res_elem *sw_ele;
1307 enum ice_status status;
/* Buffer holds a single resource element (num_elems == 1 below). */
1310 buf_len = sizeof(*sw_buf);
1311 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1312 ice_malloc(hw, buf_len);
/* NOTE(review): the `if (!sw_buf)` guard before this return appears
 * elided by extraction — confirm against the full file.
 */
1314 return ICE_ERR_NO_MEMORY;
1316 /* Prepare buffer for switch ID.
1317 * The number of resource entries in buffer is passed as 1 since only a
1318 * single switch/VEB instance is allocated, and hence a single sw_id
1321 sw_buf->num_elems = CPU_TO_LE16(1);
/* SWID resource, shared or dedicated per the caller's request. */
1323 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
1324 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
1325 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
1327 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1328 ice_aqc_opc_alloc_res, NULL);
/* On AQ failure, free sw_buf via the common exit label. */
1331 goto ice_alloc_sw_exit;
/* Return the SWID handed back by firmware. */
1333 sw_ele = &sw_buf->elem[0];
1334 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
/* The VEB-counter block below only runs when ena_stats is set
 * (the `if (ena_stats)` line looks elided by extraction — verify).
 */
1337 /* Prepare buffer for VEB Counter */
1338 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
1339 struct ice_aqc_alloc_free_res_elem *counter_buf;
1340 struct ice_aqc_res_elem *counter_ele;
1342 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1343 ice_malloc(hw, buf_len);
/* Allocation-failure path: report OOM but still free sw_buf. */
1345 status = ICE_ERR_NO_MEMORY;
1346 goto ice_alloc_sw_exit;
1349 /* The number of resource entries in buffer is passed as 1 since
1350 * only a single switch/VEB instance is allocated, and hence a
1351 * single VEB counter is requested.
1353 counter_buf->num_elems = CPU_TO_LE16(1);
1354 counter_buf->res_type =
1355 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
1356 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
1357 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
/* Failure path: release the counter buffer before bailing out. */
1361 ice_free(hw, counter_buf);
1362 goto ice_alloc_sw_exit;
/* Return the VEB counter ID handed back by firmware. */
1364 counter_ele = &counter_buf->elem[0];
1365 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
1366 ice_free(hw, counter_buf);
/* Common exit: sw_buf is always freed, status is returned. */
1370 ice_free(hw, sw_buf);
1375 * ice_free_sw - free resources specific to switch
1376 * @hw: pointer to the HW struct
1377 * @sw_id: switch ID returned
1378 * @counter_id: VEB counter ID returned
1380 * free switch resources (SWID and VEB counter) (0x0209)
1382 * NOTE: This function frees multiple resources. It continues
1383 * releasing other resources even after it encounters error.
1384 * The error code returned is the last error it encountered.
1386 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
1388 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
/* ret_status accumulates the LAST error seen; freeing continues even
 * after a failure (see the function's kernel-doc above).
 */
1389 enum ice_status status, ret_status;
1392 buf_len = sizeof(*sw_buf);
1393 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1394 ice_malloc(hw, buf_len);
/* NOTE(review): `if (!sw_buf)` guard before this return looks elided
 * by extraction — confirm against the full file.
 */
1396 return ICE_ERR_NO_MEMORY;
1398 /* Prepare buffer to free for switch ID res.
1399 * The number of resource entries in buffer is passed as 1 since only a
1400 * single switch/VEB instance is freed, and hence a single sw_id
1403 sw_buf->num_elems = CPU_TO_LE16(1);
1404 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
1405 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
/* Free the SWID resource (0x0209 path via alloc/free res command). */
1407 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1408 ice_aqc_opc_free_res, NULL);
/* On error, only log; we still attempt to free the VEB counter. */
1411 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1413 /* Prepare buffer to free for VEB Counter resource */
1414 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1415 ice_malloc(hw, buf_len);
/* OOM here: release sw_buf and bail (counter not freed). */
1417 ice_free(hw, sw_buf);
1418 return ICE_ERR_NO_MEMORY;
1421 /* The number of resource entries in buffer is passed as 1 since only a
1422 * single switch/VEB instance is freed, and hence a single VEB counter
1425 counter_buf->num_elems = CPU_TO_LE16(1);
1426 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
1427 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
1429 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1430 ice_aqc_opc_free_res, NULL);
/* Counter-free failure overrides any earlier SWID-free error. */
1432 ice_debug(hw, ICE_DBG_SW,
1433 "VEB counter resource could not be freed\n");
1434 ret_status = status;
1437 ice_free(hw, counter_buf);
1438 ice_free(hw, sw_buf);
1444 * @hw: pointer to the HW struct
1445 * @vsi_ctx: pointer to a VSI context struct
1446 * @cd: pointer to command details structure or NULL
1448 * Add a VSI context to the hardware (0x0210)
1451 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1452 struct ice_sq_cd *cd)
1454 struct ice_aqc_add_update_free_vsi_resp *res;
1455 struct ice_aqc_add_get_update_free_vsi *cmd;
1456 struct ice_aq_desc desc;
1457 enum ice_status status;
/* cmd and res alias the same descriptor: cmd is written before the
 * AQ call, res is read from the completed descriptor afterwards.
 */
1459 cmd = &desc.params.vsi_cmd;
1460 res = &desc.params.add_update_free_vsi_res;
/* Add VSI (0x0210). */
1462 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
/* Caller supplies an explicit VSI number unless allocating from pool */
1464 if (!vsi_ctx->alloc_from_pool)
1465 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
1466 ICE_AQ_VSI_IS_VALID);
1468 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
/* Buffer is read by FW (contains the VSI context info). */
1470 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1472 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1473 sizeof(vsi_ctx->info), cd);
/* On success FW reports the assigned VSI number and pool usage.
 * NOTE(review): the `if (!status)` guard looks elided by extraction.
 */
1476 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1477 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
1478 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
1486 * @hw: pointer to the HW struct
1487 * @vsi_ctx: pointer to a VSI context struct
1488 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1489 * @cd: pointer to command details structure or NULL
1491 * Free VSI context info from hardware (0x0213)
1494 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1495 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1497 struct ice_aqc_add_update_free_vsi_resp *resp;
1498 struct ice_aqc_add_get_update_free_vsi *cmd;
1499 struct ice_aq_desc desc;
1500 enum ice_status status;
/* cmd/resp overlay the same descriptor union. */
1502 cmd = &desc.params.vsi_cmd;
1503 resp = &desc.params.add_update_free_vsi_res;
/* Free VSI (0x0213). */
1505 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1507 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* NOTE(review): upstream sets KEEP_ALLOC only `if (keep_vsi_alloc)`;
 * that guard looks elided by extraction — verify in the full file.
 */
1509 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
1511 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/* Pool counters reported by FW (writeback gated on !status upstream) */
1513 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1514 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1522 * @hw: pointer to the HW struct
1523 * @vsi_ctx: pointer to a VSI context struct
1524 * @cd: pointer to command details structure or NULL
1526 * Update VSI context in the hardware (0x0211)
1529 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1530 struct ice_sq_cd *cd)
1532 struct ice_aqc_add_update_free_vsi_resp *resp;
1533 struct ice_aqc_add_get_update_free_vsi *cmd;
1534 struct ice_aq_desc desc;
1535 enum ice_status status;
/* cmd/resp overlay the same descriptor union. */
1537 cmd = &desc.params.vsi_cmd;
1538 resp = &desc.params.add_update_free_vsi_res;
/* Update VSI (0x0211). */
1540 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1542 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* FW reads the updated context info from the buffer. */
1544 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1546 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1547 sizeof(vsi_ctx->info), cd);
/* Pool counters reported by FW (writeback gated on !status upstream) */
1550 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1551 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1558 * ice_is_vsi_valid - check whether the VSI is valid or not
1559 * @hw: pointer to the HW struct
1560 * @vsi_handle: VSI handle
1562 * check whether the VSI is valid or not
1564 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1566 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1570 * ice_get_hw_vsi_num - return the HW VSI number
1571 * @hw: pointer to the HW struct
1572 * @vsi_handle: VSI handle
1574 * return the HW VSI number
1575 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1577 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1579 return hw->vsi_ctx[vsi_handle]->vsi_num;
1583 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1584 * @hw: pointer to the HW struct
1585 * @vsi_handle: VSI handle
1587 * return the VSI context entry for a given VSI handle
1589 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1591 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1595 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1596 * @hw: pointer to the HW struct
1597 * @vsi_handle: VSI handle
1598 * @vsi: VSI context pointer
1600 * save the VSI context entry for a given VSI handle
1603 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1605 hw->vsi_ctx[vsi_handle] = vsi;
1609 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1610 * @hw: pointer to the HW struct
1611 * @vsi_handle: VSI handle
1613 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1615 struct ice_vsi_ctx *vsi;
1618 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* NOTE(review): upstream bails out here when vsi is NULL; that guard
 * looks elided by extraction — confirm against the full file.
 */
/* Free the per-TC LAN queue context arrays and clear the pointers. */
1621 ice_for_each_traffic_class(i) {
1622 if (vsi->lan_q_ctx[i]) {
1623 ice_free(hw, vsi->lan_q_ctx[i]);
1624 vsi->lan_q_ctx[i] = NULL;
1630 * ice_clear_vsi_ctx - clear the VSI context entry
1631 * @hw: pointer to the HW struct
1632 * @vsi_handle: VSI handle
1634 * clear the VSI context entry
1636 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1638 struct ice_vsi_ctx *vsi;
1640 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* Queue contexts are released first, then the table slot is emptied.
 * NOTE(review): upstream wraps this in `if (vsi)` and frees the vsi
 * struct itself — those lines look elided by extraction; verify.
 */
1642 ice_clear_vsi_q_ctx(hw, vsi_handle);
1644 hw->vsi_ctx[vsi_handle] = NULL;
1649 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1650 * @hw: pointer to the HW struct
1652 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1656 for (i = 0; i < ICE_MAX_VSI; i++)
1657 ice_clear_vsi_ctx(hw, i);
1661 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1662 * @hw: pointer to the HW struct
1663 * @vsi_handle: unique VSI handle provided by drivers
1664 * @vsi_ctx: pointer to a VSI context struct
1665 * @cd: pointer to command details structure or NULL
1667 * Add a VSI context to the hardware also add it into the VSI handle list.
1668 * If this function gets called after reset for existing VSIs then update
1669 * with the new HW VSI number in the corresponding VSI handle list entry.
1672 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1673 struct ice_sq_cd *cd)
1675 struct ice_vsi_ctx *tmp_vsi_ctx;
1676 enum ice_status status;
/* Reject handles outside the context table. */
1678 if (vsi_handle >= ICE_MAX_VSI)
1679 return ICE_ERR_PARAM;
/* Create the VSI in HW first; only on success touch the handle list
 * (the `if (status) return status;` lines look elided by extraction).
 */
1680 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1683 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
/* First add for this handle: take a private copy of the context. */
1685 /* Create a new VSI context */
1686 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1687 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
/* OOM: roll back the HW-side add so HW and SW stay in sync. */
1689 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1690 return ICE_ERR_NO_MEMORY;
1692 *tmp_vsi_ctx = *vsi_ctx;
1694 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
/* Handle already known (e.g. after reset): refresh the HW VSI num. */
1696 /* update with new HW VSI num */
1697 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1704 * ice_free_vsi- free VSI context from hardware and VSI handle list
1705 * @hw: pointer to the HW struct
1706 * @vsi_handle: unique VSI handle
1707 * @vsi_ctx: pointer to a VSI context struct
1708 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1709 * @cd: pointer to command details structure or NULL
1711 * Free VSI context info from hardware as well as from VSI handle list
1714 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1715 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1717 enum ice_status status;
/* Only handles with a saved context may be freed. */
1719 if (!ice_is_vsi_valid(hw, vsi_handle))
1720 return ICE_ERR_PARAM;
/* Resolve the driver handle to the HW VSI number before the AQ call */
1721 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1722 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
/* Drop the SW context only after HW accepted the free.
 * NOTE(review): the `if (!status)` guard looks elided by extraction.
 */
1724 ice_clear_vsi_ctx(hw, vsi_handle);
1730 * @hw: pointer to the HW struct
1731 * @vsi_handle: unique VSI handle
1732 * @vsi_ctx: pointer to a VSI context struct
1733 * @cd: pointer to command details structure or NULL
1735 * Update VSI context in the hardware
1738 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1739 struct ice_sq_cd *cd)
1741 if (!ice_is_vsi_valid(hw, vsi_handle))
1742 return ICE_ERR_PARAM;
1743 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1744 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1748 * ice_aq_get_vsi_params
1749 * @hw: pointer to the HW struct
1750 * @vsi_ctx: pointer to a VSI context struct
1751 * @cd: pointer to command details structure or NULL
1753 * Get VSI context info from hardware (0x0212)
1756 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1757 struct ice_sq_cd *cd)
1759 struct ice_aqc_add_get_update_free_vsi *cmd;
1760 struct ice_aqc_get_vsi_resp *resp;
1761 struct ice_aq_desc desc;
1762 enum ice_status status;
/* cmd/resp overlay the same descriptor union. */
1764 cmd = &desc.params.vsi_cmd;
1765 resp = &desc.params.get_vsi_resp;
/* Get VSI parameters (0x0212); FW fills vsi_ctx->info. */
1767 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1769 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1771 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1772 sizeof(vsi_ctx->info), cd);
/* Copy back VSI number and pool counters from the response.
 * NOTE(review): `if (!status)` guard looks elided by extraction.
 */
1774 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1776 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1777 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1784 * ice_aq_add_update_mir_rule - add/update a mirror rule
1785 * @hw: pointer to the HW struct
1786 * @rule_type: Rule Type
1787 * @dest_vsi: VSI number to which packets will be mirrored
1788 * @count: length of the list
1789 * @mr_buf: buffer for list of mirrored VSI numbers
1790 * @cd: pointer to command details structure or NULL
1793 * Add/Update Mirror Rule (0x260).
1796 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1797 u16 count, struct ice_mir_rule_buf *mr_buf,
1798 struct ice_sq_cd *cd, u16 *rule_id)
1800 struct ice_aqc_add_update_mir_rule *cmd;
1801 struct ice_aq_desc desc;
1802 enum ice_status status;
/* mr_list stays NULL for physical-port rules (no VSI list needed). */
1803 __le16 *mr_list = NULL;
/* Validate the rule-type / argument combination up front. */
1806 switch (rule_type) {
1807 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1808 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1809 /* Make sure count and mr_buf are set for these rule_types */
1810 if (!(count && mr_buf))
1811 return ICE_ERR_PARAM;
/* One little-endian VSI entry per mirrored VSI. */
1813 buf_size = count * sizeof(__le16);
1814 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
/* NOTE(review): `if (!mr_list)` guard looks elided by extraction. */
1816 return ICE_ERR_NO_MEMORY;
1818 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1819 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1820 /* Make sure count and mr_buf are not set for these
1823 if (count || mr_buf)
1824 return ICE_ERR_PARAM;
/* default: any other rule_type is rejected. */
1827 ice_debug(hw, ICE_DBG_SW,
1828 "Error due to unsupported rule_type %u\n", rule_type);
1829 return ICE_ERR_OUT_OF_RANGE;
/* Add/Update Mirror Rule (0x260). */
1832 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1834 /* Pre-process 'mr_buf' items for add/update of virtual port
1835 * ingress/egress mirroring (but not physical port ingress/egress
1841 for (i = 0; i < count; i++) {
1844 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1846 /* Validate specified VSI number, make sure it is less
1847 * than ICE_MAX_VSI, if not return with error.
1849 if (id >= ICE_MAX_VSI) {
1850 ice_debug(hw, ICE_DBG_SW,
1851 "Error VSI index (%u) out-of-range\n",
/* Error path frees the partially-built list before returning. */
1853 ice_free(hw, mr_list);
1854 return ICE_ERR_OUT_OF_RANGE;
/* The add-flag in each entry decides add vs. remove of that VSI
 * (the `if (mr_buf[i].add)` line looks elided by extraction).
 */
1857 /* add VSI to mirror rule */
1860 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1861 else /* remove VSI from mirror rule */
1862 mr_list[i] = CPU_TO_LE16(id);
/* Fill the descriptor; a valid *rule_id means "update in place". */
1866 cmd = &desc.params.add_update_rule;
1867 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1868 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1869 ICE_AQC_RULE_ID_VALID_M);
1870 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1871 cmd->num_entries = CPU_TO_LE16(count);
1872 cmd->dest = CPU_TO_LE16(dest_vsi);
1874 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
/* On success FW returns the (possibly newly assigned) rule ID. */
1876 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
/* ice_free(NULL-safe) covers the physical-port (no-list) cases. */
1878 ice_free(hw, mr_list);
1884 * ice_aq_delete_mir_rule - delete a mirror rule
1885 * @hw: pointer to the HW struct
1886 * @rule_id: Mirror rule ID (to be deleted)
1887 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1888 * otherwise it is returned to the shared pool
1889 * @cd: pointer to command details structure or NULL
1891 * Delete Mirror Rule (0x261).
1894 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1895 struct ice_sq_cd *cd)
1897 struct ice_aqc_delete_mir_rule *cmd;
1898 struct ice_aq_desc desc;
1900 /* rule_id should be in the range 0...63 */
1901 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1902 return ICE_ERR_OUT_OF_RANGE;
/* Delete Mirror Rule (0x261). */
1904 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1906 cmd = &desc.params.del_rule;
1907 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1908 cmd->rule_id = CPU_TO_LE16(rule_id);
/* NOTE(review): upstream sets this flag only `if (keep_allocd)`; the
 * guard looks elided by extraction — verify in the full file.
 */
1911 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1913 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1917 * ice_aq_alloc_free_vsi_list
1918 * @hw: pointer to the HW struct
1919 * @vsi_list_id: VSI list ID returned or used for lookup
1920 * @lkup_type: switch rule filter lookup type
1921 * @opc: switch rules population command type - pass in the command opcode
1923 * allocates or free a VSI list resource
1925 static enum ice_status
1926 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1927 enum ice_sw_lkup_type lkup_type,
1928 enum ice_adminq_opc opc)
1930 struct ice_aqc_alloc_free_res_elem *sw_buf;
1931 struct ice_aqc_res_elem *vsi_ele;
1932 enum ice_status status;
/* Single-element request buffer. */
1935 buf_len = sizeof(*sw_buf);
1936 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1937 ice_malloc(hw, buf_len);
/* NOTE(review): `if (!sw_buf)` guard looks elided by extraction. */
1939 return ICE_ERR_NO_MEMORY;
1940 sw_buf->num_elems = CPU_TO_LE16(1);
/* Pick the resource type: replication list for MAC/ethertype/promisc
 * style lookups, prune list for VLAN, otherwise invalid.
 */
1942 if (lkup_type == ICE_SW_LKUP_MAC ||
1943 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1944 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1945 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1946 lkup_type == ICE_SW_LKUP_PROMISC ||
1947 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1948 lkup_type == ICE_SW_LKUP_LAST) {
1949 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1950 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1952 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1954 status = ICE_ERR_PARAM;
1955 goto ice_aq_alloc_free_vsi_list_exit;
/* For a free, the caller names the list to release. */
1958 if (opc == ice_aqc_opc_free_res)
1959 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1961 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
/* NOTE(review): `if (status)` guard looks elided by extraction. */
1963 goto ice_aq_alloc_free_vsi_list_exit;
/* For an alloc, FW hands back the new VSI list ID. */
1965 if (opc == ice_aqc_opc_alloc_res) {
1966 vsi_ele = &sw_buf->elem[0];
1967 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
/* Common exit: request buffer is always freed. */
1970 ice_aq_alloc_free_vsi_list_exit:
1971 ice_free(hw, sw_buf);
1976 * ice_aq_set_storm_ctrl - Sets storm control configuration
1977 * @hw: pointer to the HW struct
1978 * @bcast_thresh: represents the upper threshold for broadcast storm control
1979 * @mcast_thresh: represents the upper threshold for multicast storm control
1980 * @ctl_bitmask: storm control control knobs
1982 * Sets the storm control configuration (0x0280)
1985 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1988 struct ice_aqc_storm_cfg *cmd;
1989 struct ice_aq_desc desc;
1991 cmd = &desc.params.storm_conf;
/* Set Storm Control Configuration (0x0280); direct command, no buffer */
1993 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
/* Thresholds are masked to the valid field width before programming. */
1995 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1996 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1997 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1999 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2003 * ice_aq_get_storm_ctrl - gets storm control configuration
2004 * @hw: pointer to the HW struct
2005 * @bcast_thresh: represents the upper threshold for broadcast storm control
2006 * @mcast_thresh: represents the upper threshold for multicast storm control
2007 * @ctl_bitmask: storm control control knobs
2009 * Gets the storm control configuration (0x0281)
2012 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
2015 enum ice_status status;
2016 struct ice_aq_desc desc;
/* Get Storm Control Configuration (0x0281); direct command. */
2018 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
2020 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
/* On success, unpack the response from the completed descriptor.
 * Each output pointer is written only when non-NULL upstream; those
 * `if (...)` guards look elided by extraction — verify.
 */
2022 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
2025 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
2028 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
2031 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
2038 * ice_aq_sw_rules - add/update/remove switch rules
2039 * @hw: pointer to the HW struct
2040 * @rule_list: pointer to switch rule population list
2041 * @rule_list_sz: total size of the rule list in bytes
2042 * @num_rules: number of switch rules in the rule_list
2043 * @opc: switch rules population command type - pass in the command opcode
2044 * @cd: pointer to command details structure or NULL
2046 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
2048 static enum ice_status
2049 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
2050 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2052 struct ice_aq_desc desc;
2053 enum ice_status status;
2055 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
/* Only the three sw-rule opcodes (0x02a0-0x02a2) are accepted. */
2057 if (opc != ice_aqc_opc_add_sw_rules &&
2058 opc != ice_aqc_opc_update_sw_rules &&
2059 opc != ice_aqc_opc_remove_sw_rules)
2060 return ICE_ERR_PARAM;
2062 ice_fill_dflt_direct_cmd_desc(&desc, opc);
/* FW reads the rule list from the buffer. */
2064 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2065 desc.params.sw_rules.num_rules_fltr_entry_index =
2066 CPU_TO_LE16(num_rules);
2067 status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
/* For update/remove, map FW's ENOENT onto a distinct driver error so
 * callers can tell "rule does not exist" from other failures.
 */
2068 if (opc != ice_aqc_opc_add_sw_rules &&
2069 hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
2070 status = ICE_ERR_DOES_NOT_EXIST;
2076 * ice_aq_add_recipe - add switch recipe
2077 * @hw: pointer to the HW struct
2078 * @s_recipe_list: pointer to switch rule population list
2079 * @num_recipes: number of switch recipes in the list
2080 * @cd: pointer to command details structure or NULL
2085 ice_aq_add_recipe(struct ice_hw *hw,
2086 struct ice_aqc_recipe_data_elem *s_recipe_list,
2087 u16 num_recipes, struct ice_sq_cd *cd)
2089 struct ice_aqc_add_get_recipe *cmd;
2090 struct ice_aq_desc desc;
2093 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2094 cmd = &desc.params.add_get_recipe;
2095 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
2097 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
2098 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2100 buf_size = num_recipes * sizeof(*s_recipe_list);
2102 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
2106 * ice_aq_get_recipe - get switch recipe
2107 * @hw: pointer to the HW struct
2108 * @s_recipe_list: pointer to switch rule population list
2109 * @num_recipes: pointer to the number of recipes (input and output)
2110 * @recipe_root: root recipe number of recipe(s) to retrieve
2111 * @cd: pointer to command details structure or NULL
2115 * On input, *num_recipes should equal the number of entries in s_recipe_list.
2116 * On output, *num_recipes will equal the number of entries returned in
2119 * The caller must supply enough space in s_recipe_list to hold all possible
2120 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
2123 ice_aq_get_recipe(struct ice_hw *hw,
2124 struct ice_aqc_recipe_data_elem *s_recipe_list,
2125 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
2127 struct ice_aqc_add_get_recipe *cmd;
2128 struct ice_aq_desc desc;
2129 enum ice_status status;
/* Caller must size s_recipe_list for the worst case (see kernel-doc) */
2132 if (*num_recipes != ICE_MAX_NUM_RECIPES)
2133 return ICE_ERR_PARAM;
2135 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2136 cmd = &desc.params.add_get_recipe;
/* Get Recipe (0x0292), starting from the given root recipe. */
2137 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
2139 cmd->return_index = CPU_TO_LE16(recipe_root);
2140 cmd->num_sub_recipes = 0;
2142 buf_size = *num_recipes * sizeof(*s_recipe_list);
2144 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
/* *num_recipes becomes the count FW actually returned. */
2145 /* cppcheck-suppress constArgument */
2146 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
2152 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
2153 * @hw: pointer to the HW struct
2154 * @profile_id: package profile ID to associate the recipe with
2155 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2156 * @cd: pointer to command details structure or NULL
2157 * Recipe to profile association (0x0291)
2160 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2161 struct ice_sq_cd *cd)
2163 struct ice_aqc_recipe_to_profile *cmd;
2164 struct ice_aq_desc desc;
2166 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2167 cmd = &desc.params.recipe_to_profile;
2168 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
2169 cmd->profile_id = CPU_TO_LE16(profile_id);
2170 /* Set the recipe ID bit in the bitmask to let the device know which
2171 * profile we are associating the recipe to
2173 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
2174 ICE_NONDMA_TO_NONDMA);
2176 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2180 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
2181 * @hw: pointer to the HW struct
2182 * @profile_id: package profile ID to associate the recipe with
2183 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2184 * @cd: pointer to command details structure or NULL
2185 * Associate profile ID with given recipe (0x0293)
2188 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2189 struct ice_sq_cd *cd)
2191 struct ice_aqc_recipe_to_profile *cmd;
2192 struct ice_aq_desc desc;
2193 enum ice_status status;
2195 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2196 cmd = &desc.params.recipe_to_profile;
/* Query which recipes are associated with profile_id (0x0293). */
2197 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
2198 cmd->profile_id = CPU_TO_LE16(profile_id);
2200 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/* Copy the association bitmap out of the completed descriptor.
 * NOTE(review): upstream gates this on `if (!status)`; that guard
 * looks elided by extraction — verify in the full file.
 */
2202 ice_memcpy(r_bitmap, cmd->recipe_assoc,
2203 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
2209 * ice_alloc_recipe - add recipe resource
2210 * @hw: pointer to the hardware structure
2211 * @rid: recipe ID returned as response to AQ call
2213 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
2215 struct ice_aqc_alloc_free_res_elem *sw_buf;
2216 enum ice_status status;
/* Single-element request buffer. */
2219 buf_len = sizeof(*sw_buf);
2220 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
/* NOTE(review): `if (!sw_buf)` guard looks elided by extraction. */
2222 return ICE_ERR_NO_MEMORY;
/* Ask for one shared recipe resource. */
2224 sw_buf->num_elems = CPU_TO_LE16(1);
2225 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
2226 ICE_AQC_RES_TYPE_S) |
2227 ICE_AQC_RES_TYPE_FLAG_SHARED);
2228 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2229 ice_aqc_opc_alloc_res, NULL);
/* On success FW returns the newly allocated recipe ID.
 * NOTE(review): the `if (!status)` guard looks elided by extraction.
 */
2231 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2232 ice_free(hw, sw_buf);
2237 /* ice_init_port_info - Initialize port_info with switch configuration data
2238 * @pi: pointer to port_info
2239 * @vsi_port_num: VSI number or port number
2240 * @type: Type of switch element (port or VSI)
2241 * @swid: switch ID of the switch the element is attached to
2242 * @pf_vf_num: PF or VF number
2243 * @is_vf: true if the element is a VF, false otherwise
2246 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2247 u16 swid, u16 pf_vf_num, bool is_vf)
/* Dispatch on the switch-element type (switch on `type` — the switch
 * header line looks elided by extraction).
 */
2250 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2251 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2253 pi->pf_vf_num = pf_vf_num;
/* No default TX/RX VSI until one is explicitly configured. */
2255 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
2256 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
/* default: unknown element type — log and ignore. */
2259 ice_debug(pi->hw, ICE_DBG_SW,
2260 "incorrect VSI/port type received\n");
2265 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
2266 * @hw: pointer to the hardware structure
2268 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
2270 struct ice_aqc_get_sw_cfg_resp *rbuf;
2271 enum ice_status status;
/* Today only one physical port per PF is expected. */
2278 num_total_ports = 1;
2280 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
2281 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
/* NOTE(review): `if (!rbuf)` guard looks elided by extraction. */
2284 return ICE_ERR_NO_MEMORY;
2286 /* Multiple calls to ice_aq_get_sw_cfg may be required
2287 * to get all the switch configuration information. The need
2288 * for additional calls is indicated by ice_aq_get_sw_cfg
2289 * writing a non-zero value in req_desc
2292 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
2293 &req_desc, &num_elems, NULL);
/* Walk each element returned in this response chunk. */
2298 for (i = 0; i < num_elems; i++) {
2299 struct ice_aqc_get_sw_cfg_resp_elem *ele;
2300 u16 pf_vf_num, swid, vsi_port_num;
2304 ele = rbuf[i].elements;
/* Unpack the packed fields: VSI/port number, owner func, SWID. */
2305 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
2306 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
2308 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
2309 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
2311 swid = LE16_TO_CPU(ele->swid);
/* is_vf flag is derived from the IS_VF bit (assignment elided). */
2313 if (LE16_TO_CPU(ele->pf_vf_num) &
2314 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
2317 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
2318 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
/* Only port-type elements initialize port_info. */
2321 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2322 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
2323 if (j == num_total_ports) {
2324 ice_debug(hw, ICE_DBG_SW,
2325 "more ports than expected\n");
2326 status = ICE_ERR_CFG;
2329 ice_init_port_info(hw->port_info,
2330 vsi_port_num, res_type, swid,
/* Loop until FW reports no more data (req_desc == 0) or an error. */
2338 } while (req_desc && !status);
2341 ice_free(hw, (void *)rbuf);
2346 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2347 * @hw: pointer to the hardware structure
2348 * @fi: filter info structure to fill/update
2350 * This helper function populates the lb_en and lan_en elements of the provided
2351 * ice_fltr_info struct using the switch's type and characteristics of the
2352 * switch rule being configured.
2354 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
/* RX forwarding rules of the "last" lookup type get special handling
 * (the body of this branch looks elided by extraction).
 */
2356 if ((fi->flag & ICE_FLTR_RX) &&
2357 (fi->fltr_act == ICE_FWD_TO_VSI ||
2358 fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
2359 fi->lkup_type == ICE_SW_LKUP_LAST)
/* TX-direction forwarding rules: decide loopback and LAN enables. */
2363 if ((fi->flag & ICE_FLTR_TX) &&
2364 (fi->fltr_act == ICE_FWD_TO_VSI ||
2365 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2366 fi->fltr_act == ICE_FWD_TO_Q ||
2367 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2368 /* Setting LB for prune actions will result in replicated
2369 * packets to the internal switch that will be dropped.
2371 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2374 /* Set lan_en to TRUE if
2375 * 1. The switch is a VEB AND
2377 * 2.1 The lookup is a directional lookup like ethertype,
2378 * promiscuous, ethertype-MAC, promiscuous-VLAN
2379 * and default-port OR
2380 * 2.2 The lookup is VLAN, OR
2381 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2382 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
2386 * The switch is a VEPA.
2388 * In all other cases, the LAN enable has to be set to false.
/* NOTE(review): the `if (hw->evb_veb)` test that selects between the
 * VEB and VEPA cases looks elided by extraction — verify.
 */
2391 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2392 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
2393 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2394 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2395 fi->lkup_type == ICE_SW_LKUP_DFLT ||
2396 fi->lkup_type == ICE_SW_LKUP_VLAN ||
2397 (fi->lkup_type == ICE_SW_LKUP_MAC &&
2398 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
2399 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
2400 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
2409 * ice_fill_sw_rule - Helper function to fill switch rule structure
2410 * @hw: pointer to the hardware structure
2411 * @f_info: entry containing packet forwarding information
2412 * @s_rule: switch rule structure to be filled in based on mac_entry
2413 * @opc: switch rules population command type - pass in the command opcode
static void
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
		 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
{
	/* vlan_id > ICE_MAX_VLAN_ID is the sentinel for "no VLAN to program" */
	u16 vlan_id = ICE_MAX_VLAN_ID + 1;
	void *daddr = NULL;
	u16 eth_hdr_sz;
	u8 *eth_hdr;
	u32 act = 0;
	__be16 *off;
	u8 q_rgn;

	/* A remove rule only needs the rule index; no header/action rebuild */
	if (opc == ice_aqc_opc_remove_sw_rules) {
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			CPU_TO_LE16(f_info->fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		return;
	}

	eth_hdr_sz = sizeof(dummy_eth_header);
	eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;

	/* initialize the ether header with a dummy header */
	ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
	ice_fill_sw_info(hw, f_info);

	switch (f_info->fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
			ICE_SINGLE_ACT_VSI_ID_M;
		/* VLAN lookups use prune actions instead of VSI forwarding */
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_VSI_LIST:
		act |= ICE_SINGLE_ACT_VSI_LIST;
		act |= (f_info->fwd_id.vsi_list_id <<
			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
			ICE_SINGLE_ACT_VSI_LIST_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
			ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_QGRP:
		/* Queue region size is encoded as log2 of the group size */
		q_rgn = f_info->qgrp_size > 0 ?
			(u8)ice_ilog2(f_info->qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
			ICE_SINGLE_ACT_Q_REGION_M;
		break;
	default:
		return;
	}

	if (f_info->lb_en)
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	if (f_info->lan_en)
		act |= ICE_SINGLE_ACT_LAN_ENABLE;

	/* Pick which fields of the dummy Ethernet header get programmed
	 * based on the lookup type.
	 */
	switch (f_info->lkup_type) {
	case ICE_SW_LKUP_MAC:
		daddr = f_info->l_data.mac.mac_addr;
		break;
	case ICE_SW_LKUP_VLAN:
		vlan_id = f_info->l_data.vlan.vlan_id;
		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			act |= ICE_SINGLE_ACT_PRUNE;
			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
		}
		break;
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		daddr = f_info->l_data.ethertype_mac.mac_addr;
		/* fall-through */
	case ICE_SW_LKUP_ETHERTYPE:
		off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		break;
	case ICE_SW_LKUP_PROMISC_VLAN:
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		/* fall-through */
	case ICE_SW_LKUP_PROMISC:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		break;
	default:
		break;
	}

	s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
		CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
		CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);

	/* Recipe set depending on lookup type */
	s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
	s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
	s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);

	if (daddr)
		ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
			   ICE_NONDMA_TO_NONDMA);

	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
		/* Program the VLAN TCI; dummy header already carries 0x8100 */
		off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
		*off = CPU_TO_BE16(vlan_id);
	}

	/* Create the switch rule with the final dummy Ethernet header */
	if (opc != ice_aqc_opc_update_sw_rules)
		s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
}
2543 * ice_add_marker_act
2544 * @hw: pointer to the hardware structure
2545 * @m_ent: the management entry for which sw marker needs to be added
2546 * @sw_marker: sw marker to tag the Rx descriptor with
2547 * @l_id: large action resource ID
2549 * Create a large action to hold software marker and update the switch rule
2550 * entry pointed by m_ent with newly created large action
static enum ice_status
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		   u16 sw_marker, u16 l_id)
{
	struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
	/* For software marker we need 3 large actions
	 * 1. FWD action: FWD TO VSI or VSI LIST
	 * 2. GENERIC VALUE action to hold the profile ID
	 * 3. GENERIC VALUE action to hold the software marker ID
	 */
	const u16 num_lg_acts = 3;
	enum ice_status status;
	u16 lg_act_size;
	u16 rules_size;
	u32 act;
	u16 id;

	/* Markers are only supported on plain MAC rules */
	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 *    1. Large Action
	 *    2. Look up Tx Rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
	if (!lg_act)
		return ICE_ERR_NO_MEMORY;

	/* Second element lives immediately after the large action */
	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
	lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs
	 */
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
		ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);

	/* Second action descriptor type */
	act = ICE_LG_ACT_GENERIC;

	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);

	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;

	/* Third action Marker value */
	act |= ICE_LG_ACT_GENERIC;
	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
		ICE_LG_ACT_GENERIC_VALUE_M;

	lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);

	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* Update the action to point to the large action ID */
	rx_tx->pdata.lkup_tx_rx.act =
		CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
			    ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
			     ICE_SINGLE_ACT_PTR_VAL_M));

	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	rx_tx->pdata.lkup_tx_rx.index =
		CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);

	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		/* Record the marker bookkeeping only on firmware success */
		m_ent->lg_act_idx = l_id;
		m_ent->sw_marker_id = sw_marker;
	}

	ice_free(hw, lg_act);
	return status;
}
2648 * ice_add_counter_act - add/update filter rule with counter action
2649 * @hw: pointer to the hardware structure
2650 * @m_ent: the management entry for which counter needs to be added
2651 * @counter_id: VLAN counter ID returned as part of allocate resource
2652 * @l_id: large action resource ID
static enum ice_status
ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		    u16 counter_id, u16 l_id)
{
	struct ice_aqc_sw_rules_elem *lg_act;
	struct ice_aqc_sw_rules_elem *rx_tx;
	enum ice_status status;
	/* 2 actions will be added while adding a large action counter */
	const int num_acts = 2;
	u16 lg_act_size;
	u16 rules_size;
	u16 f_rule_id;
	u32 act;
	u16 id;

	/* Counters are only supported on plain MAC rules */
	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 * 1. Large Action
	 * 2. Look up Tx Rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
							    rules_size);
	if (!lg_act)
		return ICE_ERR_NO_MEMORY;

	/* Second element lives immediately after the large action */
	rx_tx = (struct ice_aqc_sw_rules_elem *)
		((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
	lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs
	 */
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
		ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);

	/* Second action counter ID */
	act = ICE_LG_ACT_STAT_COUNT;
	act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
		ICE_LG_ACT_STAT_COUNT_M;
	lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);

	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	act = ICE_SINGLE_ACT_PTR;
	act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
	rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);

	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	f_rule_id = m_ent->fltr_info.fltr_rule_id;
	rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);

	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		/* Record the counter bookkeeping only on firmware success */
		m_ent->lg_act_idx = l_id;
		m_ent->counter_index = counter_id;
	}

	ice_free(hw, lg_act);
	return status;
}
2738 * ice_create_vsi_list_map
2739 * @hw: pointer to the hardware structure
2740 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2741 * @num_vsi: number of VSI handles in the array
2742 * @vsi_list_id: VSI list ID generated as part of allocate resource
2744 * Helper function to create a new entry of VSI list ID to VSI mapping
2745 * using the given VSI list ID
static struct ice_vsi_list_map_info *
ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			u16 vsi_list_id)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_map;
	int i;

	v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
		sizeof(*v_map));
	if (!v_map)
		return NULL;

	v_map->vsi_list_id = vsi_list_id;
	/* new map starts with one user; sharers bump ref_cnt later */
	v_map->ref_cnt = 1;
	for (i = 0; i < num_vsi; i++)
		ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);

	/* Track the mapping on the switch-wide list */
	LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
	return v_map;
}
2770 * ice_update_vsi_list_rule
2771 * @hw: pointer to the hardware structure
2772 * @vsi_handle_arr: array of VSI handles to form a VSI list
2773 * @num_vsi: number of VSI handles in the array
2774 * @vsi_list_id: VSI list ID generated as part of allocate resource
2775 * @remove: Boolean value to indicate if this is a remove action
2776 * @opc: switch rules population command type - pass in the command opcode
2777 * @lkup_type: lookup type of the filter
2779 * Call AQ command to add a new switch rule or update existing switch rule
2780 * using the given VSI list ID
static enum ice_status
ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
			 enum ice_sw_lkup_type lkup_type)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_status status;
	u16 s_rule_size;
	u16 rule_type;
	int i;

	if (!num_vsi)
		return ICE_ERR_PARAM;

	/* VLAN lookups use prune lists; everything else uses VSI lists */
	if (lkup_type == ICE_SW_LKUP_MAC ||
	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
	    lkup_type == ICE_SW_LKUP_PROMISC ||
	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
	    lkup_type == ICE_SW_LKUP_LAST)
		rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
			ICE_AQC_SW_RULES_T_VSI_LIST_SET;
	else if (lkup_type == ICE_SW_LKUP_VLAN)
		rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
			ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
	else
		return ICE_ERR_PARAM;

	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
	s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;
	for (i = 0; i < num_vsi; i++) {
		if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
			status = ICE_ERR_PARAM;
			goto exit;
		}
		/* AQ call requires hw_vsi_id(s) */
		s_rule->pdata.vsi_list.vsi[i] =
			CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
	}

	s_rule->type = CPU_TO_LE16(rule_type);
	s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
	s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);

	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);

exit:
	ice_free(hw, s_rule);
	return status;
}
2837 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2838 * @hw: pointer to the HW struct
2839 * @vsi_handle_arr: array of VSI handles to form a VSI list
2840 * @num_vsi: number of VSI handles in the array
2841 * @vsi_list_id: stores the ID of the VSI list to be created
2842 * @lkup_type: switch rule filter's lookup type
2844 static enum ice_status
2845 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2846 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2848 enum ice_status status;
2850 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2851 ice_aqc_opc_alloc_res);
2855 /* Update the newly created VSI list to include the specified VSIs */
2856 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2857 *vsi_list_id, false,
2858 ice_aqc_opc_add_sw_rules, lkup_type);
2862 * ice_create_pkt_fwd_rule
2863 * @hw: pointer to the hardware structure
2864 * @recp_list: corresponding filter management list
2865 * @f_entry: entry containing packet forwarding information
2867 * Create switch rule with given filter information and add an entry
2868 * to the corresponding filter management list to track this switch rule
static enum ice_status
ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
			struct ice_fltr_list_entry *f_entry)
{
	struct ice_fltr_mgmt_list_entry *fm_entry;
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_status status;

	s_rule = (struct ice_aqc_sw_rules_elem *)
		ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;
	fm_entry = (struct ice_fltr_mgmt_list_entry *)
		ice_malloc(hw, sizeof(*fm_entry));
	if (!fm_entry) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_create_pkt_fwd_rule_exit;
	}

	fm_entry->fltr_info = f_entry->fltr_info;

	/* Initialize all the fields for the management entry */
	fm_entry->vsi_count = 1;
	fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
	fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
	fm_entry->counter_index = ICE_INVAL_COUNTER_ID;

	ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
			 ice_aqc_opc_add_sw_rules);

	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
				 ice_aqc_opc_add_sw_rules, NULL);
	if (status) {
		/* Drop the bookkeeping entry if firmware rejected the rule */
		ice_free(hw, fm_entry);
		goto ice_create_pkt_fwd_rule_exit;
	}

	/* Firmware returns the assigned rule ID in the lookup element index;
	 * propagate it to both the caller's entry and our bookkeeping.
	 */
	f_entry->fltr_info.fltr_rule_id =
		LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
	fm_entry->fltr_info.fltr_rule_id =
		LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);

	/* The book keeping entries will get removed when base driver
	 * calls remove filter AQ command
	 */
	LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);

ice_create_pkt_fwd_rule_exit:
	ice_free(hw, s_rule);
	return status;
}
2924 * ice_update_pkt_fwd_rule
2925 * @hw: pointer to the hardware structure
2926 * @f_info: filter information for switch rule
2928 * Call AQ command to update a previously created switch rule with a
2931 static enum ice_status
2932 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2934 struct ice_aqc_sw_rules_elem *s_rule;
2935 enum ice_status status;
2937 s_rule = (struct ice_aqc_sw_rules_elem *)
2938 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2940 return ICE_ERR_NO_MEMORY;
2942 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
2944 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2946 /* Update switch rule with new rule set to forward VSI list */
2947 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2948 ice_aqc_opc_update_sw_rules, NULL);
2950 ice_free(hw, s_rule);
2955 * ice_update_sw_rule_bridge_mode
2956 * @hw: pointer to the HW struct
2958 * Updates unicast switch filter rules based on VEB/VEPA mode
enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *fm_entry;
	enum ice_status status = ICE_SUCCESS;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */

	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;

	ice_acquire_lock(rule_lock);
	LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
			    list_entry) {
		struct ice_fltr_info *fi = &fm_entry->fltr_info;
		u8 *addr = fi->l_data.mac.mac_addr;

		/* Update unicast Tx rules to reflect the selected
		 * VEB/VEPA mode
		 */
		if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
		    (fi->fltr_act == ICE_FWD_TO_VSI ||
		     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
		     fi->fltr_act == ICE_FWD_TO_Q ||
		     fi->fltr_act == ICE_FWD_TO_QGRP)) {
			/* re-program so lan_en/lb_en pick up the new mode */
			status = ice_update_pkt_fwd_rule(hw, fi);
			if (status)
				break;
		}
	}

	ice_release_lock(rule_lock);

	return status;
}
2997 * ice_add_update_vsi_list
2998 * @hw: pointer to the hardware structure
2999 * @m_entry: pointer to current filter management list entry
3000 * @cur_fltr: filter information from the book keeping entry
3001 * @new_fltr: filter information with the new VSI to be added
3003 * Call AQ command to add or update previously created VSI list with new VSI.
3005 * Helper function to do book keeping associated with adding filter information
3006 * The algorithm to do the book keeping is described below :
3007 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
3008 * if only one VSI has been added till now
3009 * Allocate a new VSI list and add two VSIs
3010 * to this list using switch rule command
3011 * Update the previously created switch rule with the
3012 * newly created VSI list ID
3013 * if a VSI list was previously created
3014 * Add the new VSI to the previously created VSI list set
3015 * using the update switch rule command
static enum ice_status
ice_add_update_vsi_list(struct ice_hw *hw,
			struct ice_fltr_mgmt_list_entry *m_entry,
			struct ice_fltr_info *cur_fltr,
			struct ice_fltr_info *new_fltr)
{
	enum ice_status status = ICE_SUCCESS;
	u16 vsi_list_id = 0;

	/* Queue/queue-group actions cannot be aggregated into a VSI list */
	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
		return ICE_ERR_NOT_IMPL;

	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
		return ICE_ERR_NOT_IMPL;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 * new VSIs.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
			return ICE_ERR_ALREADY_EXISTS;

		vsi_handle_arr[0] = cur_fltr->vsi_handle;
		vsi_handle_arr[1] = new_fltr->vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id,
						  new_fltr->lkup_type);
		if (status)
			return status;

		tmp_fltr = *new_fltr;
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		/* Update the previous switch rule of "MAC forward to VSI" to
		 * "MAC fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			return status;

		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);

		/* If this entry was large action then the large action needs
		 * to be updated to point to FWD to VSI list
		 */
		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
			status =
			    ice_add_marker_act(hw, m_entry,
					       m_entry->sw_marker_id,
					       m_entry->lg_act_idx);
	} else {
		u16 vsi_handle = new_fltr->vsi_handle;
		enum ice_adminq_opc opcode;

		if (!m_entry->vsi_list_info)
			return ICE_ERR_CFG;

		/* A rule already exists with the new VSI being added */
		if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
			return ICE_SUCCESS;

		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		 */
		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
		opcode = ice_aqc_opc_update_sw_rules;

		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false, opcode,
						  new_fltr->lkup_type);
		/* update VSI list mapping info with new VSI ID */
		if (!status)
			ice_set_bit(vsi_handle,
				    m_entry->vsi_list_info->vsi_map);
	}
	if (!status)
		m_entry->vsi_count++;
	return status;
}
3112 * ice_find_rule_entry - Search a rule entry
3113 * @list_head: head of rule list
3114 * @f_info: rule information
3116 * Helper function to search for a given rule entry
3117 * Returns pointer to entry storing the rule if found
3119 static struct ice_fltr_mgmt_list_entry *
3120 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
3121 struct ice_fltr_info *f_info)
3123 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
3125 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3127 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3128 sizeof(f_info->l_data)) &&
3129 f_info->flag == list_itr->fltr_info.flag) {
3138 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
3139 * @recp_list: VSI lists needs to be searched
3140 * @vsi_handle: VSI handle to be found in VSI list
3141 * @vsi_list_id: VSI list ID found containing vsi_handle
3143 * Helper function to search a VSI list with single entry containing given VSI
3144 * handle element. This can be extended further to search VSI list with more
3145 * than 1 vsi_count. Returns pointer to VSI list entry if found.
static struct ice_vsi_list_map_info *
ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
			u16 *vsi_list_id)
{
	struct ice_vsi_list_map_info *map_info = NULL;
	struct LIST_HEAD_TYPE *list_head;

	list_head = &recp_list->filt_rules;
	/* Advanced-rule recipes keep a different entry type on the list */
	if (recp_list->adv_rule) {
		struct ice_adv_fltr_mgmt_list_entry *list_itr;

		LIST_FOR_EACH_ENTRY(list_itr, list_head,
				    ice_adv_fltr_mgmt_list_entry,
				    list_entry) {
			if (list_itr->vsi_list_info) {
				map_info = list_itr->vsi_list_info;
				if (ice_is_bit_set(map_info->vsi_map,
						   vsi_handle)) {
					*vsi_list_id = map_info->vsi_list_id;
					return map_info;
				}
			}
		}
	} else {
		struct ice_fltr_mgmt_list_entry *list_itr;

		LIST_FOR_EACH_ENTRY(list_itr, list_head,
				    ice_fltr_mgmt_list_entry,
				    list_entry) {
			/* only consider single-VSI lists, per the contract */
			if (list_itr->vsi_count == 1 &&
			    list_itr->vsi_list_info) {
				map_info = list_itr->vsi_list_info;
				if (ice_is_bit_set(map_info->vsi_map,
						   vsi_handle)) {
					*vsi_list_id = map_info->vsi_list_id;
					return map_info;
				}
			}
		}
	}
	return NULL;
}
3191 * ice_add_rule_internal - add rule for a given lookup type
3192 * @hw: pointer to the hardware structure
3193 * @recp_list: recipe list for which rule has to be added
3194 * @lport: logic port number on which function add rule
3195 * @f_entry: structure containing MAC forwarding information
3197 * Adds or updates the rule lists for a given recipe
static enum ice_status
ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
		      u8 lport, struct ice_fltr_list_entry *f_entry)
{
	struct ice_fltr_info *new_fltr, *cur_fltr;
	struct ice_fltr_mgmt_list_entry *m_entry;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;

	/* Load the hw_vsi_id only if the fwd action is fwd to VSI */
	if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
		f_entry->fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &recp_list->filt_rule_lock;

	ice_acquire_lock(rule_lock);
	new_fltr = &f_entry->fltr_info;
	/* Rx rules match on ingress port; Tx rules match on source HW VSI */
	if (new_fltr->flag & ICE_FLTR_RX)
		new_fltr->src = lport;
	else if (new_fltr->flag & ICE_FLTR_TX)
		new_fltr->src =
			ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
	if (!m_entry) {
		/* No existing rule for this lookup data: create a fresh one */
		status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
		goto exit_add_rule_internal;
	}

	/* Rule exists: fold the new VSI into (or convert to) a VSI list */
	cur_fltr = &m_entry->fltr_info;
	status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);

exit_add_rule_internal:
	ice_release_lock(rule_lock);
	return status;
}
3241 * ice_remove_vsi_list_rule
3242 * @hw: pointer to the hardware structure
3243 * @vsi_list_id: VSI list ID generated as part of allocate resource
3244 * @lkup_type: switch rule filter lookup type
3246 * The VSI list should be emptied before this function is called to remove the
3249 static enum ice_status
3250 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
3251 enum ice_sw_lkup_type lkup_type)
3253 struct ice_aqc_sw_rules_elem *s_rule;
3254 enum ice_status status;
3257 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
3258 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3260 return ICE_ERR_NO_MEMORY;
3262 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
3263 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
3265 /* Free the vsi_list resource that we allocated. It is assumed that the
3266 * list is empty at this point.
3268 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
3269 ice_aqc_opc_free_res);
3271 ice_free(hw, s_rule);
3276 * ice_rem_update_vsi_list
3277 * @hw: pointer to the hardware structure
3278 * @vsi_handle: VSI handle of the VSI to remove
3279 * @fm_list: filter management entry for which the VSI list management needs to
static enum ice_status
ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			struct ice_fltr_mgmt_list_entry *fm_list)
{
	enum ice_sw_lkup_type lkup_type;
	enum ice_status status = ICE_SUCCESS;
	u16 vsi_list_id;

	/* Only meaningful for rules currently forwarding to a VSI list */
	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return ICE_ERR_PARAM;

	/* A rule with the VSI being removed does not exist */
	if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
		return ICE_ERR_DOES_NOT_EXIST;

	lkup_type = fm_list->fltr_info.lkup_type;
	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
	/* Tell firmware to drop this VSI from the list (remove = true) */
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	fm_list->vsi_count--;
	ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);

	/* One VSI left (non-VLAN): demote the rule back to plain FWD_TO_VSI */
	if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
		struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;
		u16 rem_vsi_handle;

		rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
						    ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return ICE_ERR_OUT_OF_RANGE;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		tmp_fltr_info.vsi_handle = rem_vsi_handle;
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
		if (status) {
			ice_debug(hw, ICE_DBG_SW,
				  "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr_info.fwd_id.hw_vsi_id, status);
			return status;
		}

		fm_list->fltr_info = tmp_fltr_info;
	}

	/* The list itself is no longer needed: one member left for non-VLAN
	 * (rule now forwards directly), or zero members for VLAN prune lists.
	 */
	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW,
				  "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		LIST_DEL(&vsi_list_info->list_entry);
		ice_free(hw, vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}
3366 * ice_remove_rule_internal - Remove a filter rule of a given type
3368 * @hw: pointer to the hardware structure
3369 * @recp_list: recipe list for which the rule needs to removed
3370 * @f_entry: rule entry containing filter information
static enum ice_status
ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
			 struct ice_fltr_list_entry *f_entry)
{
	struct ice_fltr_mgmt_list_entry *list_elem;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;
	bool remove_rule = false;
	u16 vsi_handle;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &recp_list->filt_rule_lock;
	ice_acquire_lock(rule_lock);
	list_elem = ice_find_rule_entry(&recp_list->filt_rules,
					&f_entry->fltr_info);
	if (!list_elem) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto exit;
	}

	if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
		/* single-VSI rule: remove the whole lookup rule below */
		remove_rule = true;
	} else if (!list_elem->vsi_list_info) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto exit;
	} else if (list_elem->vsi_list_info->ref_cnt > 1) {
		/* a ref_cnt > 1 indicates that the vsi_list is being
		 * shared by multiple rules. Decrement the ref_cnt and
		 * remove this rule, but do not modify the list, as it
		 * is in-use by other rules.
		 */
		list_elem->vsi_list_info->ref_cnt--;
		remove_rule = true;
	} else {
		/* a ref_cnt of 1 indicates the vsi_list is only used
		 * by one rule. However, the original removal request is only
		 * for a single VSI. Update the vsi_list first, and only
		 * remove the rule if there are no further VSIs in this list.
		 */
		vsi_handle = f_entry->fltr_info.vsi_handle;
		status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status)
			goto exit;
		/* if VSI count goes to zero after updating the VSI list */
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}

	if (remove_rule) {
		/* Remove the lookup rule */
		struct ice_aqc_sw_rules_elem *s_rule;

		s_rule = (struct ice_aqc_sw_rules_elem *)
			ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
		if (!s_rule) {
			status = ICE_ERR_NO_MEMORY;
			goto exit;
		}

		ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
				 ice_aqc_opc_remove_sw_rules);

		status = ice_aq_sw_rules(hw, s_rule,
					 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
					 ice_aqc_opc_remove_sw_rules, NULL);

		/* Remove a book keeping from the list */
		ice_free(hw, s_rule);

		if (status)
			goto exit;

		LIST_DEL(&list_elem->list_entry);
		ice_free(hw, list_elem);
	}
exit:
	ice_release_lock(rule_lock);
	return status;
}
3457 * ice_aq_get_res_alloc - get allocated resources
3458 * @hw: pointer to the HW struct
3459 * @num_entries: pointer to u16 to store the number of resource entries returned
3460 * @buf: pointer to user-supplied buffer
3461 * @buf_size: size of buff
3462 * @cd: pointer to command details structure or NULL
3464 * The user-supplied buffer must be large enough to store the resource
3465 * information for all resource types. Each resource type is an
3466 * ice_aqc_get_res_resp_data_elem structure.
3469 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
3470 u16 buf_size, struct ice_sq_cd *cd)
3472 struct ice_aqc_get_res_alloc *resp;
3473 enum ice_status status;
3474 struct ice_aq_desc desc;
/* NOTE(review): the condition guarding this return was lost in extraction;
 * ICE_ERR_BAD_PTR suggests a NULL-buf check — verify against upstream.
 */
3477 return ICE_ERR_BAD_PTR;
3479 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
3480 return ICE_ERR_INVAL_SIZE;
/* resp aliases the descriptor's response area; read only after the AQ call */
3482 resp = &desc.params.get_res;
3484 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
3485 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* Report the entry count only on success and only if the caller wants it */
3487 if (!status && num_entries)
3488 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
3494 * ice_aq_get_res_descs - get allocated resource descriptors
3495 * @hw: pointer to the hardware structure
3496 * @num_entries: number of resource entries in buffer
3497 * @buf: Indirect buffer to hold data parameters and response
3498 * @buf_size: size of buffer for indirect commands
3499 * @res_type: resource type
3500 * @res_shared: is resource shared
3501 * @desc_id: input - first desc ID to start; output - next desc ID
3502 * @cd: pointer to command details structure or NULL
3505 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
3506 struct ice_aqc_get_allocd_res_desc_resp *buf,
3507 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
3508 struct ice_sq_cd *cd)
3510 struct ice_aqc_get_allocd_res_desc *cmd;
3511 struct ice_aq_desc desc;
3512 enum ice_status status;
3514 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3516 cmd = &desc.params.get_res_desc;
/* NOTE(review): the guard condition before this return was dropped in
 * extraction (presumably a NULL-buf check) — confirm against upstream.
 */
3519 return ICE_ERR_PARAM;
/* buf_size must exactly cover num_entries response elements */
3521 if (buf_size != (num_entries * sizeof(*buf)))
3522 return ICE_ERR_PARAM;
3524 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
/* Pack resource type and shared flag into the single LE16 command field */
3526 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
3527 ICE_AQC_RES_TYPE_M) | (res_shared ?
3528 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
3529 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
3531 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* desc_id is in/out: advance it to the next descriptor for pagination
 * (presumably only on success — the status check was lost in extraction)
 */
3533 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
3539 * ice_add_mac_rule - Add a MAC address based filter rule
3540 * @hw: pointer to the hardware structure
3541 * @m_list: list of MAC addresses and forwarding information
3542 * @sw: pointer to switch info struct for which function add rule
3543 * @lport: logic port number on which function add rule
3545 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
3546 * multiple unicast addresses, the function assumes that all the
3547 * addresses are unique in a given add_mac call. It doesn't
3548 * check for duplicates in this case, removing duplicates from a given
3549 * list should be taken care of in the caller of this function.
3551 static enum ice_status
3552 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
3553 struct ice_switch_info *sw, u8 lport)
3555 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
3556 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3557 struct ice_fltr_list_entry *m_list_itr;
3558 struct LIST_HEAD_TYPE *rule_head;
3559 u16 total_elem_left, s_rule_size;
3560 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3561 enum ice_status status = ICE_SUCCESS;
3562 u16 num_unicast = 0;
3566 rule_lock = &recp_list->filt_rule_lock;
3567 rule_head = &recp_list->filt_rules;
/* Pass 1: validate every entry; multicast (and shared unicast) entries are
 * added one-by-one here, while exclusive unicast entries are only counted
 * for a later bulk AQ submission. NOTE(review): several interior lines of
 * this function (loop braces, num_unicast accounting, NULL checks) were
 * lost in extraction — verify details against upstream before relying on
 * any single statement below.
 */
3569 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3571 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3575 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3576 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3577 if (!ice_is_vsi_valid(hw, vsi_handle))
3578 return ICE_ERR_PARAM;
3579 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3580 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3581 /* update the src in case it is VSI num */
3582 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3583 return ICE_ERR_PARAM;
3584 m_list_itr->fltr_info.src = hw_vsi_id;
3585 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3586 IS_ZERO_ETHER_ADDR(add))
3587 return ICE_ERR_PARAM;
3588 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3589 /* Don't overwrite the unicast address */
3590 ice_acquire_lock(rule_lock);
3591 if (ice_find_rule_entry(rule_head,
3592 &m_list_itr->fltr_info)) {
3593 ice_release_lock(rule_lock);
3594 return ICE_ERR_ALREADY_EXISTS;
3596 ice_release_lock(rule_lock);
3598 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3599 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3600 m_list_itr->status =
3601 ice_add_rule_internal(hw, recp_list, lport,
3603 if (m_list_itr->status)
3604 return m_list_itr->status;
/* Lock held from here through the common exit path below */
3608 ice_acquire_lock(rule_lock);
3609 /* Exit if no suitable entries were found for adding bulk switch rule */
3611 status = ICE_SUCCESS;
3612 goto ice_add_mac_exit;
3615 /* Allocate switch rule buffer for the bulk update for unicast */
3616 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3617 s_rule = (struct ice_aqc_sw_rules_elem *)
3618 ice_calloc(hw, num_unicast, s_rule_size);
3620 status = ICE_ERR_NO_MEMORY;
3621 goto ice_add_mac_exit;
/* Pass 2: fill one switch-rule element per exclusive unicast entry */
3625 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3627 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3628 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3630 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3631 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3632 ice_aqc_opc_add_sw_rules);
3633 r_iter = (struct ice_aqc_sw_rules_elem *)
3634 ((u8 *)r_iter + s_rule_size);
3638 /* Call AQ bulk switch rule update for all unicast addresses */
3640 /* Call AQ switch rule in AQ_MAX chunk */
3641 for (total_elem_left = num_unicast; total_elem_left > 0;
3642 total_elem_left -= elem_sent) {
3643 struct ice_aqc_sw_rules_elem *entry = r_iter;
/* Chunk size bounded by the AQ maximum buffer length */
3645 elem_sent = MIN_T(u8, total_elem_left,
3646 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
3647 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3648 elem_sent, ice_aqc_opc_add_sw_rules,
3651 goto ice_add_mac_exit;
3652 r_iter = (struct ice_aqc_sw_rules_elem *)
3653 ((u8 *)r_iter + (elem_sent * s_rule_size));
3656 /* Fill up rule ID based on the value returned from FW */
3658 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3660 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3661 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3662 struct ice_fltr_mgmt_list_entry *fm_entry;
3664 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3665 f_info->fltr_rule_id =
3666 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3667 f_info->fltr_act = ICE_FWD_TO_VSI;
3668 /* Create an entry to track this MAC address */
3669 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3670 ice_malloc(hw, sizeof(*fm_entry));
3672 status = ICE_ERR_NO_MEMORY;
3673 goto ice_add_mac_exit;
3675 fm_entry->fltr_info = *f_info;
3676 fm_entry->vsi_count = 1;
3677 /* The book keeping entries will get removed when
3678 * base driver calls remove filter AQ command
3681 LIST_ADD(&fm_entry->list_entry, rule_head);
3682 r_iter = (struct ice_aqc_sw_rules_elem *)
3683 ((u8 *)r_iter + s_rule_size);
/* Common exit: drop the rule lock and release the bulk rule buffer */
3688 ice_release_lock(rule_lock);
3690 ice_free(hw, s_rule);
3695 * ice_add_mac - Add a MAC address based filter rule
3696 * @hw: pointer to the hardware structure
3697 * @m_list: list of MAC addresses and forwarding information
3699 * Function add MAC rule for logical port from HW struct
3701 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
/* NOTE(review): the argument-validation condition guarding this return was
 * lost in extraction — presumably a NULL check on hw/m_list.
 */
3704 return ICE_ERR_PARAM;
/* Thin wrapper: delegate to the rule-level helper using the HW struct's
 * own switch info and logical port.
 */
3706 return ice_add_mac_rule(hw, m_list, hw->switch_info,
3707 hw->port_info->lport);
3711 * ice_add_vlan_internal - Add one VLAN based filter rule
3712 * @hw: pointer to the hardware structure
3713 * @recp_list: recipe list for which rule has to be added
3714 * @f_entry: filter entry containing one VLAN information
3716 static enum ice_status
3717 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3718 struct ice_fltr_list_entry *f_entry)
3720 struct ice_fltr_mgmt_list_entry *v_list_itr;
3721 struct ice_fltr_info *new_fltr, *cur_fltr;
3722 enum ice_sw_lkup_type lkup_type;
3723 u16 vsi_list_id = 0, vsi_handle;
3724 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3725 enum ice_status status = ICE_SUCCESS;
3727 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3728 return ICE_ERR_PARAM;
3730 f_entry->fltr_info.fwd_id.hw_vsi_id =
3731 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3732 new_fltr = &f_entry->fltr_info;
3734 /* VLAN ID should only be 12 bits */
3735 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3736 return ICE_ERR_PARAM;
3738 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3739 return ICE_ERR_PARAM;
3741 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3742 lkup_type = new_fltr->lkup_type;
3743 vsi_handle = new_fltr->vsi_handle;
3744 rule_lock = &recp_list->filt_rule_lock;
3745 ice_acquire_lock(rule_lock);
/* Three cases follow: no existing rule for this VLAN, an existing rule
 * whose VSI list is used by one rule only, or an existing rule whose VSI
 * list is shared. NOTE(review): several branch/brace lines were lost in
 * extraction; compare with upstream before editing this logic.
 */
3746 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
3748 struct ice_vsi_list_map_info *map_info = NULL;
3750 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3751 /* All VLAN pruning rules use a VSI list. Check if
3752 * there is already a VSI list containing VSI that we
3753 * want to add. If found, use the same vsi_list_id for
3754 * this new VLAN rule or else create a new list.
3756 map_info = ice_find_vsi_list_entry(recp_list,
3760 status = ice_create_vsi_list_rule(hw,
3768 /* Convert the action to forwarding to a VSI list. */
3769 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3770 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3773 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
/* Re-look up the rule we just created to obtain its mgmt entry */
3775 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
3778 status = ICE_ERR_DOES_NOT_EXIST;
3781 /* reuse VSI list for new rule and increment ref_cnt */
3783 v_list_itr->vsi_list_info = map_info;
3784 map_info->ref_cnt++;
3786 v_list_itr->vsi_list_info =
3787 ice_create_vsi_list_map(hw, &vsi_handle,
3791 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3792 /* Update existing VSI list to add new VSI ID only if it used
3795 cur_fltr = &v_list_itr->fltr_info;
3796 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3799 /* If VLAN rule exists and VSI list being used by this rule is
3800 * referenced by more than 1 VLAN rule. Then create a new VSI
3801 * list appending previous VSI with new VSI and update existing
3802 * VLAN rule to point to new VSI list ID
3804 struct ice_fltr_info tmp_fltr;
3805 u16 vsi_handle_arr[2];
3808 /* Current implementation only supports reusing VSI list with
3809 * one VSI count. We should never hit below condition
3811 if (v_list_itr->vsi_count > 1 &&
3812 v_list_itr->vsi_list_info->ref_cnt > 1) {
3813 ice_debug(hw, ICE_DBG_SW,
3814 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3815 status = ICE_ERR_CFG;
/* Fetch the single VSI currently on the shared list */
3820 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3823 /* A rule already exists with the new VSI being added */
3824 if (cur_handle == vsi_handle) {
3825 status = ICE_ERR_ALREADY_EXISTS;
3829 vsi_handle_arr[0] = cur_handle;
3830 vsi_handle_arr[1] = vsi_handle;
3831 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3832 &vsi_list_id, lkup_type);
3836 tmp_fltr = v_list_itr->fltr_info;
3837 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3838 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3839 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3840 /* Update the previous switch rule to a new VSI list which
3841 * includes current VSI that is requested
3843 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3847 /* before overriding VSI list map info. decrement ref_cnt of
3850 v_list_itr->vsi_list_info->ref_cnt--;
3852 /* now update to newly created list */
3853 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3854 v_list_itr->vsi_list_info =
3855 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3857 v_list_itr->vsi_count++;
3861 ice_release_lock(rule_lock);
3866 * ice_add_vlan_rule - Add VLAN based filter rule
3867 * @hw: pointer to the hardware structure
3868 * @v_list: list of VLAN entries and forwarding information
3869 * @sw: pointer to switch info struct for which function add rule
3871 static enum ice_status
3872 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
3873 struct ice_switch_info *sw)
3875 struct ice_fltr_list_entry *v_list_itr;
3876 struct ice_sw_recipe *recp_list;
3878 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
/* Add each VLAN entry individually; bail out on the first failure,
 * leaving per-entry results in each entry's status field.
 */
3879 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
3881 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3882 return ICE_ERR_PARAM;
3883 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3884 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
3886 if (v_list_itr->status)
3887 return v_list_itr->status;
3893 * ice_add_vlan - Add a VLAN based filter rule
3894 * @hw: pointer to the hardware structure
3895 * @v_list: list of VLAN and forwarding information
3897 * Function add VLAN rule for logical port from HW struct
3899 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
/* NOTE(review): the argument-validation condition guarding this return was
 * lost in extraction — presumably a NULL check on hw/v_list.
 */
3902 return ICE_ERR_PARAM;
/* Thin wrapper around the rule-level helper using hw's switch info */
3904 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
3908 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3909 * @hw: pointer to the hardware structure
3910 * @mv_list: list of MAC and VLAN filters
3911 * @sw: pointer to switch info struct for which function add rule
3912 * @lport: logic port number on which function add rule
3914 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3915 * pruning bits enabled, then it is the responsibility of the caller to make
3916 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3917 * VLAN won't be received on that VSI otherwise.
3919 static enum ice_status
3920 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
3921 struct ice_switch_info *sw, u8 lport)
3923 struct ice_fltr_list_entry *mv_list_itr;
3924 struct ice_sw_recipe *recp_list;
3926 if (!mv_list || !hw)
3927 return ICE_ERR_PARAM;
3929 recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
/* Add each MAC+VLAN entry individually; first failure aborts the loop */
3930 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3932 enum ice_sw_lkup_type l_type =
3933 mv_list_itr->fltr_info.lkup_type;
3935 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3936 return ICE_ERR_PARAM;
3937 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3938 mv_list_itr->status =
3939 ice_add_rule_internal(hw, recp_list, lport,
3941 if (mv_list_itr->status)
3942 return mv_list_itr->status;
3948 * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
3949 * @hw: pointer to the hardware structure
3950 * @mv_list: list of MAC VLAN addresses and forwarding information
3952 * Function add MAC VLAN rule for logical port from HW struct
3955 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3957 if (!mv_list || !hw)
3958 return ICE_ERR_PARAM;
/* Thin wrapper: delegate using hw's own switch info and logical port */
3960 return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
3961 hw->port_info->lport);
3965 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
3966 * @hw: pointer to the hardware structure
3967 * @em_list: list of ether type MAC filter, MAC is optional
3968 * @sw: pointer to switch info struct for which function add rule
3969 * @lport: logic port number on which function add rule
3971 * This function requires the caller to populate the entries in
3972 * the filter list with the necessary fields (including flags to
3973 * indicate Tx or Rx rules).
3975 static enum ice_status
3976 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
3977 struct ice_switch_info *sw, u8 lport)
3979 struct ice_fltr_list_entry *em_list_itr;
3981 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3983 struct ice_sw_recipe *recp_list;
3984 enum ice_sw_lkup_type l_type;
/* Recipe is picked per entry from the entry's own lookup type */
3986 l_type = em_list_itr->fltr_info.lkup_type;
3987 recp_list = &sw->recp_list[l_type];
/* Only ethertype and ethertype+MAC lookups are valid here */
3989 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3990 l_type != ICE_SW_LKUP_ETHERTYPE)
3991 return ICE_ERR_PARAM;
3993 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
3996 if (em_list_itr->status)
3997 return em_list_itr->status;
4003 * ice_add_eth_mac - Add a ethertype based filter rule
4004 * @hw: pointer to the hardware structure
4005 * @em_list: list of ethertype and forwarding information
4007 * Function add ethertype rule for logical port from HW struct
4010 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
4012 if (!em_list || !hw)
4013 return ICE_ERR_PARAM;
/* Thin wrapper: delegate using hw's own switch info and logical port */
4015 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
4016 hw->port_info->lport);
4020 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
4021 * @hw: pointer to the hardware structure
4022 * @em_list: list of ethertype or ethertype MAC entries
4023 * @sw: pointer to switch info struct for which function add rule
4025 static enum ice_status
4026 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
4027 struct ice_switch_info *sw)
4029 struct ice_fltr_list_entry *em_list_itr, *tmp;
/* SAFE iteration: removal may unlink the current entry */
4031 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
4033 struct ice_sw_recipe *recp_list;
4034 enum ice_sw_lkup_type l_type;
4036 l_type = em_list_itr->fltr_info.lkup_type;
/* Only ethertype and ethertype+MAC lookups are valid here */
4038 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
4039 l_type != ICE_SW_LKUP_ETHERTYPE)
4040 return ICE_ERR_PARAM;
4042 recp_list = &sw->recp_list[l_type];
4043 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4045 if (em_list_itr->status)
4046 return em_list_itr->status;
4052 * ice_remove_eth_mac - remove a ethertype based filter rule
4053 * @hw: pointer to the hardware structure
4054 * @em_list: list of ethertype and forwarding information
4058 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
4060 if (!em_list || !hw)
4061 return ICE_ERR_PARAM;
/* Thin wrapper around the rule-level helper using hw's switch info */
4063 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
4067 * ice_rem_sw_rule_info
4068 * @hw: pointer to the hardware structure
4069 * @rule_head: pointer to the switch list structure that we want to delete
4072 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
4074 if (!LIST_EMPTY(rule_head)) {
4075 struct ice_fltr_mgmt_list_entry *entry;
4076 struct ice_fltr_mgmt_list_entry *tmp;
/* SAFE iteration because each entry is unlinked and freed in turn */
4078 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
4079 ice_fltr_mgmt_list_entry, list_entry) {
4080 LIST_DEL(&entry->list_entry);
4081 ice_free(hw, entry);
4087 * ice_rem_adv_rule_info
4088 * @hw: pointer to the hardware structure
4089 * @rule_head: pointer to the switch list structure that we want to delete
4092 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
4094 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
4095 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
4097 if (LIST_EMPTY(rule_head))
/* SAFE iteration: entries are unlinked and freed while walking.
 * Each advanced entry owns a separately allocated lkups array,
 * freed before the entry itself.
 */
4100 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
4101 ice_adv_fltr_mgmt_list_entry, list_entry) {
4102 LIST_DEL(&lst_itr->list_entry);
4103 ice_free(hw, lst_itr->lkups);
4104 ice_free(hw, lst_itr);
4109 * ice_rem_all_sw_rules_info
4110 * @hw: pointer to the hardware structure
4112 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
4114 struct ice_switch_info *sw = hw->switch_info;
/* Walk every recipe: basic recipes use the plain rule cleanup, advanced
 * recipes use the advanced cleanup (which also frees per-rule lookups).
 */
4117 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4118 struct LIST_HEAD_TYPE *rule_head;
4120 rule_head = &sw->recp_list[i].filt_rules;
4121 if (!sw->recp_list[i].adv_rule)
4122 ice_rem_sw_rule_info(hw, rule_head);
4124 ice_rem_adv_rule_info(hw, rule_head);
4129 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
4130 * @pi: pointer to the port_info structure
4131 * @vsi_handle: VSI handle to set as default
4132 * @set: true to add the above mentioned switch rule, false to remove it
4133 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
4135 * add filter rule to set/unset given VSI as default VSI for the switch
4136 * (represented by swid)
4139 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
4142 struct ice_aqc_sw_rules_elem *s_rule;
4143 struct ice_fltr_info f_info;
4144 struct ice_hw *hw = pi->hw;
4145 enum ice_adminq_opc opcode;
4146 enum ice_status status;
4150 if (!ice_is_vsi_valid(hw, vsi_handle))
4151 return ICE_ERR_PARAM;
4152 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
/* Adding needs a full rule with dummy ethernet header; removing only
 * needs the headerless rule element.
 */
4154 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
4155 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
4156 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
4158 return ICE_ERR_NO_MEMORY;
4160 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
4162 f_info.lkup_type = ICE_SW_LKUP_DFLT;
4163 f_info.flag = direction;
4164 f_info.fltr_act = ICE_FWD_TO_VSI;
4165 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
/* Rx default rules are sourced by the logical port; Tx default rules are
 * sourced by the VSI itself. On removal the cached rule ID is reused
 * (the !set guard lines around the fltr_rule_id assignments were lost
 * in extraction — verify against upstream).
 */
4167 if (f_info.flag & ICE_FLTR_RX) {
4168 f_info.src = pi->lport;
4169 f_info.src_id = ICE_SRC_ID_LPORT;
4171 f_info.fltr_rule_id =
4172 pi->dflt_rx_vsi_rule_id;
4173 } else if (f_info.flag & ICE_FLTR_TX) {
4174 f_info.src_id = ICE_SRC_ID_VSI;
4175 f_info.src = hw_vsi_id;
4177 f_info.fltr_rule_id =
4178 pi->dflt_tx_vsi_rule_id;
4182 opcode = ice_aqc_opc_add_sw_rules;
4184 opcode = ice_aqc_opc_remove_sw_rules;
4186 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
4188 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
4189 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
/* On a successful add, cache the FW-assigned rule index and VSI number;
 * on a successful remove, reset the cached defaults to invalid.
 */
4192 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4194 if (f_info.flag & ICE_FLTR_TX) {
4195 pi->dflt_tx_vsi_num = hw_vsi_id;
4196 pi->dflt_tx_vsi_rule_id = index;
4197 } else if (f_info.flag & ICE_FLTR_RX) {
4198 pi->dflt_rx_vsi_num = hw_vsi_id;
4199 pi->dflt_rx_vsi_rule_id = index;
4202 if (f_info.flag & ICE_FLTR_TX) {
4203 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
4204 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
4205 } else if (f_info.flag & ICE_FLTR_RX) {
4206 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
4207 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
4212 ice_free(hw, s_rule);
4217 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
4218 * @list_head: head of rule list
4219 * @f_info: rule information
4221 * Helper function to search for a unicast rule entry - this is to be used
4222 * to remove unicast MAC filter that is not shared with other VSIs on the
4225 * Returns pointer to entry storing the rule if found
4227 static struct ice_fltr_mgmt_list_entry *
4228 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
4229 struct ice_fltr_info *f_info)
4231 struct ice_fltr_mgmt_list_entry *list_itr;
/* Match on lookup data, destination HW VSI and direction flag; unlike the
 * generic rule lookup, the HW VSI must match so a unicast address owned
 * by another VSI is not returned.
 */
4233 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4235 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4236 sizeof(f_info->l_data)) &&
4237 f_info->fwd_id.hw_vsi_id ==
4238 list_itr->fltr_info.fwd_id.hw_vsi_id &&
4239 f_info->flag == list_itr->fltr_info.flag)
4246 * ice_remove_mac_rule - remove a MAC based filter rule
4247 * @hw: pointer to the hardware structure
4248 * @m_list: list of MAC addresses and forwarding information
4249 * @recp_list: list from which function remove MAC address
4251 * This function removes either a MAC filter rule or a specific VSI from a
4252 * VSI list for a multicast MAC address.
4254 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
4255 * ice_add_mac. Caller should be aware that this call will only work if all
4256 * the entries passed into m_list were added previously. It will not attempt to
4257 * do a partial remove of entries that were found.
4259 static enum ice_status
4260 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4261 struct ice_sw_recipe *recp_list)
4263 struct ice_fltr_list_entry *list_itr, *tmp;
4264 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
/* NOTE(review): the guard condition for this return (presumably a NULL
 * m_list check) was lost in extraction.
 */
4267 return ICE_ERR_PARAM;
4269 rule_lock = &recp_list->filt_rule_lock;
/* SAFE iteration: removal may unlink the current entry */
4270 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
4272 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
4273 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
4276 if (l_type != ICE_SW_LKUP_MAC)
4277 return ICE_ERR_PARAM;
4279 vsi_handle = list_itr->fltr_info.vsi_handle;
4280 if (!ice_is_vsi_valid(hw, vsi_handle))
4281 return ICE_ERR_PARAM;
4283 list_itr->fltr_info.fwd_id.hw_vsi_id =
4284 ice_get_hw_vsi_num(hw, vsi_handle);
4285 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4286 /* Don't remove the unicast address that belongs to
4287 * another VSI on the switch, since it is not being
4290 ice_acquire_lock(rule_lock);
4291 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
4292 &list_itr->fltr_info)) {
4293 ice_release_lock(rule_lock);
4294 return ICE_ERR_DOES_NOT_EXIST;
4296 ice_release_lock(rule_lock);
4298 list_itr->status = ice_remove_rule_internal(hw, recp_list,
4300 if (list_itr->status)
4301 return list_itr->status;
4307 * ice_remove_mac - remove a MAC address based filter rule
4308 * @hw: pointer to the hardware structure
4309 * @m_list: list of MAC addresses and forwarding information
4312 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4314 struct ice_sw_recipe *recp_list;
/* Thin wrapper: always operates on the MAC lookup recipe of hw's switch */
4316 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4317 return ice_remove_mac_rule(hw, m_list, recp_list);
4321 * ice_remove_vlan_rule - Remove VLAN based filter rule
4322 * @hw: pointer to the hardware structure
4323 * @v_list: list of VLAN entries and forwarding information
4324 * @recp_list: list from which function remove VLAN
4326 static enum ice_status
4327 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4328 struct ice_sw_recipe *recp_list)
4330 struct ice_fltr_list_entry *v_list_itr, *tmp;
/* SAFE iteration: removal may unlink the current entry; first failure
 * aborts with that entry's status.
 */
4332 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4334 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4336 if (l_type != ICE_SW_LKUP_VLAN)
4337 return ICE_ERR_PARAM;
4338 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4340 if (v_list_itr->status)
4341 return v_list_itr->status;
4347 * ice_remove_vlan - remove a VLAN address based filter rule
4348 * @hw: pointer to the hardware structure
4349 * @v_list: list of VLAN and forwarding information
4353 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4355 struct ice_sw_recipe *recp_list;
/* NOTE(review): the condition guarding this return (presumably a NULL
 * check on v_list/hw) was lost in extraction.
 */
4358 return ICE_ERR_PARAM;
4360 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
4361 return ice_remove_vlan_rule(hw, v_list, recp_list);
4365 * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
4366 * @hw: pointer to the hardware structure
4367 * @v_list: list of MAC VLAN entries and forwarding information
4368 * @recp_list: list from which function remove MAC VLAN
4370 static enum ice_status
4371 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4372 struct ice_sw_recipe *recp_list)
4374 struct ice_fltr_list_entry *v_list_itr, *tmp;
/* NOTE(review): this immediately overwrites the recp_list parameter with
 * hw's own MAC_VLAN recipe, making the caller's argument irrelevant —
 * inconsistent with the sibling ice_remove_*_rule helpers; verify intent.
 */
4376 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
/* SAFE iteration: removal may unlink the current entry */
4377 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4379 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4381 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4382 return ICE_ERR_PARAM;
4383 v_list_itr->status =
4384 ice_remove_rule_internal(hw, recp_list,
4386 if (v_list_itr->status)
4387 return v_list_itr->status;
4393 * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
4394 * @hw: pointer to the hardware structure
4395 * @mv_list: list of MAC VLAN and forwarding information
4398 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4400 struct ice_sw_recipe *recp_list;
4402 if (!mv_list || !hw)
4403 return ICE_ERR_PARAM;
/* Thin wrapper: always operates on the MAC_VLAN recipe of hw's switch */
4405 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4406 return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
4410 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
4411 * @fm_entry: filter entry to inspect
4412 * @vsi_handle: VSI handle to compare with filter info
4415 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
/* True if the filter forwards directly to this VSI, or forwards to a VSI
 * list whose bitmap includes this VSI handle.
 */
4417 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
4418 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
4419 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
4420 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
4425 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
4426 * @hw: pointer to the hardware structure
4427 * @vsi_handle: VSI handle to remove filters from
4428 * @vsi_list_head: pointer to the list to add entry to
4429 * @fi: pointer to fltr_info of filter entry to copy & add
4431 * Helper function, used when creating a list of filters to remove from
4432 * a specific VSI. The entry added to vsi_list_head is a COPY of the
4433 * original filter entry, with the exception of fltr_info.fltr_act and
4434 * fltr_info.fwd_id fields. These are set such that later logic can
4435 * extract which VSI to remove the fltr from, and pass on that information.
4437 static enum ice_status
4438 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4439 struct LIST_HEAD_TYPE *vsi_list_head,
4440 struct ice_fltr_info *fi)
4442 struct ice_fltr_list_entry *tmp;
4444 /* this memory is freed up in the caller function
4445 * once filters for this VSI are removed
4447 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
4449 return ICE_ERR_NO_MEMORY;
/* Struct copy of the caller's filter info */
4451 tmp->fltr_info = *fi;
4453 /* Overwrite these fields to indicate which VSI to remove filter from,
4454 * so find and remove logic can extract the information from the
4455 * list entries. Note that original entries will still have proper
4458 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
4459 tmp->fltr_info.vsi_handle = vsi_handle;
4460 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4462 LIST_ADD(&tmp->list_entry, vsi_list_head);
4468 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4469 * @hw: pointer to the hardware structure
4470 * @vsi_handle: VSI handle to remove filters from
4471 * @lkup_list_head: pointer to the list that has certain lookup type filters
4472 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4474 * Locates all filters in lkup_list_head that are used by the given VSI,
4475 * and adds COPIES of those entries to vsi_list_head (intended to be used
4476 * to remove the listed filters).
4477 * Note that this means all entries in vsi_list_head must be explicitly
4478 * deallocated by the caller when done with list.
4480 static enum ice_status
4481 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4482 struct LIST_HEAD_TYPE *lkup_list_head,
4483 struct LIST_HEAD_TYPE *vsi_list_head)
4485 struct ice_fltr_mgmt_list_entry *fm_entry;
4486 enum ice_status status = ICE_SUCCESS;
4488 /* check to make sure VSI ID is valid and within boundary */
4489 if (!ice_is_vsi_valid(hw, vsi_handle))
4490 return ICE_ERR_PARAM;
4492 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
4493 ice_fltr_mgmt_list_entry, list_entry) {
4494 struct ice_fltr_info *fi;
4496 fi = &fm_entry->fltr_info;
/* Skip entries that do not forward to (or list) the requested VSI */
4497 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
4500 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4509 * ice_determine_promisc_mask
4510 * @fi: filter info to parse
4512 * Helper function to determine which ICE_PROMISC_ mask corresponds
4513 * to given filter into.
4515 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4517 u16 vid = fi->l_data.mac_vlan.vlan_id;
4518 u8 *macaddr = fi->l_data.mac.mac_addr;
4519 bool is_tx_fltr = false;
4520 u8 promisc_mask = 0;
/* NOTE(review): the statement setting is_tx_fltr = true for the Tx case
 * was lost in extraction — only the condition line survives.
 */
4522 if (fi->flag == ICE_FLTR_TX)
/* Classify by MAC class (broadcast / multicast / unicast), picking the
 * Tx or Rx variant of the promiscuous flag accordingly.
 */
4525 if (IS_BROADCAST_ETHER_ADDR(macaddr))
4526 promisc_mask |= is_tx_fltr ?
4527 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4528 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
4529 promisc_mask |= is_tx_fltr ?
4530 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4531 else if (IS_UNICAST_ETHER_ADDR(macaddr))
4532 promisc_mask |= is_tx_fltr ?
4533 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
/* Presumably guarded by a VLAN-id check (vid) lost in extraction */
4535 promisc_mask |= is_tx_fltr ?
4536 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4538 return promisc_mask;
4542 * _ice_get_vsi_promisc - get promiscuous mode of given VSI
4543 * @hw: pointer to the hardware structure
4544 * @vsi_handle: VSI handle to retrieve info from
4545 * @promisc_mask: pointer to mask to be filled in
4546 * @vid: VLAN ID of promisc VLAN VSI
4547 * @sw: pointer to the switch info struct from which the rules are read
4549 static enum ice_status
4550 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4551 u16 *vid, struct ice_switch_info *sw)
4553 struct ice_fltr_mgmt_list_entry *itr;
4554 struct LIST_HEAD_TYPE *rule_head;
4555 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4557 if (!ice_is_vsi_valid(hw, vsi_handle))
4558 return ICE_ERR_PARAM;
/* Scan the plain (non-VLAN) promisc recipe's filter rule list */
4562 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
4563 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
4565 ice_acquire_lock(rule_lock);
4566 LIST_FOR_EACH_ENTRY(itr, rule_head,
4567 ice_fltr_mgmt_list_entry, list_entry) {
4568 /* Continue if this filter doesn't apply to this VSI or the
4569 * VSI ID is not in the VSI map for this filter
4571 if (!ice_vsi_uses_fltr(itr, vsi_handle))
/* Accumulate the promisc bits contributed by each matching rule */
4574 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4576 ice_release_lock(rule_lock);
4582 * ice_get_vsi_promisc - get promiscuous mode of given VSI
4583 * @hw: pointer to the hardware structure
4584 * @vsi_handle: VSI handle to retrieve info from
4585 * @promisc_mask: pointer to mask to be filled in
4586 * @vid: VLAN ID of promisc VLAN VSI
 *
 * Public wrapper: queries hw->switch_info (the PF's own switch info).
4589 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4592 return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
4593 vid, hw->switch_info);
4597 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4598 * @hw: pointer to the hardware structure
4599 * @vsi_handle: VSI handle to retrieve info from
4600 * @promisc_mask: pointer to mask to be filled in
4601 * @vid: VLAN ID of promisc VLAN VSI
4602 * @sw: pointer to the switch info struct from which the rules are read
4604 static enum ice_status
4605 _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4606 u16 *vid, struct ice_switch_info *sw)
4608 struct ice_fltr_mgmt_list_entry *itr;
4609 struct LIST_HEAD_TYPE *rule_head;
4610 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4612 if (!ice_is_vsi_valid(hw, vsi_handle))
4613 return ICE_ERR_PARAM;
/* Same walk as _ice_get_vsi_promisc, but over the PROMISC_VLAN recipe */
4617 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
4618 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
4620 ice_acquire_lock(rule_lock);
4621 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
4623 /* Continue if this filter doesn't apply to this VSI or the
4624 * VSI ID is not in the VSI map for this filter
4626 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4629 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4631 ice_release_lock(rule_lock);
4637 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4638 * @hw: pointer to the hardware structure
4639 * @vsi_handle: VSI handle to retrieve info from
4640 * @promisc_mask: pointer to mask to be filled in
4641 * @vid: VLAN ID of promisc VLAN VSI
 *
 * Public wrapper: queries hw->switch_info (the PF's own switch info).
4644 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4647 return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
4648 vid, hw->switch_info);
4652 * ice_remove_promisc - Remove promisc based filter rules
4653 * @hw: pointer to the hardware structure
4654 * @recp_id: recipe ID for which the rule needs to be removed
4655 * @v_list: list of promisc entries
 *
 * Removes each entry in v_list from the given recipe's rule list,
 * recording per-entry status; stops at the first failure.
4657 static enum ice_status
4658 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
4659 struct LIST_HEAD_TYPE *v_list)
4661 struct ice_fltr_list_entry *v_list_itr, *tmp;
4662 struct ice_sw_recipe *recp_list;
4664 recp_list = &hw->switch_info->recp_list[recp_id];
4665 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4667 v_list_itr->status =
4668 ice_remove_rule_internal(hw, recp_list, v_list_itr);
4669 if (v_list_itr->status)
4670 return v_list_itr->status;
4676 * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
4677 * @hw: pointer to the hardware structure
4678 * @vsi_handle: VSI handle to clear mode
4679 * @promisc_mask: mask of promiscuous config bits to clear
4680 * @vid: VLAN ID to clear VLAN promiscuous
4681 * @sw: pointer to the switch info struct holding the rules to update
4683 static enum ice_status
4684 _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4685 u16 vid, struct ice_switch_info *sw)
4687 struct ice_fltr_list_entry *fm_entry, *tmp;
4688 struct LIST_HEAD_TYPE remove_list_head;
4689 struct ice_fltr_mgmt_list_entry *itr;
4690 struct LIST_HEAD_TYPE *rule_head;
4691 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4692 enum ice_status status = ICE_SUCCESS;
4695 if (!ice_is_vsi_valid(hw, vsi_handle))
4696 return ICE_ERR_PARAM;
/* VLAN promisc bits select the PROMISC_VLAN recipe; otherwise PROMISC */
4698 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
4699 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4701 recipe_id = ICE_SW_LKUP_PROMISC;
4703 rule_head = &sw->recp_list[recipe_id].filt_rules;
4704 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4706 INIT_LIST_HEAD(&remove_list_head);
/* Collect matching rules under the lock, remove them after release */
4708 ice_acquire_lock(rule_lock);
4709 LIST_FOR_EACH_ENTRY(itr, rule_head,
4710 ice_fltr_mgmt_list_entry, list_entry) {
4711 struct ice_fltr_info *fltr_info;
4712 u8 fltr_promisc_mask = 0;
4714 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4716 fltr_info = &itr->fltr_info;
/* For VLAN promisc, only the rule matching the requested VID is cleared */
4718 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4719 vid != fltr_info->l_data.mac_vlan.vlan_id)
4722 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4724 /* Skip if filter is not completely specified by given mask */
4725 if (fltr_promisc_mask & ~promisc_mask)
4728 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4732 ice_release_lock(rule_lock);
4733 goto free_fltr_list;
4736 ice_release_lock(rule_lock);
4738 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
/* Free the temporary copies regardless of removal outcome */
4741 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4742 ice_fltr_list_entry, list_entry) {
4743 LIST_DEL(&fm_entry->list_entry);
4744 ice_free(hw, fm_entry);
4751 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
4752 * @hw: pointer to the hardware structure
4753 * @vsi_handle: VSI handle to clear mode
4754 * @promisc_mask: mask of promiscuous config bits to clear
4755 * @vid: VLAN ID to clear VLAN promiscuous
 *
 * Public wrapper: operates on hw->switch_info (the PF's own switch info).
4758 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
4759 u8 promisc_mask, u16 vid)
4761 return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
4762 vid, hw->switch_info);
4766 * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4767 * @hw: pointer to the hardware structure
4768 * @vsi_handle: VSI handle to configure
4769 * @promisc_mask: mask of promiscuous config bits
4770 * @vid: VLAN ID to set VLAN promiscuous
4771 * @lport: logical port number to configure promisc mode
4772 * @sw: pointer to the switch info struct to which the rules are added
4774 static enum ice_status
4775 _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4776 u16 vid, u8 lport, struct ice_switch_info *sw)
4778 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4779 struct ice_fltr_list_entry f_list_entry;
4780 struct ice_fltr_info new_fltr;
4781 enum ice_status status = ICE_SUCCESS;
4787 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4789 if (!ice_is_vsi_valid(hw, vsi_handle))
4790 return ICE_ERR_PARAM;
4791 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4793 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
/* VLAN promisc requests go through the PROMISC_VLAN recipe with the VID */
4795 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4796 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4797 new_fltr.l_data.mac_vlan.vlan_id = vid;
4798 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4800 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4801 recipe_id = ICE_SW_LKUP_PROMISC;
4804 /* Separate filters must be set for each direction/packet type
4805 * combination, so we will loop over the mask value, store the
4806 * individual type, and clear it out in the input mask as it
4809 while (promisc_mask) {
4810 struct ice_sw_recipe *recp_list;
/* Pick one pending UCAST/MCAST/BCAST x RX/TX bit per iteration and
 * clear it so the loop terminates once all requested bits are handled.
 */
4816 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4817 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4818 pkt_type = UCAST_FLTR;
4819 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4820 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4821 pkt_type = UCAST_FLTR;
4823 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4824 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4825 pkt_type = MCAST_FLTR;
4826 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4827 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4828 pkt_type = MCAST_FLTR;
4830 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4831 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4832 pkt_type = BCAST_FLTR;
4833 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4834 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4835 pkt_type = BCAST_FLTR;
4839 /* Check for VLAN promiscuous flag */
4840 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4841 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4842 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4843 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4847 /* Set filter DA based on packet type */
4848 mac_addr = new_fltr.l_data.mac.mac_addr;
4849 if (pkt_type == BCAST_FLTR) {
4850 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
4851 } else if (pkt_type == MCAST_FLTR ||
4852 pkt_type == UCAST_FLTR) {
4853 /* Use the dummy ether header DA */
4854 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
4855 ICE_NONDMA_TO_NONDMA);
4856 if (pkt_type == MCAST_FLTR)
4857 mac_addr[0] |= 0x1; /* Set multicast bit */
4860 /* Need to reset this to zero for all iterations */
/* TX filters use the VSI as source; RX filters use the logical port */
4863 new_fltr.flag |= ICE_FLTR_TX;
4864 new_fltr.src = hw_vsi_id;
4866 new_fltr.flag |= ICE_FLTR_RX;
4867 new_fltr.src = lport;
4870 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4871 new_fltr.vsi_handle = vsi_handle;
4872 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4873 f_list_entry.fltr_info = new_fltr;
4874 recp_list = &sw->recp_list[recipe_id];
4876 status = ice_add_rule_internal(hw, recp_list, lport,
4878 if (status != ICE_SUCCESS)
4879 goto set_promisc_exit;
4887 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4888 * @hw: pointer to the hardware structure
4889 * @vsi_handle: VSI handle to configure
4890 * @promisc_mask: mask of promiscuous config bits
4891 * @vid: VLAN ID to set VLAN promiscuous
 *
 * Public wrapper: uses the PF's own lport and hw->switch_info.
4894 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4897 return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
4898 hw->port_info->lport,
4903 * _ice_set_vlan_vsi_promisc
4904 * @hw: pointer to the hardware structure
4905 * @vsi_handle: VSI handle to configure
4906 * @promisc_mask: mask of promiscuous config bits
4907 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4908 * @lport: logical port number to configure promisc mode
4909 * @sw: pointer to the switch info struct to which the rules are added
4911 * Configure VSI with all associated VLANs to given promiscuous mode(s)
4913 static enum ice_status
4914 _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4915 bool rm_vlan_promisc, u8 lport,
4916 struct ice_switch_info *sw)
4918 struct ice_fltr_list_entry *list_itr, *tmp;
4919 struct LIST_HEAD_TYPE vsi_list_head;
4920 struct LIST_HEAD_TYPE *vlan_head;
4921 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
4922 enum ice_status status;
/* Snapshot (copy) this VSI's VLAN filters under the VLAN rule lock,
 * then set/clear promisc per VLAN without holding the lock.
 */
4925 INIT_LIST_HEAD(&vsi_list_head);
4926 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4927 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4928 ice_acquire_lock(vlan_lock);
4929 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4931 ice_release_lock(vlan_lock);
4933 goto free_fltr_list;
4935 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
4937 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4938 if (rm_vlan_promisc)
4939 status = _ice_clear_vsi_promisc(hw, vsi_handle,
4943 status = _ice_set_vsi_promisc(hw, vsi_handle,
4944 promisc_mask, vlan_id,
/* Free the temporary filter copies made above */
4951 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
4952 ice_fltr_list_entry, list_entry) {
4953 LIST_DEL(&list_itr->list_entry);
4954 ice_free(hw, list_itr);
4960 * ice_set_vlan_vsi_promisc
4961 * @hw: pointer to the hardware structure
4962 * @vsi_handle: VSI handle to configure
4963 * @promisc_mask: mask of promiscuous config bits
4964 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4966 * Configure VSI with all associated VLANs to given promiscuous mode(s)
 *
 * Public wrapper: uses the PF's own lport and hw->switch_info.
4969 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4970 bool rm_vlan_promisc)
4972 return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
4973 rm_vlan_promisc, hw->port_info->lport,
4978 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4979 * @hw: pointer to the hardware structure
4980 * @vsi_handle: VSI handle to remove filters from
4981 * @recp_list: recipe list from which function removes the filters
4982 * @lkup: switch rule filter lookup type
4985 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4986 struct ice_sw_recipe *recp_list,
4987 enum ice_sw_lkup_type lkup)
4989 struct ice_fltr_list_entry *fm_entry;
4990 struct LIST_HEAD_TYPE remove_list_head;
4991 struct LIST_HEAD_TYPE *rule_head;
4992 struct ice_fltr_list_entry *tmp;
4993 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4994 enum ice_status status;
/* Build a private list of this VSI's filters of the given lookup type */
4996 INIT_LIST_HEAD(&remove_list_head);
4997 rule_lock = &recp_list[lkup].filt_rule_lock;
4998 rule_head = &recp_list[lkup].filt_rules;
4999 ice_acquire_lock(rule_lock);
5000 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
5002 ice_release_lock(rule_lock);
/* Dispatch to the removal routine matching the lookup type */
5007 case ICE_SW_LKUP_MAC:
5008 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
5010 case ICE_SW_LKUP_VLAN:
5011 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
5013 case ICE_SW_LKUP_PROMISC:
5014 case ICE_SW_LKUP_PROMISC_VLAN:
5015 ice_remove_promisc(hw, lkup, &remove_list_head);
5017 case ICE_SW_LKUP_MAC_VLAN:
5018 ice_remove_mac_vlan(hw, &remove_list_head);
5020 case ICE_SW_LKUP_ETHERTYPE:
5021 case ICE_SW_LKUP_ETHERTYPE_MAC:
5022 ice_remove_eth_mac(hw, &remove_list_head);
5024 case ICE_SW_LKUP_DFLT:
5025 ice_debug(hw, ICE_DBG_SW,
5026 "Remove filters for this lookup type hasn't been implemented yet\n");
5028 case ICE_SW_LKUP_LAST:
5029 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
/* Free the temporary filter copies */
5033 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
5034 ice_fltr_list_entry, list_entry) {
5035 LIST_DEL(&fm_entry->list_entry);
5036 ice_free(hw, fm_entry);
5041 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
5042 * @hw: pointer to the hardware structure
5043 * @vsi_handle: VSI handle to remove filters from
5044 * @sw: pointer to switch info struct
 *
 * Removes this VSI's filters for every supported lookup type, one
 * recipe at a time.
5047 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
5048 struct ice_switch_info *sw)
5050 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
5052 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5053 sw->recp_list, ICE_SW_LKUP_MAC);
5054 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5055 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
5056 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5057 sw->recp_list, ICE_SW_LKUP_PROMISC);
5058 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5059 sw->recp_list, ICE_SW_LKUP_VLAN);
5060 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5061 sw->recp_list, ICE_SW_LKUP_DFLT);
5062 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5063 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
5064 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5065 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
5066 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5067 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
5071 * ice_remove_vsi_fltr - Remove all filters for a VSI
5072 * @hw: pointer to the hardware structure
5073 * @vsi_handle: VSI handle to remove filters from
 *
 * Public wrapper: operates on hw->switch_info (the PF's own switch info).
5075 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
5077 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
5081 * ice_alloc_res_cntr - allocating resource counter
5082 * @hw: pointer to the hardware structure
5083 * @type: type of resource
5084 * @alloc_shared: if set it is shared else dedicated
5085 * @num_items: number of entries requested for FD resource type
5086 * @counter_id: counter index returned by AQ call
 *
 * Allocates a counter resource via the admin queue alloc-resource
 * command and returns the firmware-assigned index in @counter_id.
5089 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
5092 struct ice_aqc_alloc_free_res_elem *buf;
5093 enum ice_status status;
5096 /* Allocate resource */
5097 buf_len = sizeof(*buf);
5098 buf = (struct ice_aqc_alloc_free_res_elem *)
5099 ice_malloc(hw, buf_len);
5101 return ICE_ERR_NO_MEMORY;
5103 buf->num_elems = CPU_TO_LE16(num_items);
5104 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
5105 ICE_AQC_RES_TYPE_M) | alloc_shared);
5107 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
5108 ice_aqc_opc_alloc_res, NULL);
/* On success, FW returns the allocated counter index in the first elem */
5112 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
5120 * ice_free_res_cntr - free resource counter
5121 * @hw: pointer to the hardware structure
5122 * @type: type of resource
5123 * @alloc_shared: if set it is shared else dedicated
5124 * @num_items: number of entries to be freed for FD resource type
5125 * @counter_id: counter ID resource which needs to be freed
 *
 * Frees a previously allocated counter resource via the admin queue
 * free-resource command.
5128 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
5131 struct ice_aqc_alloc_free_res_elem *buf;
5132 enum ice_status status;
5136 buf_len = sizeof(*buf);
5137 buf = (struct ice_aqc_alloc_free_res_elem *)
5138 ice_malloc(hw, buf_len);
5140 return ICE_ERR_NO_MEMORY;
5142 buf->num_elems = CPU_TO_LE16(num_items);
5143 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
5144 ICE_AQC_RES_TYPE_M) | alloc_shared);
5145 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
5147 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
5148 ice_aqc_opc_free_res, NULL);
5150 ice_debug(hw, ICE_DBG_SW,
5151 "counter resource could not be freed\n");
5158 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
5159 * @hw: pointer to the hardware structure
5160 * @counter_id: returns counter index
 *
 * Convenience wrapper: allocates one dedicated VLAN counter.
5162 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
5164 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
5165 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
5170 * ice_free_vlan_res_counter - Free counter resource for VLAN type
5171 * @hw: pointer to the hardware structure
5172 * @counter_id: counter index to be freed
 *
 * Convenience wrapper: frees one dedicated VLAN counter.
5174 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
5176 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
5177 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
5182 * ice_alloc_res_lg_act - add large action resource
5183 * @hw: pointer to the hardware structure
5184 * @l_id: large action ID to fill it in
5185 * @num_acts: number of actions to hold with a large action entry
5187 static enum ice_status
5188 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
5190 struct ice_aqc_alloc_free_res_elem *sw_buf;
5191 enum ice_status status;
5194 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
5195 return ICE_ERR_PARAM;
5197 /* Allocate resource for large action */
5198 buf_len = sizeof(*sw_buf);
5199 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
5200 ice_malloc(hw, buf_len);
5202 return ICE_ERR_NO_MEMORY;
5204 sw_buf->num_elems = CPU_TO_LE16(1);
5206 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
5207 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2 (the code
5208 * below selects WIDE_TABLE_2 in that case). If num_acts is greater
5209 * than 2, then use ICE_AQC_RES_TYPE_WIDE_TABLE_4.
5210 * The num_acts cannot exceed 4. This was ensured at the
5211 * beginning of the function.
5214 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
5215 else if (num_acts == 2)
5216 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
5218 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
5220 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
5221 ice_aqc_opc_alloc_res, NULL);
5223 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
5225 ice_free(hw, sw_buf);
5230 * ice_add_mac_with_sw_marker - add filter with sw marker
5231 * @hw: pointer to the hardware structure
5232 * @f_info: filter info structure containing the MAC filter information
5233 * @sw_marker: sw marker to tag the Rx descriptor with
5236 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
5239 struct ice_fltr_mgmt_list_entry *m_entry;
5240 struct ice_fltr_list_entry fl_info;
5241 struct ice_sw_recipe *recp_list;
5242 struct LIST_HEAD_TYPE l_head;
5243 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5244 enum ice_status ret;
/* Only forward-to-VSI MAC filters with a valid marker are supported */
5248 if (f_info->fltr_act != ICE_FWD_TO_VSI)
5249 return ICE_ERR_PARAM;
5251 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5252 return ICE_ERR_PARAM;
5254 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
5255 return ICE_ERR_PARAM;
5257 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5258 return ICE_ERR_PARAM;
5259 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5261 /* Add filter if it doesn't exist so then the adding of large
5262 * action always results in update
5265 INIT_LIST_HEAD(&l_head);
5266 fl_info.fltr_info = *f_info;
5267 LIST_ADD(&fl_info.list_entry, &l_head);
5269 entry_exists = false;
5270 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5271 hw->port_info->lport);
5272 if (ret == ICE_ERR_ALREADY_EXISTS)
5273 entry_exists = true;
5277 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5278 rule_lock = &recp_list->filt_rule_lock;
5279 ice_acquire_lock(rule_lock);
5280 /* Get the book keeping entry for the filter */
5281 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5285 /* If counter action was enabled for this rule then don't enable
5286 * sw marker large action
5288 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5289 ret = ICE_ERR_PARAM;
5293 /* if same marker was added before */
5294 if (m_entry->sw_marker_id == sw_marker) {
5295 ret = ICE_ERR_ALREADY_EXISTS;
5299 /* Allocate a hardware table entry to hold large act. Three actions
5300 * for marker based large action
5302 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
5306 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5309 /* Update the switch rule to add the marker action */
5310 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
5312 ice_release_lock(rule_lock);
5317 ice_release_lock(rule_lock);
5318 /* only remove entry if it did not exist previously */
5320 ret = ice_remove_mac(hw, &l_head);
5326 * ice_add_mac_with_counter - add filter with counter enabled
5327 * @hw: pointer to the hardware structure
5328 * @f_info: pointer to filter info structure containing the MAC filter
5332 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
5334 struct ice_fltr_mgmt_list_entry *m_entry;
5335 struct ice_fltr_list_entry fl_info;
5336 struct ice_sw_recipe *recp_list;
5337 struct LIST_HEAD_TYPE l_head;
5338 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5339 enum ice_status ret;
/* Only forward-to-VSI MAC filters on a valid VSI are supported */
5344 if (f_info->fltr_act != ICE_FWD_TO_VSI)
5345 return ICE_ERR_PARAM;
5347 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5348 return ICE_ERR_PARAM;
5350 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5351 return ICE_ERR_PARAM;
5352 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5353 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5355 entry_exist = false;
5357 rule_lock = &recp_list->filt_rule_lock;
5359 /* Add filter if it doesn't exist so then the adding of large
5360 * action always results in update
5362 INIT_LIST_HEAD(&l_head);
5364 fl_info.fltr_info = *f_info;
5365 LIST_ADD(&fl_info.list_entry, &l_head);
5367 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5368 hw->port_info->lport);
5369 if (ret == ICE_ERR_ALREADY_EXISTS)
5374 ice_acquire_lock(rule_lock);
5375 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5377 ret = ICE_ERR_BAD_PTR;
5381 /* Don't enable counter for a filter for which sw marker was enabled */
5382 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
5383 ret = ICE_ERR_PARAM;
5387 /* If a counter was already enabled then don't need to add again */
5388 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5389 ret = ICE_ERR_ALREADY_EXISTS;
5393 /* Allocate a hardware table entry to VLAN counter */
5394 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
5398 /* Allocate a hardware table entry to hold large act. Two actions for
5399 * counter based large action
5401 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
5405 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5408 /* Update the switch rule to add the counter action */
5409 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
5411 ice_release_lock(rule_lock);
5416 ice_release_lock(rule_lock);
5417 /* only remove entry if it did not exist previously */
5419 ret = ice_remove_mac(hw, &l_head);
5424 /* This is mapping table entry that maps every word within a given protocol
5425 * structure to the real byte offset as per the specification of that
5427 * for example dst address is 3 words in ethertype header and corresponding
5428 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
5429 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
5430 * matching entry describing its field. This needs to be updated if new
5431 * structure is added to that union.
 *
 * Each row: protocol type followed by the byte offset of each 16-bit
 * word of that header, in declaration order.
5433 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
5434 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
5435 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
5436 { ICE_ETYPE_OL, { 0 } },
5437 { ICE_VLAN_OFOS, { 0, 2 } },
5438 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
5439 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
5440 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
5441 26, 28, 30, 32, 34, 36, 38 } },
5442 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
5443 26, 28, 30, 32, 34, 36, 38 } },
5444 { ICE_TCP_IL, { 0, 2 } },
5445 { ICE_UDP_OF, { 0, 2 } },
5446 { ICE_UDP_ILOS, { 0, 2 } },
5447 { ICE_SCTP_IL, { 0, 2 } },
5448 { ICE_VXLAN, { 8, 10, 12, 14 } },
5449 { ICE_GENEVE, { 8, 10, 12, 14 } },
5450 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
5451 { ICE_NVGRE, { 0, 2, 4, 6 } },
5452 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
5453 { ICE_PPPOE, { 0, 2, 4, 6 } },
5454 { ICE_PFCP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
5455 { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
5456 { ICE_ESP, { 0, 2, 4, 6 } },
5457 { ICE_AH, { 0, 2, 4, 6, 8, 10 } },
5458 { ICE_NAT_T, { 8, 10, 12, 14 } },
5461 /* The following table describes preferred grouping of recipes.
5462 * If a recipe that needs to be programmed is a superset or matches one of the
5463 * following combinations, then the recipe needs to be chained as per the
 *
 * Maps each software protocol type to the hardware protocol ID used in
 * field vectors. Note several tunnel types (VXLAN/GENEVE/GTP/...) share
 * the UDP hardware IDs since they are UDP-encapsulated.
5467 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
5468 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
5469 { ICE_MAC_IL, ICE_MAC_IL_HW },
5470 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
5471 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
5472 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
5473 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
5474 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
5475 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
5476 { ICE_TCP_IL, ICE_TCP_IL_HW },
5477 { ICE_UDP_OF, ICE_UDP_OF_HW },
5478 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
5479 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
5480 { ICE_VXLAN, ICE_UDP_OF_HW },
5481 { ICE_GENEVE, ICE_UDP_OF_HW },
5482 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
5483 { ICE_NVGRE, ICE_GRE_OF_HW },
5484 { ICE_GTP, ICE_UDP_OF_HW },
5485 { ICE_PPPOE, ICE_PPPOE_HW },
5486 { ICE_PFCP, ICE_UDP_ILOS_HW },
5487 { ICE_L2TPV3, ICE_L2TPV3_HW },
5488 { ICE_ESP, ICE_ESP_HW },
5489 { ICE_AH, ICE_AH_HW },
5490 { ICE_NAT_T, ICE_UDP_ILOS_HW },
5494 * ice_find_recp - find a recipe
5495 * @hw: pointer to the hardware structure
5496 * @lkup_exts: extension sequence to match
 * @tun_type: tunnel type the recipe must match (or ICE_SW_TUN_AND_NON_TUN)
5498 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
5500 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
5501 enum ice_sw_tunnel_type tun_type)
5503 bool refresh_required = true;
5504 struct ice_sw_recipe *recp;
5507 /* Walk through existing recipes to find a match */
5508 recp = hw->switch_info->recp_list;
5509 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5510 /* If recipe was not created for this ID, in SW bookkeeping,
5511 * check if FW has an entry for this recipe. If the FW has an
5512 * entry update it in our SW bookkeeping and continue with the
5515 if (!recp[i].recp_created)
5516 if (ice_get_recp_frm_fw(hw,
5517 hw->switch_info->recp_list, i,
5521 /* Skip inverse action recipes */
5522 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
5523 ICE_AQ_RECIPE_ACT_INV_ACT)
5526 /* if number of words we are looking for match */
5527 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
5528 struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
5529 struct ice_fv_word *be = lkup_exts->fv_words;
5530 u16 *cr = recp[i].lkup_exts.field_mask;
5531 u16 *de = lkup_exts->field_mask;
5535 /* ar, cr, and qr are related to the recipe words, while
5536 * be, de, and pe are related to the lookup words
5538 for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
5539 for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
5541 if (ar[qr].off == be[pe].off &&
5542 ar[qr].prot_id == be[pe].prot_id &&
5544 /* Found the "pe"th word in the
5549 /* After walking through all the words in the
5550 * "i"th recipe if "p"th word was not found then
5551 * this recipe is not what we are looking for.
5552 * So break out from this loop and try the next
5555 if (qr >= recp[i].lkup_exts.n_val_words) {
5560 /* If for "i"th recipe the found was never set to false
5561 * then it means we found our match
5563 if ((tun_type == recp[i].tun_type ||
5564 tun_type == ICE_SW_TUN_AND_NON_TUN) && found)
5565 return i; /* Return the recipe ID */
/* No recipe matched all lookup words and the tunnel type */
5568 return ICE_MAX_NUM_RECIPES;
5572 * ice_prot_type_to_id - get protocol ID from protocol type
5573 * @type: protocol type
5574 * @id: pointer to variable that will receive the ID
5576 * Returns true if found, false otherwise
 *
 * Linear search of ice_prot_id_tbl for the SW->HW protocol ID mapping.
5578 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
5582 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
5583 if (ice_prot_id_tbl[i].type == type) {
5584 *id = ice_prot_id_tbl[i].protocol_id;
5591 * ice_fill_valid_words - count valid words
5592 * @rule: advanced rule with lookup information
5593 * @lkup_exts: byte offset extractions of the words that are valid
5595 * calculate valid words in a lookup rule using mask value
 *
 * Appends every 16-bit word of @rule whose mask is non-zero to
 * @lkup_exts (offset, protocol ID, mask); returns the number of
 * words added.
5598 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
5599 struct ice_prot_lkup_ext *lkup_exts)
5601 u8 j, word, prot_id, ret_val;
5603 if (!ice_prot_type_to_id(rule->type, &prot_id))
5606 word = lkup_exts->n_val_words;
5608 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
5609 if (((u16 *)&rule->m_u)[j] &&
5610 rule->type < ARRAY_SIZE(ice_prot_ext)) {
5611 /* No more space to accommodate */
5612 if (word >= ICE_MAX_CHAIN_WORDS)
5614 lkup_exts->fv_words[word].off =
5615 ice_prot_ext[rule->type].offs[j];
5616 lkup_exts->fv_words[word].prot_id =
5617 ice_prot_id_tbl[rule->type].protocol_id;
5618 lkup_exts->field_mask[word] =
5619 BE16_TO_CPU(((__be16 *)&rule->m_u)[j]);
/* Number of words appended during this call */
5623 ret_val = word - lkup_exts->n_val_words;
5624 lkup_exts->n_val_words = word;
5630 * ice_create_first_fit_recp_def - Create a recipe grouping
5631 * @hw: pointer to the hardware structure
5632 * @lkup_exts: an array of protocol header extractions
5633 * @rg_list: pointer to a list that stores new recipe groups
5634 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
5636 * Using first fit algorithm, take all the words that are still not done
5637 * and start grouping them in 4-word groups. Each group makes up one
5640 static enum ice_status
5641 ice_create_first_fit_recp_def(struct ice_hw *hw,
5642 struct ice_prot_lkup_ext *lkup_exts,
5643 struct LIST_HEAD_TYPE *rg_list,
5646 struct ice_pref_recipe_group *grp = NULL;
/* With no valid words, still emit one empty group so a recipe exists */
5651 if (!lkup_exts->n_val_words) {
5652 struct ice_recp_grp_entry *entry;
5654 entry = (struct ice_recp_grp_entry *)
5655 ice_malloc(hw, sizeof(*entry));
5657 return ICE_ERR_NO_MEMORY;
5658 LIST_ADD(&entry->l_entry, rg_list);
5659 grp = &entry->r_group;
5661 grp->n_val_pairs = 0;
5664 /* Walk through every word in the rule to check if it is not done. If so
5665 * then this word needs to be part of a new recipe.
5667 for (j = 0; j < lkup_exts->n_val_words; j++)
5668 if (!ice_is_bit_set(lkup_exts->done, j)) {
/* Start a new group when none exists or the current one is full */
5670 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
5671 struct ice_recp_grp_entry *entry;
5673 entry = (struct ice_recp_grp_entry *)
5674 ice_malloc(hw, sizeof(*entry));
5676 return ICE_ERR_NO_MEMORY;
5677 LIST_ADD(&entry->l_entry, rg_list);
5678 grp = &entry->r_group;
/* Copy the word's protocol/offset/mask into the current group */
5682 grp->pairs[grp->n_val_pairs].prot_id =
5683 lkup_exts->fv_words[j].prot_id;
5684 grp->pairs[grp->n_val_pairs].off =
5685 lkup_exts->fv_words[j].off;
5686 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
5694 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
5695 * @hw: pointer to the hardware structure
5696 * @fv_list: field vector with the extraction sequence information
5697 * @rg_list: recipe groupings with protocol-offset pairs
5699 * Helper function to fill in the field vector indices for protocol-offset
5700 * pairs. These indexes are then ultimately programmed into a recipe.
5702 static enum ice_status
5703 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
5704 struct LIST_HEAD_TYPE *rg_list)
5706 struct ice_sw_fv_list_entry *fv;
5707 struct ice_recp_grp_entry *rg;
5708 struct ice_fv_word *fv_ext;
/* Nothing to index against — not an error, just no work */
5710 if (LIST_EMPTY(fv_list))
/* Pick the first field vector; per the caller's comment all listed FVs
 * share the same extraction layout for the words we care about.
 */
5713 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
5714 fv_ext = fv->fv_ptr->ew;
5716 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
5719 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
5720 struct ice_fv_word *pr;
5725 pr = &rg->r_group.pairs[i];
5726 mask = rg->r_group.mask[i];
/* Scan the extraction words for a matching protocol/offset */
5728 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
5729 if (fv_ext[j].prot_id == pr->prot_id &&
5730 fv_ext[j].off == pr->off) {
5733 /* Store index of field vector */
5735 rg->fv_mask[i] = mask;
5739 /* Protocol/offset could not be found, caller gave an
5743 return ICE_ERR_PARAM;
5751 * ice_find_free_recp_res_idx - find free result indexes for recipe
5752 * @hw: pointer to hardware structure
5753 * @profiles: bitmap of profiles that will be associated with the new recipe
5754 * @free_idx: pointer to variable to receive the free index bitmap
5756 * The algorithm used here is:
5757 * 1. When creating a new recipe, create a set P which contains all
5758 * Profiles that will be associated with our new recipe
5760 * 2. For each Profile p in set P:
5761 * a. Add all recipes associated with Profile p into set R
5762 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
5763 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
5764 * i. Or just assume they all have the same possible indexes:
5766 * i.e., PossibleIndexes = 0x0000F00000000000
5768 * 3. For each Recipe r in set R:
5769 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
5770 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
5772 * FreeIndexes will contain the bits indicating the indexes free for use,
5773 * then the code needs to update the recipe[r].used_result_idx_bits to
5774 * indicate which indexes were selected for use by this recipe.
5777 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
5778 ice_bitmap_t *free_idx)
5780 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5781 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5782 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
/* Start from clean bitmaps; possible_idx is then filled fully below */
5786 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5787 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5788 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
5789 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
/* Initially every result-index slot is considered possible */
5791 for (count = 0; count < ICE_MAX_FV_WORDS; count++)
5792 ice_set_bit(count, possible_idx);
5794 /* For each profile we are going to associate the recipe with, add the
5795 * recipes that are associated with that profile. This will give us
5796 * the set of recipes that our recipe may collide with. Also, determine
5797 * what possible result indexes are usable given this set of profiles.
5800 while (ICE_MAX_NUM_PROFILES >
5801 (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
5802 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
5803 ICE_MAX_NUM_RECIPES);
5804 ice_and_bitmap(possible_idx, possible_idx,
5805 hw->switch_info->prof_res_bm[bit],
5810 /* For each recipe that our new recipe may collide with, determine
5811 * which indexes have been used.
5813 for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
5814 if (ice_is_bit_set(recipes, bit)) {
5815 ice_or_bitmap(used_idx, used_idx,
5816 hw->switch_info->recp_list[bit].res_idxs,
/* free = possible XOR used (used is a subset of possible here) */
5820 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
5822 /* return number of free indexes */
5825 while (ICE_MAX_FV_WORDS >
5826 (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
5835 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
5836 * @hw: pointer to hardware structure
5837 * @rm: recipe management list entry
5838 * @match_tun_mask: tunnel mask that needs to be programmed
5839 * @profiles: bitmap of profiles that will be associated.
 *
 * Allocates recipe IDs, builds the AQ recipe buffers for every recipe group
 * in @rm->rg_list (plus a chaining "root" recipe when more than one group is
 * needed), programs them via ice_aq_add_recipe(), and mirrors the result into
 * the software recp_list cache.
5841 static enum ice_status
5842 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
5843 u16 match_tun_mask, ice_bitmap_t *profiles)
5845 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5846 struct ice_aqc_recipe_data_elem *tmp;
5847 struct ice_aqc_recipe_data_elem *buf;
5848 struct ice_recp_grp_entry *entry;
5849 enum ice_status status;
5855 /* When more than one recipe are required, another recipe is needed to
5856 * chain them together. Matching a tunnel metadata ID takes up one of
5857 * the match fields in the chaining recipe reducing the number of
5858 * chained recipes by one.
5860 /* check number of free result indices */
5861 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5862 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
5864 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
5865 free_res_idx, rm->n_grp_count);
/* Chained recipes need one free result index per non-root recipe */
5867 if (rm->n_grp_count > 1) {
5868 if (rm->n_grp_count > free_res_idx)
5869 return ICE_ERR_MAX_LIMIT;
5874 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
5875 return ICE_ERR_MAX_LIMIT;
/* tmp holds a template recipe read back from firmware; buf is the
 * set of recipes that will actually be programmed.
 */
5877 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
5878 ICE_MAX_NUM_RECIPES,
5881 return ICE_ERR_NO_MEMORY;
5883 buf = (struct ice_aqc_recipe_data_elem *)
5884 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
5886 status = ICE_ERR_NO_MEMORY;
5890 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
5891 recipe_count = ICE_MAX_NUM_RECIPES;
/* Read an existing (MAC lookup) recipe to use as a content template */
5892 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5894 if (status || recipe_count == 0)
5897 /* Allocate the recipe resources, and configure them according to the
5898 * match fields from protocol headers and extracted field vectors.
5900 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
5901 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5904 status = ice_alloc_recipe(hw, &entry->rid);
5908 /* Clear the result index of the located recipe, as this will be
5909 * updated, if needed, later in the recipe creation process.
5911 tmp[0].content.result_indx = 0;
5913 buf[recps] = tmp[0];
5914 buf[recps].recipe_indx = (u8)entry->rid;
5915 /* if the recipe is a non-root recipe RID should be programmed
5916 * as 0 for the rules to be applied correctly.
5918 buf[recps].content.rid = 0;
5919 ice_memset(&buf[recps].content.lkup_indx, 0,
5920 sizeof(buf[recps].content.lkup_indx),
5923 /* All recipes use look-up index 0 to match switch ID. */
5924 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5925 buf[recps].content.mask[0] =
5926 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5927 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5930 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5931 buf[recps].content.lkup_indx[i] = 0x80;
5932 buf[recps].content.mask[i] = 0;
/* Program this group's FV indices/masks into lookup slots 1..n */
5935 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5936 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5937 buf[recps].content.mask[i + 1] =
5938 CPU_TO_LE16(entry->fv_mask[i]);
5941 if (rm->n_grp_count > 1) {
5942 /* Checks to see if there really is a valid result index
5945 if (chain_idx >= ICE_MAX_FV_WORDS) {
5946 ice_debug(hw, ICE_DBG_SW,
5947 "No chain index available\n");
5948 status = ICE_ERR_MAX_LIMIT;
/* Reserve this result index for the chaining recipe, then
 * advance to the next free one for the following group.
 */
5952 entry->chain_idx = chain_idx;
5953 buf[recps].content.result_indx =
5954 ICE_AQ_RECIPE_RESULT_EN |
5955 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5956 ICE_AQ_RECIPE_RESULT_DATA_M);
5957 ice_clear_bit(chain_idx, result_idx_bm);
5958 chain_idx = ice_find_first_bit(result_idx_bm,
5962 /* fill recipe dependencies */
5963 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
5964 ICE_MAX_NUM_RECIPES);
5965 ice_set_bit(buf[recps].recipe_indx,
5966 (ice_bitmap_t *)buf[recps].recipe_bitmap);
5967 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
/* Single-group rule: the one recipe programmed above is the root */
5971 if (rm->n_grp_count == 1) {
5972 rm->root_rid = buf[0].recipe_indx;
5973 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
5974 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5975 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5976 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5977 sizeof(buf[0].recipe_bitmap),
5978 ICE_NONDMA_TO_NONDMA);
5980 status = ICE_ERR_BAD_PTR;
5983 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
5984 * the recipe which is getting created if specified
5985 * by user. Usually any advanced switch filter, which results
5986 * into new extraction sequence, ended up creating a new recipe
5987 * of type ROOT and usually recipes are associated with profiles
5988 * Switch rule referreing newly created recipe, needs to have
5989 * either/or 'fwd' or 'join' priority, otherwise switch rule
5990 * evaluation will not happen correctly. In other words, if
5991 * switch rule to be evaluated on priority basis, then recipe
5992 * needs to have priority, otherwise it will be evaluated last.
5994 buf[0].content.act_ctrl_fwd_priority = rm->priority;
5996 struct ice_recp_grp_entry *last_chain_entry;
5999 /* Allocate the last recipe that will chain the outcomes of the
6000 * other recipes together
6002 status = ice_alloc_recipe(hw, &rid);
6006 buf[recps].recipe_indx = (u8)rid;
6007 buf[recps].content.rid = (u8)rid;
6008 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
6009 /* the new entry created should also be part of rg_list to
6010 * make sure we have complete recipe
6012 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
6013 sizeof(*last_chain_entry));
6014 if (!last_chain_entry) {
6015 status = ICE_ERR_NO_MEMORY;
6018 last_chain_entry->rid = rid;
6019 ice_memset(&buf[recps].content.lkup_indx, 0,
6020 sizeof(buf[recps].content.lkup_indx),
6022 /* All recipes use look-up index 0 to match switch ID. */
6023 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
6024 buf[recps].content.mask[0] =
6025 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
6026 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
6027 buf[recps].content.lkup_indx[i] =
6028 ICE_AQ_RECIPE_LKUP_IGNORE;
6029 buf[recps].content.mask[i] = 0;
6033 /* update r_bitmap with the recp that is used for chaining */
6034 ice_set_bit(rid, rm->r_bitmap);
6035 /* this is the recipe that chains all the other recipes so it
6036 * should not have a chaining ID to indicate the same
6038 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
/* Root recipe matches on each child's result index (full 16-bit
 * mask) to combine the chained match outcomes.
 */
6039 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
6041 last_chain_entry->fv_idx[i] = entry->chain_idx;
6042 buf[recps].content.lkup_indx[i] = entry->chain_idx;
6043 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
6044 ice_set_bit(entry->rid, rm->r_bitmap);
6046 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
6047 if (sizeof(buf[recps].recipe_bitmap) >=
6048 sizeof(rm->r_bitmap)) {
6049 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
6050 sizeof(buf[recps].recipe_bitmap),
6051 ICE_NONDMA_TO_NONDMA);
6053 status = ICE_ERR_BAD_PTR;
6056 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
6058 /* To differentiate among different UDP tunnels, a meta data ID
6061 if (match_tun_mask) {
6062 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
6063 buf[recps].content.mask[i] =
6064 CPU_TO_LE16(match_tun_mask);
6068 rm->root_rid = (u8)rid;
/* Programming recipes requires the global change lock */
6070 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
6074 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
6075 ice_release_change_lock(hw);
6079 /* Every recipe that just got created add it to the recipe
6082 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
6083 struct ice_switch_info *sw = hw->switch_info;
6084 bool is_root, idx_found = false;
6085 struct ice_sw_recipe *recp;
6086 u16 idx, buf_idx = 0;
6088 /* find buffer index for copying some data */
6089 for (idx = 0; idx < rm->n_grp_count; idx++)
6090 if (buf[idx].recipe_indx == entry->rid) {
6096 status = ICE_ERR_OUT_OF_RANGE;
/* Mirror the programmed recipe into the SW recp_list cache so
 * later lookups (ice_find_recp) can match against it.
 */
6100 recp = &sw->recp_list[entry->rid];
6101 is_root = (rm->root_rid == entry->rid);
6102 recp->is_root = is_root;
6104 recp->root_rid = entry->rid;
6105 recp->big_recp = (is_root && rm->n_grp_count > 1);
6107 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
6108 entry->r_group.n_val_pairs *
6109 sizeof(struct ice_fv_word),
6110 ICE_NONDMA_TO_NONDMA);
6112 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
6113 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
6115 /* Copy non-result fv index values and masks to recipe. This
6116 * call will also update the result recipe bitmask.
6118 ice_collect_result_idx(&buf[buf_idx], recp);
6120 /* for non-root recipes, also copy to the root, this allows
6121 * easier matching of a complete chained recipe
6124 ice_collect_result_idx(&buf[buf_idx],
6125 &sw->recp_list[rm->root_rid]);
6127 recp->n_ext_words = entry->r_group.n_val_pairs;
6128 recp->chain_idx = entry->chain_idx;
6129 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
6130 recp->n_grp_count = rm->n_grp_count;
6131 recp->tun_type = rm->tun_type;
6132 recp->recp_created = true;
6147 * ice_create_recipe_group - creates recipe group
6148 * @hw: pointer to hardware structure
6149 * @rm: recipe management list entry
6150 * @lkup_exts: lookup elements
 *
 * Packs the not-yet-done lookup words into recipe groups (first-fit) and
 * copies the extraction words and masks into @rm.
6152 static enum ice_status
6153 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
6154 struct ice_prot_lkup_ext *lkup_exts)
6156 enum ice_status status;
6159 rm->n_grp_count = 0;
6161 /* Create recipes for words that are marked not done by packing them
6164 status = ice_create_first_fit_recp_def(hw, lkup_exts,
6165 &rm->rg_list, &recp_count);
6167 rm->n_grp_count += recp_count;
6168 rm->n_ext_words = lkup_exts->n_val_words;
/* Copy the per-word protocol/offset pairs and their match masks */
6169 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
6170 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
6171 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
6172 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
6179 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
6180 * @hw: pointer to hardware structure
6181 * @lkups: lookup elements or match criteria for the advanced recipe, one
6182 * structure per protocol header
6183 * @lkups_cnt: number of protocols
6184 * @bm: bitmap of field vectors to consider
6185 * @fv_list: pointer to a list that holds the returned field vectors
6187 static enum ice_status
6188 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6189 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
6191 enum ice_status status;
/* Temporary array of HW protocol IDs, one per lookup element */
6198 prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
6200 return ICE_ERR_NO_MEMORY;
/* Translate each SW protocol type to its HW protocol ID; an unknown
 * type is a configuration error.
 */
6202 for (i = 0; i < lkups_cnt; i++)
6203 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
6204 status = ICE_ERR_CFG;
6208 /* Find field vectors that include all specified protocol types */
6209 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
6212 ice_free(hw, prot_ids);
6217 * ice_tun_type_match_word - determine if tun type needs a match mask
6218 * @tun_type: tunnel type
6219 * @mask: mask to be used for the tunnel
 *
 * On a matching tunnel type, writes the metadata flag mask through @mask.
6221 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
6224 case ICE_SW_TUN_VXLAN_GPE:
6225 case ICE_SW_TUN_GENEVE:
6226 case ICE_SW_TUN_VXLAN:
6227 case ICE_SW_TUN_NVGRE:
6228 case ICE_SW_TUN_UDP:
6229 case ICE_ALL_TUNNELS:
6230 *mask = ICE_TUN_FLAG_MASK;
/* VLAN-tagged tunnel variants exclude the VLAN bit from the flag mask */
6233 case ICE_SW_TUN_GENEVE_VLAN:
6234 case ICE_SW_TUN_VXLAN_VLAN:
6235 *mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
6245 * ice_add_special_words - Add words that are not protocols, such as metadata
6246 * @rinfo: other information regarding the rule e.g. priority and action info
6247 * @lkup_exts: lookup word structure
6249 static enum ice_status
6250 ice_add_special_words(struct ice_adv_rule_info *rinfo,
6251 struct ice_prot_lkup_ext *lkup_exts)
6255 /* If this is a tunneled packet, then add recipe index to match the
6256 * tunnel bit in the packet metadata flags.
6258 if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
6259 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
/* Append one extraction word: metadata protocol ID at the
 * tunnel-flag MDID offset, masked by the tunnel flag mask.
 */
6260 u8 word = lkup_exts->n_val_words++;
6262 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
6263 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
6264 lkup_exts->field_mask[word] = mask;
/* No room left for another chain word */
6266 return ICE_ERR_MAX_LIMIT;
6273 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
6274 * @hw: pointer to hardware structure
6275 * @rinfo: other information regarding the rule e.g. priority and action info
6276 * @bm: pointer to memory for returning the bitmap of field vectors
 *
 * Two strategies are used below: generic tunnel classes select a profile
 * *type* and resolve it via ice_get_sw_fv_bitmap(); specific rule types set
 * explicit ICE_PROFID_* bits directly in @bm.
6279 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
6282 enum ice_prof_type prof_type;
6284 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
6286 switch (rinfo->tun_type) {
6288 prof_type = ICE_PROF_NON_TUN;
6290 case ICE_ALL_TUNNELS:
6291 prof_type = ICE_PROF_TUN_ALL;
6293 case ICE_SW_TUN_VXLAN_GPE:
6294 case ICE_SW_TUN_GENEVE:
6295 case ICE_SW_TUN_GENEVE_VLAN:
6296 case ICE_SW_TUN_VXLAN:
6297 case ICE_SW_TUN_VXLAN_VLAN:
6298 case ICE_SW_TUN_UDP:
6299 case ICE_SW_TUN_GTP:
6300 prof_type = ICE_PROF_TUN_UDP;
6302 case ICE_SW_TUN_NVGRE:
6303 prof_type = ICE_PROF_TUN_GRE;
6305 case ICE_SW_TUN_PPPOE:
6306 prof_type = ICE_PROF_TUN_PPPOE;
/* From here down: explicit profile IDs rather than profile types */
6308 case ICE_SW_TUN_PPPOE_PAY:
6309 ice_set_bit(ICE_PROFID_PPPOE_PAY, bm);
6311 case ICE_SW_TUN_PPPOE_IPV4:
6312 ice_set_bit(ICE_PROFID_PPPOE_IPV4_OTHER, bm);
6313 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
6314 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
6316 case ICE_SW_TUN_PPPOE_IPV4_TCP:
6317 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
6319 case ICE_SW_TUN_PPPOE_IPV4_UDP:
6320 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
6322 case ICE_SW_TUN_PPPOE_IPV6:
6323 ice_set_bit(ICE_PROFID_PPPOE_IPV6_OTHER, bm);
6324 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
6325 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
6327 case ICE_SW_TUN_PPPOE_IPV6_TCP:
6328 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
6330 case ICE_SW_TUN_PPPOE_IPV6_UDP:
6331 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
6333 case ICE_SW_TUN_PROFID_IPV6_ESP:
6334 case ICE_SW_TUN_IPV6_ESP:
6335 ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
6337 case ICE_SW_TUN_PROFID_IPV6_AH:
6338 case ICE_SW_TUN_IPV6_AH:
6339 ice_set_bit(ICE_PROFID_IPV6_AH, bm);
6341 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6342 case ICE_SW_TUN_IPV6_L2TPV3:
6343 ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
6345 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6346 case ICE_SW_TUN_IPV6_NAT_T:
6347 ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
6349 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6350 ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
6352 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6353 ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
6355 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6356 ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
6358 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6359 ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
6361 case ICE_SW_TUN_IPV4_NAT_T:
6362 ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
6364 case ICE_SW_TUN_IPV4_L2TPV3:
6365 ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
6367 case ICE_SW_TUN_IPV4_ESP:
6368 ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
6370 case ICE_SW_TUN_IPV4_AH:
6371 ice_set_bit(ICE_PROFID_IPV4_AH, bm);
6373 case ICE_SW_TUN_AND_NON_TUN:
/* Default / tunnel-agnostic: consider every profile */
6375 prof_type = ICE_PROF_ALL;
/* Resolve the selected profile type into a profile bitmap */
6379 ice_get_sw_fv_bitmap(hw, prof_type, bm);
6383 * ice_is_prof_rule - determine if rule type is a profile rule
6384 * @type: the rule type
6386 * if the rule type is a profile rule, that means that there no field value
6387 * match required, in this case just a profile hit is required.
6389 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
/* Every ICE_SW_TUN_PROFID_* type matches on profile hit alone */
6392 case ICE_SW_TUN_PROFID_IPV6_ESP:
6393 case ICE_SW_TUN_PROFID_IPV6_AH:
6394 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6395 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6396 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6397 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6398 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6399 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6410 * @hw: pointer to hardware structure
6411 * @lkups: lookup elements or match criteria for the advanced recipe, one
6412 * structure per protocol header
6413 * @lkups_cnt: number of protocols
6414 * @rinfo: other information regarding the rule e.g. priority and action info
6415 * @rid: return the recipe ID of the recipe created
6417 static enum ice_status
6418 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6419 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
6421 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
6422 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
6423 struct ice_prot_lkup_ext *lkup_exts;
6424 struct ice_recp_grp_entry *r_entry;
6425 struct ice_sw_fv_list_entry *fvit;
6426 struct ice_recp_grp_entry *r_tmp;
6427 struct ice_sw_fv_list_entry *tmp;
6428 enum ice_status status = ICE_SUCCESS;
6429 struct ice_sw_recipe *rm;
6430 u16 match_tun_mask = 0;
6434 if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
6435 return ICE_ERR_PARAM;
6437 lkup_exts = (struct ice_prot_lkup_ext *)
6438 ice_malloc(hw, sizeof(*lkup_exts));
6440 return ICE_ERR_NO_MEMORY;
6442 /* Determine the number of words to be matched and if it exceeds a
6443 * recipe's restrictions
6445 for (i = 0; i < lkups_cnt; i++) {
6448 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
6449 status = ICE_ERR_CFG;
6450 goto err_free_lkup_exts;
6453 count = ice_fill_valid_words(&lkups[i], lkup_exts);
6455 status = ICE_ERR_CFG;
6456 goto err_free_lkup_exts;
6460 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
6462 status = ICE_ERR_NO_MEMORY;
6463 goto err_free_lkup_exts;
6466 /* Get field vectors that contain fields extracted from all the protocol
6467 * headers being programmed.
6469 INIT_LIST_HEAD(&rm->fv_list);
6470 INIT_LIST_HEAD(&rm->rg_list);
6472 /* Get bitmap of field vectors (profiles) that are compatible with the
6473 * rule request; only these will be searched in the subsequent call to
6476 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
6478 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
6482 /* Group match words into recipes using preferred recipe grouping
6485 status = ice_create_recipe_group(hw, rm, lkup_exts);
6489 /* For certain tunnel types it is necessary to use a metadata ID flag to
6490 * differentiate different tunnel types. A separate recipe needs to be
6491 * used for the metadata.
6493 if (ice_tun_type_match_word(rinfo->tun_type, &mask) &&
6494 rm->n_grp_count > 1)
6495 match_tun_mask = mask;
6497 /* set the recipe priority if specified */
6498 rm->priority = (u8)rinfo->priority;
6500 /* Find offsets from the field vector. Pick the first one for all the
6503 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
6507 /* An empty FV list means to use all the profiles returned in the
6510 if (LIST_EMPTY(&rm->fv_list)) {
6513 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++)
6514 if (ice_is_bit_set(fv_bitmap, j)) {
6515 struct ice_sw_fv_list_entry *fvl;
6517 fvl = (struct ice_sw_fv_list_entry *)
6518 ice_malloc(hw, sizeof(*fvl));
6522 fvl->profile_id = j;
6523 LIST_ADD(&fvl->list_entry, &rm->fv_list);
6527 /* get bitmap of all profiles the recipe will be associated with */
6528 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
6529 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
6531 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
6532 ice_set_bit((u16)fvit->profile_id, profiles);
6535 /* Create any special protocol/offset pairs, such as looking at tunnel
6536 * bits by extracting metadata
6538 status = ice_add_special_words(rinfo, lkup_exts);
6540 goto err_free_lkup_exts;
6542 /* Look for a recipe which matches our requested fv / mask list */
6543 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
6544 if (*rid < ICE_MAX_NUM_RECIPES)
6545 /* Success if found a recipe that match the existing criteria */
6548 rm->tun_type = rinfo->tun_type;
6549 /* Recipe we need does not exist, add a recipe */
6550 status = ice_add_sw_recipe(hw, rm, match_tun_mask, profiles);
6554 /* Associate all the recipes created with all the profiles in the
6555 * common field vector.
6557 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
6559 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
6562 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
6563 (u8 *)r_bitmap, NULL);
6567 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
6568 ICE_MAX_NUM_RECIPES);
6569 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
6573 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
6576 ice_release_change_lock(hw);
6581 /* Update profile to recipe bitmap array */
6582 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
6583 ICE_MAX_NUM_RECIPES);
6585 /* Update recipe to profile bitmap array */
6586 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
6587 if (ice_is_bit_set(r_bitmap, j))
6588 ice_set_bit((u16)fvit->profile_id,
6589 recipe_to_profile[j]);
6592 *rid = rm->root_rid;
6593 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
6594 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
6596 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
6597 ice_recp_grp_entry, l_entry) {
6598 LIST_DEL(&r_entry->l_entry);
6599 ice_free(hw, r_entry);
6602 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
6604 LIST_DEL(&fvit->list_entry);
6609 ice_free(hw, rm->root_buf);
6614 ice_free(hw, lkup_exts);
6620 * ice_find_dummy_packet - find dummy packet by tunnel type
6622 * @lkups: lookup elements or match criteria for the advanced recipe, one
6623 * structure per protocol header
6624 * @lkups_cnt: number of protocols
6625 * @tun_type: tunnel type from the match criteria
6626 * @pkt: dummy packet to fill according to filter match criteria
6627 * @pkt_len: packet length of dummy packet
6628 * @offsets: pointer to receive the pointer to the offsets for the packet
 *
 * Selects a dummy/template packet matching the tunnel type and the L3/L4
 * protocols present in the lookup criteria. Specific tunnel types are
 * checked first; the trailing if/else ladder handles the plain (non-tunnel)
 * UDP/TCP x IPv4/IPv6 x VLAN combinations.
6631 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6632 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
6634 const struct ice_dummy_pkt_offsets **offsets)
6636 bool tcp = false, udp = false, ipv6 = false, vlan = false;
/* Derive protocol hints (udp/tcp/ipv6/vlan/gre/pppoe-ipv6) from the
 * lookup elements' header and mask values.
 */
6640 for (i = 0; i < lkups_cnt; i++) {
6641 if (lkups[i].type == ICE_UDP_ILOS)
6643 else if (lkups[i].type == ICE_TCP_IL)
6645 else if (lkups[i].type == ICE_IPV6_OFOS)
6647 else if (lkups[i].type == ICE_VLAN_OFOS)
/* Outer IPv4 carrying protocol 0x2F (GRE) implies NVGRE */
6649 else if (lkups[i].type == ICE_IPV4_OFOS &&
6650 lkups[i].h_u.ipv4_hdr.protocol ==
6651 ICE_IPV4_NVGRE_PROTO_ID &&
6652 lkups[i].m_u.ipv4_hdr.protocol ==
/* PPPoE carrying PPP protocol 0x0057 implies IPv6 payload */
6655 else if (lkups[i].type == ICE_PPPOE &&
6656 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
6657 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
6658 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
/* Ethertype 0x86DD implies IPv6 */
6661 else if (lkups[i].type == ICE_ETYPE_OL &&
6662 lkups[i].h_u.ethertype.ethtype_id ==
6663 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
6664 lkups[i].m_u.ethertype.ethtype_id ==
/* Exact tunnel-type matches: one dummy packet per security/tunnel
 * protocol variant (ESP/AH/NAT-T/L2TPv3/GTP/PPPoE).
 */
6669 if (tun_type == ICE_SW_TUN_IPV4_ESP) {
6670 *pkt = dummy_ipv4_esp_pkt;
6671 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
6672 *offsets = dummy_ipv4_esp_packet_offsets;
6676 if (tun_type == ICE_SW_TUN_IPV6_ESP) {
6677 *pkt = dummy_ipv6_esp_pkt;
6678 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
6679 *offsets = dummy_ipv6_esp_packet_offsets;
6683 if (tun_type == ICE_SW_TUN_IPV4_AH) {
6684 *pkt = dummy_ipv4_ah_pkt;
6685 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
6686 *offsets = dummy_ipv4_ah_packet_offsets;
6690 if (tun_type == ICE_SW_TUN_IPV6_AH) {
6691 *pkt = dummy_ipv6_ah_pkt;
6692 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
6693 *offsets = dummy_ipv6_ah_packet_offsets;
6697 if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
6698 *pkt = dummy_ipv4_nat_pkt;
6699 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
6700 *offsets = dummy_ipv4_nat_packet_offsets;
6704 if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
6705 *pkt = dummy_ipv6_nat_pkt;
6706 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
6707 *offsets = dummy_ipv6_nat_packet_offsets;
6711 if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
6712 *pkt = dummy_ipv4_l2tpv3_pkt;
6713 *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
6714 *offsets = dummy_ipv4_l2tpv3_packet_offsets;
6718 if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
6719 *pkt = dummy_ipv6_l2tpv3_pkt;
6720 *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
6721 *offsets = dummy_ipv6_l2tpv3_packet_offsets;
6725 if (tun_type == ICE_SW_TUN_GTP) {
6726 *pkt = dummy_udp_gtp_packet;
6727 *pkt_len = sizeof(dummy_udp_gtp_packet);
6728 *offsets = dummy_udp_gtp_packet_offsets;
6732 if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
6733 *pkt = dummy_pppoe_ipv6_packet;
6734 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
6735 *offsets = dummy_pppoe_packet_offsets;
6737 } else if (tun_type == ICE_SW_TUN_PPPOE ||
6738 tun_type == ICE_SW_TUN_PPPOE_PAY) {
6739 *pkt = dummy_pppoe_ipv4_packet;
6740 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
6741 *offsets = dummy_pppoe_packet_offsets;
6745 if (tun_type == ICE_SW_TUN_PPPOE_IPV4) {
6746 *pkt = dummy_pppoe_ipv4_packet;
6747 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
6748 *offsets = dummy_pppoe_packet_ipv4_offsets;
6752 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_TCP) {
6753 *pkt = dummy_pppoe_ipv4_tcp_packet;
6754 *pkt_len = sizeof(dummy_pppoe_ipv4_tcp_packet);
6755 *offsets = dummy_pppoe_ipv4_tcp_packet_offsets;
6759 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_UDP) {
6760 *pkt = dummy_pppoe_ipv4_udp_packet;
6761 *pkt_len = sizeof(dummy_pppoe_ipv4_udp_packet);
6762 *offsets = dummy_pppoe_ipv4_udp_packet_offsets;
6766 if (tun_type == ICE_SW_TUN_PPPOE_IPV6) {
6767 *pkt = dummy_pppoe_ipv6_packet;
6768 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
6769 *offsets = dummy_pppoe_packet_ipv6_offsets;
6773 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_TCP) {
6774 *pkt = dummy_pppoe_ipv6_tcp_packet;
6775 *pkt_len = sizeof(dummy_pppoe_ipv6_tcp_packet);
6776 *offsets = dummy_pppoe_packet_ipv6_tcp_offsets;
6780 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_UDP) {
6781 *pkt = dummy_pppoe_ipv6_udp_packet;
6782 *pkt_len = sizeof(dummy_pppoe_ipv6_udp_packet);
6783 *offsets = dummy_pppoe_packet_ipv6_udp_offsets;
6787 if (tun_type == ICE_ALL_TUNNELS) {
6788 *pkt = dummy_gre_udp_packet;
6789 *pkt_len = sizeof(dummy_gre_udp_packet);
6790 *offsets = dummy_gre_udp_packet_offsets;
/* GRE/NVGRE: inner transport selects TCP or UDP template */
6794 if (tun_type == ICE_SW_TUN_NVGRE || gre) {
6796 *pkt = dummy_gre_tcp_packet;
6797 *pkt_len = sizeof(dummy_gre_tcp_packet);
6798 *offsets = dummy_gre_tcp_packet_offsets;
6802 *pkt = dummy_gre_udp_packet;
6803 *pkt_len = sizeof(dummy_gre_udp_packet);
6804 *offsets = dummy_gre_udp_packet_offsets;
/* UDP-based tunnels (VXLAN/GENEVE/GPE, with or without VLAN) */
6808 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
6809 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
6810 tun_type == ICE_SW_TUN_GENEVE_VLAN ||
6811 tun_type == ICE_SW_TUN_VXLAN_VLAN) {
6813 *pkt = dummy_udp_tun_tcp_packet;
6814 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
6815 *offsets = dummy_udp_tun_tcp_packet_offsets;
6819 *pkt = dummy_udp_tun_udp_packet;
6820 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
6821 *offsets = dummy_udp_tun_udp_packet_offsets;
/* Non-tunnel fallbacks: UDP/IPv4 (with/without VLAN)... */
6827 *pkt = dummy_vlan_udp_packet;
6828 *pkt_len = sizeof(dummy_vlan_udp_packet);
6829 *offsets = dummy_vlan_udp_packet_offsets;
6832 *pkt = dummy_udp_packet;
6833 *pkt_len = sizeof(dummy_udp_packet);
6834 *offsets = dummy_udp_packet_offsets;
/* ...UDP/IPv6... */
6836 } else if (udp && ipv6) {
6838 *pkt = dummy_vlan_udp_ipv6_packet;
6839 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
6840 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
6843 *pkt = dummy_udp_ipv6_packet;
6844 *pkt_len = sizeof(dummy_udp_ipv6_packet);
6845 *offsets = dummy_udp_ipv6_packet_offsets;
/* ...any other IPv6 (TCP or unspecified L4) uses the TCP/IPv6
 * template. NOTE(review): "(tcp && ipv6) || ipv6" reduces to just
 * "ipv6" — redundant but harmless.
 */
6847 } else if ((tcp && ipv6) || ipv6) {
6849 *pkt = dummy_vlan_tcp_ipv6_packet;
6850 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
6851 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
6854 *pkt = dummy_tcp_ipv6_packet;
6855 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
6856 *offsets = dummy_tcp_ipv6_packet_offsets;
/* ...default: TCP/IPv4 template */
6861 *pkt = dummy_vlan_tcp_packet;
6862 *pkt_len = sizeof(dummy_vlan_tcp_packet);
6863 *offsets = dummy_vlan_tcp_packet_offsets;
6865 *pkt = dummy_tcp_packet;
6866 *pkt_len = sizeof(dummy_tcp_packet);
6867 *offsets = dummy_tcp_packet_offsets;
6872 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
6874 * @lkups: lookup elements or match criteria for the advanced recipe, one
6875 * structure per protocol header
6876 * @lkups_cnt: number of protocols
6877 * @s_rule: stores rule information from the match criteria
6878 * @dummy_pkt: dummy packet to fill according to filter match criteria
6879 * @pkt_len: packet length of dummy packet
6880 * @offsets: offset info for the dummy packet
6882 static enum ice_status
6883 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6884 struct ice_aqc_sw_rules_elem *s_rule,
6885 const u8 *dummy_pkt, u16 pkt_len,
6886 const struct ice_dummy_pkt_offsets *offsets)
/* NOTE(review): this listing is elided — local declarations (e.g. 'pkt',
 * 'i'), the switch-case labels and several braces are not visible here;
 * the comments below describe only what the visible lines establish.
 */
6891 /* Start with a packet with a pre-defined/dummy content. Then, fill
6892 * in the header values to be looked up or matched.
6894 pkt = s_rule->pdata.lkup_tx_rx.hdr;
/* Copy the whole template packet into the rule buffer before patching. */
6896 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
/* For every caller lookup element, locate its protocol header inside the
 * template (via the offsets table), then overlay only the masked words.
 */
6898 for (i = 0; i < lkups_cnt; i++) {
6899 enum ice_protocol_type type;
6900 u16 offset = 0, len = 0, j;
6903 /* find the start of this layer; it should be found since this
6904 * was already checked when search for the dummy packet
6906 type = lkups[i].type;
6907 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
6908 if (type == offsets[j].type) {
6909 offset = offsets[j].offset;
6914 /* this should never happen in a correct calling sequence */
6916 return ICE_ERR_PARAM;
/* Map the protocol type to the byte length of its header so we know how
 * many words may be patched for this lookup element.
 */
6918 switch (lkups[i].type) {
6921 len = sizeof(struct ice_ether_hdr);
6924 len = sizeof(struct ice_ethtype_hdr);
6927 len = sizeof(struct ice_vlan_hdr);
6931 len = sizeof(struct ice_ipv4_hdr);
6935 len = sizeof(struct ice_ipv6_hdr);
6940 len = sizeof(struct ice_l4_hdr);
6943 len = sizeof(struct ice_sctp_hdr);
6946 len = sizeof(struct ice_nvgre);
6951 len = sizeof(struct ice_udp_tnl_hdr);
6955 len = sizeof(struct ice_udp_gtp_hdr);
6958 len = sizeof(struct ice_pppoe_hdr);
6961 len = sizeof(struct ice_esp_hdr);
6964 len = sizeof(struct ice_nat_t_hdr);
6967 len = sizeof(struct ice_ah_hdr);
6970 len = sizeof(struct ice_l2tpv3_sess_hdr);
/* Unknown protocol type: reject rather than patch blindly. */
6973 return ICE_ERR_PARAM;
6976 /* the length should be a word multiple */
6977 if (len % ICE_BYTES_PER_WORD)
6980 /* We have the offset to the header start, the length, the
6981 * caller's header values and mask. Use this information to
6982 * copy the data into the dummy packet appropriately based on
6983 * the mask. Note that we need to only write the bits as
6984 * indicated by the mask to make sure we don't improperly write
6985 * over any significant packet data.
/* Word-wise read-modify-write: keep template bits where the mask is 0,
 * take caller header bits where the mask is 1.
 */
6987 for (j = 0; j < len / sizeof(u16); j++)
6988 if (((u16 *)&lkups[i].m_u)[j])
6989 ((u16 *)(pkt + offset))[j] =
6990 (((u16 *)(pkt + offset))[j] &
6991 ~((u16 *)&lkups[i].m_u)[j]) |
6992 (((u16 *)&lkups[i].h_u)[j] &
6993 ((u16 *)&lkups[i].m_u)[j]);
/* Record the final header length in the switch rule element. */
6996 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
7002 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
7003 * @hw: pointer to the hardware structure
7004 * @tun_type: tunnel type
7005 * @pkt: dummy packet to fill in
7006 * @offsets: offset info for the dummy packet
7008 static enum ice_status
7009 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
7010 u8 *pkt, const struct ice_hw *hw is queried for the currently open tunnel port */
7010 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
/* NOTE(review): the 'switch (tun_type)' opener and local declarations
 * ('open_port', 'i') are elided in this listing.
 */
/* Pick the UDP port to program based on the tunnel family: VXLAN-style
 * types read the open VXLAN port, GENEVE types the open GENEVE port.
 */
7015 case ICE_SW_TUN_AND_NON_TUN:
7016 case ICE_SW_TUN_VXLAN_GPE:
7017 case ICE_SW_TUN_VXLAN:
7018 case ICE_SW_TUN_VXLAN_VLAN:
7019 case ICE_SW_TUN_UDP:
7020 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
7024 case ICE_SW_TUN_GENEVE:
7025 case ICE_SW_TUN_GENEVE_VLAN:
7026 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
7031 /* Nothing needs to be done for this tunnel type */
7035 /* Find the outer UDP protocol header and insert the port number */
7036 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
7037 if (offsets[i].type == ICE_UDP_OF) {
7038 struct ice_l4_hdr *hdr;
7041 offset = offsets[i].offset;
/* Overlay an L4 header view at the UDP offset and patch the
 * destination port with the open tunnel port (big-endian).
 */
7042 hdr = (struct ice_l4_hdr *)&pkt[offset];
7043 hdr->dst_port = CPU_TO_BE16(open_port);
7053 * ice_find_adv_rule_entry - Search a rule entry
7054 * @hw: pointer to the hardware structure
7055 * @lkups: lookup elements or match criteria for the advanced recipe, one
7056 * structure per protocol header
7057 * @lkups_cnt: number of protocols
7058 * @recp_id: recipe ID for which we are finding the rule
7059 * @rinfo: other information regarding the rule e.g. priority and action info
7061 * Helper function to search for a given advance rule entry
7062 * Returns pointer to entry storing the rule if found
7064 static struct ice_adv_fltr_mgmt_list_entry *
7065 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7066 u16 lkups_cnt, u16 recp_id,
7067 struct ice_adv_rule_info *rinfo)
7069 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7070 struct ice_switch_info *sw = hw->switch_info;
/* Walk the bookkeeping list for this recipe looking for an entry whose
 * lookup elements and rule attributes match the caller's exactly.
 */
7073 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
7074 ice_adv_fltr_mgmt_list_entry, list_entry) {
7075 bool lkups_matched = true;
/* Different element counts can never match; skip early. */
7077 if (lkups_cnt != list_itr->lkups_cnt)
/* Element-by-element binary compare of the lookup structures. */
7079 for (i = 0; i < list_itr->lkups_cnt; i++)
7080 if (memcmp(&list_itr->lkups[i], &lkups[i],
7082 lkups_matched = false;
/* Lookups match; the stored rule's flag and tunnel type must also
 * agree with the requested rule info (remaining comparisons and the
 * return are elided in this listing).
 */
7085 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
7086 rinfo->tun_type == list_itr->rule_info.tun_type &&
7094 * ice_adv_add_update_vsi_list
7095 * @hw: pointer to the hardware structure
7096 * @m_entry: pointer to current adv filter management list entry
7097 * @cur_fltr: filter information from the book keeping entry
7098 * @new_fltr: filter information with the new VSI to be added
7100 * Call AQ command to add or update previously created VSI list with new VSI.
7102 * Helper function to do book keeping associated with adding filter information
7103 * The algorithm to do the booking keeping is described below :
7104 * When a VSI needs to subscribe to a given advanced filter
7105 * if only one VSI has been added till now
7106 * Allocate a new VSI list and add two VSIs
7107 * to this list using switch rule command
7108 * Update the previously created switch rule with the
7109 * newly created VSI list ID
7110 * if a VSI list was previously created
7111 * Add the new VSI to the previously created VSI list set
7112 * using the update switch rule command
7114 static enum ice_status
7115 ice_adv_add_update_vsi_list(struct ice_hw *hw,
7116 struct ice_adv_fltr_mgmt_list_entry *m_entry,
7117 struct ice_adv_rule_info *cur_fltr,
7118 struct ice_adv_rule_info *new_fltr)
7120 enum ice_status status;
7121 u16 vsi_list_id = 0;
/* Queue/queue-group/drop actions cannot be folded into a VSI list. */
7123 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
7124 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
7125 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
7126 return ICE_ERR_NOT_IMPL;
/* Mixing a queue-forward request with an existing VSI-forward rule
 * (or vice versa) is not supported.
 */
7128 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
7129 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
7130 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
7131 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
7132 return ICE_ERR_NOT_IMPL;
7134 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
7135 /* Only one entry existed in the mapping and it was not already
7136 * a part of a VSI list. So, create a VSI list with the old and
7139 struct ice_fltr_info tmp_fltr;
7140 u16 vsi_handle_arr[2];
7142 /* A rule already exists with the new VSI being added */
7143 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
7144 new_fltr->sw_act.fwd_id.hw_vsi_id)
7145 return ICE_ERR_ALREADY_EXISTS;
/* Build a two-entry VSI list from the existing and the new VSI. */
7147 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
7148 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
7149 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
7155 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
7156 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
7157 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
7158 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
7159 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
7160 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
7162 /* Update the previous switch rule of "forward to VSI" to
7165 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Record the new forwarding target in the bookkeeping entry. */
7169 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
7170 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
7171 m_entry->vsi_list_info =
7172 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
7175 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
7177 if (!m_entry->vsi_list_info)
7180 /* A rule already exists with the new VSI being added */
7181 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
7184 /* Update the previously created VSI list set with
7185 * the new VSI ID passed in
7187 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
7189 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
7191 ice_aqc_opc_update_sw_rules,
7193 /* update VSI list mapping info with new VSI ID */
7195 ice_set_bit(vsi_handle,
7196 m_entry->vsi_list_info->vsi_map);
/* One more VSI now subscribes to this filter. */
7199 m_entry->vsi_count++;
7204 * ice_add_adv_rule - helper function to create an advanced switch rule
7205 * @hw: pointer to the hardware structure
7206 * @lkups: information on the words that needs to be looked up. All words
7207 * together makes one recipe
7208 * @lkups_cnt: num of entries in the lkups array
7209 * @rinfo: other information related to the rule that needs to be programmed
7210 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
7211 * ignored in case of error.
7213 * This function can program only 1 rule at a time. The lkups is used to
7214 * describe all the words that form the "lookup" portion of the recipe.
7215 * These words can span multiple protocols. Callers to this function need to
7216 * pass in a list of protocol headers with lookup information along and mask
7217 * that determines which words are valid from the given protocol header.
7218 * rinfo describes other information related to this rule such as forwarding
7219 * IDs, priority of this rule, etc.
7222 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7223 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
7224 struct ice_rule_query_data *added_entry)
7226 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
7227 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
7228 const struct ice_dummy_pkt_offsets *pkt_offsets;
7229 struct ice_aqc_sw_rules_elem *s_rule = NULL;
7230 struct LIST_HEAD_TYPE *rule_head;
7231 struct ice_switch_info *sw;
7232 enum ice_status status;
7233 const u8 *pkt = NULL;
7239 /* Initialize profile to result index bitmap */
7240 if (!hw->switch_info->prof_res_bm_init) {
7241 hw->switch_info->prof_res_bm_init = 1;
7242 ice_init_prof_result_bm(hw);
/* Profile rules may carry zero lookups; anything else needs at least one. */
7245 prof_rule = ice_is_prof_rule(rinfo->tun_type);
7246 if (!prof_rule && !lkups_cnt)
7247 return ICE_ERR_PARAM;
7249 /* get # of words we need to match */
7251 for (i = 0; i < lkups_cnt; i++) {
7254 ptr = (u16 *)&lkups[i].m_u;
7255 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
/* NOTE(review): in the full source the next two checks sit on separate
 * prof_rule / non-prof_rule branches; the branch lines appear elided in
 * this listing, which is why they look duplicated here — confirm against
 * the upstream file before changing.
 */
7261 if (word_cnt > ICE_MAX_CHAIN_WORDS)
7262 return ICE_ERR_PARAM;
7264 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
7265 return ICE_ERR_PARAM;
7268 /* make sure that we can locate a dummy packet */
7269 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
7272 status = ICE_ERR_PARAM;
7273 goto err_ice_add_adv_rule;
/* Only these four forwarding actions are supported for advanced rules. */
7276 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
7277 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
7278 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
7279 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
7282 vsi_handle = rinfo->sw_act.vsi_handle;
7283 if (!ice_is_vsi_valid(hw, vsi_handle))
7284 return ICE_ERR_PARAM;
/* Resolve the software VSI handle into the HW VSI number the switch uses. */
7286 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
7287 rinfo->sw_act.fwd_id.hw_vsi_id =
7288 ice_get_hw_vsi_num(hw, vsi_handle);
7289 if (rinfo->sw_act.flag & ICE_FLTR_TX)
7290 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
/* Create (or find) the recipe that matches these lookup words. */
7292 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
7295 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
7297 /* we have to add VSI to VSI_LIST and increment vsi_count.
7298 * Also Update VSI list so that we can change forwarding rule
7299 * if the rule already exists, we will check if it exists with
7300 * same vsi_id, if not then add it to the VSI list if it already
7301 * exists if not then create a VSI list and add the existing VSI
7302 * ID and the new VSI ID to the list
7303 * We will add that VSI to the list
7305 status = ice_adv_add_update_vsi_list(hw, m_entry,
7306 &m_entry->rule_info,
/* Report the existing rule's identifiers back to the caller. */
7309 added_entry->rid = rid;
7310 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
7311 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* No existing rule matched: build a fresh switch rule buffer sized for
 * the dummy packet header.
 */
7315 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
7316 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
7318 return ICE_ERR_NO_MEMORY;
7319 act |= ICE_SINGLE_ACT_LAN_ENABLE;
/* Encode the caller's forwarding action into the rule's action word. */
7320 switch (rinfo->sw_act.fltr_act) {
7321 case ICE_FWD_TO_VSI:
7322 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
7323 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
7324 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
7327 act |= ICE_SINGLE_ACT_TO_Q;
7328 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
7329 ICE_SINGLE_ACT_Q_INDEX_M;
7331 case ICE_FWD_TO_QGRP:
/* Queue-group size is encoded as log2 of the group size. */
7332 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
7333 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
7334 act |= ICE_SINGLE_ACT_TO_Q;
7335 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
7336 ICE_SINGLE_ACT_Q_INDEX_M;
7337 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
7338 ICE_SINGLE_ACT_Q_REGION_M;
7340 case ICE_DROP_PACKET:
7341 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
7342 ICE_SINGLE_ACT_VALID_BIT;
7345 status = ICE_ERR_CFG;
7346 goto err_ice_add_adv_rule;
7349 /* set the rule LOOKUP type based on caller specified 'RX'
7350 * instead of hardcoding it to be either LOOKUP_TX/RX
7352 * for 'RX' set the source to be the port number
7353 * for 'TX' set the source to be the source HW VSI number (determined
7357 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
7358 s_rule->pdata.lkup_tx_rx.src =
7359 CPU_TO_LE16(hw->port_info->lport);
7361 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
7362 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
7365 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
7366 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* Fill the rule's packet header from the dummy template and the masks. */
7368 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
7369 pkt_len, pkt_offsets);
7371 goto err_ice_add_adv_rule;
/* Tunnel rules additionally need the open tunnel UDP port patched in. */
7373 if (rinfo->tun_type != ICE_NON_TUN &&
7374 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
7375 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
7376 s_rule->pdata.lkup_tx_rx.hdr,
7379 goto err_ice_add_adv_rule;
/* Program the rule into hardware via the admin queue. */
7382 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
7383 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
7386 goto err_ice_add_adv_rule;
/* Build the bookkeeping entry mirroring what was programmed. */
7387 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
7388 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
7390 status = ICE_ERR_NO_MEMORY;
7391 goto err_ice_add_adv_rule;
7394 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
7395 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
7396 ICE_NONDMA_TO_NONDMA);
/* Profile rules legitimately have no lookups; for others a failed dup
 * of a non-empty lkups array is an allocation failure.
 */
7397 if (!adv_fltr->lkups && !prof_rule) {
7398 status = ICE_ERR_NO_MEMORY;
7399 goto err_ice_add_adv_rule;
7402 adv_fltr->lkups_cnt = lkups_cnt;
7403 adv_fltr->rule_info = *rinfo;
7404 adv_fltr->rule_info.fltr_rule_id =
7405 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
7406 sw = hw->switch_info;
7407 sw->recp_list[rid].adv_rule = true;
7408 rule_head = &sw->recp_list[rid].filt_rules;
7410 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
7411 adv_fltr->vsi_count = 1;
7413 /* Add rule entry to book keeping list */
7414 LIST_ADD(&adv_fltr->list_entry, rule_head);
7416 added_entry->rid = rid;
7417 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
7418 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* Common cleanup: free the bookkeeping entry only on failure; the AQ
 * buffer is always freed.
 */
7420 err_ice_add_adv_rule:
7421 if (status && adv_fltr) {
7422 ice_free(hw, adv_fltr->lkups);
7423 ice_free(hw, adv_fltr);
7426 ice_free(hw, s_rule);
7432 * ice_adv_rem_update_vsi_list
7433 * @hw: pointer to the hardware structure
7434 * @vsi_handle: VSI handle of the VSI to remove
7435 * @fm_list: filter management entry for which the VSI list management needs to
7438 static enum ice_status
7439 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
7440 struct ice_adv_fltr_mgmt_list_entry *fm_list)
7442 struct ice_vsi_list_map_info *vsi_list_info;
7443 enum ice_sw_lkup_type lkup_type;
7444 enum ice_status status;
/* Only rules forwarding to a non-empty VSI list can have a VSI removed. */
7447 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
7448 fm_list->vsi_count == 0)
7449 return ICE_ERR_PARAM;
7451 /* A rule with the VSI being removed does not exist */
7452 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
7453 return ICE_ERR_DOES_NOT_EXIST;
/* Remove this VSI from the HW VSI list (remove flag = true). */
7455 lkup_type = ICE_SW_LKUP_LAST;
7456 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
7457 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
7458 ice_aqc_opc_update_sw_rules,
7463 fm_list->vsi_count--;
7464 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
7465 vsi_list_info = fm_list->vsi_list_info;
/* If exactly one VSI remains, collapse the list back into a direct
 * "forward to VSI" rule and delete the now-unneeded VSI list.
 */
7466 if (fm_list->vsi_count == 1) {
7467 struct ice_fltr_info tmp_fltr;
/* The sole remaining subscriber is the first set bit in the map. */
7470 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
7472 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
7473 return ICE_ERR_OUT_OF_RANGE;
7475 /* Make sure VSI list is empty before removing it below */
7476 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
7478 ice_aqc_opc_update_sw_rules,
7483 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
7484 tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
7485 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
7486 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
7487 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
7488 tmp_fltr.fwd_id.hw_vsi_id =
7489 ice_get_hw_vsi_num(hw, rem_vsi_handle);
7490 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
7491 ice_get_hw_vsi_num(hw, rem_vsi_handle);
7493 /* Update the previous switch rule of "MAC forward to VSI" to
7494 * "MAC fwd to VSI list"
7496 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
7498 ice_debug(hw, ICE_DBG_SW,
7499 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
7500 tmp_fltr.fwd_id.hw_vsi_id, status);
7504 /* Remove the VSI list since it is no longer used */
7505 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
7507 ice_debug(hw, ICE_DBG_SW,
7508 "Failed to remove VSI list %d, error %d\n",
7509 vsi_list_id, status);
/* Drop the software mapping for the deleted VSI list. */
7513 LIST_DEL(&vsi_list_info->list_entry);
7514 ice_free(hw, vsi_list_info);
7515 fm_list->vsi_list_info = NULL;
7522 * ice_rem_adv_rule - removes existing advanced switch rule
7523 * @hw: pointer to the hardware structure
7524 * @lkups: information on the words that needs to be looked up. All words
7525 * together makes one recipe
7526 * @lkups_cnt: num of entries in the lkups array
7527 * @rinfo: Its the pointer to the rule information for the rule
7529 * This function can be used to remove 1 rule at a time. The lkups is
7530 * used to describe all the words that forms the "lookup" portion of the
7531 * rule. These words can span multiple protocols. Callers to this function
7532 * need to pass in a list of protocol headers with lookup information along
7533 * and mask that determines which words are valid from the given protocol
7534 * header. rinfo describes other information related to this rule such as
7535 * forwarding IDs, priority of this rule, etc.
7538 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7539 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
7541 struct ice_adv_fltr_mgmt_list_entry *list_elem;
7542 struct ice_prot_lkup_ext lkup_exts;
7543 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
7544 enum ice_status status = ICE_SUCCESS;
7545 bool remove_rule = false;
7546 u16 i, rid, vsi_handle;
/* Rebuild the word-extraction description from the caller's lookups so
 * the matching recipe can be located.
 */
7548 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
7549 for (i = 0; i < lkups_cnt; i++) {
7552 if (lkups[i].type >= ICE_PROTOCOL_LAST)
7555 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
7560 /* Create any special protocol/offset pairs, such as looking at tunnel
7561 * bits by extracting metadata
7563 status = ice_add_special_words(rinfo, &lkup_exts);
7567 rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
7568 /* If did not find a recipe that match the existing criteria */
7569 if (rid == ICE_MAX_NUM_RECIPES)
7570 return ICE_ERR_PARAM;
7572 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
7573 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
7574 /* the rule is already removed */
7577 ice_acquire_lock(rule_lock);
/* Decide under the lock whether the HW rule itself must be deleted or
 * only this VSI removed from the rule's VSI list.
 */
7578 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
7580 } else if (list_elem->vsi_count > 1) {
/* Other VSIs still subscribe: shrink the list, keep the rule. */
7581 list_elem->vsi_list_info->ref_cnt--;
7582 remove_rule = false;
7583 vsi_handle = rinfo->sw_act.vsi_handle;
7584 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
7586 vsi_handle = rinfo->sw_act.vsi_handle;
7587 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
7589 ice_release_lock(rule_lock);
7592 if (list_elem->vsi_count == 0)
7595 ice_release_lock(rule_lock);
7597 struct ice_aqc_sw_rules_elem *s_rule;
/* Delete the rule from HW: a header-less rule element carrying only
 * the rule index is enough for the remove opcode.
 */
7600 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
7602 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
7605 return ICE_ERR_NO_MEMORY;
7606 s_rule->pdata.lkup_tx_rx.act = 0;
7607 s_rule->pdata.lkup_tx_rx.index =
7608 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
7609 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
7610 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
7612 ice_aqc_opc_remove_sw_rules, NULL);
/* Treat "already gone" the same as success for bookkeeping cleanup. */
7613 if (status == ICE_SUCCESS || status == ICE_ERR_DOES_NOT_EXIST) {
7614 ice_acquire_lock(rule_lock);
7615 LIST_DEL(&list_elem->list_entry);
7616 ice_free(hw, list_elem->lkups);
7617 ice_free(hw, list_elem);
7618 ice_release_lock(rule_lock);
7620 ice_free(hw, s_rule);
7626 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
7627 * @hw: pointer to the hardware structure
7628 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
7630 * This function is used to remove 1 rule at a time. The removal is based on
7631 * the remove_entry parameter. This function will remove rule for a given
7632 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
7635 ice_rem_adv_rule_by_id(struct ice_hw *hw,
7636 struct ice_rule_query_data *remove_entry)
7638 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7639 struct LIST_HEAD_TYPE *list_head;
7640 struct ice_adv_rule_info rinfo;
7641 struct ice_switch_info *sw;
7643 sw = hw->switch_info;
/* The recipe must exist before its rule list can be searched. */
7644 if (!sw->recp_list[remove_entry->rid].recp_created)
7645 return ICE_ERR_PARAM;
7646 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
/* Scan the recipe's rules for the matching rule ID, then delegate the
 * actual removal (with the caller's VSI handle) to ice_rem_adv_rule().
 */
7647 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
7649 if (list_itr->rule_info.fltr_rule_id ==
7650 remove_entry->rule_id) {
7651 rinfo = list_itr->rule_info;
7652 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
7653 return ice_rem_adv_rule(hw, list_itr->lkups,
7654 list_itr->lkups_cnt, &rinfo);
/* Rule ID not found in this recipe's list. */
7657 return ICE_ERR_PARAM;
7661 * ice_rem_adv_for_vsi - removes existing advanced switch rules for a
7663 * @hw: pointer to the hardware structure
7664 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
7666 * This function is used to remove all the rules for a given VSI and as soon
7667 * as removing a rule fails, it will return immediately with the error code,
7668 * else it will return ICE_SUCCESS
7670 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
7672 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7673 struct ice_vsi_list_map_info *map_info;
7674 struct LIST_HEAD_TYPE *list_head;
7675 struct ice_adv_rule_info rinfo;
7676 struct ice_switch_info *sw;
7677 enum ice_status status;
7678 u16 vsi_list_id = 0;
7681 sw = hw->switch_info;
/* Walk every created recipe that holds advanced rules and remove each
 * rule this VSI subscribes to.
 */
7682 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
7683 if (!sw->recp_list[rid].recp_created)
7685 if (!sw->recp_list[rid].adv_rule)
7687 list_head = &sw->recp_list[rid].filt_rules;
7689 LIST_FOR_EACH_ENTRY(list_itr, list_head,
7690 ice_adv_fltr_mgmt_list_entry, list_entry) {
/* Check whether this VSI appears in the rule's VSI list mapping. */
7691 map_info = ice_find_vsi_list_entry(&sw->recp_list[rid],
7696 rinfo = list_itr->rule_info;
7697 rinfo.sw_act.vsi_handle = vsi_handle;
7698 status = ice_rem_adv_rule(hw, list_itr->lkups,
7699 list_itr->lkups_cnt, &rinfo);
7709 * ice_replay_fltr - Replay all the filters stored by a specific list head
7710 * @hw: pointer to the hardware structure
7711 * @list_head: list for which filters needs to be replayed
7712 * @recp_id: Recipe ID for which rules need to be replayed
7714 static enum ice_status
7715 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
7717 struct ice_fltr_mgmt_list_entry *itr;
7718 enum ice_status status = ICE_SUCCESS;
7719 struct ice_sw_recipe *recp_list;
7720 u8 lport = hw->port_info->lport;
7721 struct LIST_HEAD_TYPE l_head;
7723 if (LIST_EMPTY(list_head))
7726 recp_list = &hw->switch_info->recp_list[recp_id];
7727 /* Move entries from the given list_head to a temporary l_head so that
7728 * they can be replayed. Otherwise when trying to re-add the same
7729 * filter, the function will return already exists
7731 LIST_REPLACE_INIT(list_head, &l_head);
7733 /* Mark the given list_head empty by reinitializing it so filters
7734 * could be added again by *handler
7736 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
7738 struct ice_fltr_list_entry f_entry;
7740 f_entry.fltr_info = itr->fltr_info;
/* Single-VSI, non-VLAN filters can be re-added directly. */
7741 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
7742 status = ice_add_rule_internal(hw, recp_list, lport,
7744 if (status != ICE_SUCCESS)
7749 /* Add a filter per VSI separately */
/* Multi-VSI filters: peel one VSI handle at a time off the old
 * VSI map and re-add it as a direct forward-to-VSI filter.
 */
7754 ice_find_first_bit(itr->vsi_list_info->vsi_map,
7756 if (!ice_is_vsi_valid(hw, vsi_handle))
7759 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
7760 f_entry.fltr_info.vsi_handle = vsi_handle;
7761 f_entry.fltr_info.fwd_id.hw_vsi_id =
7762 ice_get_hw_vsi_num(hw, vsi_handle);
7763 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
/* VLAN filters take a dedicated re-add path. */
7764 if (recp_id == ICE_SW_LKUP_VLAN)
7765 status = ice_add_vlan_internal(hw, recp_list,
7768 status = ice_add_rule_internal(hw, recp_list,
7771 if (status != ICE_SUCCESS)
7776 /* Clear the filter management list */
7777 ice_rem_sw_rule_info(hw, &l_head)
7782 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
7783 * @hw: pointer to the hardware structure
7785 * NOTE: This function does not clean up partially added filters on error.
7786 * It is up to caller of the function to issue a reset or fail early.
7788 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
7790 struct ice_switch_info *sw = hw->switch_info;
7791 enum ice_status status = ICE_SUCCESS;
/* Replay every recipe's filter list; stop at the first failure. */
7794 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7795 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
7797 status = ice_replay_fltr(hw, i, head);
7798 if (status != ICE_SUCCESS)
7805 * ice_replay_vsi_fltr - Replay filters for requested VSI
7806 * @hw: pointer to the hardware structure
7807 * @pi: pointer to port information structure
7808 * @sw: pointer to switch info struct for which function replays filters
7809 * @vsi_handle: driver VSI handle
7810 * @recp_id: Recipe ID for which rules need to be replayed
7811 * @list_head: list for which filters need to be replayed
7813 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
7814 * It is required to pass valid VSI handle.
7816 static enum ice_status
7817 ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
7818 struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
7819 struct LIST_HEAD_TYPE *list_head)
7821 struct ice_fltr_mgmt_list_entry *itr;
7822 enum ice_status status = ICE_SUCCESS;
7823 struct ice_sw_recipe *recp_list;
7826 if (LIST_EMPTY(list_head))
7828 recp_list = &sw->recp_list[recp_id];
7829 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
7831 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
7833 struct ice_fltr_list_entry f_entry;
7835 f_entry.fltr_info = itr->fltr_info;
/* Case 1: single-VSI, non-VLAN filter owned by this VSI — re-add
 * it as-is (after refreshing the HW source VSI number).
 */
7836 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
7837 itr->fltr_info.vsi_handle == vsi_handle) {
7838 /* update the src in case it is VSI num */
7839 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
7840 f_entry.fltr_info.src = hw_vsi_id;
7841 status = ice_add_rule_internal(hw, recp_list,
7844 if (status != ICE_SUCCESS)
/* Case 2: list-based filter — only relevant if this VSI is in the
 * filter's VSI map.
 */
7848 if (!itr->vsi_list_info ||
7849 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
7851 /* Clearing it so that the logic can add it back */
7852 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map)
7853 f_entry.fltr_info.vsi_handle = vsi_handle;
7854 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
7855 /* update the src in case it is VSI num */
7856 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
7857 f_entry.fltr_info.src = hw_vsi_id;
7858 if (recp_id == ICE_SW_LKUP_VLAN)
7859 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
7861 status = ice_add_rule_internal(hw, recp_list,
7864 if (status != ICE_SUCCESS)
7872 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
7873 * @hw: pointer to the hardware structure
7874 * @vsi_handle: driver VSI handle
7875 * @list_head: list for which filters need to be replayed
7877 * Replay the advanced rule for the given VSI.
7879 static enum ice_status
7880 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
7881 struct LIST_HEAD_TYPE *list_head)
7883 struct ice_rule_query_data added_entry = { 0 };
7884 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
7885 enum ice_status status = ICE_SUCCESS;
7887 if (LIST_EMPTY(list_head))
/* Re-add every stored advanced rule that targets this VSI. */
7889 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
7891 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
7892 u16 lk_cnt = adv_fltr->lkups_cnt;
/* Skip rules belonging to other VSIs. */
7894 if (vsi_handle != rinfo->sw_act.vsi_handle)
7896 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
7905 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
7906 * @hw: pointer to the hardware structure
7907 * @pi: pointer to port information structure
7908 * @vsi_handle: driver VSI handle
7910 * Replays filters for requested VSI via vsi_handle.
7913 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
7916 struct ice_switch_info *sw = hw->switch_info;
7917 enum ice_status status;
7920 /* Update the recipes that were created */
7921 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7922 struct LIST_HEAD_TYPE *head;
7924 head = &sw->recp_list[i].filt_replay_rules;
/* Dispatch per recipe kind: legacy filters vs. advanced rules. */
7925 if (!sw->recp_list[i].adv_rule)
7926 status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
7929 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
7930 if (status != ICE_SUCCESS)
7938 * ice_rm_all_sw_replay_rule - helper function to delete filter replay rules
7939 * @hw: pointer to the HW struct
7940 * @sw: pointer to switch info struct for which function removes filters
7942 * Deletes the filter replay rules for given switch
7944 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
/* Free each recipe's non-empty replay list, using the advanced-rule
 * cleanup path for recipes flagged adv_rule and the legacy path otherwise.
 */
7951 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7952 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
7953 struct LIST_HEAD_TYPE *l_head;
7955 l_head = &sw->recp_list[i].filt_replay_rules;
7956 if (!sw->recp_list[i].adv_rule)
7957 ice_rem_sw_rule_info(hw, l_head);
7959 ice_rem_adv_rule_info(hw, l_head);
7965 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
7966 * @hw: pointer to the HW struct
7968 * Deletes the filter replay rules.
7970 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
/* Thin wrapper: clean the replay rules of this HW's own switch info. */
7972 ice_rm_sw_replay_rule_info(hw, hw->switch_info);