1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020 Intel Corporation
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
12 #define ICE_MAX_VLAN_ID 0xFFF
13 #define ICE_IPV4_NVGRE_PROTO_ID 0x002F
14 #define ICE_PPP_IPV6_PROTO_ID 0x0057
15 #define ICE_IPV6_ETHER_ID 0x86DD
17 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
18 * struct to configure any switch filter rules.
19 * {DA (6 bytes), SA(6 bytes),
20 * Ether type (2 bytes for header without VLAN tag) OR
21 * VLAN tag (4 bytes for header with VLAN tag) }
23 * Word on Hardcoded values
24 * byte 0 = 0x2: to identify it as locally administered DA MAC
25 * byte 6 = 0x2: to identify it as locally administered SA MAC
26 * byte 12 = 0x81 & byte 13 = 0x00:
27 * In case of VLAN filter first two bytes defines ether type (0x8100)
28 * and remaining two bytes are placeholder for programming a given VLAN ID
29 * In case of Ether type filter it is treated as header without VLAN tag
30 * and byte 12 and 13 is used to program a given Ether type instead
32 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
36 struct ice_dummy_pkt_offsets {
37 enum ice_protocol_type type;
38 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
41 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
44 { ICE_IPV4_OFOS, 14 },
49 { ICE_PROTOCOL_LAST, 0 },
52 static const u8 dummy_gre_tcp_packet[] = {
53 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
54 0x00, 0x00, 0x00, 0x00,
55 0x00, 0x00, 0x00, 0x00,
57 0x08, 0x00, /* ICE_ETYPE_OL 12 */
59 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
60 0x00, 0x00, 0x00, 0x00,
61 0x00, 0x2F, 0x00, 0x00,
62 0x00, 0x00, 0x00, 0x00,
63 0x00, 0x00, 0x00, 0x00,
65 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
66 0x00, 0x00, 0x00, 0x00,
68 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
69 0x00, 0x00, 0x00, 0x00,
70 0x00, 0x00, 0x00, 0x00,
73 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
74 0x00, 0x00, 0x00, 0x00,
75 0x00, 0x06, 0x00, 0x00,
76 0x00, 0x00, 0x00, 0x00,
77 0x00, 0x00, 0x00, 0x00,
79 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
80 0x00, 0x00, 0x00, 0x00,
81 0x00, 0x00, 0x00, 0x00,
82 0x50, 0x02, 0x20, 0x00,
83 0x00, 0x00, 0x00, 0x00
86 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
89 { ICE_IPV4_OFOS, 14 },
94 { ICE_PROTOCOL_LAST, 0 },
97 static const u8 dummy_gre_udp_packet[] = {
98 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
99 0x00, 0x00, 0x00, 0x00,
100 0x00, 0x00, 0x00, 0x00,
102 0x08, 0x00, /* ICE_ETYPE_OL 12 */
104 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
105 0x00, 0x00, 0x00, 0x00,
106 0x00, 0x2F, 0x00, 0x00,
107 0x00, 0x00, 0x00, 0x00,
108 0x00, 0x00, 0x00, 0x00,
110 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
111 0x00, 0x00, 0x00, 0x00,
113 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
114 0x00, 0x00, 0x00, 0x00,
115 0x00, 0x00, 0x00, 0x00,
118 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
119 0x00, 0x00, 0x00, 0x00,
120 0x00, 0x11, 0x00, 0x00,
121 0x00, 0x00, 0x00, 0x00,
122 0x00, 0x00, 0x00, 0x00,
124 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
125 0x00, 0x08, 0x00, 0x00,
128 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
130 { ICE_ETYPE_OL, 12 },
131 { ICE_IPV4_OFOS, 14 },
135 { ICE_VXLAN_GPE, 42 },
139 { ICE_PROTOCOL_LAST, 0 },
142 static const u8 dummy_udp_tun_tcp_packet[] = {
143 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
144 0x00, 0x00, 0x00, 0x00,
145 0x00, 0x00, 0x00, 0x00,
147 0x08, 0x00, /* ICE_ETYPE_OL 12 */
149 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
150 0x00, 0x01, 0x00, 0x00,
151 0x40, 0x11, 0x00, 0x00,
152 0x00, 0x00, 0x00, 0x00,
153 0x00, 0x00, 0x00, 0x00,
155 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
156 0x00, 0x46, 0x00, 0x00,
158 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
159 0x00, 0x00, 0x00, 0x00,
161 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
162 0x00, 0x00, 0x00, 0x00,
163 0x00, 0x00, 0x00, 0x00,
166 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
167 0x00, 0x01, 0x00, 0x00,
168 0x40, 0x06, 0x00, 0x00,
169 0x00, 0x00, 0x00, 0x00,
170 0x00, 0x00, 0x00, 0x00,
172 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
173 0x00, 0x00, 0x00, 0x00,
174 0x00, 0x00, 0x00, 0x00,
175 0x50, 0x02, 0x20, 0x00,
176 0x00, 0x00, 0x00, 0x00
179 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
181 { ICE_ETYPE_OL, 12 },
182 { ICE_IPV4_OFOS, 14 },
186 { ICE_VXLAN_GPE, 42 },
189 { ICE_UDP_ILOS, 84 },
190 { ICE_PROTOCOL_LAST, 0 },
193 static const u8 dummy_udp_tun_udp_packet[] = {
194 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
195 0x00, 0x00, 0x00, 0x00,
196 0x00, 0x00, 0x00, 0x00,
198 0x08, 0x00, /* ICE_ETYPE_OL 12 */
200 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
201 0x00, 0x01, 0x00, 0x00,
202 0x00, 0x11, 0x00, 0x00,
203 0x00, 0x00, 0x00, 0x00,
204 0x00, 0x00, 0x00, 0x00,
206 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
207 0x00, 0x3a, 0x00, 0x00,
209 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
210 0x00, 0x00, 0x00, 0x00,
212 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
213 0x00, 0x00, 0x00, 0x00,
214 0x00, 0x00, 0x00, 0x00,
217 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
218 0x00, 0x01, 0x00, 0x00,
219 0x00, 0x11, 0x00, 0x00,
220 0x00, 0x00, 0x00, 0x00,
221 0x00, 0x00, 0x00, 0x00,
223 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
224 0x00, 0x08, 0x00, 0x00,
227 /* offset info for MAC + IPv4 + UDP dummy packet */
228 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
230 { ICE_ETYPE_OL, 12 },
231 { ICE_IPV4_OFOS, 14 },
232 { ICE_UDP_ILOS, 34 },
233 { ICE_PROTOCOL_LAST, 0 },
236 /* Dummy packet for MAC + IPv4 + UDP */
237 static const u8 dummy_udp_packet[] = {
238 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
239 0x00, 0x00, 0x00, 0x00,
240 0x00, 0x00, 0x00, 0x00,
242 0x08, 0x00, /* ICE_ETYPE_OL 12 */
244 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
245 0x00, 0x01, 0x00, 0x00,
246 0x00, 0x11, 0x00, 0x00,
247 0x00, 0x00, 0x00, 0x00,
248 0x00, 0x00, 0x00, 0x00,
250 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
251 0x00, 0x08, 0x00, 0x00,
253 0x00, 0x00, /* 2 bytes for 4 byte alignment */
256 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
257 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
259 { ICE_ETYPE_OL, 12 },
260 { ICE_VLAN_OFOS, 14 },
261 { ICE_IPV4_OFOS, 18 },
262 { ICE_UDP_ILOS, 38 },
263 { ICE_PROTOCOL_LAST, 0 },
266 /* C-tag (801.1Q), IPv4:UDP dummy packet */
267 static const u8 dummy_vlan_udp_packet[] = {
268 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
269 0x00, 0x00, 0x00, 0x00,
270 0x00, 0x00, 0x00, 0x00,
272 0x81, 0x00, /* ICE_ETYPE_OL 12 */
274 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
276 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
277 0x00, 0x01, 0x00, 0x00,
278 0x00, 0x11, 0x00, 0x00,
279 0x00, 0x00, 0x00, 0x00,
280 0x00, 0x00, 0x00, 0x00,
282 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
283 0x00, 0x08, 0x00, 0x00,
285 0x00, 0x00, /* 2 bytes for 4 byte alignment */
288 /* offset info for MAC + IPv4 + TCP dummy packet */
289 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
291 { ICE_ETYPE_OL, 12 },
292 { ICE_IPV4_OFOS, 14 },
294 { ICE_PROTOCOL_LAST, 0 },
297 /* Dummy packet for MAC + IPv4 + TCP */
298 static const u8 dummy_tcp_packet[] = {
299 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
300 0x00, 0x00, 0x00, 0x00,
301 0x00, 0x00, 0x00, 0x00,
303 0x08, 0x00, /* ICE_ETYPE_OL 12 */
305 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
306 0x00, 0x01, 0x00, 0x00,
307 0x00, 0x06, 0x00, 0x00,
308 0x00, 0x00, 0x00, 0x00,
309 0x00, 0x00, 0x00, 0x00,
311 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
312 0x00, 0x00, 0x00, 0x00,
313 0x00, 0x00, 0x00, 0x00,
314 0x50, 0x00, 0x00, 0x00,
315 0x00, 0x00, 0x00, 0x00,
317 0x00, 0x00, /* 2 bytes for 4 byte alignment */
320 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
321 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
323 { ICE_ETYPE_OL, 12 },
324 { ICE_VLAN_OFOS, 14 },
325 { ICE_IPV4_OFOS, 18 },
327 { ICE_PROTOCOL_LAST, 0 },
330 /* C-tag (801.1Q), IPv4:TCP dummy packet */
331 static const u8 dummy_vlan_tcp_packet[] = {
332 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
333 0x00, 0x00, 0x00, 0x00,
334 0x00, 0x00, 0x00, 0x00,
336 0x81, 0x00, /* ICE_ETYPE_OL 12 */
338 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
340 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
341 0x00, 0x01, 0x00, 0x00,
342 0x00, 0x06, 0x00, 0x00,
343 0x00, 0x00, 0x00, 0x00,
344 0x00, 0x00, 0x00, 0x00,
346 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
347 0x00, 0x00, 0x00, 0x00,
348 0x00, 0x00, 0x00, 0x00,
349 0x50, 0x00, 0x00, 0x00,
350 0x00, 0x00, 0x00, 0x00,
352 0x00, 0x00, /* 2 bytes for 4 byte alignment */
355 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
357 { ICE_ETYPE_OL, 12 },
358 { ICE_IPV6_OFOS, 14 },
360 { ICE_PROTOCOL_LAST, 0 },
363 static const u8 dummy_tcp_ipv6_packet[] = {
364 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
365 0x00, 0x00, 0x00, 0x00,
366 0x00, 0x00, 0x00, 0x00,
368 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
370 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
371 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
372 0x00, 0x00, 0x00, 0x00,
373 0x00, 0x00, 0x00, 0x00,
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00,
379 0x00, 0x00, 0x00, 0x00,
381 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
382 0x00, 0x00, 0x00, 0x00,
383 0x00, 0x00, 0x00, 0x00,
384 0x50, 0x00, 0x00, 0x00,
385 0x00, 0x00, 0x00, 0x00,
387 0x00, 0x00, /* 2 bytes for 4 byte alignment */
390 /* C-tag (802.1Q): IPv6 + TCP */
391 static const struct ice_dummy_pkt_offsets
392 dummy_vlan_tcp_ipv6_packet_offsets[] = {
394 { ICE_ETYPE_OL, 12 },
395 { ICE_VLAN_OFOS, 14 },
396 { ICE_IPV6_OFOS, 18 },
398 { ICE_PROTOCOL_LAST, 0 },
401 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
402 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
403 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
404 0x00, 0x00, 0x00, 0x00,
405 0x00, 0x00, 0x00, 0x00,
407 0x81, 0x00, /* ICE_ETYPE_OL 12 */
409 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
411 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
412 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
413 0x00, 0x00, 0x00, 0x00,
414 0x00, 0x00, 0x00, 0x00,
415 0x00, 0x00, 0x00, 0x00,
416 0x00, 0x00, 0x00, 0x00,
417 0x00, 0x00, 0x00, 0x00,
418 0x00, 0x00, 0x00, 0x00,
419 0x00, 0x00, 0x00, 0x00,
420 0x00, 0x00, 0x00, 0x00,
422 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
423 0x00, 0x00, 0x00, 0x00,
424 0x00, 0x00, 0x00, 0x00,
425 0x50, 0x00, 0x00, 0x00,
426 0x00, 0x00, 0x00, 0x00,
428 0x00, 0x00, /* 2 bytes for 4 byte alignment */
432 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
434 { ICE_ETYPE_OL, 12 },
435 { ICE_IPV6_OFOS, 14 },
436 { ICE_UDP_ILOS, 54 },
437 { ICE_PROTOCOL_LAST, 0 },
440 /* IPv6 + UDP dummy packet */
441 static const u8 dummy_udp_ipv6_packet[] = {
442 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
443 0x00, 0x00, 0x00, 0x00,
444 0x00, 0x00, 0x00, 0x00,
446 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
448 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
449 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
450 0x00, 0x00, 0x00, 0x00,
451 0x00, 0x00, 0x00, 0x00,
452 0x00, 0x00, 0x00, 0x00,
453 0x00, 0x00, 0x00, 0x00,
454 0x00, 0x00, 0x00, 0x00,
455 0x00, 0x00, 0x00, 0x00,
456 0x00, 0x00, 0x00, 0x00,
457 0x00, 0x00, 0x00, 0x00,
459 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
460 0x00, 0x10, 0x00, 0x00,
462 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
463 0x00, 0x00, 0x00, 0x00,
465 0x00, 0x00, /* 2 bytes for 4 byte alignment */
468 /* C-tag (802.1Q): IPv6 + UDP */
469 static const struct ice_dummy_pkt_offsets
470 dummy_vlan_udp_ipv6_packet_offsets[] = {
472 { ICE_ETYPE_OL, 12 },
473 { ICE_VLAN_OFOS, 14 },
474 { ICE_IPV6_OFOS, 18 },
475 { ICE_UDP_ILOS, 58 },
476 { ICE_PROTOCOL_LAST, 0 },
479 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
480 static const u8 dummy_vlan_udp_ipv6_packet[] = {
481 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
482 0x00, 0x00, 0x00, 0x00,
483 0x00, 0x00, 0x00, 0x00,
485 0x81, 0x00, /* ICE_ETYPE_OL 12 */
487 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
489 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
490 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
491 0x00, 0x00, 0x00, 0x00,
492 0x00, 0x00, 0x00, 0x00,
493 0x00, 0x00, 0x00, 0x00,
494 0x00, 0x00, 0x00, 0x00,
495 0x00, 0x00, 0x00, 0x00,
496 0x00, 0x00, 0x00, 0x00,
497 0x00, 0x00, 0x00, 0x00,
498 0x00, 0x00, 0x00, 0x00,
500 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
501 0x00, 0x08, 0x00, 0x00,
503 0x00, 0x00, /* 2 bytes for 4 byte alignment */
506 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
508 { ICE_IPV4_OFOS, 14 },
511 { ICE_PROTOCOL_LAST, 0 },
514 static const u8 dummy_udp_gtp_packet[] = {
515 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
516 0x00, 0x00, 0x00, 0x00,
517 0x00, 0x00, 0x00, 0x00,
520 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
521 0x00, 0x00, 0x00, 0x00,
522 0x00, 0x11, 0x00, 0x00,
523 0x00, 0x00, 0x00, 0x00,
524 0x00, 0x00, 0x00, 0x00,
526 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
527 0x00, 0x1c, 0x00, 0x00,
529 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
530 0x00, 0x00, 0x00, 0x00,
531 0x00, 0x00, 0x00, 0x85,
533 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
534 0x00, 0x00, 0x00, 0x00,
537 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
539 { ICE_ETYPE_OL, 12 },
540 { ICE_VLAN_OFOS, 14},
542 { ICE_PROTOCOL_LAST, 0 },
545 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv4_offsets[] = {
547 { ICE_ETYPE_OL, 12 },
548 { ICE_VLAN_OFOS, 14},
550 { ICE_IPV4_OFOS, 26 },
551 { ICE_PROTOCOL_LAST, 0 },
554 static const u8 dummy_pppoe_ipv4_packet[] = {
555 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
556 0x00, 0x00, 0x00, 0x00,
557 0x00, 0x00, 0x00, 0x00,
559 0x81, 0x00, /* ICE_ETYPE_OL 12 */
561 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
563 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
566 0x00, 0x21, /* PPP Link Layer 24 */
568 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
569 0x00, 0x00, 0x00, 0x00,
570 0x00, 0x00, 0x00, 0x00,
571 0x00, 0x00, 0x00, 0x00,
572 0x00, 0x00, 0x00, 0x00,
574 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
578 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_tcp_packet_offsets[] = {
580 { ICE_ETYPE_OL, 12 },
581 { ICE_VLAN_OFOS, 14},
583 { ICE_IPV4_OFOS, 26 },
585 { ICE_PROTOCOL_LAST, 0 },
588 static const u8 dummy_pppoe_ipv4_tcp_packet[] = {
589 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
590 0x00, 0x00, 0x00, 0x00,
591 0x00, 0x00, 0x00, 0x00,
593 0x81, 0x00, /* ICE_ETYPE_OL 12 */
595 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
597 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
600 0x00, 0x21, /* PPP Link Layer 24 */
602 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 26 */
603 0x00, 0x01, 0x00, 0x00,
604 0x00, 0x06, 0x00, 0x00,
605 0x00, 0x00, 0x00, 0x00,
606 0x00, 0x00, 0x00, 0x00,
608 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 46 */
609 0x00, 0x00, 0x00, 0x00,
610 0x00, 0x00, 0x00, 0x00,
611 0x50, 0x00, 0x00, 0x00,
612 0x00, 0x00, 0x00, 0x00,
614 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
618 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_udp_packet_offsets[] = {
620 { ICE_ETYPE_OL, 12 },
621 { ICE_VLAN_OFOS, 14},
623 { ICE_IPV4_OFOS, 26 },
624 { ICE_UDP_ILOS, 46 },
625 { ICE_PROTOCOL_LAST, 0 },
628 static const u8 dummy_pppoe_ipv4_udp_packet[] = {
629 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
630 0x00, 0x00, 0x00, 0x00,
631 0x00, 0x00, 0x00, 0x00,
633 0x81, 0x00, /* ICE_ETYPE_OL 12 */
635 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
637 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
640 0x00, 0x21, /* PPP Link Layer 24 */
642 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 26 */
643 0x00, 0x01, 0x00, 0x00,
644 0x00, 0x11, 0x00, 0x00,
645 0x00, 0x00, 0x00, 0x00,
646 0x00, 0x00, 0x00, 0x00,
648 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 46 */
649 0x00, 0x08, 0x00, 0x00,
651 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
654 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_offsets[] = {
656 { ICE_ETYPE_OL, 12 },
657 { ICE_VLAN_OFOS, 14},
659 { ICE_IPV6_OFOS, 26 },
660 { ICE_PROTOCOL_LAST, 0 },
663 static const u8 dummy_pppoe_ipv6_packet[] = {
664 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
665 0x00, 0x00, 0x00, 0x00,
666 0x00, 0x00, 0x00, 0x00,
668 0x81, 0x00, /* ICE_ETYPE_OL 12 */
670 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
672 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
675 0x00, 0x57, /* PPP Link Layer 24 */
677 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
678 0x00, 0x00, 0x3b, 0x00,
679 0x00, 0x00, 0x00, 0x00,
680 0x00, 0x00, 0x00, 0x00,
681 0x00, 0x00, 0x00, 0x00,
682 0x00, 0x00, 0x00, 0x00,
683 0x00, 0x00, 0x00, 0x00,
684 0x00, 0x00, 0x00, 0x00,
685 0x00, 0x00, 0x00, 0x00,
686 0x00, 0x00, 0x00, 0x00,
688 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
692 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_tcp_offsets[] = {
694 { ICE_ETYPE_OL, 12 },
695 { ICE_VLAN_OFOS, 14},
697 { ICE_IPV6_OFOS, 26 },
699 { ICE_PROTOCOL_LAST, 0 },
702 static const u8 dummy_pppoe_ipv6_tcp_packet[] = {
703 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
704 0x00, 0x00, 0x00, 0x00,
705 0x00, 0x00, 0x00, 0x00,
707 0x81, 0x00, /* ICE_ETYPE_OL 12 */
709 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
711 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
714 0x00, 0x57, /* PPP Link Layer 24 */
716 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
717 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
718 0x00, 0x00, 0x00, 0x00,
719 0x00, 0x00, 0x00, 0x00,
720 0x00, 0x00, 0x00, 0x00,
721 0x00, 0x00, 0x00, 0x00,
722 0x00, 0x00, 0x00, 0x00,
723 0x00, 0x00, 0x00, 0x00,
724 0x00, 0x00, 0x00, 0x00,
725 0x00, 0x00, 0x00, 0x00,
727 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 66 */
728 0x00, 0x00, 0x00, 0x00,
729 0x00, 0x00, 0x00, 0x00,
730 0x50, 0x00, 0x00, 0x00,
731 0x00, 0x00, 0x00, 0x00,
733 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
737 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_udp_offsets[] = {
739 { ICE_ETYPE_OL, 12 },
740 { ICE_VLAN_OFOS, 14},
742 { ICE_IPV6_OFOS, 26 },
743 { ICE_UDP_ILOS, 66 },
744 { ICE_PROTOCOL_LAST, 0 },
747 static const u8 dummy_pppoe_ipv6_udp_packet[] = {
748 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
749 0x00, 0x00, 0x00, 0x00,
750 0x00, 0x00, 0x00, 0x00,
752 0x81, 0x00, /* ICE_ETYPE_OL 12 */
754 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
756 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
759 0x00, 0x57, /* PPP Link Layer 24 */
761 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
762 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
763 0x00, 0x00, 0x00, 0x00,
764 0x00, 0x00, 0x00, 0x00,
765 0x00, 0x00, 0x00, 0x00,
766 0x00, 0x00, 0x00, 0x00,
767 0x00, 0x00, 0x00, 0x00,
768 0x00, 0x00, 0x00, 0x00,
769 0x00, 0x00, 0x00, 0x00,
770 0x00, 0x00, 0x00, 0x00,
772 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 66 */
773 0x00, 0x08, 0x00, 0x00,
775 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
778 static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
780 { ICE_IPV4_OFOS, 14 },
782 { ICE_PROTOCOL_LAST, 0 },
785 static const u8 dummy_ipv4_esp_pkt[] = {
786 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
787 0x00, 0x00, 0x00, 0x00,
788 0x00, 0x00, 0x00, 0x00,
791 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 14 */
792 0x00, 0x00, 0x40, 0x00,
793 0x40, 0x32, 0x00, 0x00,
794 0x00, 0x00, 0x00, 0x00,
795 0x00, 0x00, 0x00, 0x00,
797 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
798 0x00, 0x00, 0x00, 0x00,
799 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
802 static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
804 { ICE_IPV6_OFOS, 14 },
806 { ICE_PROTOCOL_LAST, 0 },
809 static const u8 dummy_ipv6_esp_pkt[] = {
810 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
811 0x00, 0x00, 0x00, 0x00,
812 0x00, 0x00, 0x00, 0x00,
815 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
816 0x00, 0x08, 0x32, 0x00, /* Next header ESP */
817 0x00, 0x00, 0x00, 0x00,
818 0x00, 0x00, 0x00, 0x00,
819 0x00, 0x00, 0x00, 0x00,
820 0x00, 0x00, 0x00, 0x00,
821 0x00, 0x00, 0x00, 0x00,
822 0x00, 0x00, 0x00, 0x00,
823 0x00, 0x00, 0x00, 0x00,
824 0x00, 0x00, 0x00, 0x00,
826 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
827 0x00, 0x00, 0x00, 0x00,
828 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
831 static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
833 { ICE_IPV4_OFOS, 14 },
835 { ICE_PROTOCOL_LAST, 0 },
838 static const u8 dummy_ipv4_ah_pkt[] = {
839 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
840 0x00, 0x00, 0x00, 0x00,
841 0x00, 0x00, 0x00, 0x00,
844 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
845 0x00, 0x00, 0x40, 0x00,
846 0x40, 0x33, 0x00, 0x00,
847 0x00, 0x00, 0x00, 0x00,
848 0x00, 0x00, 0x00, 0x00,
850 0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
851 0x00, 0x00, 0x00, 0x00,
852 0x00, 0x00, 0x00, 0x00,
853 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
856 static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
858 { ICE_IPV6_OFOS, 14 },
860 { ICE_PROTOCOL_LAST, 0 },
863 static const u8 dummy_ipv6_ah_pkt[] = {
864 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
865 0x00, 0x00, 0x00, 0x00,
866 0x00, 0x00, 0x00, 0x00,
869 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
870 0x00, 0x0c, 0x33, 0x00, /* Next header AH */
871 0x00, 0x00, 0x00, 0x00,
872 0x00, 0x00, 0x00, 0x00,
873 0x00, 0x00, 0x00, 0x00,
874 0x00, 0x00, 0x00, 0x00,
875 0x00, 0x00, 0x00, 0x00,
876 0x00, 0x00, 0x00, 0x00,
877 0x00, 0x00, 0x00, 0x00,
878 0x00, 0x00, 0x00, 0x00,
880 0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
881 0x00, 0x00, 0x00, 0x00,
882 0x00, 0x00, 0x00, 0x00,
883 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
886 static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
888 { ICE_IPV4_OFOS, 14 },
889 { ICE_UDP_ILOS, 34 },
891 { ICE_PROTOCOL_LAST, 0 },
894 static const u8 dummy_ipv4_nat_pkt[] = {
895 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
896 0x00, 0x00, 0x00, 0x00,
897 0x00, 0x00, 0x00, 0x00,
900 0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_IL 14 */
901 0x00, 0x00, 0x40, 0x00,
902 0x40, 0x11, 0x00, 0x00,
903 0x00, 0x00, 0x00, 0x00,
904 0x00, 0x00, 0x00, 0x00,
906 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34 */
907 0x00, 0x00, 0x00, 0x00,
909 0x00, 0x00, 0x00, 0x00,
910 0x00, 0x00, 0x00, 0x00,
911 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
914 static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
916 { ICE_IPV6_OFOS, 14 },
917 { ICE_UDP_ILOS, 54 },
919 { ICE_PROTOCOL_LAST, 0 },
922 static const u8 dummy_ipv6_nat_pkt[] = {
923 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
924 0x00, 0x00, 0x00, 0x00,
925 0x00, 0x00, 0x00, 0x00,
928 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
929 0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */
930 0x00, 0x00, 0x00, 0x00,
931 0x00, 0x00, 0x00, 0x00,
932 0x00, 0x00, 0x00, 0x00,
933 0x00, 0x00, 0x00, 0x00,
934 0x00, 0x00, 0x00, 0x00,
935 0x00, 0x00, 0x00, 0x00,
936 0x00, 0x00, 0x00, 0x00,
937 0x00, 0x00, 0x00, 0x00,
939 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54 */
940 0x00, 0x00, 0x00, 0x00,
942 0x00, 0x00, 0x00, 0x00,
943 0x00, 0x00, 0x00, 0x00,
944 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
948 static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
950 { ICE_IPV4_OFOS, 14 },
952 { ICE_PROTOCOL_LAST, 0 },
955 static const u8 dummy_ipv4_l2tpv3_pkt[] = {
956 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
957 0x00, 0x00, 0x00, 0x00,
958 0x00, 0x00, 0x00, 0x00,
961 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
962 0x00, 0x00, 0x40, 0x00,
963 0x40, 0x73, 0x00, 0x00,
964 0x00, 0x00, 0x00, 0x00,
965 0x00, 0x00, 0x00, 0x00,
967 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
968 0x00, 0x00, 0x00, 0x00,
969 0x00, 0x00, 0x00, 0x00,
970 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
973 static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
975 { ICE_IPV6_OFOS, 14 },
977 { ICE_PROTOCOL_LAST, 0 },
980 static const u8 dummy_ipv6_l2tpv3_pkt[] = {
981 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
982 0x00, 0x00, 0x00, 0x00,
983 0x00, 0x00, 0x00, 0x00,
986 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
987 0x00, 0x0c, 0x73, 0x40,
988 0x00, 0x00, 0x00, 0x00,
989 0x00, 0x00, 0x00, 0x00,
990 0x00, 0x00, 0x00, 0x00,
991 0x00, 0x00, 0x00, 0x00,
992 0x00, 0x00, 0x00, 0x00,
993 0x00, 0x00, 0x00, 0x00,
994 0x00, 0x00, 0x00, 0x00,
995 0x00, 0x00, 0x00, 0x00,
997 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
998 0x00, 0x00, 0x00, 0x00,
999 0x00, 0x00, 0x00, 0x00,
1000 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1003 /* this is a recipe to profile association bitmap */
1004 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
1005 ICE_MAX_NUM_PROFILES);
1007 /* this is a profile to recipe association bitmap */
1008 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
1009 ICE_MAX_NUM_RECIPES);
1011 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
1014 * ice_collect_result_idx - copy result index values
1015 * @buf: buffer that contains the result index
1016 * @recp: the recipe struct to copy data into
1018 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
1019 struct ice_sw_recipe *recp)
1021 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1022 ice_set_bit(buf->content.result_indx &
1023 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
1027 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
1028 * @hw: pointer to hardware structure
1029 * @recps: struct that we need to populate
1030 * @rid: recipe ID that we are populating
1031 * @refresh_required: true if we should get recipe to profile mapping from FW
1033 * This function is used to populate all the necessary entries into our
1034 * bookkeeping so that we have a current list of all the recipes that are
1035 * programmed in the firmware.
1037 static enum ice_status
1038 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
1039 bool *refresh_required)
1041 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
1042 struct ice_aqc_recipe_data_elem *tmp;
1043 u16 num_recps = ICE_MAX_NUM_RECIPES;
1044 struct ice_prot_lkup_ext *lkup_exts;
1045 enum ice_status status;
1049 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
1051 /* we need a buffer big enough to accommodate all the recipes */
1052 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
1053 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
1055 return ICE_ERR_NO_MEMORY;
1057 tmp[0].recipe_indx = rid;
1058 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
1059 /* non-zero status meaning recipe doesn't exist */
1063 /* Get recipe to profile map so that we can get the fv from lkups that
1064 * we read for a recipe from FW. Since we want to minimize the number of
1065 * times we make this FW call, just make one call and cache the copy
1066 * until a new recipe is added. This operation is only required the
1067 * first time to get the changes from FW. Then to search existing
1068 * entries we don't need to update the cache again until another recipe
1071 if (*refresh_required) {
1072 ice_get_recp_to_prof_map(hw);
1073 *refresh_required = false;
1076 /* Start populating all the entries for recps[rid] based on lkups from
1077 * firmware. Note that we are only creating the root recipe in our
1080 lkup_exts = &recps[rid].lkup_exts;
1082 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
1083 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
1084 struct ice_recp_grp_entry *rg_entry;
1085 u8 i, prof, idx, prot = 0;
1089 rg_entry = (struct ice_recp_grp_entry *)
1090 ice_malloc(hw, sizeof(*rg_entry));
1092 status = ICE_ERR_NO_MEMORY;
1096 idx = root_bufs.recipe_indx;
1097 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
1099 /* Mark all result indices in this chain */
1100 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1101 ice_set_bit(root_bufs.content.result_indx &
1102 ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
1104 /* get the first profile that is associated with rid */
1105 prof = ice_find_first_bit(recipe_to_profile[idx],
1106 ICE_MAX_NUM_PROFILES);
1107 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
1108 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
1110 rg_entry->fv_idx[i] = lkup_indx;
1111 rg_entry->fv_mask[i] =
1112 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
1114 /* If the recipe is a chained recipe then all its
1115 * child recipe's result will have a result index.
1116 * To fill fv_words we should not use those result
1117 * index, we only need the protocol ids and offsets.
1118 * We will skip all the fv_idx which stores result
1119 * index in them. We also need to skip any fv_idx which
1120 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
1121 * valid offset value.
1123 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
1124 rg_entry->fv_idx[i]) ||
1125 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
1126 rg_entry->fv_idx[i] == 0)
1129 ice_find_prot_off(hw, ICE_BLK_SW, prof,
1130 rg_entry->fv_idx[i], &prot, &off);
1131 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
1132 lkup_exts->fv_words[fv_word_idx].off = off;
1133 lkup_exts->field_mask[fv_word_idx] =
1134 rg_entry->fv_mask[i];
1137 /* populate rg_list with the data from the child entry of this
1140 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
1142 /* Propagate some data to the recipe database */
1143 recps[idx].is_root = !!is_root;
1144 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1145 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
1146 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
1147 recps[idx].chain_idx = root_bufs.content.result_indx &
1148 ~ICE_AQ_RECIPE_RESULT_EN;
1149 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
1151 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
1157 /* Only do the following for root recipes entries */
1158 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
1159 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
1160 recps[idx].root_rid = root_bufs.content.rid &
1161 ~ICE_AQ_RECIPE_ID_IS_ROOT;
1162 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1165 /* Complete initialization of the root recipe entry */
1166 lkup_exts->n_val_words = fv_word_idx;
1167 recps[rid].big_recp = (num_recps > 1);
1168 recps[rid].n_grp_count = (u8)num_recps;
1169 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
1170 ice_memdup(hw, tmp, recps[rid].n_grp_count *
1171 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
1172 if (!recps[rid].root_buf)
1175 /* Copy result indexes */
1176 ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
1177 recps[rid].recp_created = true;
1185 * ice_get_recp_to_prof_map - updates recipe to profile mapping
1186 * @hw: pointer to hardware structure
1188 * This function is used to populate recipe_to_profile matrix where index to
1189 * this array is the recipe ID and the element is the mapping of which profiles
1190 * is this recipe mapped to.
1192 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
1194 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
1197 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
1200 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
1201 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
1202 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
1204 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
1205 ICE_MAX_NUM_RECIPES);
1206 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
1207 if (ice_is_bit_set(r_bitmap, j))
1208 ice_set_bit(i, recipe_to_profile[j]);
1213 * ice_init_def_sw_recp - initialize the recipe book keeping tables
1214 * @hw: pointer to the HW struct
1215 * @recp_list: pointer to sw recipe list
1217 * Allocate memory for the entire recipe table and initialize the structures/
1218 * entries corresponding to basic recipes.
1221 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
1223 struct ice_sw_recipe *recps;
1226 recps = (struct ice_sw_recipe *)
1227 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
1229 return ICE_ERR_NO_MEMORY;
1231 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
1232 recps[i].root_rid = i;
1233 INIT_LIST_HEAD(&recps[i].filt_rules);
1234 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
1235 INIT_LIST_HEAD(&recps[i].rg_list);
1236 ice_init_lock(&recps[i].filt_rule_lock);
1245 * ice_aq_get_sw_cfg - get switch configuration
1246 * @hw: pointer to the hardware structure
1247 * @buf: pointer to the result buffer
1248 * @buf_size: length of the buffer available for response
1249 * @req_desc: pointer to requested descriptor
1250 * @num_elems: pointer to number of elements
1251 * @cd: pointer to command details structure or NULL
1253 * Get switch configuration (0x0200) to be placed in 'buff'.
1254 * This admin command returns information such as initial VSI/port number
1255 * and switch ID it belongs to.
1257 * NOTE: *req_desc is both an input/output parameter.
1258 * The caller of this function first calls this function with *request_desc set
1259 * to 0. If the response from f/w has *req_desc set to 0, all the switch
1260 * configuration information has been returned; if non-zero (meaning not all
1261 * the information was returned), the caller should call this function again
1262 * with *req_desc set to the previous value returned by f/w to get the
1263 * next block of switch configuration information.
1265 * *num_elems is output only parameter. This reflects the number of elements
1266 * in response buffer. The caller of this function to use *num_elems while
1267 * parsing the response buffer.
1269 static enum ice_status
1270 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
1271 u16 buf_size, u16 *req_desc, u16 *num_elems,
1272 struct ice_sq_cd *cd)
1274 struct ice_aqc_get_sw_cfg *cmd;
1275 enum ice_status status;
1276 struct ice_aq_desc desc;
1278 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
1279 cmd = &desc.params.get_sw_conf;
1280 cmd->element = CPU_TO_LE16(*req_desc);
1282 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1284 *req_desc = LE16_TO_CPU(cmd->element);
1285 *num_elems = LE16_TO_CPU(cmd->num_elems);
1292 * ice_alloc_sw - allocate resources specific to switch
1293 * @hw: pointer to the HW struct
1294 * @ena_stats: true to turn on VEB stats
1295 * @shared_res: true for shared resource, false for dedicated resource
1296 * @sw_id: switch ID returned
1297 * @counter_id: VEB counter ID returned
1299 * allocates switch resources (SWID and VEB counter) (0x0208)
1302 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
1305 struct ice_aqc_alloc_free_res_elem *sw_buf;
1306 struct ice_aqc_res_elem *sw_ele;
1307 enum ice_status status;
1310 buf_len = sizeof(*sw_buf);
1311 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1312 ice_malloc(hw, buf_len);
1314 return ICE_ERR_NO_MEMORY;
1316 /* Prepare buffer for switch ID.
1317 * The number of resource entries in buffer is passed as 1 since only a
1318 * single switch/VEB instance is allocated, and hence a single sw_id
1321 sw_buf->num_elems = CPU_TO_LE16(1);
1323 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
1324 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
1325 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
1327 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1328 ice_aqc_opc_alloc_res, NULL);
1331 goto ice_alloc_sw_exit;
1333 sw_ele = &sw_buf->elem[0];
1334 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
1337 /* Prepare buffer for VEB Counter */
1338 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
1339 struct ice_aqc_alloc_free_res_elem *counter_buf;
1340 struct ice_aqc_res_elem *counter_ele;
1342 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1343 ice_malloc(hw, buf_len);
1345 status = ICE_ERR_NO_MEMORY;
1346 goto ice_alloc_sw_exit;
1349 /* The number of resource entries in buffer is passed as 1 since
1350 * only a single switch/VEB instance is allocated, and hence a
1351 * single VEB counter is requested.
1353 counter_buf->num_elems = CPU_TO_LE16(1);
1354 counter_buf->res_type =
1355 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
1356 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
1357 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1361 ice_free(hw, counter_buf);
1362 goto ice_alloc_sw_exit;
1364 counter_ele = &counter_buf->elem[0];
1365 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
1366 ice_free(hw, counter_buf);
1370 ice_free(hw, sw_buf);
1375 * ice_free_sw - free resources specific to switch
1376 * @hw: pointer to the HW struct
1377 * @sw_id: switch ID returned
1378 * @counter_id: VEB counter ID returned
1380 * free switch resources (SWID and VEB counter) (0x0209)
1382 * NOTE: This function frees multiple resources. It continues
1383 * releasing other resources even after it encounters error.
1384 * The error code returned is the last error it encountered.
1386 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
1388 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
1389 enum ice_status status, ret_status;
1392 buf_len = sizeof(*sw_buf);
1393 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1394 ice_malloc(hw, buf_len);
1396 return ICE_ERR_NO_MEMORY;
1398 /* Prepare buffer to free for switch ID res.
1399 * The number of resource entries in buffer is passed as 1 since only a
1400 * single switch/VEB instance is freed, and hence a single sw_id
1403 sw_buf->num_elems = CPU_TO_LE16(1);
1404 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
1405 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
1407 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1408 ice_aqc_opc_free_res, NULL);
1411 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1413 /* Prepare buffer to free for VEB Counter resource */
1414 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1415 ice_malloc(hw, buf_len);
1417 ice_free(hw, sw_buf);
1418 return ICE_ERR_NO_MEMORY;
1421 /* The number of resource entries in buffer is passed as 1 since only a
1422 * single switch/VEB instance is freed, and hence a single VEB counter
1425 counter_buf->num_elems = CPU_TO_LE16(1);
1426 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
1427 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
1429 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1430 ice_aqc_opc_free_res, NULL);
1432 ice_debug(hw, ICE_DBG_SW,
1433 "VEB counter resource could not be freed\n");
1434 ret_status = status;
1437 ice_free(hw, counter_buf);
1438 ice_free(hw, sw_buf);
1444 * @hw: pointer to the HW struct
1445 * @vsi_ctx: pointer to a VSI context struct
1446 * @cd: pointer to command details structure or NULL
1448 * Add a VSI context to the hardware (0x0210)
1451 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1452 struct ice_sq_cd *cd)
1454 struct ice_aqc_add_update_free_vsi_resp *res;
1455 struct ice_aqc_add_get_update_free_vsi *cmd;
1456 struct ice_aq_desc desc;
1457 enum ice_status status;
1459 cmd = &desc.params.vsi_cmd;
1460 res = &desc.params.add_update_free_vsi_res;
1462 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
1464 if (!vsi_ctx->alloc_from_pool)
1465 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
1466 ICE_AQ_VSI_IS_VALID);
1468 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
1470 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1472 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1473 sizeof(vsi_ctx->info), cd);
1476 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1477 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
1478 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
1486 * @hw: pointer to the HW struct
1487 * @vsi_ctx: pointer to a VSI context struct
1488 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1489 * @cd: pointer to command details structure or NULL
1491 * Free VSI context info from hardware (0x0213)
1494 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1495 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1497 struct ice_aqc_add_update_free_vsi_resp *resp;
1498 struct ice_aqc_add_get_update_free_vsi *cmd;
1499 struct ice_aq_desc desc;
1500 enum ice_status status;
1502 cmd = &desc.params.vsi_cmd;
1503 resp = &desc.params.add_update_free_vsi_res;
1505 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1507 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1509 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
1511 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1513 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1514 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1522 * @hw: pointer to the HW struct
1523 * @vsi_ctx: pointer to a VSI context struct
1524 * @cd: pointer to command details structure or NULL
1526 * Update VSI context in the hardware (0x0211)
1529 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1530 struct ice_sq_cd *cd)
1532 struct ice_aqc_add_update_free_vsi_resp *resp;
1533 struct ice_aqc_add_get_update_free_vsi *cmd;
1534 struct ice_aq_desc desc;
1535 enum ice_status status;
1537 cmd = &desc.params.vsi_cmd;
1538 resp = &desc.params.add_update_free_vsi_res;
1540 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1542 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1544 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1546 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1547 sizeof(vsi_ctx->info), cd);
1550 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1551 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1558 * ice_is_vsi_valid - check whether the VSI is valid or not
1559 * @hw: pointer to the HW struct
1560 * @vsi_handle: VSI handle
1562 * check whether the VSI is valid or not
1564 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1566 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1570 * ice_get_hw_vsi_num - return the HW VSI number
1571 * @hw: pointer to the HW struct
1572 * @vsi_handle: VSI handle
1574 * return the HW VSI number
1575 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1577 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1579 return hw->vsi_ctx[vsi_handle]->vsi_num;
1583 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1584 * @hw: pointer to the HW struct
1585 * @vsi_handle: VSI handle
1587 * return the VSI context entry for a given VSI handle
1589 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1591 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1595 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1596 * @hw: pointer to the HW struct
1597 * @vsi_handle: VSI handle
1598 * @vsi: VSI context pointer
1600 * save the VSI context entry for a given VSI handle
1603 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1605 hw->vsi_ctx[vsi_handle] = vsi;
1609 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1610 * @hw: pointer to the HW struct
1611 * @vsi_handle: VSI handle
1613 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1615 struct ice_vsi_ctx *vsi;
1618 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1621 ice_for_each_traffic_class(i) {
1622 if (vsi->lan_q_ctx[i]) {
1623 ice_free(hw, vsi->lan_q_ctx[i]);
1624 vsi->lan_q_ctx[i] = NULL;
1630 * ice_clear_vsi_ctx - clear the VSI context entry
1631 * @hw: pointer to the HW struct
1632 * @vsi_handle: VSI handle
1634 * clear the VSI context entry
1636 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1638 struct ice_vsi_ctx *vsi;
1640 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1642 ice_clear_vsi_q_ctx(hw, vsi_handle);
1644 hw->vsi_ctx[vsi_handle] = NULL;
1649 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1650 * @hw: pointer to the HW struct
1652 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1656 for (i = 0; i < ICE_MAX_VSI; i++)
1657 ice_clear_vsi_ctx(hw, i);
1661 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1662 * @hw: pointer to the HW struct
1663 * @vsi_handle: unique VSI handle provided by drivers
1664 * @vsi_ctx: pointer to a VSI context struct
1665 * @cd: pointer to command details structure or NULL
1667 * Add a VSI context to the hardware also add it into the VSI handle list.
1668 * If this function gets called after reset for existing VSIs then update
1669 * with the new HW VSI number in the corresponding VSI handle list entry.
1672 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1673 struct ice_sq_cd *cd)
1675 struct ice_vsi_ctx *tmp_vsi_ctx;
1676 enum ice_status status;
1678 if (vsi_handle >= ICE_MAX_VSI)
1679 return ICE_ERR_PARAM;
1680 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1683 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1685 /* Create a new VSI context */
1686 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1687 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
1689 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1690 return ICE_ERR_NO_MEMORY;
1692 *tmp_vsi_ctx = *vsi_ctx;
1694 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1696 /* update with new HW VSI num */
1697 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1704 * ice_free_vsi- free VSI context from hardware and VSI handle list
1705 * @hw: pointer to the HW struct
1706 * @vsi_handle: unique VSI handle
1707 * @vsi_ctx: pointer to a VSI context struct
1708 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1709 * @cd: pointer to command details structure or NULL
1711 * Free VSI context info from hardware as well as from VSI handle list
1714 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1715 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1717 enum ice_status status;
1719 if (!ice_is_vsi_valid(hw, vsi_handle))
1720 return ICE_ERR_PARAM;
1721 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1722 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1724 ice_clear_vsi_ctx(hw, vsi_handle);
1730 * @hw: pointer to the HW struct
1731 * @vsi_handle: unique VSI handle
1732 * @vsi_ctx: pointer to a VSI context struct
1733 * @cd: pointer to command details structure or NULL
1735 * Update VSI context in the hardware
1738 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1739 struct ice_sq_cd *cd)
1741 if (!ice_is_vsi_valid(hw, vsi_handle))
1742 return ICE_ERR_PARAM;
1743 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1744 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1748 * ice_aq_get_vsi_params
1749 * @hw: pointer to the HW struct
1750 * @vsi_ctx: pointer to a VSI context struct
1751 * @cd: pointer to command details structure or NULL
1753 * Get VSI context info from hardware (0x0212)
1756 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1757 struct ice_sq_cd *cd)
1759 struct ice_aqc_add_get_update_free_vsi *cmd;
1760 struct ice_aqc_get_vsi_resp *resp;
1761 struct ice_aq_desc desc;
1762 enum ice_status status;
1764 cmd = &desc.params.vsi_cmd;
1765 resp = &desc.params.get_vsi_resp;
1767 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1769 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1771 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1772 sizeof(vsi_ctx->info), cd);
1774 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1776 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1777 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1784 * ice_aq_add_update_mir_rule - add/update a mirror rule
1785 * @hw: pointer to the HW struct
1786 * @rule_type: Rule Type
1787 * @dest_vsi: VSI number to which packets will be mirrored
1788 * @count: length of the list
1789 * @mr_buf: buffer for list of mirrored VSI numbers
1790 * @cd: pointer to command details structure or NULL
1793 * Add/Update Mirror Rule (0x260).
1796 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1797 u16 count, struct ice_mir_rule_buf *mr_buf,
1798 struct ice_sq_cd *cd, u16 *rule_id)
1800 struct ice_aqc_add_update_mir_rule *cmd;
1801 struct ice_aq_desc desc;
1802 enum ice_status status;
1803 __le16 *mr_list = NULL;
1806 switch (rule_type) {
1807 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1808 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1809 /* Make sure count and mr_buf are set for these rule_types */
1810 if (!(count && mr_buf))
1811 return ICE_ERR_PARAM;
1813 buf_size = count * sizeof(__le16);
1814 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1816 return ICE_ERR_NO_MEMORY;
1818 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1819 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1820 /* Make sure count and mr_buf are not set for these
1823 if (count || mr_buf)
1824 return ICE_ERR_PARAM;
1827 ice_debug(hw, ICE_DBG_SW,
1828 "Error due to unsupported rule_type %u\n", rule_type);
1829 return ICE_ERR_OUT_OF_RANGE;
1832 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1834 /* Pre-process 'mr_buf' items for add/update of virtual port
1835 * ingress/egress mirroring (but not physical port ingress/egress
1841 for (i = 0; i < count; i++) {
1844 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1846 /* Validate specified VSI number, make sure it is less
1847 * than ICE_MAX_VSI, if not return with error.
1849 if (id >= ICE_MAX_VSI) {
1850 ice_debug(hw, ICE_DBG_SW,
1851 "Error VSI index (%u) out-of-range\n",
1853 ice_free(hw, mr_list);
1854 return ICE_ERR_OUT_OF_RANGE;
1857 /* add VSI to mirror rule */
1860 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1861 else /* remove VSI from mirror rule */
1862 mr_list[i] = CPU_TO_LE16(id);
1866 cmd = &desc.params.add_update_rule;
1867 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1868 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1869 ICE_AQC_RULE_ID_VALID_M);
1870 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1871 cmd->num_entries = CPU_TO_LE16(count);
1872 cmd->dest = CPU_TO_LE16(dest_vsi);
1874 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
1876 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1878 ice_free(hw, mr_list);
1884 * ice_aq_delete_mir_rule - delete a mirror rule
1885 * @hw: pointer to the HW struct
1886 * @rule_id: Mirror rule ID (to be deleted)
1887 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1888 * otherwise it is returned to the shared pool
1889 * @cd: pointer to command details structure or NULL
1891 * Delete Mirror Rule (0x261).
1894 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1895 struct ice_sq_cd *cd)
1897 struct ice_aqc_delete_mir_rule *cmd;
1898 struct ice_aq_desc desc;
1900 /* rule_id should be in the range 0...63 */
1901 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1902 return ICE_ERR_OUT_OF_RANGE;
1904 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1906 cmd = &desc.params.del_rule;
1907 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1908 cmd->rule_id = CPU_TO_LE16(rule_id);
1911 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1913 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1917 * ice_aq_alloc_free_vsi_list
1918 * @hw: pointer to the HW struct
1919 * @vsi_list_id: VSI list ID returned or used for lookup
1920 * @lkup_type: switch rule filter lookup type
1921 * @opc: switch rules population command type - pass in the command opcode
1923 * allocates or free a VSI list resource
1925 static enum ice_status
1926 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1927 enum ice_sw_lkup_type lkup_type,
1928 enum ice_adminq_opc opc)
1930 struct ice_aqc_alloc_free_res_elem *sw_buf;
1931 struct ice_aqc_res_elem *vsi_ele;
1932 enum ice_status status;
1935 buf_len = sizeof(*sw_buf);
1936 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1937 ice_malloc(hw, buf_len);
1939 return ICE_ERR_NO_MEMORY;
1940 sw_buf->num_elems = CPU_TO_LE16(1);
1942 if (lkup_type == ICE_SW_LKUP_MAC ||
1943 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1944 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1945 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1946 lkup_type == ICE_SW_LKUP_PROMISC ||
1947 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1948 lkup_type == ICE_SW_LKUP_LAST) {
1949 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1950 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1952 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1954 status = ICE_ERR_PARAM;
1955 goto ice_aq_alloc_free_vsi_list_exit;
1958 if (opc == ice_aqc_opc_free_res)
1959 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1961 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1963 goto ice_aq_alloc_free_vsi_list_exit;
1965 if (opc == ice_aqc_opc_alloc_res) {
1966 vsi_ele = &sw_buf->elem[0];
1967 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1970 ice_aq_alloc_free_vsi_list_exit:
1971 ice_free(hw, sw_buf);
1976 * ice_aq_set_storm_ctrl - Sets storm control configuration
1977 * @hw: pointer to the HW struct
1978 * @bcast_thresh: represents the upper threshold for broadcast storm control
1979 * @mcast_thresh: represents the upper threshold for multicast storm control
1980 * @ctl_bitmask: storm control control knobs
1982 * Sets the storm control configuration (0x0280)
1985 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1988 struct ice_aqc_storm_cfg *cmd;
1989 struct ice_aq_desc desc;
1991 cmd = &desc.params.storm_conf;
1993 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
1995 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1996 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1997 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1999 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2003 * ice_aq_get_storm_ctrl - gets storm control configuration
2004 * @hw: pointer to the HW struct
2005 * @bcast_thresh: represents the upper threshold for broadcast storm control
2006 * @mcast_thresh: represents the upper threshold for multicast storm control
2007 * @ctl_bitmask: storm control control knobs
2009 * Gets the storm control configuration (0x0281)
2012 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
2015 enum ice_status status;
2016 struct ice_aq_desc desc;
2018 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
2020 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2022 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
2025 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
2028 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
2031 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
2038 * ice_aq_sw_rules - add/update/remove switch rules
2039 * @hw: pointer to the HW struct
2040 * @rule_list: pointer to switch rule population list
2041 * @rule_list_sz: total size of the rule list in bytes
2042 * @num_rules: number of switch rules in the rule_list
2043 * @opc: switch rules population command type - pass in the command opcode
2044 * @cd: pointer to command details structure or NULL
2046 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
2048 static enum ice_status
2049 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
2050 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2052 struct ice_aq_desc desc;
2053 enum ice_status status;
2055 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2057 if (opc != ice_aqc_opc_add_sw_rules &&
2058 opc != ice_aqc_opc_update_sw_rules &&
2059 opc != ice_aqc_opc_remove_sw_rules)
2060 return ICE_ERR_PARAM;
2062 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2064 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2065 desc.params.sw_rules.num_rules_fltr_entry_index =
2066 CPU_TO_LE16(num_rules);
2067 status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
2068 if (opc != ice_aqc_opc_add_sw_rules &&
2069 hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
2070 status = ICE_ERR_DOES_NOT_EXIST;
2076 * ice_aq_add_recipe - add switch recipe
2077 * @hw: pointer to the HW struct
2078 * @s_recipe_list: pointer to switch rule population list
2079 * @num_recipes: number of switch recipes in the list
2080 * @cd: pointer to command details structure or NULL
2085 ice_aq_add_recipe(struct ice_hw *hw,
2086 struct ice_aqc_recipe_data_elem *s_recipe_list,
2087 u16 num_recipes, struct ice_sq_cd *cd)
2089 struct ice_aqc_add_get_recipe *cmd;
2090 struct ice_aq_desc desc;
2093 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2094 cmd = &desc.params.add_get_recipe;
2095 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
2097 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
2098 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2100 buf_size = num_recipes * sizeof(*s_recipe_list);
2102 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
2106 * ice_aq_get_recipe - get switch recipe
2107 * @hw: pointer to the HW struct
2108 * @s_recipe_list: pointer to switch rule population list
2109 * @num_recipes: pointer to the number of recipes (input and output)
2110 * @recipe_root: root recipe number of recipe(s) to retrieve
2111 * @cd: pointer to command details structure or NULL
2115 * On input, *num_recipes should equal the number of entries in s_recipe_list.
2116 * On output, *num_recipes will equal the number of entries returned in
2119 * The caller must supply enough space in s_recipe_list to hold all possible
2120 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
2123 ice_aq_get_recipe(struct ice_hw *hw,
2124 struct ice_aqc_recipe_data_elem *s_recipe_list,
2125 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
2127 struct ice_aqc_add_get_recipe *cmd;
2128 struct ice_aq_desc desc;
2129 enum ice_status status;
2132 if (*num_recipes != ICE_MAX_NUM_RECIPES)
2133 return ICE_ERR_PARAM;
2135 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2136 cmd = &desc.params.add_get_recipe;
2137 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
2139 cmd->return_index = CPU_TO_LE16(recipe_root);
2140 cmd->num_sub_recipes = 0;
2142 buf_size = *num_recipes * sizeof(*s_recipe_list);
2144 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
2145 /* cppcheck-suppress constArgument */
2146 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
2152 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
2153 * @hw: pointer to the HW struct
2154 * @profile_id: package profile ID to associate the recipe with
2155 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2156 * @cd: pointer to command details structure or NULL
2157 * Recipe to profile association (0x0291)
2160 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2161 struct ice_sq_cd *cd)
2163 struct ice_aqc_recipe_to_profile *cmd;
2164 struct ice_aq_desc desc;
2166 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2167 cmd = &desc.params.recipe_to_profile;
2168 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
2169 cmd->profile_id = CPU_TO_LE16(profile_id);
2170 /* Set the recipe ID bit in the bitmask to let the device know which
2171 * profile we are associating the recipe to
2173 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
2174 ICE_NONDMA_TO_NONDMA);
2176 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2180 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
2181 * @hw: pointer to the HW struct
2182 * @profile_id: package profile ID to associate the recipe with
2183 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2184 * @cd: pointer to command details structure or NULL
2185 * Associate profile ID with given recipe (0x0293)
2188 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2189 struct ice_sq_cd *cd)
2191 struct ice_aqc_recipe_to_profile *cmd;
2192 struct ice_aq_desc desc;
2193 enum ice_status status;
2195 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2196 cmd = &desc.params.recipe_to_profile;
2197 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
2198 cmd->profile_id = CPU_TO_LE16(profile_id);
2200 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2202 ice_memcpy(r_bitmap, cmd->recipe_assoc,
2203 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
2209 * ice_alloc_recipe - add recipe resource
2210 * @hw: pointer to the hardware structure
2211 * @rid: recipe ID returned as response to AQ call
2213 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
2215 struct ice_aqc_alloc_free_res_elem *sw_buf;
2216 enum ice_status status;
2219 buf_len = sizeof(*sw_buf);
2220 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2222 return ICE_ERR_NO_MEMORY;
2224 sw_buf->num_elems = CPU_TO_LE16(1);
2225 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
2226 ICE_AQC_RES_TYPE_S) |
2227 ICE_AQC_RES_TYPE_FLAG_SHARED);
2228 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2229 ice_aqc_opc_alloc_res, NULL);
2231 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2232 ice_free(hw, sw_buf);
2237 /* ice_init_port_info - Initialize port_info with switch configuration data
2238 * @pi: pointer to port_info
2239 * @vsi_port_num: VSI number or port number
2240 * @type: Type of switch element (port or VSI)
2241 * @swid: switch ID of the switch the element is attached to
2242 * @pf_vf_num: PF or VF number
2243 * @is_vf: true if the element is a VF, false otherwise
2246 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2247 u16 swid, u16 pf_vf_num, bool is_vf)
2250 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2251 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2253 pi->pf_vf_num = pf_vf_num;
2255 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
2256 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
2259 ice_debug(pi->hw, ICE_DBG_SW,
2260 "incorrect VSI/port type received\n");
2265 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
2266 * @hw: pointer to the hardware structure
2268 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
2270 struct ice_aqc_get_sw_cfg_resp *rbuf;
2271 enum ice_status status;
2278 num_total_ports = 1;
2280 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
2281 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
2284 return ICE_ERR_NO_MEMORY;
2286 /* Multiple calls to ice_aq_get_sw_cfg may be required
2287 * to get all the switch configuration information. The need
2288 * for additional calls is indicated by ice_aq_get_sw_cfg
2289 * writing a non-zero value in req_desc
2292 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
2293 &req_desc, &num_elems, NULL);
2298 for (i = 0; i < num_elems; i++) {
2299 struct ice_aqc_get_sw_cfg_resp_elem *ele;
2300 u16 pf_vf_num, swid, vsi_port_num;
2304 ele = rbuf[i].elements;
2305 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
2306 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
2308 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
2309 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
2311 swid = LE16_TO_CPU(ele->swid);
2313 if (LE16_TO_CPU(ele->pf_vf_num) &
2314 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
2317 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
2318 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
2321 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2322 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
2323 if (j == num_total_ports) {
2324 ice_debug(hw, ICE_DBG_SW,
2325 "more ports than expected\n");
2326 status = ICE_ERR_CFG;
2329 ice_init_port_info(hw->port_info,
2330 vsi_port_num, res_type, swid,
2338 } while (req_desc && !status);
2341 ice_free(hw, (void *)rbuf);
2346 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2347 * @hw: pointer to the hardware structure
2348 * @fi: filter info structure to fill/update
2350 * This helper function populates the lb_en and lan_en elements of the provided
2351 * ice_fltr_info struct using the switch's type and characteristics of the
2352 * switch rule being configured.
2354 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
2356 if ((fi->flag & ICE_FLTR_RX) &&
2357 (fi->fltr_act == ICE_FWD_TO_VSI ||
2358 fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
2359 fi->lkup_type == ICE_SW_LKUP_LAST)
2363 if ((fi->flag & ICE_FLTR_TX) &&
2364 (fi->fltr_act == ICE_FWD_TO_VSI ||
2365 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2366 fi->fltr_act == ICE_FWD_TO_Q ||
2367 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2368 /* Setting LB for prune actions will result in replicated
2369 * packets to the internal switch that will be dropped.
2371 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2374 /* Set lan_en to TRUE if
2375 * 1. The switch is a VEB AND
2377 * 2.1 The lookup is a directional lookup like ethertype,
2378 * promiscuous, ethertype-MAC, promiscuous-VLAN
2379 * and default-port OR
2380 * 2.2 The lookup is VLAN, OR
2381 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2382 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
2386 * The switch is a VEPA.
2388 * In all other cases, the LAN enable has to be set to false.
2391 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2392 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
2393 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2394 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2395 fi->lkup_type == ICE_SW_LKUP_DFLT ||
2396 fi->lkup_type == ICE_SW_LKUP_VLAN ||
2397 (fi->lkup_type == ICE_SW_LKUP_MAC &&
2398 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
2399 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
2400 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
2409 * ice_fill_sw_rule - Helper function to fill switch rule structure
2410 * @hw: pointer to the hardware structure
2411 * @f_info: entry containing packet forwarding information
2412 * @s_rule: switch rule structure to be filled in based on mac_entry
2413 * @opc: switch rules population command type - pass in the command opcode
2416 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
2417 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
2419 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
2427 if (opc == ice_aqc_opc_remove_sw_rules) {
2428 s_rule->pdata.lkup_tx_rx.act = 0;
2429 s_rule->pdata.lkup_tx_rx.index =
2430 CPU_TO_LE16(f_info->fltr_rule_id);
2431 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
2435 eth_hdr_sz = sizeof(dummy_eth_header);
2436 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
2438 /* initialize the ether header with a dummy header */
2439 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
2440 ice_fill_sw_info(hw, f_info);
2442 switch (f_info->fltr_act) {
2443 case ICE_FWD_TO_VSI:
2444 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
2445 ICE_SINGLE_ACT_VSI_ID_M;
2446 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2447 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2448 ICE_SINGLE_ACT_VALID_BIT;
2450 case ICE_FWD_TO_VSI_LIST:
2451 act |= ICE_SINGLE_ACT_VSI_LIST;
2452 act |= (f_info->fwd_id.vsi_list_id <<
2453 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
2454 ICE_SINGLE_ACT_VSI_LIST_ID_M;
2455 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2456 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2457 ICE_SINGLE_ACT_VALID_BIT;
2460 act |= ICE_SINGLE_ACT_TO_Q;
2461 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2462 ICE_SINGLE_ACT_Q_INDEX_M;
2464 case ICE_DROP_PACKET:
2465 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
2466 ICE_SINGLE_ACT_VALID_BIT;
2468 case ICE_FWD_TO_QGRP:
2469 q_rgn = f_info->qgrp_size > 0 ?
2470 (u8)ice_ilog2(f_info->qgrp_size) : 0;
2471 act |= ICE_SINGLE_ACT_TO_Q;
2472 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2473 ICE_SINGLE_ACT_Q_INDEX_M;
2474 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
2475 ICE_SINGLE_ACT_Q_REGION_M;
2482 act |= ICE_SINGLE_ACT_LB_ENABLE;
2484 act |= ICE_SINGLE_ACT_LAN_ENABLE;
2486 switch (f_info->lkup_type) {
2487 case ICE_SW_LKUP_MAC:
2488 daddr = f_info->l_data.mac.mac_addr;
2490 case ICE_SW_LKUP_VLAN:
2491 vlan_id = f_info->l_data.vlan.vlan_id;
2492 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
2493 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
2494 act |= ICE_SINGLE_ACT_PRUNE;
2495 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
2498 case ICE_SW_LKUP_ETHERTYPE_MAC:
2499 daddr = f_info->l_data.ethertype_mac.mac_addr;
2501 case ICE_SW_LKUP_ETHERTYPE:
2502 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2503 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
2505 case ICE_SW_LKUP_MAC_VLAN:
2506 daddr = f_info->l_data.mac_vlan.mac_addr;
2507 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2509 case ICE_SW_LKUP_PROMISC_VLAN:
2510 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2512 case ICE_SW_LKUP_PROMISC:
2513 daddr = f_info->l_data.mac_vlan.mac_addr;
2519 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
2520 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
2521 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
2523 /* Recipe set depending on lookup type */
2524 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
2525 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
2526 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2529 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
2530 ICE_NONDMA_TO_NONDMA);
2532 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
2533 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
2534 *off = CPU_TO_BE16(vlan_id);
2537 /* Create the switch rule with the final dummy Ethernet header */
2538 if (opc != ice_aqc_opc_update_sw_rules)
2539 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
2543 * ice_add_marker_act
2544 * @hw: pointer to the hardware structure
2545 * @m_ent: the management entry for which sw marker needs to be added
2546 * @sw_marker: sw marker to tag the Rx descriptor with
2547 * @l_id: large action resource ID
2549 * Create a large action to hold software marker and update the switch rule
2550 * entry pointed by m_ent with newly created large action
2552 static enum ice_status
2553 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2554 u16 sw_marker, u16 l_id)
2556 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
2557 /* For software marker we need 3 large actions
2558 * 1. FWD action: FWD TO VSI or VSI LIST
2559 * 2. GENERIC VALUE action to hold the profile ID
2560 * 3. GENERIC VALUE action to hold the software marker ID
2562 const u16 num_lg_acts = 3;
2563 enum ice_status status;
2569 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2570 return ICE_ERR_PARAM;
2572 /* Create two back-to-back switch rules and submit them to the HW using
2573 * one memory buffer:
2577 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2578 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2579 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2581 return ICE_ERR_NO_MEMORY;
2583 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2585 /* Fill in the first switch rule i.e. large action */
2586 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2587 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2588 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2590 /* First action VSI forwarding or VSI list forwarding depending on how
2593 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2594 m_ent->fltr_info.fwd_id.hw_vsi_id;
2596 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2597 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2598 ICE_LG_ACT_VSI_LIST_ID_M;
2599 if (m_ent->vsi_count > 1)
2600 act |= ICE_LG_ACT_VSI_LIST;
2601 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2603 /* Second action descriptor type */
2604 act = ICE_LG_ACT_GENERIC;
2606 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2607 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2609 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2610 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2612 /* Third action Marker value */
2613 act |= ICE_LG_ACT_GENERIC;
2614 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2615 ICE_LG_ACT_GENERIC_VALUE_M;
2617 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
2619 /* call the fill switch rule to fill the lookup Tx Rx structure */
2620 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2621 ice_aqc_opc_update_sw_rules);
2623 /* Update the action to point to the large action ID */
2624 rx_tx->pdata.lkup_tx_rx.act =
2625 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
2626 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2627 ICE_SINGLE_ACT_PTR_VAL_M));
2629 /* Use the filter rule ID of the previously created rule with single
2630 * act. Once the update happens, hardware will treat this as large
2633 rx_tx->pdata.lkup_tx_rx.index =
2634 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
2636 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2637 ice_aqc_opc_update_sw_rules, NULL);
2639 m_ent->lg_act_idx = l_id;
2640 m_ent->sw_marker_id = sw_marker;
2643 ice_free(hw, lg_act);
2648 * ice_add_counter_act - add/update filter rule with counter action
2649 * @hw: pointer to the hardware structure
2650 * @m_ent: the management entry for which counter needs to be added
2651 * @counter_id: VLAN counter ID returned as part of allocate resource
2652 * @l_id: large action resource ID
2654 static enum ice_status
2655 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2656 u16 counter_id, u16 l_id)
2658 struct ice_aqc_sw_rules_elem *lg_act;
2659 struct ice_aqc_sw_rules_elem *rx_tx;
2660 enum ice_status status;
2661 /* 2 actions will be added while adding a large action counter */
2662 const int num_acts = 2;
2669 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2670 return ICE_ERR_PARAM;
2672 /* Create two back-to-back switch rules and submit them to the HW using
2673 * one memory buffer:
2677 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2678 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2679 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
2682 return ICE_ERR_NO_MEMORY;
2684 rx_tx = (struct ice_aqc_sw_rules_elem *)
2685 ((u8 *)lg_act + lg_act_size);
2687 /* Fill in the first switch rule i.e. large action */
2688 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2689 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2690 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2692 /* First action VSI forwarding or VSI list forwarding depending on how
2695 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2696 m_ent->fltr_info.fwd_id.hw_vsi_id;
2698 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2699 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2700 ICE_LG_ACT_VSI_LIST_ID_M;
2701 if (m_ent->vsi_count > 1)
2702 act |= ICE_LG_ACT_VSI_LIST;
2703 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2705 /* Second action counter ID */
2706 act = ICE_LG_ACT_STAT_COUNT;
2707 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2708 ICE_LG_ACT_STAT_COUNT_M;
2709 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2711 /* call the fill switch rule to fill the lookup Tx Rx structure */
2712 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2713 ice_aqc_opc_update_sw_rules);
2715 act = ICE_SINGLE_ACT_PTR;
2716 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2717 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2719 /* Use the filter rule ID of the previously created rule with single
2720 * act. Once the update happens, hardware will treat this as large
2723 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2724 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
2726 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2727 ice_aqc_opc_update_sw_rules, NULL);
2729 m_ent->lg_act_idx = l_id;
2730 m_ent->counter_index = counter_id;
2733 ice_free(hw, lg_act);
2738 * ice_create_vsi_list_map
2739 * @hw: pointer to the hardware structure
2740 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2741 * @num_vsi: number of VSI handles in the array
2742 * @vsi_list_id: VSI list ID generated as part of allocate resource
2744 * Helper function to create a new entry of VSI list ID to VSI mapping
2745 * using the given VSI list ID
2747 static struct ice_vsi_list_map_info *
2748 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2751 struct ice_switch_info *sw = hw->switch_info;
2752 struct ice_vsi_list_map_info *v_map;
2755 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2760 v_map->vsi_list_id = vsi_list_id;
2762 for (i = 0; i < num_vsi; i++)
2763 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
2765 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2770 * ice_update_vsi_list_rule
2771 * @hw: pointer to the hardware structure
2772 * @vsi_handle_arr: array of VSI handles to form a VSI list
2773 * @num_vsi: number of VSI handles in the array
2774 * @vsi_list_id: VSI list ID generated as part of allocate resource
2775 * @remove: Boolean value to indicate if this is a remove action
2776 * @opc: switch rules population command type - pass in the command opcode
2777 * @lkup_type: lookup type of the filter
2779 * Call AQ command to add a new switch rule or update existing switch rule
2780 * using the given VSI list ID
2782 static enum ice_status
2783 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2784 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2785 enum ice_sw_lkup_type lkup_type)
2787 struct ice_aqc_sw_rules_elem *s_rule;
2788 enum ice_status status;
2794 return ICE_ERR_PARAM;
2796 if (lkup_type == ICE_SW_LKUP_MAC ||
2797 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2798 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2799 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2800 lkup_type == ICE_SW_LKUP_PROMISC ||
2801 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2802 lkup_type == ICE_SW_LKUP_LAST)
2803 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2804 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2805 else if (lkup_type == ICE_SW_LKUP_VLAN)
2806 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2807 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2809 return ICE_ERR_PARAM;
2811 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2812 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2814 return ICE_ERR_NO_MEMORY;
2815 for (i = 0; i < num_vsi; i++) {
2816 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2817 status = ICE_ERR_PARAM;
2820 /* AQ call requires hw_vsi_id(s) */
2821 s_rule->pdata.vsi_list.vsi[i] =
2822 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2825 s_rule->type = CPU_TO_LE16(rule_type);
2826 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2827 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2829 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2832 ice_free(hw, s_rule);
2837 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2838 * @hw: pointer to the HW struct
2839 * @vsi_handle_arr: array of VSI handles to form a VSI list
2840 * @num_vsi: number of VSI handles in the array
2841 * @vsi_list_id: stores the ID of the VSI list to be created
2842 * @lkup_type: switch rule filter's lookup type
2844 static enum ice_status
2845 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2846 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2848 enum ice_status status;
2850 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2851 ice_aqc_opc_alloc_res);
2855 /* Update the newly created VSI list to include the specified VSIs */
2856 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2857 *vsi_list_id, false,
2858 ice_aqc_opc_add_sw_rules, lkup_type);
2862 * ice_create_pkt_fwd_rule
2863 * @hw: pointer to the hardware structure
2864 * @recp_list: corresponding filter management list
2865 * @f_entry: entry containing packet forwarding information
2867 * Create switch rule with given filter information and add an entry
2868 * to the corresponding filter management list to track this switch rule
2871 static enum ice_status
2872 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2873 struct ice_fltr_list_entry *f_entry)
2875 struct ice_fltr_mgmt_list_entry *fm_entry;
2876 struct ice_aqc_sw_rules_elem *s_rule;
2877 enum ice_status status;
2879 s_rule = (struct ice_aqc_sw_rules_elem *)
2880 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2882 return ICE_ERR_NO_MEMORY;
2883 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2884 ice_malloc(hw, sizeof(*fm_entry));
2886 status = ICE_ERR_NO_MEMORY;
2887 goto ice_create_pkt_fwd_rule_exit;
2890 fm_entry->fltr_info = f_entry->fltr_info;
2892 /* Initialize all the fields for the management entry */
2893 fm_entry->vsi_count = 1;
2894 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2895 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2896 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2898 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2899 ice_aqc_opc_add_sw_rules);
2901 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2902 ice_aqc_opc_add_sw_rules, NULL);
2904 ice_free(hw, fm_entry);
2905 goto ice_create_pkt_fwd_rule_exit;
2908 f_entry->fltr_info.fltr_rule_id =
2909 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2910 fm_entry->fltr_info.fltr_rule_id =
2911 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2913 /* The book keeping entries will get removed when base driver
2914 * calls remove filter AQ command
2916 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
2918 ice_create_pkt_fwd_rule_exit:
2919 ice_free(hw, s_rule);
2924 * ice_update_pkt_fwd_rule
2925 * @hw: pointer to the hardware structure
2926 * @f_info: filter information for switch rule
2928 * Call AQ command to update a previously created switch rule with a
2931 static enum ice_status
2932 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2934 struct ice_aqc_sw_rules_elem *s_rule;
2935 enum ice_status status;
2937 s_rule = (struct ice_aqc_sw_rules_elem *)
2938 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2940 return ICE_ERR_NO_MEMORY;
2942 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
2944 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2946 /* Update switch rule with new rule set to forward VSI list */
2947 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2948 ice_aqc_opc_update_sw_rules, NULL);
2950 ice_free(hw, s_rule);
2955 * ice_update_sw_rule_bridge_mode
2956 * @hw: pointer to the HW struct
2958 * Updates unicast switch filter rules based on VEB/VEPA mode
2960 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2962 struct ice_switch_info *sw = hw->switch_info;
2963 struct ice_fltr_mgmt_list_entry *fm_entry;
2964 enum ice_status status = ICE_SUCCESS;
2965 struct LIST_HEAD_TYPE *rule_head;
2966 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2968 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2969 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2971 ice_acquire_lock(rule_lock);
2972 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2974 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2975 u8 *addr = fi->l_data.mac.mac_addr;
2977 /* Update unicast Tx rules to reflect the selected
2980 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2981 (fi->fltr_act == ICE_FWD_TO_VSI ||
2982 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2983 fi->fltr_act == ICE_FWD_TO_Q ||
2984 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2985 status = ice_update_pkt_fwd_rule(hw, fi);
2991 ice_release_lock(rule_lock);
2997 * ice_add_update_vsi_list
2998 * @hw: pointer to the hardware structure
2999 * @m_entry: pointer to current filter management list entry
3000 * @cur_fltr: filter information from the book keeping entry
3001 * @new_fltr: filter information with the new VSI to be added
3003 * Call AQ command to add or update previously created VSI list with new VSI.
3005 * Helper function to do book keeping associated with adding filter information
3006 * The algorithm to do the book keeping is described below :
3007 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
3008 * if only one VSI has been added till now
3009 * Allocate a new VSI list and add two VSIs
3010 * to this list using switch rule command
3011 * Update the previously created switch rule with the
3012 * newly created VSI list ID
3013 * if a VSI list was previously created
3014 * Add the new VSI to the previously created VSI list set
3015 * using the update switch rule command
3017 static enum ice_status
3018 ice_add_update_vsi_list(struct ice_hw *hw,
3019 struct ice_fltr_mgmt_list_entry *m_entry,
3020 struct ice_fltr_info *cur_fltr,
3021 struct ice_fltr_info *new_fltr)
3023 enum ice_status status = ICE_SUCCESS;
3024 u16 vsi_list_id = 0;
3026 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
3027 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
3028 return ICE_ERR_NOT_IMPL;
3030 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
3031 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
3032 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
3033 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
3034 return ICE_ERR_NOT_IMPL;
3036 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
3037 /* Only one entry existed in the mapping and it was not already
3038 * a part of a VSI list. So, create a VSI list with the old and
3041 struct ice_fltr_info tmp_fltr;
3042 u16 vsi_handle_arr[2];
3044 /* A rule already exists with the new VSI being added */
3045 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
3046 return ICE_ERR_ALREADY_EXISTS;
3048 vsi_handle_arr[0] = cur_fltr->vsi_handle;
3049 vsi_handle_arr[1] = new_fltr->vsi_handle;
3050 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3052 new_fltr->lkup_type);
3056 tmp_fltr = *new_fltr;
3057 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
3058 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3059 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3060 /* Update the previous switch rule of "MAC forward to VSI" to
3061 * "MAC fwd to VSI list"
3063 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3067 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
3068 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3069 m_entry->vsi_list_info =
3070 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3073 /* If this entry was large action then the large action needs
3074 * to be updated to point to FWD to VSI list
3076 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
3078 ice_add_marker_act(hw, m_entry,
3079 m_entry->sw_marker_id,
3080 m_entry->lg_act_idx);
3082 u16 vsi_handle = new_fltr->vsi_handle;
3083 enum ice_adminq_opc opcode;
3085 if (!m_entry->vsi_list_info)
3088 /* A rule already exists with the new VSI being added */
3089 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
3092 /* Update the previously created VSI list set with
3093 * the new VSI ID passed in
3095 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
3096 opcode = ice_aqc_opc_update_sw_rules;
3098 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
3099 vsi_list_id, false, opcode,
3100 new_fltr->lkup_type);
3101 /* update VSI list mapping info with new VSI ID */
3103 ice_set_bit(vsi_handle,
3104 m_entry->vsi_list_info->vsi_map);
3107 m_entry->vsi_count++;
3112 * ice_find_rule_entry - Search a rule entry
3113 * @list_head: head of rule list
3114 * @f_info: rule information
3116 * Helper function to search for a given rule entry
3117 * Returns pointer to entry storing the rule if found
3119 static struct ice_fltr_mgmt_list_entry *
3120 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
3121 struct ice_fltr_info *f_info)
3123 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
3125 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3127 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3128 sizeof(f_info->l_data)) &&
3129 f_info->flag == list_itr->fltr_info.flag) {
3138 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
3139 * @recp_list: VSI lists needs to be searched
3140 * @vsi_handle: VSI handle to be found in VSI list
3141 * @vsi_list_id: VSI list ID found containing vsi_handle
3143 * Helper function to search a VSI list with single entry containing given VSI
3144 * handle element. This can be extended further to search VSI list with more
3145 * than 1 vsi_count. Returns pointer to VSI list entry if found.
3147 static struct ice_vsi_list_map_info *
3148 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
3151 struct ice_vsi_list_map_info *map_info = NULL;
3152 struct LIST_HEAD_TYPE *list_head;
3154 list_head = &recp_list->filt_rules;
3155 if (recp_list->adv_rule) {
3156 struct ice_adv_fltr_mgmt_list_entry *list_itr;
3158 LIST_FOR_EACH_ENTRY(list_itr, list_head,
3159 ice_adv_fltr_mgmt_list_entry,
3161 if (list_itr->vsi_list_info) {
3162 map_info = list_itr->vsi_list_info;
3163 if (ice_is_bit_set(map_info->vsi_map,
3165 *vsi_list_id = map_info->vsi_list_id;
3171 struct ice_fltr_mgmt_list_entry *list_itr;
3173 LIST_FOR_EACH_ENTRY(list_itr, list_head,
3174 ice_fltr_mgmt_list_entry,
3176 if (list_itr->vsi_count == 1 &&
3177 list_itr->vsi_list_info) {
3178 map_info = list_itr->vsi_list_info;
3179 if (ice_is_bit_set(map_info->vsi_map,
3181 *vsi_list_id = map_info->vsi_list_id;
3191 * ice_add_rule_internal - add rule for a given lookup type
3192 * @hw: pointer to the hardware structure
3193 * @recp_list: recipe list for which rule has to be added
3194 * @lport: logic port number on which function add rule
3195 * @f_entry: structure containing MAC forwarding information
3197 * Adds or updates the rule lists for a given recipe
3199 static enum ice_status
3200 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3201 u8 lport, struct ice_fltr_list_entry *f_entry)
3203 struct ice_fltr_info *new_fltr, *cur_fltr;
3204 struct ice_fltr_mgmt_list_entry *m_entry;
3205 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3206 enum ice_status status = ICE_SUCCESS;
3208 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3209 return ICE_ERR_PARAM;
3211 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
3212 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
3213 f_entry->fltr_info.fwd_id.hw_vsi_id =
3214 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3216 rule_lock = &recp_list->filt_rule_lock;
3218 ice_acquire_lock(rule_lock);
3219 new_fltr = &f_entry->fltr_info;
3220 if (new_fltr->flag & ICE_FLTR_RX)
3221 new_fltr->src = lport;
3222 else if (new_fltr->flag & ICE_FLTR_TX)
3224 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3226 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
3228 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3229 goto exit_add_rule_internal;
3232 cur_fltr = &m_entry->fltr_info;
3233 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
3235 exit_add_rule_internal:
3236 ice_release_lock(rule_lock);
3241 * ice_remove_vsi_list_rule
3242 * @hw: pointer to the hardware structure
3243 * @vsi_list_id: VSI list ID generated as part of allocate resource
3244 * @lkup_type: switch rule filter lookup type
3246 * The VSI list should be emptied before this function is called to remove the
3249 static enum ice_status
3250 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
3251 enum ice_sw_lkup_type lkup_type)
3253 struct ice_aqc_sw_rules_elem *s_rule;
3254 enum ice_status status;
3257 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
3258 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3260 return ICE_ERR_NO_MEMORY;
3262 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
3263 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
3265 /* Free the vsi_list resource that we allocated. It is assumed that the
3266 * list is empty at this point.
3268 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
3269 ice_aqc_opc_free_res);
3271 ice_free(hw, s_rule);
3276 * ice_rem_update_vsi_list
3277 * @hw: pointer to the hardware structure
3278 * @vsi_handle: VSI handle of the VSI to remove
3279 * @fm_list: filter management entry for which the VSI list management needs to
3282 static enum ice_status
3283 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
3284 struct ice_fltr_mgmt_list_entry *fm_list)
3286 enum ice_sw_lkup_type lkup_type;
3287 enum ice_status status = ICE_SUCCESS;
3290 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
3291 fm_list->vsi_count == 0)
3292 return ICE_ERR_PARAM;
3294 /* A rule with the VSI being removed does not exist */
3295 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
3296 return ICE_ERR_DOES_NOT_EXIST;
3298 lkup_type = fm_list->fltr_info.lkup_type;
3299 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
3300 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
3301 ice_aqc_opc_update_sw_rules,
3306 fm_list->vsi_count--;
3307 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
3309 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
3310 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
3311 struct ice_vsi_list_map_info *vsi_list_info =
3312 fm_list->vsi_list_info;
3315 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
3317 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
3318 return ICE_ERR_OUT_OF_RANGE;
3320 /* Make sure VSI list is empty before removing it below */
3321 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
3323 ice_aqc_opc_update_sw_rules,
3328 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
3329 tmp_fltr_info.fwd_id.hw_vsi_id =
3330 ice_get_hw_vsi_num(hw, rem_vsi_handle);
3331 tmp_fltr_info.vsi_handle = rem_vsi_handle;
3332 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
3334 ice_debug(hw, ICE_DBG_SW,
3335 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
3336 tmp_fltr_info.fwd_id.hw_vsi_id, status);
3340 fm_list->fltr_info = tmp_fltr_info;
3343 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
3344 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
3345 struct ice_vsi_list_map_info *vsi_list_info =
3346 fm_list->vsi_list_info;
3348 /* Remove the VSI list since it is no longer used */
3349 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
3351 ice_debug(hw, ICE_DBG_SW,
3352 "Failed to remove VSI list %d, error %d\n",
3353 vsi_list_id, status);
3357 LIST_DEL(&vsi_list_info->list_entry);
3358 ice_free(hw, vsi_list_info);
3359 fm_list->vsi_list_info = NULL;
3366 * ice_remove_rule_internal - Remove a filter rule of a given type
3368 * @hw: pointer to the hardware structure
3369 * @recp_list: recipe list for which the rule needs to removed
3370 * @f_entry: rule entry containing filter information
3372 static enum ice_status
3373 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3374 struct ice_fltr_list_entry *f_entry)
3376 struct ice_fltr_mgmt_list_entry *list_elem;
3377 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3378 enum ice_status status = ICE_SUCCESS;
3379 bool remove_rule = false;
3382 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3383 return ICE_ERR_PARAM;
3384 f_entry->fltr_info.fwd_id.hw_vsi_id =
3385 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3387 rule_lock = &recp_list->filt_rule_lock;
3388 ice_acquire_lock(rule_lock);
3389 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
3390 &f_entry->fltr_info);
3392 status = ICE_ERR_DOES_NOT_EXIST;
3396 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
3398 } else if (!list_elem->vsi_list_info) {
3399 status = ICE_ERR_DOES_NOT_EXIST;
3401 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
3402 /* a ref_cnt > 1 indicates that the vsi_list is being
3403 * shared by multiple rules. Decrement the ref_cnt and
3404 * remove this rule, but do not modify the list, as it
3405 * is in-use by other rules.
3407 list_elem->vsi_list_info->ref_cnt--;
3410 /* a ref_cnt of 1 indicates the vsi_list is only used
3411 * by one rule. However, the original removal request is only
3412 * for a single VSI. Update the vsi_list first, and only
3413 * remove the rule if there are no further VSIs in this list.
3415 vsi_handle = f_entry->fltr_info.vsi_handle;
3416 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
3419 /* if VSI count goes to zero after updating the VSI list */
3420 if (list_elem->vsi_count == 0)
3425 /* Remove the lookup rule */
3426 struct ice_aqc_sw_rules_elem *s_rule;
3428 s_rule = (struct ice_aqc_sw_rules_elem *)
3429 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
3431 status = ICE_ERR_NO_MEMORY;
3435 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3436 ice_aqc_opc_remove_sw_rules);
3438 status = ice_aq_sw_rules(hw, s_rule,
3439 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
3440 ice_aqc_opc_remove_sw_rules, NULL);
3442 /* Remove a book keeping from the list */
3443 ice_free(hw, s_rule);
3448 LIST_DEL(&list_elem->list_entry);
3449 ice_free(hw, list_elem);
3452 ice_release_lock(rule_lock);
3457 * ice_aq_get_res_alloc - get allocated resources
3458 * @hw: pointer to the HW struct
3459 * @num_entries: pointer to u16 to store the number of resource entries returned
3460 * @buf: pointer to user-supplied buffer
3461 * @buf_size: size of buff
3462 * @cd: pointer to command details structure or NULL
3464 * The user-supplied buffer must be large enough to store the resource
3465 * information for all resource types. Each resource type is an
3466 * ice_aqc_get_res_resp_data_elem structure.
3469 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
3470 u16 buf_size, struct ice_sq_cd *cd)
3472 struct ice_aqc_get_res_alloc *resp;
3473 enum ice_status status;
3474 struct ice_aq_desc desc;
3477 return ICE_ERR_BAD_PTR;
3479 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
3480 return ICE_ERR_INVAL_SIZE;
3482 resp = &desc.params.get_res;
3484 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
3485 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3487 if (!status && num_entries)
3488 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
3494 * ice_aq_get_res_descs - get allocated resource descriptors
3495 * @hw: pointer to the hardware structure
3496 * @num_entries: number of resource entries in buffer
3497 * @buf: Indirect buffer to hold data parameters and response
3498 * @buf_size: size of buffer for indirect commands
3499 * @res_type: resource type
3500 * @res_shared: is resource shared
3501 * @desc_id: input - first desc ID to start; output - next desc ID
3502 * @cd: pointer to command details structure or NULL
3505 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
3506 struct ice_aqc_get_allocd_res_desc_resp *buf,
3507 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
3508 struct ice_sq_cd *cd)
3510 struct ice_aqc_get_allocd_res_desc *cmd;
3511 struct ice_aq_desc desc;
3512 enum ice_status status;
3514 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3516 cmd = &desc.params.get_res_desc;
3519 return ICE_ERR_PARAM;
3521 if (buf_size != (num_entries * sizeof(*buf)))
3522 return ICE_ERR_PARAM;
3524 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
3526 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
3527 ICE_AQC_RES_TYPE_M) | (res_shared ?
3528 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
3529 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
3531 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3533 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
3539 * ice_add_mac_rule - Add a MAC address based filter rule
3540 * @hw: pointer to the hardware structure
3541 * @m_list: list of MAC addresses and forwarding information
3542 * @sw: pointer to switch info struct for which function add rule
3543 * @lport: logic port number on which function add rule
3545 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
3546 * multiple unicast addresses, the function assumes that all the
3547 * addresses are unique in a given add_mac call. It doesn't
3548 * check for duplicates in this case, removing duplicates from a given
3549 * list should be taken care of in the caller of this function.
3551 static enum ice_status
3552 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
3553 struct ice_switch_info *sw, u8 lport)
3555 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
3556 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3557 struct ice_fltr_list_entry *m_list_itr;
3558 struct LIST_HEAD_TYPE *rule_head;
3559 u16 total_elem_left, s_rule_size;
3560 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3561 enum ice_status status = ICE_SUCCESS;
3562 u16 num_unicast = 0;
3566 rule_lock = &recp_list->filt_rule_lock;
3567 rule_head = &recp_list->filt_rules;
3569 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3571 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3575 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3576 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3577 if (!ice_is_vsi_valid(hw, vsi_handle))
3578 return ICE_ERR_PARAM;
3579 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3580 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3581 /* update the src in case it is VSI num */
3582 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3583 return ICE_ERR_PARAM;
3584 m_list_itr->fltr_info.src = hw_vsi_id;
3585 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3586 IS_ZERO_ETHER_ADDR(add))
3587 return ICE_ERR_PARAM;
3588 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3589 /* Don't overwrite the unicast address */
3590 ice_acquire_lock(rule_lock);
3591 if (ice_find_rule_entry(rule_head,
3592 &m_list_itr->fltr_info)) {
3593 ice_release_lock(rule_lock);
3594 return ICE_ERR_ALREADY_EXISTS;
3596 ice_release_lock(rule_lock);
3598 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3599 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3600 m_list_itr->status =
3601 ice_add_rule_internal(hw, recp_list, lport,
3603 if (m_list_itr->status)
3604 return m_list_itr->status;
3608 ice_acquire_lock(rule_lock);
3609 /* Exit if no suitable entries were found for adding bulk switch rule */
3611 status = ICE_SUCCESS;
3612 goto ice_add_mac_exit;
3615 /* Allocate switch rule buffer for the bulk update for unicast */
3616 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3617 s_rule = (struct ice_aqc_sw_rules_elem *)
3618 ice_calloc(hw, num_unicast, s_rule_size);
3620 status = ICE_ERR_NO_MEMORY;
3621 goto ice_add_mac_exit;
3625 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3627 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3628 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3630 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3631 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3632 ice_aqc_opc_add_sw_rules);
3633 r_iter = (struct ice_aqc_sw_rules_elem *)
3634 ((u8 *)r_iter + s_rule_size);
3638 /* Call AQ bulk switch rule update for all unicast addresses */
3640 /* Call AQ switch rule in AQ_MAX chunk */
3641 for (total_elem_left = num_unicast; total_elem_left > 0;
3642 total_elem_left -= elem_sent) {
3643 struct ice_aqc_sw_rules_elem *entry = r_iter;
3645 elem_sent = MIN_T(u8, total_elem_left,
3646 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
3647 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3648 elem_sent, ice_aqc_opc_add_sw_rules,
3651 goto ice_add_mac_exit;
3652 r_iter = (struct ice_aqc_sw_rules_elem *)
3653 ((u8 *)r_iter + (elem_sent * s_rule_size));
3656 /* Fill up rule ID based on the value returned from FW */
3658 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3660 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3661 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3662 struct ice_fltr_mgmt_list_entry *fm_entry;
3664 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3665 f_info->fltr_rule_id =
3666 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3667 f_info->fltr_act = ICE_FWD_TO_VSI;
3668 /* Create an entry to track this MAC address */
3669 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3670 ice_malloc(hw, sizeof(*fm_entry));
3672 status = ICE_ERR_NO_MEMORY;
3673 goto ice_add_mac_exit;
3675 fm_entry->fltr_info = *f_info;
3676 fm_entry->vsi_count = 1;
3677 /* The book keeping entries will get removed when
3678 * base driver calls remove filter AQ command
3681 LIST_ADD(&fm_entry->list_entry, rule_head);
3682 r_iter = (struct ice_aqc_sw_rules_elem *)
3683 ((u8 *)r_iter + s_rule_size);
3688 ice_release_lock(rule_lock);
3690 ice_free(hw, s_rule);
3695 * ice_add_mac - Add a MAC address based filter rule
3696 * @hw: pointer to the hardware structure
3697 * @m_list: list of MAC addresses and forwarding information
3699 * Function add MAC rule for logical port from HW struct
3701 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3704 return ICE_ERR_PARAM;
3706 return ice_add_mac_rule(hw, m_list, hw->switch_info,
3707 hw->port_info->lport);
3711 * ice_add_vlan_internal - Add one VLAN based filter rule
3712 * @hw: pointer to the hardware structure
3713 * @recp_list: recipe list for which rule has to be added
3714 * @f_entry: filter entry containing one VLAN information
3716 static enum ice_status
3717 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3718 struct ice_fltr_list_entry *f_entry)
3720 struct ice_fltr_mgmt_list_entry *v_list_itr;
3721 struct ice_fltr_info *new_fltr, *cur_fltr;
3722 enum ice_sw_lkup_type lkup_type;
3723 u16 vsi_list_id = 0, vsi_handle;
3724 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3725 enum ice_status status = ICE_SUCCESS;
3727 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3728 return ICE_ERR_PARAM;
3730 f_entry->fltr_info.fwd_id.hw_vsi_id =
3731 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3732 new_fltr = &f_entry->fltr_info;
3734 /* VLAN ID should only be 12 bits */
3735 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3736 return ICE_ERR_PARAM;
3738 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3739 return ICE_ERR_PARAM;
3741 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3742 lkup_type = new_fltr->lkup_type;
3743 vsi_handle = new_fltr->vsi_handle;
3744 rule_lock = &recp_list->filt_rule_lock;
3745 ice_acquire_lock(rule_lock);
3746 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
3748 struct ice_vsi_list_map_info *map_info = NULL;
3750 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3751 /* All VLAN pruning rules use a VSI list. Check if
3752 * there is already a VSI list containing VSI that we
3753 * want to add. If found, use the same vsi_list_id for
3754 * this new VLAN rule or else create a new list.
3756 map_info = ice_find_vsi_list_entry(recp_list,
3760 status = ice_create_vsi_list_rule(hw,
3768 /* Convert the action to forwarding to a VSI list. */
3769 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3770 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3773 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3775 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
3778 status = ICE_ERR_DOES_NOT_EXIST;
3781 /* reuse VSI list for new rule and increment ref_cnt */
3783 v_list_itr->vsi_list_info = map_info;
3784 map_info->ref_cnt++;
3786 v_list_itr->vsi_list_info =
3787 ice_create_vsi_list_map(hw, &vsi_handle,
3791 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3792 /* Update existing VSI list to add new VSI ID only if it used
3795 cur_fltr = &v_list_itr->fltr_info;
3796 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3799 /* If VLAN rule exists and VSI list being used by this rule is
3800 * referenced by more than 1 VLAN rule. Then create a new VSI
3801 * list appending previous VSI with new VSI and update existing
3802 * VLAN rule to point to new VSI list ID
3804 struct ice_fltr_info tmp_fltr;
3805 u16 vsi_handle_arr[2];
3808 /* Current implementation only supports reusing VSI list with
3809 * one VSI count. We should never hit below condition
3811 if (v_list_itr->vsi_count > 1 &&
3812 v_list_itr->vsi_list_info->ref_cnt > 1) {
3813 ice_debug(hw, ICE_DBG_SW,
3814 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3815 status = ICE_ERR_CFG;
3820 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3823 /* A rule already exists with the new VSI being added */
3824 if (cur_handle == vsi_handle) {
3825 status = ICE_ERR_ALREADY_EXISTS;
3829 vsi_handle_arr[0] = cur_handle;
3830 vsi_handle_arr[1] = vsi_handle;
3831 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3832 &vsi_list_id, lkup_type);
3836 tmp_fltr = v_list_itr->fltr_info;
3837 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3838 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3839 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3840 /* Update the previous switch rule to a new VSI list which
3841 * includes current VSI that is requested
3843 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3847 /* before overriding VSI list map info. decrement ref_cnt of
3850 v_list_itr->vsi_list_info->ref_cnt--;
3852 /* now update to newly created list */
3853 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3854 v_list_itr->vsi_list_info =
3855 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3857 v_list_itr->vsi_count++;
3861 ice_release_lock(rule_lock);
3866 * ice_add_vlan_rule - Add VLAN based filter rule
3867 * @hw: pointer to the hardware structure
3868 * @v_list: list of VLAN entries and forwarding information
3869 * @sw: pointer to switch info struct for which function add rule
3871 static enum ice_status
3872 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
3873 struct ice_switch_info *sw)
3875 struct ice_fltr_list_entry *v_list_itr;
3876 struct ice_sw_recipe *recp_list;
3878 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
3879 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
3881 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3882 return ICE_ERR_PARAM;
3883 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3884 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
3886 if (v_list_itr->status)
3887 return v_list_itr->status;
3893 * ice_add_vlan - Add a VLAN based filter rule
3894 * @hw: pointer to the hardware structure
3895 * @v_list: list of VLAN and forwarding information
3897 * Function add VLAN rule for logical port from HW struct
3899 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3902 return ICE_ERR_PARAM;
3904 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
3908 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3909 * @hw: pointer to the hardware structure
3910 * @mv_list: list of MAC and VLAN filters
3911 * @sw: pointer to switch info struct for which function add rule
3912 * @lport: logic port number on which function add rule
3914 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3915 * pruning bits enabled, then it is the responsibility of the caller to make
3916 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3917 * VLAN won't be received on that VSI otherwise.
3919 static enum ice_status
3920 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
3921 struct ice_switch_info *sw, u8 lport)
3923 struct ice_fltr_list_entry *mv_list_itr;
3924 struct ice_sw_recipe *recp_list;
3926 if (!mv_list || !hw)
3927 return ICE_ERR_PARAM;
3929 recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
3930 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3932 enum ice_sw_lkup_type l_type =
3933 mv_list_itr->fltr_info.lkup_type;
3935 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3936 return ICE_ERR_PARAM;
3937 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3938 mv_list_itr->status =
3939 ice_add_rule_internal(hw, recp_list, lport,
3941 if (mv_list_itr->status)
3942 return mv_list_itr->status;
3948 * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
3949 * @hw: pointer to the hardware structure
3950 * @mv_list: list of MAC VLAN addresses and forwarding information
3952 * Function add MAC VLAN rule for logical port from HW struct
3955 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3957 if (!mv_list || !hw)
3958 return ICE_ERR_PARAM;
3960 return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
3961 hw->port_info->lport);
3965 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
3966 * @hw: pointer to the hardware structure
3967 * @em_list: list of ether type MAC filter, MAC is optional
3968 * @sw: pointer to switch info struct for which function add rule
3969 * @lport: logic port number on which function add rule
3971 * This function requires the caller to populate the entries in
3972 * the filter list with the necessary fields (including flags to
3973 * indicate Tx or Rx rules).
3975 static enum ice_status
3976 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
3977 struct ice_switch_info *sw, u8 lport)
3979 struct ice_fltr_list_entry *em_list_itr;
3981 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3983 struct ice_sw_recipe *recp_list;
3984 enum ice_sw_lkup_type l_type;
3986 l_type = em_list_itr->fltr_info.lkup_type;
3987 recp_list = &sw->recp_list[l_type];
3989 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3990 l_type != ICE_SW_LKUP_ETHERTYPE)
3991 return ICE_ERR_PARAM;
3993 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
3996 if (em_list_itr->status)
3997 return em_list_itr->status;
4003 * ice_add_eth_mac - Add a ethertype based filter rule
4004 * @hw: pointer to the hardware structure
4005 * @em_list: list of ethertype and forwarding information
4007 * Function add ethertype rule for logical port from HW struct
4010 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
4012 if (!em_list || !hw)
4013 return ICE_ERR_PARAM;
4015 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
4016 hw->port_info->lport);
4020 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
4021 * @hw: pointer to the hardware structure
4022 * @em_list: list of ethertype or ethertype MAC entries
4023 * @sw: pointer to switch info struct for which function add rule
4025 static enum ice_status
4026 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
4027 struct ice_switch_info *sw)
4029 struct ice_fltr_list_entry *em_list_itr, *tmp;
4031 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
4033 struct ice_sw_recipe *recp_list;
4034 enum ice_sw_lkup_type l_type;
4036 l_type = em_list_itr->fltr_info.lkup_type;
4038 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
4039 l_type != ICE_SW_LKUP_ETHERTYPE)
4040 return ICE_ERR_PARAM;
4042 recp_list = &sw->recp_list[l_type];
4043 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4045 if (em_list_itr->status)
4046 return em_list_itr->status;
4052 * ice_remove_eth_mac - remove a ethertype based filter rule
4053 * @hw: pointer to the hardware structure
4054 * @em_list: list of ethertype and forwarding information
4058 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
4060 if (!em_list || !hw)
4061 return ICE_ERR_PARAM;
4063 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
4067 * ice_rem_sw_rule_info
4068 * @hw: pointer to the hardware structure
4069 * @rule_head: pointer to the switch list structure that we want to delete
4072 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
4074 if (!LIST_EMPTY(rule_head)) {
4075 struct ice_fltr_mgmt_list_entry *entry;
4076 struct ice_fltr_mgmt_list_entry *tmp;
4078 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
4079 ice_fltr_mgmt_list_entry, list_entry) {
4080 LIST_DEL(&entry->list_entry);
4081 ice_free(hw, entry);
4087 * ice_rem_adv_rule_info
4088 * @hw: pointer to the hardware structure
4089 * @rule_head: pointer to the switch list structure that we want to delete
4092 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
4094 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
4095 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
4097 if (LIST_EMPTY(rule_head))
4100 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
4101 ice_adv_fltr_mgmt_list_entry, list_entry) {
4102 LIST_DEL(&lst_itr->list_entry);
4103 ice_free(hw, lst_itr->lkups);
4104 ice_free(hw, lst_itr);
4109 * ice_rem_all_sw_rules_info
4110 * @hw: pointer to the hardware structure
4112 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
4114 struct ice_switch_info *sw = hw->switch_info;
4117 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4118 struct LIST_HEAD_TYPE *rule_head;
4120 rule_head = &sw->recp_list[i].filt_rules;
4121 if (!sw->recp_list[i].adv_rule)
4122 ice_rem_sw_rule_info(hw, rule_head);
4124 ice_rem_adv_rule_info(hw, rule_head);
4125 if (sw->recp_list[i].adv_rule &&
4126 LIST_EMPTY(&sw->recp_list[i].filt_rules))
4127 sw->recp_list[i].adv_rule = false;
4132 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
4133 * @pi: pointer to the port_info structure
4134 * @vsi_handle: VSI handle to set as default
4135 * @set: true to add the above mentioned switch rule, false to remove it
4136 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
4138 * add filter rule to set/unset given VSI as default VSI for the switch
4139 * (represented by swid)
4142 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
4145 struct ice_aqc_sw_rules_elem *s_rule;
4146 struct ice_fltr_info f_info;
4147 struct ice_hw *hw = pi->hw;
4148 enum ice_adminq_opc opcode;
4149 enum ice_status status;
4153 if (!ice_is_vsi_valid(hw, vsi_handle))
4154 return ICE_ERR_PARAM;
4155 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4157 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
4158 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
4159 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
4161 return ICE_ERR_NO_MEMORY;
4163 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
4165 f_info.lkup_type = ICE_SW_LKUP_DFLT;
4166 f_info.flag = direction;
4167 f_info.fltr_act = ICE_FWD_TO_VSI;
4168 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
4170 if (f_info.flag & ICE_FLTR_RX) {
4171 f_info.src = pi->lport;
4172 f_info.src_id = ICE_SRC_ID_LPORT;
4174 f_info.fltr_rule_id =
4175 pi->dflt_rx_vsi_rule_id;
4176 } else if (f_info.flag & ICE_FLTR_TX) {
4177 f_info.src_id = ICE_SRC_ID_VSI;
4178 f_info.src = hw_vsi_id;
4180 f_info.fltr_rule_id =
4181 pi->dflt_tx_vsi_rule_id;
4185 opcode = ice_aqc_opc_add_sw_rules;
4187 opcode = ice_aqc_opc_remove_sw_rules;
4189 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
4191 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
4192 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
4195 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4197 if (f_info.flag & ICE_FLTR_TX) {
4198 pi->dflt_tx_vsi_num = hw_vsi_id;
4199 pi->dflt_tx_vsi_rule_id = index;
4200 } else if (f_info.flag & ICE_FLTR_RX) {
4201 pi->dflt_rx_vsi_num = hw_vsi_id;
4202 pi->dflt_rx_vsi_rule_id = index;
4205 if (f_info.flag & ICE_FLTR_TX) {
4206 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
4207 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
4208 } else if (f_info.flag & ICE_FLTR_RX) {
4209 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
4210 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
4215 ice_free(hw, s_rule);
4220 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
4221 * @list_head: head of rule list
4222 * @f_info: rule information
4224 * Helper function to search for a unicast rule entry - this is to be used
4225 * to remove unicast MAC filter that is not shared with other VSIs on the
4228 * Returns pointer to entry storing the rule if found
4230 static struct ice_fltr_mgmt_list_entry *
4231 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
4232 struct ice_fltr_info *f_info)
4234 struct ice_fltr_mgmt_list_entry *list_itr;
4236 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4238 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4239 sizeof(f_info->l_data)) &&
4240 f_info->fwd_id.hw_vsi_id ==
4241 list_itr->fltr_info.fwd_id.hw_vsi_id &&
4242 f_info->flag == list_itr->fltr_info.flag)
4249 * ice_remove_mac_rule - remove a MAC based filter rule
4250 * @hw: pointer to the hardware structure
4251 * @m_list: list of MAC addresses and forwarding information
4252 * @recp_list: list from which function remove MAC address
4254 * This function removes either a MAC filter rule or a specific VSI from a
4255 * VSI list for a multicast MAC address.
4257 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
4258 * ice_add_mac. Caller should be aware that this call will only work if all
4259 * the entries passed into m_list were added previously. It will not attempt to
4260 * do a partial remove of entries that were found.
4262 static enum ice_status
4263 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4264 struct ice_sw_recipe *recp_list)
4266 struct ice_fltr_list_entry *list_itr, *tmp;
4267 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4270 return ICE_ERR_PARAM;
4272 rule_lock = &recp_list->filt_rule_lock;
4273 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
4275 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
4276 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
4279 if (l_type != ICE_SW_LKUP_MAC)
4280 return ICE_ERR_PARAM;
4282 vsi_handle = list_itr->fltr_info.vsi_handle;
4283 if (!ice_is_vsi_valid(hw, vsi_handle))
4284 return ICE_ERR_PARAM;
4286 list_itr->fltr_info.fwd_id.hw_vsi_id =
4287 ice_get_hw_vsi_num(hw, vsi_handle);
4288 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4289 /* Don't remove the unicast address that belongs to
4290 * another VSI on the switch, since it is not being
4293 ice_acquire_lock(rule_lock);
4294 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
4295 &list_itr->fltr_info)) {
4296 ice_release_lock(rule_lock);
4297 return ICE_ERR_DOES_NOT_EXIST;
4299 ice_release_lock(rule_lock);
4301 list_itr->status = ice_remove_rule_internal(hw, recp_list,
4303 if (list_itr->status)
4304 return list_itr->status;
4310 * ice_remove_mac - remove a MAC address based filter rule
4311 * @hw: pointer to the hardware structure
4312 * @m_list: list of MAC addresses and forwarding information
4315 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4317 struct ice_sw_recipe *recp_list;
4319 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4320 return ice_remove_mac_rule(hw, m_list, recp_list);
4324 * ice_remove_vlan_rule - Remove VLAN based filter rule
4325 * @hw: pointer to the hardware structure
4326 * @v_list: list of VLAN entries and forwarding information
4327 * @recp_list: list from which function remove VLAN
4329 static enum ice_status
4330 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4331 struct ice_sw_recipe *recp_list)
4333 struct ice_fltr_list_entry *v_list_itr, *tmp;
4335 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4337 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4339 if (l_type != ICE_SW_LKUP_VLAN)
4340 return ICE_ERR_PARAM;
4341 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4343 if (v_list_itr->status)
4344 return v_list_itr->status;
4350 * ice_remove_vlan - remove a VLAN address based filter rule
4351 * @hw: pointer to the hardware structure
4352 * @v_list: list of VLAN and forwarding information
4356 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4358 struct ice_sw_recipe *recp_list;
4361 return ICE_ERR_PARAM;
4363 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
4364 return ice_remove_vlan_rule(hw, v_list, recp_list);
4368 * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
4369 * @hw: pointer to the hardware structure
4370 * @v_list: list of MAC VLAN entries and forwarding information
4371 * @recp_list: list from which function remove MAC VLAN
4373 static enum ice_status
4374 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4375 struct ice_sw_recipe *recp_list)
4377 struct ice_fltr_list_entry *v_list_itr, *tmp;
4379 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4380 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4382 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4384 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4385 return ICE_ERR_PARAM;
4386 v_list_itr->status =
4387 ice_remove_rule_internal(hw, recp_list,
4389 if (v_list_itr->status)
4390 return v_list_itr->status;
4396 * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
4397 * @hw: pointer to the hardware structure
4398 * @mv_list: list of MAC VLAN and forwarding information
4401 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4403 struct ice_sw_recipe *recp_list;
4405 if (!mv_list || !hw)
4406 return ICE_ERR_PARAM;
4408 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4409 return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
4413 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
4414 * @fm_entry: filter entry to inspect
4415 * @vsi_handle: VSI handle to compare with filter info
4418 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
4420 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
4421 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
4422 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
4423 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
4428 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
4429 * @hw: pointer to the hardware structure
4430 * @vsi_handle: VSI handle to remove filters from
4431 * @vsi_list_head: pointer to the list to add entry to
4432 * @fi: pointer to fltr_info of filter entry to copy & add
4434 * Helper function, used when creating a list of filters to remove from
4435 * a specific VSI. The entry added to vsi_list_head is a COPY of the
4436 * original filter entry, with the exception of fltr_info.fltr_act and
4437 * fltr_info.fwd_id fields. These are set such that later logic can
4438 * extract which VSI to remove the fltr from, and pass on that information.
4440 static enum ice_status
4441 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4442 struct LIST_HEAD_TYPE *vsi_list_head,
4443 struct ice_fltr_info *fi)
4445 struct ice_fltr_list_entry *tmp;
4447 /* this memory is freed up in the caller function
4448 * once filters for this VSI are removed
4450 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
4452 return ICE_ERR_NO_MEMORY;
4454 tmp->fltr_info = *fi;
4456 /* Overwrite these fields to indicate which VSI to remove filter from,
4457 * so find and remove logic can extract the information from the
4458 * list entries. Note that original entries will still have proper
4461 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
4462 tmp->fltr_info.vsi_handle = vsi_handle;
4463 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4465 LIST_ADD(&tmp->list_entry, vsi_list_head);
4471 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4472 * @hw: pointer to the hardware structure
4473 * @vsi_handle: VSI handle to remove filters from
4474 * @lkup_list_head: pointer to the list that has certain lookup type filters
4475 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4477 * Locates all filters in lkup_list_head that are used by the given VSI,
4478 * and adds COPIES of those entries to vsi_list_head (intended to be used
4479 * to remove the listed filters).
4480 * Note that this means all entries in vsi_list_head must be explicitly
4481 * deallocated by the caller when done with list.
4483 static enum ice_status
4484 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4485 struct LIST_HEAD_TYPE *lkup_list_head,
4486 struct LIST_HEAD_TYPE *vsi_list_head)
4488 struct ice_fltr_mgmt_list_entry *fm_entry;
4489 enum ice_status status = ICE_SUCCESS;
4491 /* check to make sure VSI ID is valid and within boundary */
4492 if (!ice_is_vsi_valid(hw, vsi_handle))
4493 return ICE_ERR_PARAM;
4495 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
4496 ice_fltr_mgmt_list_entry, list_entry) {
4497 struct ice_fltr_info *fi;
4499 fi = &fm_entry->fltr_info;
4500 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
4503 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4512 * ice_determine_promisc_mask
4513 * @fi: filter info to parse
4515 * Helper function to determine which ICE_PROMISC_ mask corresponds
4516 * to given filter into.
4518 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4520 u16 vid = fi->l_data.mac_vlan.vlan_id;
4521 u8 *macaddr = fi->l_data.mac.mac_addr;
4522 bool is_tx_fltr = false;
4523 u8 promisc_mask = 0;
4525 if (fi->flag == ICE_FLTR_TX)
4528 if (IS_BROADCAST_ETHER_ADDR(macaddr))
4529 promisc_mask |= is_tx_fltr ?
4530 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4531 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
4532 promisc_mask |= is_tx_fltr ?
4533 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4534 else if (IS_UNICAST_ETHER_ADDR(macaddr))
4535 promisc_mask |= is_tx_fltr ?
4536 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
4538 promisc_mask |= is_tx_fltr ?
4539 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4541 return promisc_mask;
4545 * _ice_get_vsi_promisc - get promiscuous mode of given VSI
4546 * @hw: pointer to the hardware structure
4547 * @vsi_handle: VSI handle to retrieve info from
4548 * @promisc_mask: pointer to mask to be filled in
4549 * @vid: VLAN ID of promisc VLAN VSI
4550 * @sw: pointer to switch info struct for which function add rule
4552 static enum ice_status
4553 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4554 u16 *vid, struct ice_switch_info *sw)
4556 struct ice_fltr_mgmt_list_entry *itr;
4557 struct LIST_HEAD_TYPE *rule_head;
4558 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4560 if (!ice_is_vsi_valid(hw, vsi_handle))
4561 return ICE_ERR_PARAM;
4565 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
4566 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
4568 ice_acquire_lock(rule_lock);
4569 LIST_FOR_EACH_ENTRY(itr, rule_head,
4570 ice_fltr_mgmt_list_entry, list_entry) {
4571 /* Continue if this filter doesn't apply to this VSI or the
4572 * VSI ID is not in the VSI map for this filter
4574 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4577 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4579 ice_release_lock(rule_lock);
4585 * ice_get_vsi_promisc - get promiscuous mode of given VSI
4586 * @hw: pointer to the hardware structure
4587 * @vsi_handle: VSI handle to retrieve info from
4588 * @promisc_mask: pointer to mask to be filled in
4589 * @vid: VLAN ID of promisc VLAN VSI
4592 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4595 return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
4596 vid, hw->switch_info);
4600 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4601 * @hw: pointer to the hardware structure
4602 * @vsi_handle: VSI handle to retrieve info from
4603 * @promisc_mask: pointer to mask to be filled in
4604 * @vid: VLAN ID of promisc VLAN VSI
4605 * @sw: pointer to switch info struct for which function add rule
4607 static enum ice_status
4608 _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4609 u16 *vid, struct ice_switch_info *sw)
4611 struct ice_fltr_mgmt_list_entry *itr;
4612 struct LIST_HEAD_TYPE *rule_head;
4613 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4615 if (!ice_is_vsi_valid(hw, vsi_handle))
4616 return ICE_ERR_PARAM;
4620 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
4621 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
4623 ice_acquire_lock(rule_lock);
4624 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
4626 /* Continue if this filter doesn't apply to this VSI or the
4627 * VSI ID is not in the VSI map for this filter
4629 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4632 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4634 ice_release_lock(rule_lock);
4640 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4641 * @hw: pointer to the hardware structure
4642 * @vsi_handle: VSI handle to retrieve info from
4643 * @promisc_mask: pointer to mask to be filled in
4644 * @vid: VLAN ID of promisc VLAN VSI
4647 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4650 return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
4651 vid, hw->switch_info);
4655 * ice_remove_promisc - Remove promisc based filter rules
4656 * @hw: pointer to the hardware structure
4657 * @recp_id: recipe ID for which the rule needs to removed
4658 * @v_list: list of promisc entries
4660 static enum ice_status
4661 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
4662 struct LIST_HEAD_TYPE *v_list)
4664 struct ice_fltr_list_entry *v_list_itr, *tmp;
4665 struct ice_sw_recipe *recp_list;
4667 recp_list = &hw->switch_info->recp_list[recp_id];
4668 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4670 v_list_itr->status =
4671 ice_remove_rule_internal(hw, recp_list, v_list_itr);
4672 if (v_list_itr->status)
4673 return v_list_itr->status;
4679 * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
4680 * @hw: pointer to the hardware structure
4681 * @vsi_handle: VSI handle to clear mode
4682 * @promisc_mask: mask of promiscuous config bits to clear
4683 * @vid: VLAN ID to clear VLAN promiscuous
4684 * @sw: pointer to switch info struct for which function add rule
4686 static enum ice_status
4687 _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4688 u16 vid, struct ice_switch_info *sw)
4690 struct ice_fltr_list_entry *fm_entry, *tmp;
4691 struct LIST_HEAD_TYPE remove_list_head;
4692 struct ice_fltr_mgmt_list_entry *itr;
4693 struct LIST_HEAD_TYPE *rule_head;
4694 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4695 enum ice_status status = ICE_SUCCESS;
4698 if (!ice_is_vsi_valid(hw, vsi_handle))
4699 return ICE_ERR_PARAM;
4701 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
4702 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4704 recipe_id = ICE_SW_LKUP_PROMISC;
4706 rule_head = &sw->recp_list[recipe_id].filt_rules;
4707 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4709 INIT_LIST_HEAD(&remove_list_head);
4711 ice_acquire_lock(rule_lock);
4712 LIST_FOR_EACH_ENTRY(itr, rule_head,
4713 ice_fltr_mgmt_list_entry, list_entry) {
4714 struct ice_fltr_info *fltr_info;
4715 u8 fltr_promisc_mask = 0;
4717 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4719 fltr_info = &itr->fltr_info;
4721 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4722 vid != fltr_info->l_data.mac_vlan.vlan_id)
4725 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4727 /* Skip if filter is not completely specified by given mask */
4728 if (fltr_promisc_mask & ~promisc_mask)
4731 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4735 ice_release_lock(rule_lock);
4736 goto free_fltr_list;
4739 ice_release_lock(rule_lock);
4741 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
4744 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4745 ice_fltr_list_entry, list_entry) {
4746 LIST_DEL(&fm_entry->list_entry);
4747 ice_free(hw, fm_entry);
4754 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
4755 * @hw: pointer to the hardware structure
4756 * @vsi_handle: VSI handle to clear mode
4757 * @promisc_mask: mask of promiscuous config bits to clear
4758 * @vid: VLAN ID to clear VLAN promiscuous
4761 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
4762 u8 promisc_mask, u16 vid)
4764 return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
4765 vid, hw->switch_info);
4769 * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4770 * @hw: pointer to the hardware structure
4771 * @vsi_handle: VSI handle to configure
4772 * @promisc_mask: mask of promiscuous config bits
4773 * @vid: VLAN ID to set VLAN promiscuous
4774 * @lport: logical port number to configure promisc mode
4775 * @sw: pointer to switch info struct for which function add rule
4777 static enum ice_status
4778 _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4779 u16 vid, u8 lport, struct ice_switch_info *sw)
4781 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4782 struct ice_fltr_list_entry f_list_entry;
4783 struct ice_fltr_info new_fltr;
4784 enum ice_status status = ICE_SUCCESS;
4790 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4792 if (!ice_is_vsi_valid(hw, vsi_handle))
4793 return ICE_ERR_PARAM;
4794 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4796 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
4798 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4799 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4800 new_fltr.l_data.mac_vlan.vlan_id = vid;
4801 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4803 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4804 recipe_id = ICE_SW_LKUP_PROMISC;
4807 /* Separate filters must be set for each direction/packet type
4808 * combination, so we will loop over the mask value, store the
4809 * individual type, and clear it out in the input mask as it
4812 while (promisc_mask) {
4813 struct ice_sw_recipe *recp_list;
4819 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4820 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4821 pkt_type = UCAST_FLTR;
4822 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4823 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4824 pkt_type = UCAST_FLTR;
4826 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4827 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4828 pkt_type = MCAST_FLTR;
4829 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4830 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4831 pkt_type = MCAST_FLTR;
4833 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4834 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4835 pkt_type = BCAST_FLTR;
4836 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4837 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4838 pkt_type = BCAST_FLTR;
4842 /* Check for VLAN promiscuous flag */
4843 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4844 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4845 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4846 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4850 /* Set filter DA based on packet type */
4851 mac_addr = new_fltr.l_data.mac.mac_addr;
4852 if (pkt_type == BCAST_FLTR) {
4853 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
4854 } else if (pkt_type == MCAST_FLTR ||
4855 pkt_type == UCAST_FLTR) {
4856 /* Use the dummy ether header DA */
4857 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
4858 ICE_NONDMA_TO_NONDMA);
4859 if (pkt_type == MCAST_FLTR)
4860 mac_addr[0] |= 0x1; /* Set multicast bit */
4863 /* Need to reset this to zero for all iterations */
4866 new_fltr.flag |= ICE_FLTR_TX;
4867 new_fltr.src = hw_vsi_id;
4869 new_fltr.flag |= ICE_FLTR_RX;
4870 new_fltr.src = lport;
4873 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4874 new_fltr.vsi_handle = vsi_handle;
4875 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4876 f_list_entry.fltr_info = new_fltr;
4877 recp_list = &sw->recp_list[recipe_id];
4879 status = ice_add_rule_internal(hw, recp_list, lport,
4881 if (status != ICE_SUCCESS)
4882 goto set_promisc_exit;
4890 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4891 * @hw: pointer to the hardware structure
4892 * @vsi_handle: VSI handle to configure
4893 * @promisc_mask: mask of promiscuous config bits
4894 * @vid: VLAN ID to set VLAN promiscuous
4897 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4900 return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
4901 hw->port_info->lport,
4906 * _ice_set_vlan_vsi_promisc
4907 * @hw: pointer to the hardware structure
4908 * @vsi_handle: VSI handle to configure
4909 * @promisc_mask: mask of promiscuous config bits
4910 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4911 * @lport: logical port number to configure promisc mode
4912 * @sw: pointer to switch info struct for which function add rule
4914 * Configure VSI with all associated VLANs to given promiscuous mode(s)
4916 static enum ice_status
4917 _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4918 bool rm_vlan_promisc, u8 lport,
4919 struct ice_switch_info *sw)
4921 struct ice_fltr_list_entry *list_itr, *tmp;
4922 struct LIST_HEAD_TYPE vsi_list_head;
4923 struct LIST_HEAD_TYPE *vlan_head;
4924 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
4925 enum ice_status status;
4928 INIT_LIST_HEAD(&vsi_list_head);
4929 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4930 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4931 ice_acquire_lock(vlan_lock);
4932 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4934 ice_release_lock(vlan_lock);
4936 goto free_fltr_list;
4938 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
4940 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4941 if (rm_vlan_promisc)
4942 status = _ice_clear_vsi_promisc(hw, vsi_handle,
4946 status = _ice_set_vsi_promisc(hw, vsi_handle,
4947 promisc_mask, vlan_id,
4954 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
4955 ice_fltr_list_entry, list_entry) {
4956 LIST_DEL(&list_itr->list_entry);
4957 ice_free(hw, list_itr);
4963 * ice_set_vlan_vsi_promisc
4964 * @hw: pointer to the hardware structure
4965 * @vsi_handle: VSI handle to configure
4966 * @promisc_mask: mask of promiscuous config bits
4967 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4969 * Configure VSI with all associated VLANs to given promiscuous mode(s)
4972 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4973 bool rm_vlan_promisc)
4975 return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
4976 rm_vlan_promisc, hw->port_info->lport,
4981 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4982 * @hw: pointer to the hardware structure
4983 * @vsi_handle: VSI handle to remove filters from
4984 * @recp_list: recipe list from which function remove fltr
4985 * @lkup: switch rule filter lookup type
4988 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4989 struct ice_sw_recipe *recp_list,
4990 enum ice_sw_lkup_type lkup)
4992 struct ice_fltr_list_entry *fm_entry;
4993 struct LIST_HEAD_TYPE remove_list_head;
4994 struct LIST_HEAD_TYPE *rule_head;
4995 struct ice_fltr_list_entry *tmp;
4996 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4997 enum ice_status status;
4999 INIT_LIST_HEAD(&remove_list_head);
5000 rule_lock = &recp_list[lkup].filt_rule_lock;
5001 rule_head = &recp_list[lkup].filt_rules;
5002 ice_acquire_lock(rule_lock);
5003 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
5005 ice_release_lock(rule_lock);
5010 case ICE_SW_LKUP_MAC:
5011 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
5013 case ICE_SW_LKUP_VLAN:
5014 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
5016 case ICE_SW_LKUP_PROMISC:
5017 case ICE_SW_LKUP_PROMISC_VLAN:
5018 ice_remove_promisc(hw, lkup, &remove_list_head);
5020 case ICE_SW_LKUP_MAC_VLAN:
5021 ice_remove_mac_vlan(hw, &remove_list_head);
5023 case ICE_SW_LKUP_ETHERTYPE:
5024 case ICE_SW_LKUP_ETHERTYPE_MAC:
5025 ice_remove_eth_mac(hw, &remove_list_head);
5027 case ICE_SW_LKUP_DFLT:
5028 ice_debug(hw, ICE_DBG_SW,
5029 "Remove filters for this lookup type hasn't been implemented yet\n");
5031 case ICE_SW_LKUP_LAST:
5032 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
5036 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
5037 ice_fltr_list_entry, list_entry) {
5038 LIST_DEL(&fm_entry->list_entry);
5039 ice_free(hw, fm_entry);
5044 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
5045 * @hw: pointer to the hardware structure
5046 * @vsi_handle: VSI handle to remove filters from
5047 * @sw: pointer to switch info struct
5050 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
5051 struct ice_switch_info *sw)
5053 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
5055 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5056 sw->recp_list, ICE_SW_LKUP_MAC);
5057 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5058 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
5059 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5060 sw->recp_list, ICE_SW_LKUP_PROMISC);
5061 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5062 sw->recp_list, ICE_SW_LKUP_VLAN);
5063 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5064 sw->recp_list, ICE_SW_LKUP_DFLT);
5065 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5066 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
5067 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5068 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
5069 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5070 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
5074 * ice_remove_vsi_fltr - Remove all filters for a VSI
5075 * @hw: pointer to the hardware structure
5076 * @vsi_handle: VSI handle to remove filters from
5078 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
5080 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
5084 * ice_alloc_res_cntr - allocating resource counter
5085 * @hw: pointer to the hardware structure
5086 * @type: type of resource
5087 * @alloc_shared: if set it is shared else dedicated
5088 * @num_items: number of entries requested for FD resource type
5089 * @counter_id: counter index returned by AQ call
5092 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
5095 struct ice_aqc_alloc_free_res_elem *buf;
5096 enum ice_status status;
5099 /* Allocate resource */
5100 buf_len = sizeof(*buf);
5101 buf = (struct ice_aqc_alloc_free_res_elem *)
5102 ice_malloc(hw, buf_len);
5104 return ICE_ERR_NO_MEMORY;
5106 buf->num_elems = CPU_TO_LE16(num_items);
5107 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
5108 ICE_AQC_RES_TYPE_M) | alloc_shared);
5110 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
5111 ice_aqc_opc_alloc_res, NULL);
5115 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
5123 * ice_free_res_cntr - free resource counter
5124 * @hw: pointer to the hardware structure
5125 * @type: type of resource
5126 * @alloc_shared: if set it is shared else dedicated
5127 * @num_items: number of entries to be freed for FD resource type
5128 * @counter_id: counter ID resource which needs to be freed
5131 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
5134 struct ice_aqc_alloc_free_res_elem *buf;
5135 enum ice_status status;
5139 buf_len = sizeof(*buf);
5140 buf = (struct ice_aqc_alloc_free_res_elem *)
5141 ice_malloc(hw, buf_len);
5143 return ICE_ERR_NO_MEMORY;
5145 buf->num_elems = CPU_TO_LE16(num_items);
5146 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
5147 ICE_AQC_RES_TYPE_M) | alloc_shared);
5148 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
5150 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
5151 ice_aqc_opc_free_res, NULL);
5153 ice_debug(hw, ICE_DBG_SW,
5154 "counter resource could not be freed\n");
5161 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
5162 * @hw: pointer to the hardware structure
5163 * @counter_id: returns counter index
5165 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
5167 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
5168 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
5173 * ice_free_vlan_res_counter - Free counter resource for VLAN type
5174 * @hw: pointer to the hardware structure
5175 * @counter_id: counter index to be freed
5177 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
5179 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
5180 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
5185 * ice_alloc_res_lg_act - add large action resource
5186 * @hw: pointer to the hardware structure
5187 * @l_id: large action ID to fill it in
5188 * @num_acts: number of actions to hold with a large action entry
5190 static enum ice_status
5191 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
5193 struct ice_aqc_alloc_free_res_elem *sw_buf;
5194 enum ice_status status;
5197 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
5198 return ICE_ERR_PARAM;
5200 /* Allocate resource for large action */
5201 buf_len = sizeof(*sw_buf);
5202 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
5203 ice_malloc(hw, buf_len);
5205 return ICE_ERR_NO_MEMORY;
5207 sw_buf->num_elems = CPU_TO_LE16(1);
5209 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
5210 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
5211 * If num_acts is greater than 2, then use
5212 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
5213 * The num_acts cannot exceed 4. This was ensured at the
5214 * beginning of the function.
5217 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
5218 else if (num_acts == 2)
5219 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
5221 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
5223 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
5224 ice_aqc_opc_alloc_res, NULL);
5226 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
5228 ice_free(hw, sw_buf);
5233 * ice_add_mac_with_sw_marker - add filter with sw marker
5234 * @hw: pointer to the hardware structure
5235 * @f_info: filter info structure containing the MAC filter information
5236 * @sw_marker: sw marker to tag the Rx descriptor with
5239 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
5242 struct ice_fltr_mgmt_list_entry *m_entry;
5243 struct ice_fltr_list_entry fl_info;
5244 struct ice_sw_recipe *recp_list;
5245 struct LIST_HEAD_TYPE l_head;
5246 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5247 enum ice_status ret;
5251 if (f_info->fltr_act != ICE_FWD_TO_VSI)
5252 return ICE_ERR_PARAM;
5254 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5255 return ICE_ERR_PARAM;
5257 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
5258 return ICE_ERR_PARAM;
5260 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5261 return ICE_ERR_PARAM;
5262 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5264 /* Add filter if it doesn't exist so then the adding of large
5265 * action always results in update
5268 INIT_LIST_HEAD(&l_head);
5269 fl_info.fltr_info = *f_info;
5270 LIST_ADD(&fl_info.list_entry, &l_head);
5272 entry_exists = false;
5273 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5274 hw->port_info->lport);
5275 if (ret == ICE_ERR_ALREADY_EXISTS)
5276 entry_exists = true;
5280 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5281 rule_lock = &recp_list->filt_rule_lock;
5282 ice_acquire_lock(rule_lock);
5283 /* Get the book keeping entry for the filter */
5284 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5288 /* If counter action was enabled for this rule then don't enable
5289 * sw marker large action
5291 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5292 ret = ICE_ERR_PARAM;
5296 /* if same marker was added before */
5297 if (m_entry->sw_marker_id == sw_marker) {
5298 ret = ICE_ERR_ALREADY_EXISTS;
5302 /* Allocate a hardware table entry to hold large act. Three actions
5303 * for marker based large action
5305 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
5309 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5312 /* Update the switch rule to add the marker action */
5313 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
5315 ice_release_lock(rule_lock);
5320 ice_release_lock(rule_lock);
5321 /* only remove entry if it did not exist previously */
5323 ret = ice_remove_mac(hw, &l_head);
5329 * ice_add_mac_with_counter - add filter with counter enabled
5330 * @hw: pointer to the hardware structure
5331 * @f_info: pointer to filter info structure containing the MAC filter
5335 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
5337 struct ice_fltr_mgmt_list_entry *m_entry;
5338 struct ice_fltr_list_entry fl_info;
5339 struct ice_sw_recipe *recp_list;
5340 struct LIST_HEAD_TYPE l_head;
5341 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5342 enum ice_status ret;
5347 if (f_info->fltr_act != ICE_FWD_TO_VSI)
5348 return ICE_ERR_PARAM;
5350 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5351 return ICE_ERR_PARAM;
5353 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5354 return ICE_ERR_PARAM;
5355 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5356 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5358 entry_exist = false;
5360 rule_lock = &recp_list->filt_rule_lock;
5362 /* Add filter if it doesn't exist so then the adding of large
5363 * action always results in update
5365 INIT_LIST_HEAD(&l_head);
5367 fl_info.fltr_info = *f_info;
5368 LIST_ADD(&fl_info.list_entry, &l_head);
5370 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5371 hw->port_info->lport);
5372 if (ret == ICE_ERR_ALREADY_EXISTS)
5377 ice_acquire_lock(rule_lock);
5378 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5380 ret = ICE_ERR_BAD_PTR;
5384 /* Don't enable counter for a filter for which sw marker was enabled */
5385 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
5386 ret = ICE_ERR_PARAM;
5390 /* If a counter was already enabled then don't need to add again */
5391 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5392 ret = ICE_ERR_ALREADY_EXISTS;
5396 /* Allocate a hardware table entry to VLAN counter */
5397 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
5401 /* Allocate a hardware table entry to hold large act. Two actions for
5402 * counter based large action
5404 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
5408 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5411 /* Update the switch rule to add the counter action */
5412 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
5414 ice_release_lock(rule_lock);
5419 ice_release_lock(rule_lock);
5420 /* only remove entry if it did not exist previously */
5422 ret = ice_remove_mac(hw, &l_head);
5427 /* This is mapping table entry that maps every word within a given protocol
5428 * structure to the real byte offset as per the specification of that
5430 * for example dst address is 3 words in ethertype header and corresponding
5431 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
5432 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
5433 * matching entry describing its field. This needs to be updated if new
5434 * structure is added to that union.
5436 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
5437 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
5438 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
5439 { ICE_ETYPE_OL, { 0 } },
5440 { ICE_VLAN_OFOS, { 0, 2 } },
5441 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
5442 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
5443 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
5444 26, 28, 30, 32, 34, 36, 38 } },
5445 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
5446 26, 28, 30, 32, 34, 36, 38 } },
5447 { ICE_TCP_IL, { 0, 2 } },
5448 { ICE_UDP_OF, { 0, 2 } },
5449 { ICE_UDP_ILOS, { 0, 2 } },
5450 { ICE_SCTP_IL, { 0, 2 } },
5451 { ICE_VXLAN, { 8, 10, 12, 14 } },
5452 { ICE_GENEVE, { 8, 10, 12, 14 } },
5453 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
5454 { ICE_NVGRE, { 0, 2, 4, 6 } },
5455 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
5456 { ICE_PPPOE, { 0, 2, 4, 6 } },
5457 { ICE_PFCP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
5458 { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
5459 { ICE_ESP, { 0, 2, 4, 6 } },
5460 { ICE_AH, { 0, 2, 4, 6, 8, 10 } },
5461 { ICE_NAT_T, { 8, 10, 12, 14 } },
5464 /* The following table describes preferred grouping of recipes.
5465 * If a recipe that needs to be programmed is a superset or matches one of the
5466 * following combinations, then the recipe needs to be chained as per the
5470 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
5471 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
5472 { ICE_MAC_IL, ICE_MAC_IL_HW },
5473 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
5474 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
5475 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
5476 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
5477 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
5478 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
5479 { ICE_TCP_IL, ICE_TCP_IL_HW },
5480 { ICE_UDP_OF, ICE_UDP_OF_HW },
5481 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
5482 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
5483 { ICE_VXLAN, ICE_UDP_OF_HW },
5484 { ICE_GENEVE, ICE_UDP_OF_HW },
5485 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
5486 { ICE_NVGRE, ICE_GRE_OF_HW },
5487 { ICE_GTP, ICE_UDP_OF_HW },
5488 { ICE_PPPOE, ICE_PPPOE_HW },
5489 { ICE_PFCP, ICE_UDP_ILOS_HW },
5490 { ICE_L2TPV3, ICE_L2TPV3_HW },
5491 { ICE_ESP, ICE_ESP_HW },
5492 { ICE_AH, ICE_AH_HW },
5493 { ICE_NAT_T, ICE_UDP_ILOS_HW },
5497 * ice_find_recp - find a recipe
5498 * @hw: pointer to the hardware structure
5499 * @lkup_exts: extension sequence to match
5501 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
5503 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
5504 enum ice_sw_tunnel_type tun_type)
5506 bool refresh_required = true;
5507 struct ice_sw_recipe *recp;
5510 /* Walk through existing recipes to find a match */
5511 recp = hw->switch_info->recp_list;
5512 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5513 /* If recipe was not created for this ID, in SW bookkeeping,
5514 * check if FW has an entry for this recipe. If the FW has an
5515 * entry update it in our SW bookkeeping and continue with the
5518 if (!recp[i].recp_created)
5519 if (ice_get_recp_frm_fw(hw,
5520 hw->switch_info->recp_list, i,
5524 /* Skip inverse action recipes */
5525 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
5526 ICE_AQ_RECIPE_ACT_INV_ACT)
5529 /* if number of words we are looking for match */
5530 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
5531 struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
5532 struct ice_fv_word *be = lkup_exts->fv_words;
5533 u16 *cr = recp[i].lkup_exts.field_mask;
5534 u16 *de = lkup_exts->field_mask;
5538 /* ar, cr, and qr are related to the recipe words, while
5539 * be, de, and pe are related to the lookup words
5541 for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
5542 for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
5544 if (ar[qr].off == be[pe].off &&
5545 ar[qr].prot_id == be[pe].prot_id &&
5547 /* Found the "pe"th word in the
5552 /* After walking through all the words in the
5553 * "i"th recipe if "p"th word was not found then
5554 * this recipe is not what we are looking for.
5555 * So break out from this loop and try the next
5558 if (qr >= recp[i].lkup_exts.n_val_words) {
5563 /* If for "i"th recipe the found was never set to false
5564 * then it means we found our match
5566 if ((tun_type == recp[i].tun_type ||
5567 tun_type == ICE_SW_TUN_AND_NON_TUN) && found)
5568 return i; /* Return the recipe ID */
5571 return ICE_MAX_NUM_RECIPES;
5575 * ice_prot_type_to_id - get protocol ID from protocol type
5576 * @type: protocol type
5577 * @id: pointer to variable that will receive the ID
5579 * Returns true if found, false otherwise
5581 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
5585 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
5586 if (ice_prot_id_tbl[i].type == type) {
5587 *id = ice_prot_id_tbl[i].protocol_id;
5594 * ice_find_valid_words - count valid words
5595 * @rule: advanced rule with lookup information
5596 * @lkup_exts: byte offset extractions of the words that are valid
5598 * calculate valid words in a lookup rule using mask value
5601 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
5602 struct ice_prot_lkup_ext *lkup_exts)
5604 u8 j, word, prot_id, ret_val;
5606 if (!ice_prot_type_to_id(rule->type, &prot_id))
5609 word = lkup_exts->n_val_words;
5611 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
5612 if (((u16 *)&rule->m_u)[j] &&
5613 rule->type < ARRAY_SIZE(ice_prot_ext)) {
5614 /* No more space to accommodate */
5615 if (word >= ICE_MAX_CHAIN_WORDS)
5617 lkup_exts->fv_words[word].off =
5618 ice_prot_ext[rule->type].offs[j];
5619 lkup_exts->fv_words[word].prot_id =
5620 ice_prot_id_tbl[rule->type].protocol_id;
5621 lkup_exts->field_mask[word] =
5622 BE16_TO_CPU(((__be16 *)&rule->m_u)[j]);
5626 ret_val = word - lkup_exts->n_val_words;
5627 lkup_exts->n_val_words = word;
5633 * ice_create_first_fit_recp_def - Create a recipe grouping
5634 * @hw: pointer to the hardware structure
5635 * @lkup_exts: an array of protocol header extractions
5636 * @rg_list: pointer to a list that stores new recipe groups
5637 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
5639 * Using first fit algorithm, take all the words that are still not done
5640 * and start grouping them in 4-word groups. Each group makes up one
5643 static enum ice_status
5644 ice_create_first_fit_recp_def(struct ice_hw *hw,
5645 struct ice_prot_lkup_ext *lkup_exts,
5646 struct LIST_HEAD_TYPE *rg_list,
5649 struct ice_pref_recipe_group *grp = NULL;
5654 if (!lkup_exts->n_val_words) {
5655 struct ice_recp_grp_entry *entry;
5657 entry = (struct ice_recp_grp_entry *)
5658 ice_malloc(hw, sizeof(*entry));
5660 return ICE_ERR_NO_MEMORY;
5661 LIST_ADD(&entry->l_entry, rg_list);
5662 grp = &entry->r_group;
5664 grp->n_val_pairs = 0;
5667 /* Walk through every word in the rule to check if it is not done. If so
5668 * then this word needs to be part of a new recipe.
5670 for (j = 0; j < lkup_exts->n_val_words; j++)
5671 if (!ice_is_bit_set(lkup_exts->done, j)) {
5673 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
5674 struct ice_recp_grp_entry *entry;
5676 entry = (struct ice_recp_grp_entry *)
5677 ice_malloc(hw, sizeof(*entry));
5679 return ICE_ERR_NO_MEMORY;
5680 LIST_ADD(&entry->l_entry, rg_list);
5681 grp = &entry->r_group;
5685 grp->pairs[grp->n_val_pairs].prot_id =
5686 lkup_exts->fv_words[j].prot_id;
5687 grp->pairs[grp->n_val_pairs].off =
5688 lkup_exts->fv_words[j].off;
5689 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
5697 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
5698 * @hw: pointer to the hardware structure
5699 * @fv_list: field vector with the extraction sequence information
5700 * @rg_list: recipe groupings with protocol-offset pairs
5702 * Helper function to fill in the field vector indices for protocol-offset
5703 * pairs. These indexes are then ultimately programmed into a recipe.
5705 static enum ice_status
5706 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
5707 struct LIST_HEAD_TYPE *rg_list)
5709 struct ice_sw_fv_list_entry *fv;
5710 struct ice_recp_grp_entry *rg;
5711 struct ice_fv_word *fv_ext;
5713 if (LIST_EMPTY(fv_list))
5716 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
5717 fv_ext = fv->fv_ptr->ew;
5719 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
5722 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
5723 struct ice_fv_word *pr;
5728 pr = &rg->r_group.pairs[i];
5729 mask = rg->r_group.mask[i];
5731 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
5732 if (fv_ext[j].prot_id == pr->prot_id &&
5733 fv_ext[j].off == pr->off) {
5736 /* Store index of field vector */
5738 rg->fv_mask[i] = mask;
5742 /* Protocol/offset could not be found, caller gave an
5746 return ICE_ERR_PARAM;
5754 * ice_find_free_recp_res_idx - find free result indexes for recipe
5755 * @hw: pointer to hardware structure
5756 * @profiles: bitmap of profiles that will be associated with the new recipe
5757 * @free_idx: pointer to variable to receive the free index bitmap
5759 * The algorithm used here is:
5760 * 1. When creating a new recipe, create a set P which contains all
5761 * Profiles that will be associated with our new recipe
5763 * 2. For each Profile p in set P:
5764 * a. Add all recipes associated with Profile p into set R
5765 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
5766 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
5767 * i. Or just assume they all have the same possible indexes:
5769 * i.e., PossibleIndexes = 0x0000F00000000000
5771 * 3. For each Recipe r in set R:
5772 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
5773 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
5775 * FreeIndexes will contain the bits indicating the indexes free for use,
5776 * then the code needs to update the recipe[r].used_result_idx_bits to
5777 * indicate which indexes were selected for use by this recipe.
5780 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
5781 ice_bitmap_t *free_idx)
5783 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5784 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5785 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
5789 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5790 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5791 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
5792 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
5794 for (count = 0; count < ICE_MAX_FV_WORDS; count++)
5795 ice_set_bit(count, possible_idx);
5797 /* For each profile we are going to associate the recipe with, add the
5798 * recipes that are associated with that profile. This will give us
5799 * the set of recipes that our recipe may collide with. Also, determine
5800 * what possible result indexes are usable given this set of profiles.
5803 while (ICE_MAX_NUM_PROFILES >
5804 (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
5805 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
5806 ICE_MAX_NUM_RECIPES);
5807 ice_and_bitmap(possible_idx, possible_idx,
5808 hw->switch_info->prof_res_bm[bit],
5813 /* For each recipe that our new recipe may collide with, determine
5814 * which indexes have been used.
5816 for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
5817 if (ice_is_bit_set(recipes, bit)) {
5818 ice_or_bitmap(used_idx, used_idx,
5819 hw->switch_info->recp_list[bit].res_idxs,
5823 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
5825 /* return number of free indexes */
5828 while (ICE_MAX_FV_WORDS >
5829 (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
5838 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
5839 * @hw: pointer to hardware structure
5840 * @rm: recipe management list entry
5841 * @match_tun_mask: tunnel mask that needs to be programmed
5842 * @profiles: bitmap of profiles that will be associated.
5844 static enum ice_status
5845 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
5846 u16 match_tun_mask, ice_bitmap_t *profiles)
5848 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5849 struct ice_aqc_recipe_data_elem *tmp;
5850 struct ice_aqc_recipe_data_elem *buf;
5851 struct ice_recp_grp_entry *entry;
5852 enum ice_status status;
5858 /* When more than one recipe are required, another recipe is needed to
5859 * chain them together. Matching a tunnel metadata ID takes up one of
5860 * the match fields in the chaining recipe reducing the number of
5861 * chained recipes by one.
5863 /* check number of free result indices */
5864 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5865 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
5867 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
5868 free_res_idx, rm->n_grp_count);
5870 if (rm->n_grp_count > 1) {
5871 if (rm->n_grp_count > free_res_idx)
5872 return ICE_ERR_MAX_LIMIT;
5877 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
5878 return ICE_ERR_MAX_LIMIT;
5880 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
5881 ICE_MAX_NUM_RECIPES,
5884 return ICE_ERR_NO_MEMORY;
5886 buf = (struct ice_aqc_recipe_data_elem *)
5887 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
5889 status = ICE_ERR_NO_MEMORY;
5893 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
5894 recipe_count = ICE_MAX_NUM_RECIPES;
5895 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5897 if (status || recipe_count == 0)
5900 /* Allocate the recipe resources, and configure them according to the
5901 * match fields from protocol headers and extracted field vectors.
5903 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
5904 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5907 status = ice_alloc_recipe(hw, &entry->rid);
5911 /* Clear the result index of the located recipe, as this will be
5912 * updated, if needed, later in the recipe creation process.
5914 tmp[0].content.result_indx = 0;
5916 buf[recps] = tmp[0];
5917 buf[recps].recipe_indx = (u8)entry->rid;
5918 /* if the recipe is a non-root recipe RID should be programmed
5919 * as 0 for the rules to be applied correctly.
5921 buf[recps].content.rid = 0;
5922 ice_memset(&buf[recps].content.lkup_indx, 0,
5923 sizeof(buf[recps].content.lkup_indx),
5926 /* All recipes use look-up index 0 to match switch ID. */
5927 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5928 buf[recps].content.mask[0] =
5929 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5930 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5933 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5934 buf[recps].content.lkup_indx[i] = 0x80;
5935 buf[recps].content.mask[i] = 0;
5938 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5939 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5940 buf[recps].content.mask[i + 1] =
5941 CPU_TO_LE16(entry->fv_mask[i]);
5944 if (rm->n_grp_count > 1) {
5945 /* Checks to see if there really is a valid result index
5948 if (chain_idx >= ICE_MAX_FV_WORDS) {
5949 ice_debug(hw, ICE_DBG_SW,
5950 "No chain index available\n");
5951 status = ICE_ERR_MAX_LIMIT;
5955 entry->chain_idx = chain_idx;
5956 buf[recps].content.result_indx =
5957 ICE_AQ_RECIPE_RESULT_EN |
5958 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5959 ICE_AQ_RECIPE_RESULT_DATA_M);
5960 ice_clear_bit(chain_idx, result_idx_bm);
5961 chain_idx = ice_find_first_bit(result_idx_bm,
5965 /* fill recipe dependencies */
5966 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
5967 ICE_MAX_NUM_RECIPES);
5968 ice_set_bit(buf[recps].recipe_indx,
5969 (ice_bitmap_t *)buf[recps].recipe_bitmap);
5970 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5974 if (rm->n_grp_count == 1) {
5975 rm->root_rid = buf[0].recipe_indx;
5976 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
5977 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5978 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5979 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5980 sizeof(buf[0].recipe_bitmap),
5981 ICE_NONDMA_TO_NONDMA);
5983 status = ICE_ERR_BAD_PTR;
5986 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
5987 * the recipe which is getting created if specified
5988 * by user. Usually any advanced switch filter, which results
5989 * into new extraction sequence, ended up creating a new recipe
5990 * of type ROOT and usually recipes are associated with profiles
5991 * Switch rule referreing newly created recipe, needs to have
5992 * either/or 'fwd' or 'join' priority, otherwise switch rule
5993 * evaluation will not happen correctly. In other words, if
5994 * switch rule to be evaluated on priority basis, then recipe
5995 * needs to have priority, otherwise it will be evaluated last.
5997 buf[0].content.act_ctrl_fwd_priority = rm->priority;
5999 struct ice_recp_grp_entry *last_chain_entry;
6002 /* Allocate the last recipe that will chain the outcomes of the
6003 * other recipes together
6005 status = ice_alloc_recipe(hw, &rid);
6009 buf[recps].recipe_indx = (u8)rid;
6010 buf[recps].content.rid = (u8)rid;
6011 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
6012 /* the new entry created should also be part of rg_list to
6013 * make sure we have complete recipe
6015 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
6016 sizeof(*last_chain_entry));
6017 if (!last_chain_entry) {
6018 status = ICE_ERR_NO_MEMORY;
6021 last_chain_entry->rid = rid;
6022 ice_memset(&buf[recps].content.lkup_indx, 0,
6023 sizeof(buf[recps].content.lkup_indx),
6025 /* All recipes use look-up index 0 to match switch ID. */
6026 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
6027 buf[recps].content.mask[0] =
6028 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
6029 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
6030 buf[recps].content.lkup_indx[i] =
6031 ICE_AQ_RECIPE_LKUP_IGNORE;
6032 buf[recps].content.mask[i] = 0;
6036 /* update r_bitmap with the recp that is used for chaining */
6037 ice_set_bit(rid, rm->r_bitmap);
6038 /* this is the recipe that chains all the other recipes so it
6039 * should not have a chaining ID to indicate the same
6041 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
6042 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
6044 last_chain_entry->fv_idx[i] = entry->chain_idx;
6045 buf[recps].content.lkup_indx[i] = entry->chain_idx;
6046 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
6047 ice_set_bit(entry->rid, rm->r_bitmap);
6049 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
6050 if (sizeof(buf[recps].recipe_bitmap) >=
6051 sizeof(rm->r_bitmap)) {
6052 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
6053 sizeof(buf[recps].recipe_bitmap),
6054 ICE_NONDMA_TO_NONDMA);
6056 status = ICE_ERR_BAD_PTR;
6059 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
6061 /* To differentiate among different UDP tunnels, a meta data ID
6064 if (match_tun_mask) {
6065 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
6066 buf[recps].content.mask[i] =
6067 CPU_TO_LE16(match_tun_mask);
6071 rm->root_rid = (u8)rid;
6073 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
6077 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
6078 ice_release_change_lock(hw);
6082 /* Every recipe that just got created add it to the recipe
6085 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
6086 struct ice_switch_info *sw = hw->switch_info;
6087 bool is_root, idx_found = false;
6088 struct ice_sw_recipe *recp;
6089 u16 idx, buf_idx = 0;
6091 /* find buffer index for copying some data */
6092 for (idx = 0; idx < rm->n_grp_count; idx++)
6093 if (buf[idx].recipe_indx == entry->rid) {
6099 status = ICE_ERR_OUT_OF_RANGE;
6103 recp = &sw->recp_list[entry->rid];
6104 is_root = (rm->root_rid == entry->rid);
6105 recp->is_root = is_root;
6107 recp->root_rid = entry->rid;
6108 recp->big_recp = (is_root && rm->n_grp_count > 1);
6110 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
6111 entry->r_group.n_val_pairs *
6112 sizeof(struct ice_fv_word),
6113 ICE_NONDMA_TO_NONDMA);
6115 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
6116 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
6118 /* Copy non-result fv index values and masks to recipe. This
6119 * call will also update the result recipe bitmask.
6121 ice_collect_result_idx(&buf[buf_idx], recp);
6123 /* for non-root recipes, also copy to the root, this allows
6124 * easier matching of a complete chained recipe
6127 ice_collect_result_idx(&buf[buf_idx],
6128 &sw->recp_list[rm->root_rid]);
6130 recp->n_ext_words = entry->r_group.n_val_pairs;
6131 recp->chain_idx = entry->chain_idx;
6132 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
6133 recp->n_grp_count = rm->n_grp_count;
6134 recp->tun_type = rm->tun_type;
6135 recp->recp_created = true;
6149 * ice_create_recipe_group - creates recipe group
6150 * @hw: pointer to hardware structure
6151 * @rm: recipe management list entry
6152 * @lkup_exts: lookup elements
6154 static enum ice_status
6155 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
6156 struct ice_prot_lkup_ext *lkup_exts)
6158 enum ice_status status;
6161 rm->n_grp_count = 0;
6163 /* Create recipes for words that are marked not done by packing them
6166 status = ice_create_first_fit_recp_def(hw, lkup_exts,
6167 &rm->rg_list, &recp_count);
6169 rm->n_grp_count += recp_count;
6170 rm->n_ext_words = lkup_exts->n_val_words;
6171 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
6172 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
6173 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
6174 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
6181 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
6182 * @hw: pointer to hardware structure
6183 * @lkups: lookup elements or match criteria for the advanced recipe, one
6184 * structure per protocol header
6185 * @lkups_cnt: number of protocols
6186 * @bm: bitmap of field vectors to consider
6187 * @fv_list: pointer to a list that holds the returned field vectors
6189 static enum ice_status
6190 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6191 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
6193 enum ice_status status;
6200 prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
6202 return ICE_ERR_NO_MEMORY;
6204 for (i = 0; i < lkups_cnt; i++)
6205 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
6206 status = ICE_ERR_CFG;
6210 /* Find field vectors that include all specified protocol types */
6211 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
6214 ice_free(hw, prot_ids);
6219 * ice_tun_type_match_mask - determine if tun type needs a match mask
6220 * @tun_type: tunnel type
6221 * @mask: mask to be used for the tunnel
6223 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
6226 case ICE_SW_TUN_VXLAN_GPE:
6227 case ICE_SW_TUN_GENEVE:
6228 case ICE_SW_TUN_VXLAN:
6229 case ICE_SW_TUN_NVGRE:
6230 case ICE_SW_TUN_UDP:
6231 case ICE_ALL_TUNNELS:
6232 *mask = ICE_TUN_FLAG_MASK;
6235 case ICE_SW_TUN_GENEVE_VLAN:
6236 case ICE_SW_TUN_VXLAN_VLAN:
6237 *mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
6247 * ice_add_special_words - Add words that are not protocols, such as metadata
6248 * @rinfo: other information regarding the rule e.g. priority and action info
6249 * @lkup_exts: lookup word structure
6251 static enum ice_status
6252 ice_add_special_words(struct ice_adv_rule_info *rinfo,
6253 struct ice_prot_lkup_ext *lkup_exts)
6257 /* If this is a tunneled packet, then add recipe index to match the
6258 * tunnel bit in the packet metadata flags.
6260 if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
6261 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
6262 u8 word = lkup_exts->n_val_words++;
6264 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
6265 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
6266 lkup_exts->field_mask[word] = mask;
6268 return ICE_ERR_MAX_LIMIT;
6275 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
6276 * @hw: pointer to hardware structure
6277 * @rinfo: other information regarding the rule e.g. priority and action info
6278 * @bm: pointer to memory for returning the bitmap of field vectors
6281 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
6284 enum ice_prof_type prof_type;
6286 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
6288 switch (rinfo->tun_type) {
6290 prof_type = ICE_PROF_NON_TUN;
6292 case ICE_ALL_TUNNELS:
6293 prof_type = ICE_PROF_TUN_ALL;
6295 case ICE_SW_TUN_VXLAN_GPE:
6296 case ICE_SW_TUN_GENEVE:
6297 case ICE_SW_TUN_GENEVE_VLAN:
6298 case ICE_SW_TUN_VXLAN:
6299 case ICE_SW_TUN_VXLAN_VLAN:
6300 case ICE_SW_TUN_UDP:
6301 case ICE_SW_TUN_GTP:
6302 prof_type = ICE_PROF_TUN_UDP;
6304 case ICE_SW_TUN_NVGRE:
6305 prof_type = ICE_PROF_TUN_GRE;
6307 case ICE_SW_TUN_PPPOE:
6308 prof_type = ICE_PROF_TUN_PPPOE;
6310 case ICE_SW_TUN_PPPOE_PAY:
6311 ice_set_bit(ICE_PROFID_PPPOE_PAY, bm);
6313 case ICE_SW_TUN_PPPOE_IPV4:
6314 ice_set_bit(ICE_PROFID_PPPOE_IPV4_OTHER, bm);
6315 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
6316 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
6318 case ICE_SW_TUN_PPPOE_IPV4_TCP:
6319 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
6321 case ICE_SW_TUN_PPPOE_IPV4_UDP:
6322 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
6324 case ICE_SW_TUN_PPPOE_IPV6:
6325 ice_set_bit(ICE_PROFID_PPPOE_IPV6_OTHER, bm);
6326 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
6327 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
6329 case ICE_SW_TUN_PPPOE_IPV6_TCP:
6330 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
6332 case ICE_SW_TUN_PPPOE_IPV6_UDP:
6333 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
6335 case ICE_SW_TUN_PROFID_IPV6_ESP:
6336 case ICE_SW_TUN_IPV6_ESP:
6337 ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
6339 case ICE_SW_TUN_PROFID_IPV6_AH:
6340 case ICE_SW_TUN_IPV6_AH:
6341 ice_set_bit(ICE_PROFID_IPV6_AH, bm);
6343 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6344 case ICE_SW_TUN_IPV6_L2TPV3:
6345 ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
6347 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6348 case ICE_SW_TUN_IPV6_NAT_T:
6349 ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
6351 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6352 ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
6354 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6355 ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
6357 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6358 ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
6360 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6361 ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
6363 case ICE_SW_TUN_IPV4_NAT_T:
6364 ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
6366 case ICE_SW_TUN_IPV4_L2TPV3:
6367 ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
6369 case ICE_SW_TUN_IPV4_ESP:
6370 ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
6372 case ICE_SW_TUN_IPV4_AH:
6373 ice_set_bit(ICE_PROFID_IPV4_AH, bm);
6375 case ICE_SW_TUN_AND_NON_TUN:
6377 prof_type = ICE_PROF_ALL;
6381 ice_get_sw_fv_bitmap(hw, prof_type, bm);
6385 * ice_is_prof_rule - determine if rule type is a profile rule
6386 * @type: the rule type
6388 * if the rule type is a profile rule, that means that there no field value
6389 * match required, in this case just a profile hit is required.
6391 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
6394 case ICE_SW_TUN_PROFID_IPV6_ESP:
6395 case ICE_SW_TUN_PROFID_IPV6_AH:
6396 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6397 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6398 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6399 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6400 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6401 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6411 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
6412 * @hw: pointer to hardware structure
6413 * @lkups: lookup elements or match criteria for the advanced recipe, one
6414 * structure per protocol header
6415 * @lkups_cnt: number of protocols
6416 * @rinfo: other information regarding the rule e.g. priority and action info
6417 * @rid: return the recipe ID of the recipe created
6419 static enum ice_status
6420 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6421 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
6423 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
6424 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
6425 struct ice_prot_lkup_ext *lkup_exts;
6426 struct ice_recp_grp_entry *r_entry;
6427 struct ice_sw_fv_list_entry *fvit;
6428 struct ice_recp_grp_entry *r_tmp;
6429 struct ice_sw_fv_list_entry *tmp;
6430 enum ice_status status = ICE_SUCCESS;
6431 struct ice_sw_recipe *rm;
6432 u16 match_tun_mask = 0;
6436 if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
6437 return ICE_ERR_PARAM;
6439 lkup_exts = (struct ice_prot_lkup_ext *)
6440 ice_malloc(hw, sizeof(*lkup_exts));
6442 return ICE_ERR_NO_MEMORY;
6444 /* Determine the number of words to be matched and if it exceeds a
6445 * recipe's restrictions
6447 for (i = 0; i < lkups_cnt; i++) {
6450 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
6451 status = ICE_ERR_CFG;
6452 goto err_free_lkup_exts;
6455 count = ice_fill_valid_words(&lkups[i], lkup_exts);
6457 status = ICE_ERR_CFG;
6458 goto err_free_lkup_exts;
6462 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
6464 status = ICE_ERR_NO_MEMORY;
6465 goto err_free_lkup_exts;
6468 /* Get field vectors that contain fields extracted from all the protocol
6469 * headers being programmed.
6471 INIT_LIST_HEAD(&rm->fv_list);
6472 INIT_LIST_HEAD(&rm->rg_list);
6474 /* Get bitmap of field vectors (profiles) that are compatible with the
6475 * rule request; only these will be searched in the subsequent call to
6478 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
6480 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
6484 /* Group match words into recipes using preferred recipe grouping
6487 status = ice_create_recipe_group(hw, rm, lkup_exts);
6491 /* For certain tunnel types it is necessary to use a metadata ID flag to
6492 * differentiate different tunnel types. A separate recipe needs to be
6493 * used for the metadata.
6495 if (ice_tun_type_match_word(rinfo->tun_type, &mask) &&
6496 rm->n_grp_count > 1)
6497 match_tun_mask = mask;
6499 /* set the recipe priority if specified */
6500 rm->priority = (u8)rinfo->priority;
6502 /* Find offsets from the field vector. Pick the first one for all the
6505 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
6509 /* An empty FV list means to use all the profiles returned in the
6512 if (LIST_EMPTY(&rm->fv_list)) {
6515 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++)
6516 if (ice_is_bit_set(fv_bitmap, j)) {
6517 struct ice_sw_fv_list_entry *fvl;
6519 fvl = (struct ice_sw_fv_list_entry *)
6520 ice_malloc(hw, sizeof(*fvl));
6524 fvl->profile_id = j;
6525 LIST_ADD(&fvl->list_entry, &rm->fv_list);
6529 /* get bitmap of all profiles the recipe will be associated with */
6530 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
6531 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
6533 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
6534 ice_set_bit((u16)fvit->profile_id, profiles);
6537 /* Create any special protocol/offset pairs, such as looking at tunnel
6538 * bits by extracting metadata
6540 status = ice_add_special_words(rinfo, lkup_exts);
6542 goto err_free_lkup_exts;
6544 /* Look for a recipe which matches our requested fv / mask list */
6545 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
6546 if (*rid < ICE_MAX_NUM_RECIPES)
6547 /* Success if found a recipe that match the existing criteria */
6550 rm->tun_type = rinfo->tun_type;
6551 /* Recipe we need does not exist, add a recipe */
6552 status = ice_add_sw_recipe(hw, rm, match_tun_mask, profiles);
6556 /* Associate all the recipes created with all the profiles in the
6557 * common field vector.
6559 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
6561 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
6564 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
6565 (u8 *)r_bitmap, NULL);
6569 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
6570 ICE_MAX_NUM_RECIPES);
6571 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
6575 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
6578 ice_release_change_lock(hw);
6583 /* Update profile to recipe bitmap array */
6584 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
6585 ICE_MAX_NUM_RECIPES);
6587 /* Update recipe to profile bitmap array */
6588 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
6589 if (ice_is_bit_set(r_bitmap, j))
6590 ice_set_bit((u16)fvit->profile_id,
6591 recipe_to_profile[j]);
6594 *rid = rm->root_rid;
6595 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
6596 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
6598 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
6599 ice_recp_grp_entry, l_entry) {
6600 LIST_DEL(&r_entry->l_entry);
6601 ice_free(hw, r_entry);
6604 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
6606 LIST_DEL(&fvit->list_entry);
6611 ice_free(hw, rm->root_buf);
6616 ice_free(hw, lkup_exts);
6622 * ice_find_dummy_packet - find dummy packet by tunnel type
6624 * @lkups: lookup elements or match criteria for the advanced recipe, one
6625 * structure per protocol header
6626 * @lkups_cnt: number of protocols
6627 * @tun_type: tunnel type from the match criteria
6628 * @pkt: dummy packet to fill according to filter match criteria
6629 * @pkt_len: packet length of dummy packet
6630 * @offsets: pointer to receive the pointer to the offsets for the packet
6633 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6634 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
6636 const struct ice_dummy_pkt_offsets **offsets)
6638 bool tcp = false, udp = false, ipv6 = false, vlan = false;
6642 for (i = 0; i < lkups_cnt; i++) {
6643 if (lkups[i].type == ICE_UDP_ILOS)
6645 else if (lkups[i].type == ICE_TCP_IL)
6647 else if (lkups[i].type == ICE_IPV6_OFOS)
6649 else if (lkups[i].type == ICE_VLAN_OFOS)
6651 else if (lkups[i].type == ICE_IPV4_OFOS &&
6652 lkups[i].h_u.ipv4_hdr.protocol ==
6653 ICE_IPV4_NVGRE_PROTO_ID &&
6654 lkups[i].m_u.ipv4_hdr.protocol ==
6657 else if (lkups[i].type == ICE_PPPOE &&
6658 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
6659 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
6660 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
6663 else if (lkups[i].type == ICE_ETYPE_OL &&
6664 lkups[i].h_u.ethertype.ethtype_id ==
6665 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
6666 lkups[i].m_u.ethertype.ethtype_id ==
6671 if (tun_type == ICE_SW_TUN_IPV4_ESP) {
6672 *pkt = dummy_ipv4_esp_pkt;
6673 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
6674 *offsets = dummy_ipv4_esp_packet_offsets;
6678 if (tun_type == ICE_SW_TUN_IPV6_ESP) {
6679 *pkt = dummy_ipv6_esp_pkt;
6680 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
6681 *offsets = dummy_ipv6_esp_packet_offsets;
6685 if (tun_type == ICE_SW_TUN_IPV4_AH) {
6686 *pkt = dummy_ipv4_ah_pkt;
6687 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
6688 *offsets = dummy_ipv4_ah_packet_offsets;
6692 if (tun_type == ICE_SW_TUN_IPV6_AH) {
6693 *pkt = dummy_ipv6_ah_pkt;
6694 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
6695 *offsets = dummy_ipv6_ah_packet_offsets;
6699 if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
6700 *pkt = dummy_ipv4_nat_pkt;
6701 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
6702 *offsets = dummy_ipv4_nat_packet_offsets;
6706 if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
6707 *pkt = dummy_ipv6_nat_pkt;
6708 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
6709 *offsets = dummy_ipv6_nat_packet_offsets;
6713 if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
6714 *pkt = dummy_ipv4_l2tpv3_pkt;
6715 *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
6716 *offsets = dummy_ipv4_l2tpv3_packet_offsets;
6720 if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
6721 *pkt = dummy_ipv6_l2tpv3_pkt;
6722 *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
6723 *offsets = dummy_ipv6_l2tpv3_packet_offsets;
6727 if (tun_type == ICE_SW_TUN_GTP) {
6728 *pkt = dummy_udp_gtp_packet;
6729 *pkt_len = sizeof(dummy_udp_gtp_packet);
6730 *offsets = dummy_udp_gtp_packet_offsets;
6734 if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
6735 *pkt = dummy_pppoe_ipv6_packet;
6736 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
6737 *offsets = dummy_pppoe_packet_offsets;
6739 } else if (tun_type == ICE_SW_TUN_PPPOE ||
6740 tun_type == ICE_SW_TUN_PPPOE_PAY) {
6741 *pkt = dummy_pppoe_ipv4_packet;
6742 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
6743 *offsets = dummy_pppoe_packet_offsets;
6747 if (tun_type == ICE_SW_TUN_PPPOE_IPV4) {
6748 *pkt = dummy_pppoe_ipv4_packet;
6749 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
6750 *offsets = dummy_pppoe_packet_ipv4_offsets;
6754 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_TCP) {
6755 *pkt = dummy_pppoe_ipv4_tcp_packet;
6756 *pkt_len = sizeof(dummy_pppoe_ipv4_tcp_packet);
6757 *offsets = dummy_pppoe_ipv4_tcp_packet_offsets;
6761 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_UDP) {
6762 *pkt = dummy_pppoe_ipv4_udp_packet;
6763 *pkt_len = sizeof(dummy_pppoe_ipv4_udp_packet);
6764 *offsets = dummy_pppoe_ipv4_udp_packet_offsets;
6768 if (tun_type == ICE_SW_TUN_PPPOE_IPV6) {
6769 *pkt = dummy_pppoe_ipv6_packet;
6770 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
6771 *offsets = dummy_pppoe_packet_ipv6_offsets;
6775 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_TCP) {
6776 *pkt = dummy_pppoe_ipv6_tcp_packet;
6777 *pkt_len = sizeof(dummy_pppoe_ipv6_tcp_packet);
6778 *offsets = dummy_pppoe_packet_ipv6_tcp_offsets;
6782 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_UDP) {
6783 *pkt = dummy_pppoe_ipv6_udp_packet;
6784 *pkt_len = sizeof(dummy_pppoe_ipv6_udp_packet);
6785 *offsets = dummy_pppoe_packet_ipv6_udp_offsets;
6789 if (tun_type == ICE_ALL_TUNNELS) {
6790 *pkt = dummy_gre_udp_packet;
6791 *pkt_len = sizeof(dummy_gre_udp_packet);
6792 *offsets = dummy_gre_udp_packet_offsets;
6796 if (tun_type == ICE_SW_TUN_NVGRE || gre) {
6798 *pkt = dummy_gre_tcp_packet;
6799 *pkt_len = sizeof(dummy_gre_tcp_packet);
6800 *offsets = dummy_gre_tcp_packet_offsets;
6804 *pkt = dummy_gre_udp_packet;
6805 *pkt_len = sizeof(dummy_gre_udp_packet);
6806 *offsets = dummy_gre_udp_packet_offsets;
6810 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
6811 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
6812 tun_type == ICE_SW_TUN_GENEVE_VLAN ||
6813 tun_type == ICE_SW_TUN_VXLAN_VLAN) {
6815 *pkt = dummy_udp_tun_tcp_packet;
6816 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
6817 *offsets = dummy_udp_tun_tcp_packet_offsets;
6821 *pkt = dummy_udp_tun_udp_packet;
6822 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
6823 *offsets = dummy_udp_tun_udp_packet_offsets;
6829 *pkt = dummy_vlan_udp_packet;
6830 *pkt_len = sizeof(dummy_vlan_udp_packet);
6831 *offsets = dummy_vlan_udp_packet_offsets;
6834 *pkt = dummy_udp_packet;
6835 *pkt_len = sizeof(dummy_udp_packet);
6836 *offsets = dummy_udp_packet_offsets;
6838 } else if (udp && ipv6) {
6840 *pkt = dummy_vlan_udp_ipv6_packet;
6841 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
6842 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
6845 *pkt = dummy_udp_ipv6_packet;
6846 *pkt_len = sizeof(dummy_udp_ipv6_packet);
6847 *offsets = dummy_udp_ipv6_packet_offsets;
6849 } else if ((tcp && ipv6) || ipv6) {
6851 *pkt = dummy_vlan_tcp_ipv6_packet;
6852 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
6853 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
6856 *pkt = dummy_tcp_ipv6_packet;
6857 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
6858 *offsets = dummy_tcp_ipv6_packet_offsets;
6863 *pkt = dummy_vlan_tcp_packet;
6864 *pkt_len = sizeof(dummy_vlan_tcp_packet);
6865 *offsets = dummy_vlan_tcp_packet_offsets;
6867 *pkt = dummy_tcp_packet;
6868 *pkt_len = sizeof(dummy_tcp_packet);
6869 *offsets = dummy_tcp_packet_offsets;
6874 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
6876 * @lkups: lookup elements or match criteria for the advanced recipe, one
6877 * structure per protocol header
6878 * @lkups_cnt: number of protocols
6879 * @s_rule: stores rule information from the match criteria
6880 * @dummy_pkt: dummy packet to fill according to filter match criteria
6881 * @pkt_len: packet length of dummy packet
6882 * @offsets: offset info for the dummy packet
6884 static enum ice_status
6885 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6886 struct ice_aqc_sw_rules_elem *s_rule,
6887 const u8 *dummy_pkt, u16 pkt_len,
6888 const struct ice_dummy_pkt_offsets *offsets)
6893 /* Start with a packet with a pre-defined/dummy content. Then, fill
6894 * in the header values to be looked up or matched.
6896 pkt = s_rule->pdata.lkup_tx_rx.hdr;
6898 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
6900 for (i = 0; i < lkups_cnt; i++) {
6901 enum ice_protocol_type type;
6902 u16 offset = 0, len = 0, j;
6905 /* find the start of this layer; it should be found since this
6906 * was already checked when search for the dummy packet
6908 type = lkups[i].type;
6909 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
6910 if (type == offsets[j].type) {
6911 offset = offsets[j].offset;
6916 /* this should never happen in a correct calling sequence */
6918 return ICE_ERR_PARAM;
6920 switch (lkups[i].type) {
6923 len = sizeof(struct ice_ether_hdr);
6926 len = sizeof(struct ice_ethtype_hdr);
6929 len = sizeof(struct ice_vlan_hdr);
6933 len = sizeof(struct ice_ipv4_hdr);
6937 len = sizeof(struct ice_ipv6_hdr);
6942 len = sizeof(struct ice_l4_hdr);
6945 len = sizeof(struct ice_sctp_hdr);
6948 len = sizeof(struct ice_nvgre);
6953 len = sizeof(struct ice_udp_tnl_hdr);
6957 len = sizeof(struct ice_udp_gtp_hdr);
6960 len = sizeof(struct ice_pppoe_hdr);
6963 len = sizeof(struct ice_esp_hdr);
6966 len = sizeof(struct ice_nat_t_hdr);
6969 len = sizeof(struct ice_ah_hdr);
6972 len = sizeof(struct ice_l2tpv3_sess_hdr);
6975 return ICE_ERR_PARAM;
6978 /* the length should be a word multiple */
6979 if (len % ICE_BYTES_PER_WORD)
6982 /* We have the offset to the header start, the length, the
6983 * caller's header values and mask. Use this information to
6984 * copy the data into the dummy packet appropriately based on
6985 * the mask. Note that we need to only write the bits as
6986 * indicated by the mask to make sure we don't improperly write
6987 * over any significant packet data.
6989 for (j = 0; j < len / sizeof(u16); j++)
6990 if (((u16 *)&lkups[i].m_u)[j])
6991 ((u16 *)(pkt + offset))[j] =
6992 (((u16 *)(pkt + offset))[j] &
6993 ~((u16 *)&lkups[i].m_u)[j]) |
6994 (((u16 *)&lkups[i].h_u)[j] &
6995 ((u16 *)&lkups[i].m_u)[j]);
6998 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
7004 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
7005 * @hw: pointer to the hardware structure
7006 * @tun_type: tunnel type
7007 * @pkt: dummy packet to fill in
7008 * @offsets: offset info for the dummy packet
7010 static enum ice_status
7011 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
7012 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
7017 case ICE_SW_TUN_AND_NON_TUN:
7018 case ICE_SW_TUN_VXLAN_GPE:
7019 case ICE_SW_TUN_VXLAN:
7020 case ICE_SW_TUN_VXLAN_VLAN:
7021 case ICE_SW_TUN_UDP:
7022 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
7026 case ICE_SW_TUN_GENEVE:
7027 case ICE_SW_TUN_GENEVE_VLAN:
7028 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
7033 /* Nothing needs to be done for this tunnel type */
7037 /* Find the outer UDP protocol header and insert the port number */
7038 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
7039 if (offsets[i].type == ICE_UDP_OF) {
7040 struct ice_l4_hdr *hdr;
7043 offset = offsets[i].offset;
7044 hdr = (struct ice_l4_hdr *)&pkt[offset];
7045 hdr->dst_port = CPU_TO_BE16(open_port);
7055 * ice_find_adv_rule_entry - Search a rule entry
7056 * @hw: pointer to the hardware structure
7057 * @lkups: lookup elements or match criteria for the advanced recipe, one
7058 * structure per protocol header
7059 * @lkups_cnt: number of protocols
7060 * @recp_id: recipe ID for which we are finding the rule
7061 * @rinfo: other information regarding the rule e.g. priority and action info
7063 * Helper function to search for a given advance rule entry
7064 * Returns pointer to entry storing the rule if found
7066 static struct ice_adv_fltr_mgmt_list_entry *
7067 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7068 u16 lkups_cnt, u16 recp_id,
7069 struct ice_adv_rule_info *rinfo)
7071 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7072 struct ice_switch_info *sw = hw->switch_info;
7075 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
7076 ice_adv_fltr_mgmt_list_entry, list_entry) {
7077 bool lkups_matched = true;
7079 if (lkups_cnt != list_itr->lkups_cnt)
7081 for (i = 0; i < list_itr->lkups_cnt; i++)
7082 if (memcmp(&list_itr->lkups[i], &lkups[i],
7084 lkups_matched = false;
7087 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
7088 rinfo->tun_type == list_itr->rule_info.tun_type &&
7096 * ice_adv_add_update_vsi_list
7097 * @hw: pointer to the hardware structure
7098 * @m_entry: pointer to current adv filter management list entry
7099 * @cur_fltr: filter information from the book keeping entry
7100 * @new_fltr: filter information with the new VSI to be added
7102 * Call AQ command to add or update previously created VSI list with new VSI.
7104 * Helper function to do book keeping associated with adding filter information
7105 * The algorithm to do the booking keeping is described below :
7106 * When a VSI needs to subscribe to a given advanced filter
7107 * if only one VSI has been added till now
7108 * Allocate a new VSI list and add two VSIs
7109 * to this list using switch rule command
7110 * Update the previously created switch rule with the
7111 * newly created VSI list ID
7112 * if a VSI list was previously created
7113 * Add the new VSI to the previously created VSI list set
7114 * using the update switch rule command
7116 static enum ice_status
7117 ice_adv_add_update_vsi_list(struct ice_hw *hw,
7118 struct ice_adv_fltr_mgmt_list_entry *m_entry,
7119 struct ice_adv_rule_info *cur_fltr,
7120 struct ice_adv_rule_info *new_fltr)
7122 enum ice_status status;
7123 u16 vsi_list_id = 0;
7125 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
7126 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
7127 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
7128 return ICE_ERR_NOT_IMPL;
7130 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
7131 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
7132 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
7133 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
7134 return ICE_ERR_NOT_IMPL;
7136 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
7137 /* Only one entry existed in the mapping and it was not already
7138 * a part of a VSI list. So, create a VSI list with the old and
7141 struct ice_fltr_info tmp_fltr;
7142 u16 vsi_handle_arr[2];
7144 /* A rule already exists with the new VSI being added */
7145 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
7146 new_fltr->sw_act.fwd_id.hw_vsi_id)
7147 return ICE_ERR_ALREADY_EXISTS;
7149 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
7150 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
7151 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
7157 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
7158 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
7159 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
7160 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
7161 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
7162 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
7164 /* Update the previous switch rule of "forward to VSI" to
7167 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
7171 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
7172 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
7173 m_entry->vsi_list_info =
7174 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
7177 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
7179 if (!m_entry->vsi_list_info)
7182 /* A rule already exists with the new VSI being added */
7183 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
7186 /* Update the previously created VSI list set with
7187 * the new VSI ID passed in
7189 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
7191 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
7193 ice_aqc_opc_update_sw_rules,
7195 /* update VSI list mapping info with new VSI ID */
7197 ice_set_bit(vsi_handle,
7198 m_entry->vsi_list_info->vsi_map);
7201 m_entry->vsi_count++;
7206 * ice_add_adv_rule - helper function to create an advanced switch rule
7207 * @hw: pointer to the hardware structure
7208 * @lkups: information on the words that needs to be looked up. All words
7209 * together makes one recipe
7210 * @lkups_cnt: num of entries in the lkups array
7211 * @rinfo: other information related to the rule that needs to be programmed
7212 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
7213 * ignored is case of error.
7215 * This function can program only 1 rule at a time. The lkups is used to
7216 * describe the all the words that forms the "lookup" portion of the recipe.
7217 * These words can span multiple protocols. Callers to this function need to
7218 * pass in a list of protocol headers with lookup information along and mask
7219 * that determines which words are valid from the given protocol header.
7220 * rinfo describes other information related to this rule such as forwarding
7221 * IDs, priority of this rule, etc.
7224 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7225 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
7226 struct ice_rule_query_data *added_entry)
7228 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
7229 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
7230 const struct ice_dummy_pkt_offsets *pkt_offsets;
7231 struct ice_aqc_sw_rules_elem *s_rule = NULL;
7232 struct LIST_HEAD_TYPE *rule_head;
7233 struct ice_switch_info *sw;
7234 enum ice_status status;
7235 const u8 *pkt = NULL;
7241 /* Initialize profile to result index bitmap */
7242 if (!hw->switch_info->prof_res_bm_init) {
7243 hw->switch_info->prof_res_bm_init = 1;
7244 ice_init_prof_result_bm(hw);
7247 prof_rule = ice_is_prof_rule(rinfo->tun_type);
7248 if (!prof_rule && !lkups_cnt)
7249 return ICE_ERR_PARAM;
7251 /* get # of words we need to match */
7253 for (i = 0; i < lkups_cnt; i++) {
7256 ptr = (u16 *)&lkups[i].m_u;
7257 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
7263 if (word_cnt > ICE_MAX_CHAIN_WORDS)
7264 return ICE_ERR_PARAM;
7266 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
7267 return ICE_ERR_PARAM;
7270 /* make sure that we can locate a dummy packet */
7271 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
7274 status = ICE_ERR_PARAM;
7275 goto err_ice_add_adv_rule;
7278 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
7279 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
7280 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
7281 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
7284 vsi_handle = rinfo->sw_act.vsi_handle;
7285 if (!ice_is_vsi_valid(hw, vsi_handle))
7286 return ICE_ERR_PARAM;
7288 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
7289 rinfo->sw_act.fwd_id.hw_vsi_id =
7290 ice_get_hw_vsi_num(hw, vsi_handle);
7291 if (rinfo->sw_act.flag & ICE_FLTR_TX)
7292 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
7294 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
7297 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
7299 /* we have to add VSI to VSI_LIST and increment vsi_count.
7300 * Also Update VSI list so that we can change forwarding rule
7301 * if the rule already exists, we will check if it exists with
7302 * same vsi_id, if not then add it to the VSI list if it already
7303 * exists if not then create a VSI list and add the existing VSI
7304 * ID and the new VSI ID to the list
7305 * We will add that VSI to the list
7307 status = ice_adv_add_update_vsi_list(hw, m_entry,
7308 &m_entry->rule_info,
7311 added_entry->rid = rid;
7312 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
7313 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
7317 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
7318 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
7320 return ICE_ERR_NO_MEMORY;
7321 act |= ICE_SINGLE_ACT_LAN_ENABLE;
7322 switch (rinfo->sw_act.fltr_act) {
7323 case ICE_FWD_TO_VSI:
7324 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
7325 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
7326 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
7329 act |= ICE_SINGLE_ACT_TO_Q;
7330 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
7331 ICE_SINGLE_ACT_Q_INDEX_M;
7333 case ICE_FWD_TO_QGRP:
7334 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
7335 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
7336 act |= ICE_SINGLE_ACT_TO_Q;
7337 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
7338 ICE_SINGLE_ACT_Q_INDEX_M;
7339 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
7340 ICE_SINGLE_ACT_Q_REGION_M;
7342 case ICE_DROP_PACKET:
7343 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
7344 ICE_SINGLE_ACT_VALID_BIT;
7347 status = ICE_ERR_CFG;
7348 goto err_ice_add_adv_rule;
7351 /* set the rule LOOKUP type based on caller specified 'RX'
7352 * instead of hardcoding it to be either LOOKUP_TX/RX
7354 * for 'RX' set the source to be the port number
7355 * for 'TX' set the source to be the source HW VSI number (determined
7359 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
7360 s_rule->pdata.lkup_tx_rx.src =
7361 CPU_TO_LE16(hw->port_info->lport);
7363 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
7364 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
7367 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
7368 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
7370 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
7371 pkt_len, pkt_offsets);
7373 goto err_ice_add_adv_rule;
7375 if (rinfo->tun_type != ICE_NON_TUN &&
7376 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
7377 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
7378 s_rule->pdata.lkup_tx_rx.hdr,
7381 goto err_ice_add_adv_rule;
7384 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
7385 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
7388 goto err_ice_add_adv_rule;
7389 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
7390 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
7392 status = ICE_ERR_NO_MEMORY;
7393 goto err_ice_add_adv_rule;
7396 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
7397 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
7398 ICE_NONDMA_TO_NONDMA);
7399 if (!adv_fltr->lkups && !prof_rule) {
7400 status = ICE_ERR_NO_MEMORY;
7401 goto err_ice_add_adv_rule;
7404 adv_fltr->lkups_cnt = lkups_cnt;
7405 adv_fltr->rule_info = *rinfo;
7406 adv_fltr->rule_info.fltr_rule_id =
7407 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
7408 sw = hw->switch_info;
7409 sw->recp_list[rid].adv_rule = true;
7410 rule_head = &sw->recp_list[rid].filt_rules;
7412 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
7413 adv_fltr->vsi_count = 1;
7415 /* Add rule entry to book keeping list */
7416 LIST_ADD(&adv_fltr->list_entry, rule_head);
7418 added_entry->rid = rid;
7419 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
7420 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
7422 err_ice_add_adv_rule:
7423 if (status && adv_fltr) {
7424 ice_free(hw, adv_fltr->lkups);
7425 ice_free(hw, adv_fltr);
7428 ice_free(hw, s_rule);
7434 * ice_adv_rem_update_vsi_list
7435 * @hw: pointer to the hardware structure
7436 * @vsi_handle: VSI handle of the VSI to remove
7437 * @fm_list: filter management entry for which the VSI list management needs to
7440 static enum ice_status
7441 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
7442 struct ice_adv_fltr_mgmt_list_entry *fm_list)
7444 struct ice_vsi_list_map_info *vsi_list_info;
7445 enum ice_sw_lkup_type lkup_type;
7446 enum ice_status status;
7449 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
7450 fm_list->vsi_count == 0)
7451 return ICE_ERR_PARAM;
7453 /* A rule with the VSI being removed does not exist */
7454 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
7455 return ICE_ERR_DOES_NOT_EXIST;
7457 lkup_type = ICE_SW_LKUP_LAST;
7458 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
7459 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
7460 ice_aqc_opc_update_sw_rules,
7465 fm_list->vsi_count--;
7466 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
7467 vsi_list_info = fm_list->vsi_list_info;
7468 if (fm_list->vsi_count == 1) {
7469 struct ice_fltr_info tmp_fltr;
7472 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
7474 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
7475 return ICE_ERR_OUT_OF_RANGE;
7477 /* Make sure VSI list is empty before removing it below */
7478 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
7480 ice_aqc_opc_update_sw_rules,
7485 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
7486 tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
7487 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
7488 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
7489 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
7490 tmp_fltr.fwd_id.hw_vsi_id =
7491 ice_get_hw_vsi_num(hw, rem_vsi_handle);
7492 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
7493 ice_get_hw_vsi_num(hw, rem_vsi_handle);
7494 fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
7496 /* Update the previous switch rule of "MAC forward to VSI" to
7497 * "MAC fwd to VSI list"
7499 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
7501 ice_debug(hw, ICE_DBG_SW,
7502 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
7503 tmp_fltr.fwd_id.hw_vsi_id, status);
7507 /* Remove the VSI list since it is no longer used */
7508 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
7510 ice_debug(hw, ICE_DBG_SW,
7511 "Failed to remove VSI list %d, error %d\n",
7512 vsi_list_id, status);
7516 LIST_DEL(&vsi_list_info->list_entry);
7517 ice_free(hw, vsi_list_info);
7518 fm_list->vsi_list_info = NULL;
7525 * ice_rem_adv_rule - removes existing advanced switch rule
7526 * @hw: pointer to the hardware structure
7527 * @lkups: information on the words that needs to be looked up. All words
7528 * together makes one recipe
7529 * @lkups_cnt: num of entries in the lkups array
7530 * @rinfo: Its the pointer to the rule information for the rule
7532 * This function can be used to remove 1 rule at a time. The lkups is
7533 * used to describe all the words that forms the "lookup" portion of the
7534 * rule. These words can span multiple protocols. Callers to this function
7535 * need to pass in a list of protocol headers with lookup information along
7536 * and mask that determines which words are valid from the given protocol
7537 * header. rinfo describes other information related to this rule such as
7538 * forwarding IDs, priority of this rule, etc.
7541 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7542 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
7544 struct ice_adv_fltr_mgmt_list_entry *list_elem;
7545 struct ice_prot_lkup_ext lkup_exts;
7546 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
7547 enum ice_status status = ICE_SUCCESS;
7548 bool remove_rule = false;
7549 u16 i, rid, vsi_handle;
7551 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
7552 for (i = 0; i < lkups_cnt; i++) {
7555 if (lkups[i].type >= ICE_PROTOCOL_LAST)
7558 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
7563 /* Create any special protocol/offset pairs, such as looking at tunnel
7564 * bits by extracting metadata
7566 status = ice_add_special_words(rinfo, &lkup_exts);
7570 rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
7571 /* If did not find a recipe that match the existing criteria */
7572 if (rid == ICE_MAX_NUM_RECIPES)
7573 return ICE_ERR_PARAM;
7575 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
7576 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
7577 /* the rule is already removed */
7580 ice_acquire_lock(rule_lock);
7581 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
7583 } else if (list_elem->vsi_count > 1) {
7584 list_elem->vsi_list_info->ref_cnt--;
7585 remove_rule = false;
7586 vsi_handle = rinfo->sw_act.vsi_handle;
7587 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
7589 vsi_handle = rinfo->sw_act.vsi_handle;
7590 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
7592 ice_release_lock(rule_lock);
7595 if (list_elem->vsi_count == 0)
7598 ice_release_lock(rule_lock);
7600 struct ice_aqc_sw_rules_elem *s_rule;
7603 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
7605 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
7608 return ICE_ERR_NO_MEMORY;
7609 s_rule->pdata.lkup_tx_rx.act = 0;
7610 s_rule->pdata.lkup_tx_rx.index =
7611 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
7612 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
7613 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
7615 ice_aqc_opc_remove_sw_rules, NULL);
7616 if (status == ICE_SUCCESS || status == ICE_ERR_DOES_NOT_EXIST) {
7617 struct ice_switch_info *sw = hw->switch_info;
7619 ice_acquire_lock(rule_lock);
7620 LIST_DEL(&list_elem->list_entry);
7621 ice_free(hw, list_elem->lkups);
7622 ice_free(hw, list_elem);
7623 ice_release_lock(rule_lock);
7624 if (LIST_EMPTY(&sw->recp_list[rid].filt_rules))
7625 sw->recp_list[rid].adv_rule = false;
7627 ice_free(hw, s_rule);
7633 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
7634 * @hw: pointer to the hardware structure
7635 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
7637 * This function is used to remove 1 rule at a time. The removal is based on
7638 * the remove_entry parameter. This function will remove rule for a given
7639 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
7642 ice_rem_adv_rule_by_id(struct ice_hw *hw,
7643 struct ice_rule_query_data *remove_entry)
7645 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7646 struct LIST_HEAD_TYPE *list_head;
7647 struct ice_adv_rule_info rinfo;
7648 struct ice_switch_info *sw;
7650 sw = hw->switch_info;
7651 if (!sw->recp_list[remove_entry->rid].recp_created)
7652 return ICE_ERR_PARAM;
7653 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
7654 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
7656 if (list_itr->rule_info.fltr_rule_id ==
7657 remove_entry->rule_id) {
7658 rinfo = list_itr->rule_info;
7659 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
7660 return ice_rem_adv_rule(hw, list_itr->lkups,
7661 list_itr->lkups_cnt, &rinfo);
7664 /* either list is empty or unable to find rule */
7665 return ICE_ERR_DOES_NOT_EXIST;
7669 * ice_rem_adv_for_vsi - removes existing advanced switch rules for a
7671 * @hw: pointer to the hardware structure
7672 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
7674 * This function is used to remove all the rules for a given VSI and as soon
7675 * as removing a rule fails, it will return immediately with the error code,
7676 * else it will return ICE_SUCCESS
7678 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
7680 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7681 struct ice_vsi_list_map_info *map_info;
7682 struct LIST_HEAD_TYPE *list_head;
7683 struct ice_adv_rule_info rinfo;
7684 struct ice_switch_info *sw;
7685 enum ice_status status;
7686 u16 vsi_list_id = 0;
7689 sw = hw->switch_info;
7690 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
7691 if (!sw->recp_list[rid].recp_created)
7693 if (!sw->recp_list[rid].adv_rule)
7695 list_head = &sw->recp_list[rid].filt_rules;
7697 LIST_FOR_EACH_ENTRY(list_itr, list_head,
7698 ice_adv_fltr_mgmt_list_entry, list_entry) {
7699 map_info = ice_find_vsi_list_entry(&sw->recp_list[rid],
7704 rinfo = list_itr->rule_info;
7705 rinfo.sw_act.vsi_handle = vsi_handle;
7706 status = ice_rem_adv_rule(hw, list_itr->lkups,
7707 list_itr->lkups_cnt, &rinfo);
7717 * ice_replay_fltr - Replay all the filters stored by a specific list head
7718 * @hw: pointer to the hardware structure
7719 * @list_head: list for which filters needs to be replayed
7720 * @recp_id: Recipe ID for which rules need to be replayed
7722 static enum ice_status
7723 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
7725 struct ice_fltr_mgmt_list_entry *itr;
7726 enum ice_status status = ICE_SUCCESS;
7727 struct ice_sw_recipe *recp_list;
7728 u8 lport = hw->port_info->lport;
7729 struct LIST_HEAD_TYPE l_head;
7731 if (LIST_EMPTY(list_head))
7734 recp_list = &hw->switch_info->recp_list[recp_id];
7735 /* Move entries from the given list_head to a temporary l_head so that
7736 * they can be replayed. Otherwise when trying to re-add the same
7737 * filter, the function will return already exists
7739 LIST_REPLACE_INIT(list_head, &l_head);
7741 /* Mark the given list_head empty by reinitializing it so filters
7742 * could be added again by *handler
7744 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
7746 struct ice_fltr_list_entry f_entry;
7748 f_entry.fltr_info = itr->fltr_info;
7749 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
7750 status = ice_add_rule_internal(hw, recp_list, lport,
7752 if (status != ICE_SUCCESS)
7757 /* Add a filter per VSI separately */
7762 ice_find_first_bit(itr->vsi_list_info->vsi_map,
7764 if (!ice_is_vsi_valid(hw, vsi_handle))
7767 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
7768 f_entry.fltr_info.vsi_handle = vsi_handle;
7769 f_entry.fltr_info.fwd_id.hw_vsi_id =
7770 ice_get_hw_vsi_num(hw, vsi_handle);
7771 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
7772 if (recp_id == ICE_SW_LKUP_VLAN)
7773 status = ice_add_vlan_internal(hw, recp_list,
7776 status = ice_add_rule_internal(hw, recp_list,
7779 if (status != ICE_SUCCESS)
7784 /* Clear the filter management list */
7785 ice_rem_sw_rule_info(hw, &l_head);
7790 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
7791 * @hw: pointer to the hardware structure
7793 * NOTE: This function does not clean up partially added filters on error.
7794 * It is up to caller of the function to issue a reset or fail early.
7796 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
7798 struct ice_switch_info *sw = hw->switch_info;
7799 enum ice_status status = ICE_SUCCESS;
7802 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7803 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
7805 status = ice_replay_fltr(hw, i, head);
7806 if (status != ICE_SUCCESS)
7813 * ice_replay_vsi_fltr - Replay filters for requested VSI
7814 * @hw: pointer to the hardware structure
7815 * @pi: pointer to port information structure
7816 * @sw: pointer to switch info struct for which function replays filters
7817 * @vsi_handle: driver VSI handle
7818 * @recp_id: Recipe ID for which rules need to be replayed
7819 * @list_head: list for which filters need to be replayed
7821 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
7822 * It is required to pass valid VSI handle.
7824 static enum ice_status
7825 ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
7826 struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
7827 struct LIST_HEAD_TYPE *list_head)
7829 struct ice_fltr_mgmt_list_entry *itr;
7830 enum ice_status status = ICE_SUCCESS;
7831 struct ice_sw_recipe *recp_list;
7834 if (LIST_EMPTY(list_head))
7836 recp_list = &sw->recp_list[recp_id];
7837 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
7839 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
7841 struct ice_fltr_list_entry f_entry;
7843 f_entry.fltr_info = itr->fltr_info;
7844 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
7845 itr->fltr_info.vsi_handle == vsi_handle) {
7846 /* update the src in case it is VSI num */
7847 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
7848 f_entry.fltr_info.src = hw_vsi_id;
7849 status = ice_add_rule_internal(hw, recp_list,
7852 if (status != ICE_SUCCESS)
7856 if (!itr->vsi_list_info ||
7857 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
7859 /* Clearing it so that the logic can add it back */
7860 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
7861 f_entry.fltr_info.vsi_handle = vsi_handle;
7862 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
7863 /* update the src in case it is VSI num */
7864 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
7865 f_entry.fltr_info.src = hw_vsi_id;
7866 if (recp_id == ICE_SW_LKUP_VLAN)
7867 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
7869 status = ice_add_rule_internal(hw, recp_list,
7872 if (status != ICE_SUCCESS)
7880 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
7881 * @hw: pointer to the hardware structure
7882 * @vsi_handle: driver VSI handle
7883 * @list_head: list for which filters need to be replayed
7885 * Replay the advanced rule for the given VSI.
7887 static enum ice_status
7888 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
7889 struct LIST_HEAD_TYPE *list_head)
7891 struct ice_rule_query_data added_entry = { 0 };
7892 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
7893 enum ice_status status = ICE_SUCCESS;
7895 if (LIST_EMPTY(list_head))
7897 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
7899 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
7900 u16 lk_cnt = adv_fltr->lkups_cnt;
7902 if (vsi_handle != rinfo->sw_act.vsi_handle)
7904 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
7913 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
7914 * @hw: pointer to the hardware structure
7915 * @pi: pointer to port information structure
7916 * @vsi_handle: driver VSI handle
7918 * Replays filters for requested VSI via vsi_handle.
7921 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
7924 struct ice_switch_info *sw = hw->switch_info;
7925 enum ice_status status;
7928 /* Update the recipes that were created */
7929 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7930 struct LIST_HEAD_TYPE *head;
7932 head = &sw->recp_list[i].filt_replay_rules;
7933 if (!sw->recp_list[i].adv_rule)
7934 status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
7937 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
7938 if (status != ICE_SUCCESS)
7946 * ice_rm_all_sw_replay_rule - helper function to delete filter replay rules
7947 * @hw: pointer to the HW struct
7948 * @sw: pointer to switch info struct for which function removes filters
7950 * Deletes the filter replay rules for given switch
7952 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
7959 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7960 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
7961 struct LIST_HEAD_TYPE *l_head;
7963 l_head = &sw->recp_list[i].filt_replay_rules;
7964 if (!sw->recp_list[i].adv_rule)
7965 ice_rem_sw_rule_info(hw, l_head);
7967 ice_rem_adv_rule_info(hw, l_head);
7973 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
7974 * @hw: pointer to the HW struct
7976 * Deletes the filter replay rules.
7978 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
7980 ice_rm_sw_replay_rule_info(hw, hw->switch_info);