1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020 Intel Corporation
 */
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
/* Byte offsets into the dummy Ethernet header below, plus protocol and
 * Ethertype constants used when building switch filter rules.
 */
9 #define ICE_ETH_DA_OFFSET 0		/* destination MAC starts the header */
10 #define ICE_ETH_ETHTYPE_OFFSET 12	/* Ethertype follows DA + SA (6 + 6 bytes) */
11 #define ICE_ETH_VLAN_TCI_OFFSET 14	/* VLAN TCI when an 802.1Q tag is present */
12 #define ICE_MAX_VLAN_ID 0xFFF		/* VLAN ID is a 12-bit field */
13 #define ICE_IPV4_NVGRE_PROTO_ID 0x002F	/* IP protocol 47 = GRE (used for NVGRE) */
14 #define ICE_PPP_IPV6_PROTO_ID 0x0057	/* PPP protocol number for IPv6 payloads */
15 #define ICE_IPV6_ETHER_ID 0x86DD	/* Ethertype for IPv6 */
16 #define ICE_TCP_PROTO_ID 0x06		/* IP protocol 6 = TCP */
18 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
19 * struct to configure any switch filter rules.
20 * {DA (6 bytes), SA(6 bytes),
21 * Ether type (2 bytes for header without VLAN tag) OR
22 * VLAN tag (4 bytes for header with VLAN tag) }
24 * Word on Hardcoded values
25 * byte 0 = 0x2: to identify it as locally administered DA MAC
26 * byte 6 = 0x2: to identify it as locally administered SA MAC
27 * byte 12 = 0x81 & byte 13 = 0x00:
28 * In case of VLAN filter the first two bytes define the Ether type (0x8100)
29 * and the remaining two bytes are a placeholder for programming a given VLAN ID
30 * In case of an Ether type filter it is treated as a header without VLAN tag
31 * and bytes 12 and 13 are used to program a given Ether type instead
 */
33 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* Pairs a protocol header type with its byte offset inside one of the dummy
 * packet templates below; a table of these is terminated by an entry whose
 * type is ICE_PROTOCOL_LAST.
 * NOTE(review): the closing "};" of this struct is missing from this
 * extracted view of the file — confirm against the full source.
 */
37 struct ice_dummy_pkt_offsets {
38 enum ice_protocol_type type;
39 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
42 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
45 { ICE_IPV4_OFOS, 14 },
50 { ICE_PROTOCOL_LAST, 0 },
53 static const u8 dummy_gre_tcp_packet[] = {
54 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
55 0x00, 0x00, 0x00, 0x00,
56 0x00, 0x00, 0x00, 0x00,
58 0x08, 0x00, /* ICE_ETYPE_OL 12 */
60 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
61 0x00, 0x00, 0x00, 0x00,
62 0x00, 0x2F, 0x00, 0x00,
63 0x00, 0x00, 0x00, 0x00,
64 0x00, 0x00, 0x00, 0x00,
66 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
67 0x00, 0x00, 0x00, 0x00,
69 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
70 0x00, 0x00, 0x00, 0x00,
71 0x00, 0x00, 0x00, 0x00,
74 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
75 0x00, 0x00, 0x00, 0x00,
76 0x00, 0x06, 0x00, 0x00,
77 0x00, 0x00, 0x00, 0x00,
78 0x00, 0x00, 0x00, 0x00,
80 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
81 0x00, 0x00, 0x00, 0x00,
82 0x00, 0x00, 0x00, 0x00,
83 0x50, 0x02, 0x20, 0x00,
84 0x00, 0x00, 0x00, 0x00
87 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
90 { ICE_IPV4_OFOS, 14 },
95 { ICE_PROTOCOL_LAST, 0 },
98 static const u8 dummy_gre_udp_packet[] = {
99 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
100 0x00, 0x00, 0x00, 0x00,
101 0x00, 0x00, 0x00, 0x00,
103 0x08, 0x00, /* ICE_ETYPE_OL 12 */
105 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
106 0x00, 0x00, 0x00, 0x00,
107 0x00, 0x2F, 0x00, 0x00,
108 0x00, 0x00, 0x00, 0x00,
109 0x00, 0x00, 0x00, 0x00,
111 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
112 0x00, 0x00, 0x00, 0x00,
114 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
115 0x00, 0x00, 0x00, 0x00,
116 0x00, 0x00, 0x00, 0x00,
119 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
120 0x00, 0x00, 0x00, 0x00,
121 0x00, 0x11, 0x00, 0x00,
122 0x00, 0x00, 0x00, 0x00,
123 0x00, 0x00, 0x00, 0x00,
125 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
126 0x00, 0x08, 0x00, 0x00,
129 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
131 { ICE_ETYPE_OL, 12 },
132 { ICE_IPV4_OFOS, 14 },
136 { ICE_VXLAN_GPE, 42 },
140 { ICE_PROTOCOL_LAST, 0 },
143 static const u8 dummy_udp_tun_tcp_packet[] = {
144 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
145 0x00, 0x00, 0x00, 0x00,
146 0x00, 0x00, 0x00, 0x00,
148 0x08, 0x00, /* ICE_ETYPE_OL 12 */
150 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
151 0x00, 0x01, 0x00, 0x00,
152 0x40, 0x11, 0x00, 0x00,
153 0x00, 0x00, 0x00, 0x00,
154 0x00, 0x00, 0x00, 0x00,
156 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
157 0x00, 0x46, 0x00, 0x00,
159 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
160 0x00, 0x00, 0x00, 0x00,
162 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
163 0x00, 0x00, 0x00, 0x00,
164 0x00, 0x00, 0x00, 0x00,
167 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
168 0x00, 0x01, 0x00, 0x00,
169 0x40, 0x06, 0x00, 0x00,
170 0x00, 0x00, 0x00, 0x00,
171 0x00, 0x00, 0x00, 0x00,
173 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
174 0x00, 0x00, 0x00, 0x00,
175 0x00, 0x00, 0x00, 0x00,
176 0x50, 0x02, 0x20, 0x00,
177 0x00, 0x00, 0x00, 0x00
180 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
182 { ICE_ETYPE_OL, 12 },
183 { ICE_IPV4_OFOS, 14 },
187 { ICE_VXLAN_GPE, 42 },
190 { ICE_UDP_ILOS, 84 },
191 { ICE_PROTOCOL_LAST, 0 },
194 static const u8 dummy_udp_tun_udp_packet[] = {
195 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
196 0x00, 0x00, 0x00, 0x00,
197 0x00, 0x00, 0x00, 0x00,
199 0x08, 0x00, /* ICE_ETYPE_OL 12 */
201 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
202 0x00, 0x01, 0x00, 0x00,
203 0x00, 0x11, 0x00, 0x00,
204 0x00, 0x00, 0x00, 0x00,
205 0x00, 0x00, 0x00, 0x00,
207 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
208 0x00, 0x3a, 0x00, 0x00,
210 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
211 0x00, 0x00, 0x00, 0x00,
213 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
214 0x00, 0x00, 0x00, 0x00,
215 0x00, 0x00, 0x00, 0x00,
218 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
219 0x00, 0x01, 0x00, 0x00,
220 0x00, 0x11, 0x00, 0x00,
221 0x00, 0x00, 0x00, 0x00,
222 0x00, 0x00, 0x00, 0x00,
224 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
225 0x00, 0x08, 0x00, 0x00,
228 /* offset info for MAC + IPv4 + UDP dummy packet */
229 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
231 { ICE_ETYPE_OL, 12 },
232 { ICE_IPV4_OFOS, 14 },
233 { ICE_UDP_ILOS, 34 },
234 { ICE_PROTOCOL_LAST, 0 },
237 /* Dummy packet for MAC + IPv4 + UDP */
238 static const u8 dummy_udp_packet[] = {
239 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
240 0x00, 0x00, 0x00, 0x00,
241 0x00, 0x00, 0x00, 0x00,
243 0x08, 0x00, /* ICE_ETYPE_OL 12 */
245 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
246 0x00, 0x01, 0x00, 0x00,
247 0x00, 0x11, 0x00, 0x00,
248 0x00, 0x00, 0x00, 0x00,
249 0x00, 0x00, 0x00, 0x00,
251 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
252 0x00, 0x08, 0x00, 0x00,
254 0x00, 0x00, /* 2 bytes for 4 byte alignment */
257 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
258 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
260 { ICE_ETYPE_OL, 12 },
261 { ICE_VLAN_OFOS, 14 },
262 { ICE_IPV4_OFOS, 18 },
263 { ICE_UDP_ILOS, 38 },
264 { ICE_PROTOCOL_LAST, 0 },
267 /* C-tag (802.1Q), IPv4:UDP dummy packet */
268 static const u8 dummy_vlan_udp_packet[] = {
269 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
270 0x00, 0x00, 0x00, 0x00,
271 0x00, 0x00, 0x00, 0x00,
273 0x81, 0x00, /* ICE_ETYPE_OL 12 */
275 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
277 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
278 0x00, 0x01, 0x00, 0x00,
279 0x00, 0x11, 0x00, 0x00,
280 0x00, 0x00, 0x00, 0x00,
281 0x00, 0x00, 0x00, 0x00,
283 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
284 0x00, 0x08, 0x00, 0x00,
286 0x00, 0x00, /* 2 bytes for 4 byte alignment */
289 /* offset info for MAC + IPv4 + TCP dummy packet */
290 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
292 { ICE_ETYPE_OL, 12 },
293 { ICE_IPV4_OFOS, 14 },
295 { ICE_PROTOCOL_LAST, 0 },
298 /* Dummy packet for MAC + IPv4 + TCP */
299 static const u8 dummy_tcp_packet[] = {
300 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
301 0x00, 0x00, 0x00, 0x00,
302 0x00, 0x00, 0x00, 0x00,
304 0x08, 0x00, /* ICE_ETYPE_OL 12 */
306 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
307 0x00, 0x01, 0x00, 0x00,
308 0x00, 0x06, 0x00, 0x00,
309 0x00, 0x00, 0x00, 0x00,
310 0x00, 0x00, 0x00, 0x00,
312 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
313 0x00, 0x00, 0x00, 0x00,
314 0x00, 0x00, 0x00, 0x00,
315 0x50, 0x00, 0x00, 0x00,
316 0x00, 0x00, 0x00, 0x00,
318 0x00, 0x00, /* 2 bytes for 4 byte alignment */
321 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
322 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
324 { ICE_ETYPE_OL, 12 },
325 { ICE_VLAN_OFOS, 14 },
326 { ICE_IPV4_OFOS, 18 },
328 { ICE_PROTOCOL_LAST, 0 },
331 /* C-tag (802.1Q), IPv4:TCP dummy packet */
332 static const u8 dummy_vlan_tcp_packet[] = {
333 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
334 0x00, 0x00, 0x00, 0x00,
335 0x00, 0x00, 0x00, 0x00,
337 0x81, 0x00, /* ICE_ETYPE_OL 12 */
339 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
341 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
342 0x00, 0x01, 0x00, 0x00,
343 0x00, 0x06, 0x00, 0x00,
344 0x00, 0x00, 0x00, 0x00,
345 0x00, 0x00, 0x00, 0x00,
347 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
348 0x00, 0x00, 0x00, 0x00,
349 0x00, 0x00, 0x00, 0x00,
350 0x50, 0x00, 0x00, 0x00,
351 0x00, 0x00, 0x00, 0x00,
353 0x00, 0x00, /* 2 bytes for 4 byte alignment */
356 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
358 { ICE_ETYPE_OL, 12 },
359 { ICE_IPV6_OFOS, 14 },
361 { ICE_PROTOCOL_LAST, 0 },
364 static const u8 dummy_tcp_ipv6_packet[] = {
365 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
366 0x00, 0x00, 0x00, 0x00,
367 0x00, 0x00, 0x00, 0x00,
369 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
371 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
372 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
373 0x00, 0x00, 0x00, 0x00,
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00,
379 0x00, 0x00, 0x00, 0x00,
380 0x00, 0x00, 0x00, 0x00,
382 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
383 0x00, 0x00, 0x00, 0x00,
384 0x00, 0x00, 0x00, 0x00,
385 0x50, 0x00, 0x00, 0x00,
386 0x00, 0x00, 0x00, 0x00,
388 0x00, 0x00, /* 2 bytes for 4 byte alignment */
391 /* C-tag (802.1Q): IPv6 + TCP */
392 static const struct ice_dummy_pkt_offsets
393 dummy_vlan_tcp_ipv6_packet_offsets[] = {
395 { ICE_ETYPE_OL, 12 },
396 { ICE_VLAN_OFOS, 14 },
397 { ICE_IPV6_OFOS, 18 },
399 { ICE_PROTOCOL_LAST, 0 },
402 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
403 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
404 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
405 0x00, 0x00, 0x00, 0x00,
406 0x00, 0x00, 0x00, 0x00,
408 0x81, 0x00, /* ICE_ETYPE_OL 12 */
410 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
412 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
413 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
414 0x00, 0x00, 0x00, 0x00,
415 0x00, 0x00, 0x00, 0x00,
416 0x00, 0x00, 0x00, 0x00,
417 0x00, 0x00, 0x00, 0x00,
418 0x00, 0x00, 0x00, 0x00,
419 0x00, 0x00, 0x00, 0x00,
420 0x00, 0x00, 0x00, 0x00,
421 0x00, 0x00, 0x00, 0x00,
423 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
424 0x00, 0x00, 0x00, 0x00,
425 0x00, 0x00, 0x00, 0x00,
426 0x50, 0x00, 0x00, 0x00,
427 0x00, 0x00, 0x00, 0x00,
429 0x00, 0x00, /* 2 bytes for 4 byte alignment */
433 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
435 { ICE_ETYPE_OL, 12 },
436 { ICE_IPV6_OFOS, 14 },
437 { ICE_UDP_ILOS, 54 },
438 { ICE_PROTOCOL_LAST, 0 },
441 /* IPv6 + UDP dummy packet */
442 static const u8 dummy_udp_ipv6_packet[] = {
443 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
444 0x00, 0x00, 0x00, 0x00,
445 0x00, 0x00, 0x00, 0x00,
447 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
449 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
450 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
451 0x00, 0x00, 0x00, 0x00,
452 0x00, 0x00, 0x00, 0x00,
453 0x00, 0x00, 0x00, 0x00,
454 0x00, 0x00, 0x00, 0x00,
455 0x00, 0x00, 0x00, 0x00,
456 0x00, 0x00, 0x00, 0x00,
457 0x00, 0x00, 0x00, 0x00,
458 0x00, 0x00, 0x00, 0x00,
460 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
461 0x00, 0x10, 0x00, 0x00,
463 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
464 0x00, 0x00, 0x00, 0x00,
466 0x00, 0x00, /* 2 bytes for 4 byte alignment */
469 /* C-tag (802.1Q): IPv6 + UDP */
470 static const struct ice_dummy_pkt_offsets
471 dummy_vlan_udp_ipv6_packet_offsets[] = {
473 { ICE_ETYPE_OL, 12 },
474 { ICE_VLAN_OFOS, 14 },
475 { ICE_IPV6_OFOS, 18 },
476 { ICE_UDP_ILOS, 58 },
477 { ICE_PROTOCOL_LAST, 0 },
480 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
481 static const u8 dummy_vlan_udp_ipv6_packet[] = {
482 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
483 0x00, 0x00, 0x00, 0x00,
484 0x00, 0x00, 0x00, 0x00,
486 0x81, 0x00, /* ICE_ETYPE_OL 12 */
488 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
490 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
491 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
492 0x00, 0x00, 0x00, 0x00,
493 0x00, 0x00, 0x00, 0x00,
494 0x00, 0x00, 0x00, 0x00,
495 0x00, 0x00, 0x00, 0x00,
496 0x00, 0x00, 0x00, 0x00,
497 0x00, 0x00, 0x00, 0x00,
498 0x00, 0x00, 0x00, 0x00,
499 0x00, 0x00, 0x00, 0x00,
501 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
502 0x00, 0x08, 0x00, 0x00,
504 0x00, 0x00, /* 2 bytes for 4 byte alignment */
507 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
509 { ICE_IPV4_OFOS, 14 },
512 { ICE_PROTOCOL_LAST, 0 },
515 static const u8 dummy_udp_gtp_packet[] = {
516 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
517 0x00, 0x00, 0x00, 0x00,
518 0x00, 0x00, 0x00, 0x00,
521 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
522 0x00, 0x00, 0x00, 0x00,
523 0x00, 0x11, 0x00, 0x00,
524 0x00, 0x00, 0x00, 0x00,
525 0x00, 0x00, 0x00, 0x00,
527 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
528 0x00, 0x1c, 0x00, 0x00,
530 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
531 0x00, 0x00, 0x00, 0x00,
532 0x00, 0x00, 0x00, 0x85,
534 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
535 0x00, 0x00, 0x00, 0x00,
538 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
540 { ICE_ETYPE_OL, 12 },
541 { ICE_VLAN_OFOS, 14},
543 { ICE_PROTOCOL_LAST, 0 },
546 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv4_offsets[] = {
548 { ICE_ETYPE_OL, 12 },
549 { ICE_VLAN_OFOS, 14},
551 { ICE_IPV4_OFOS, 26 },
552 { ICE_PROTOCOL_LAST, 0 },
555 static const u8 dummy_pppoe_ipv4_packet[] = {
556 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
557 0x00, 0x00, 0x00, 0x00,
558 0x00, 0x00, 0x00, 0x00,
560 0x81, 0x00, /* ICE_ETYPE_OL 12 */
562 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
564 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
567 0x00, 0x21, /* PPP Link Layer 24 */
569 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
570 0x00, 0x00, 0x00, 0x00,
571 0x00, 0x00, 0x00, 0x00,
572 0x00, 0x00, 0x00, 0x00,
573 0x00, 0x00, 0x00, 0x00,
575 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
579 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_tcp_packet_offsets[] = {
581 { ICE_ETYPE_OL, 12 },
582 { ICE_VLAN_OFOS, 14},
584 { ICE_IPV4_OFOS, 26 },
586 { ICE_PROTOCOL_LAST, 0 },
589 static const u8 dummy_pppoe_ipv4_tcp_packet[] = {
590 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
591 0x00, 0x00, 0x00, 0x00,
592 0x00, 0x00, 0x00, 0x00,
594 0x81, 0x00, /* ICE_ETYPE_OL 12 */
596 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
598 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
601 0x00, 0x21, /* PPP Link Layer 24 */
603 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 26 */
604 0x00, 0x01, 0x00, 0x00,
605 0x00, 0x06, 0x00, 0x00,
606 0x00, 0x00, 0x00, 0x00,
607 0x00, 0x00, 0x00, 0x00,
609 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 46 */
610 0x00, 0x00, 0x00, 0x00,
611 0x00, 0x00, 0x00, 0x00,
612 0x50, 0x00, 0x00, 0x00,
613 0x00, 0x00, 0x00, 0x00,
615 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
619 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_udp_packet_offsets[] = {
621 { ICE_ETYPE_OL, 12 },
622 { ICE_VLAN_OFOS, 14},
624 { ICE_IPV4_OFOS, 26 },
625 { ICE_UDP_ILOS, 46 },
626 { ICE_PROTOCOL_LAST, 0 },
629 static const u8 dummy_pppoe_ipv4_udp_packet[] = {
630 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
631 0x00, 0x00, 0x00, 0x00,
632 0x00, 0x00, 0x00, 0x00,
634 0x81, 0x00, /* ICE_ETYPE_OL 12 */
636 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
638 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
641 0x00, 0x21, /* PPP Link Layer 24 */
643 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 26 */
644 0x00, 0x01, 0x00, 0x00,
645 0x00, 0x11, 0x00, 0x00,
646 0x00, 0x00, 0x00, 0x00,
647 0x00, 0x00, 0x00, 0x00,
649 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 46 */
650 0x00, 0x08, 0x00, 0x00,
652 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
655 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_offsets[] = {
657 { ICE_ETYPE_OL, 12 },
658 { ICE_VLAN_OFOS, 14},
660 { ICE_IPV6_OFOS, 26 },
661 { ICE_PROTOCOL_LAST, 0 },
664 static const u8 dummy_pppoe_ipv6_packet[] = {
665 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
666 0x00, 0x00, 0x00, 0x00,
667 0x00, 0x00, 0x00, 0x00,
669 0x81, 0x00, /* ICE_ETYPE_OL 12 */
671 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
673 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
676 0x00, 0x57, /* PPP Link Layer 24 */
678 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
679 0x00, 0x00, 0x3b, 0x00,
680 0x00, 0x00, 0x00, 0x00,
681 0x00, 0x00, 0x00, 0x00,
682 0x00, 0x00, 0x00, 0x00,
683 0x00, 0x00, 0x00, 0x00,
684 0x00, 0x00, 0x00, 0x00,
685 0x00, 0x00, 0x00, 0x00,
686 0x00, 0x00, 0x00, 0x00,
687 0x00, 0x00, 0x00, 0x00,
689 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
693 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_tcp_offsets[] = {
695 { ICE_ETYPE_OL, 12 },
696 { ICE_VLAN_OFOS, 14},
698 { ICE_IPV6_OFOS, 26 },
700 { ICE_PROTOCOL_LAST, 0 },
703 static const u8 dummy_pppoe_ipv6_tcp_packet[] = {
704 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
705 0x00, 0x00, 0x00, 0x00,
706 0x00, 0x00, 0x00, 0x00,
708 0x81, 0x00, /* ICE_ETYPE_OL 12 */
710 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
712 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
715 0x00, 0x57, /* PPP Link Layer 24 */
717 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
718 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
719 0x00, 0x00, 0x00, 0x00,
720 0x00, 0x00, 0x00, 0x00,
721 0x00, 0x00, 0x00, 0x00,
722 0x00, 0x00, 0x00, 0x00,
723 0x00, 0x00, 0x00, 0x00,
724 0x00, 0x00, 0x00, 0x00,
725 0x00, 0x00, 0x00, 0x00,
726 0x00, 0x00, 0x00, 0x00,
728 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 66 */
729 0x00, 0x00, 0x00, 0x00,
730 0x00, 0x00, 0x00, 0x00,
731 0x50, 0x00, 0x00, 0x00,
732 0x00, 0x00, 0x00, 0x00,
734 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
738 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_udp_offsets[] = {
740 { ICE_ETYPE_OL, 12 },
741 { ICE_VLAN_OFOS, 14},
743 { ICE_IPV6_OFOS, 26 },
744 { ICE_UDP_ILOS, 66 },
745 { ICE_PROTOCOL_LAST, 0 },
748 static const u8 dummy_pppoe_ipv6_udp_packet[] = {
749 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
750 0x00, 0x00, 0x00, 0x00,
751 0x00, 0x00, 0x00, 0x00,
753 0x81, 0x00, /* ICE_ETYPE_OL 12 */
755 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
757 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
760 0x00, 0x57, /* PPP Link Layer 24 */
762 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
763 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
764 0x00, 0x00, 0x00, 0x00,
765 0x00, 0x00, 0x00, 0x00,
766 0x00, 0x00, 0x00, 0x00,
767 0x00, 0x00, 0x00, 0x00,
768 0x00, 0x00, 0x00, 0x00,
769 0x00, 0x00, 0x00, 0x00,
770 0x00, 0x00, 0x00, 0x00,
771 0x00, 0x00, 0x00, 0x00,
773 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 66 */
774 0x00, 0x08, 0x00, 0x00,
776 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
779 static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
781 { ICE_IPV4_OFOS, 14 },
783 { ICE_PROTOCOL_LAST, 0 },
786 static const u8 dummy_ipv4_esp_pkt[] = {
787 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
788 0x00, 0x00, 0x00, 0x00,
789 0x00, 0x00, 0x00, 0x00,
792 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 14 */
793 0x00, 0x00, 0x40, 0x00,
794 0x40, 0x32, 0x00, 0x00,
795 0x00, 0x00, 0x00, 0x00,
796 0x00, 0x00, 0x00, 0x00,
798 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
799 0x00, 0x00, 0x00, 0x00,
800 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
803 static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
805 { ICE_IPV6_OFOS, 14 },
807 { ICE_PROTOCOL_LAST, 0 },
810 static const u8 dummy_ipv6_esp_pkt[] = {
811 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
812 0x00, 0x00, 0x00, 0x00,
813 0x00, 0x00, 0x00, 0x00,
816 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
817 0x00, 0x08, 0x32, 0x00, /* Next header ESP */
818 0x00, 0x00, 0x00, 0x00,
819 0x00, 0x00, 0x00, 0x00,
820 0x00, 0x00, 0x00, 0x00,
821 0x00, 0x00, 0x00, 0x00,
822 0x00, 0x00, 0x00, 0x00,
823 0x00, 0x00, 0x00, 0x00,
824 0x00, 0x00, 0x00, 0x00,
825 0x00, 0x00, 0x00, 0x00,
827 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
828 0x00, 0x00, 0x00, 0x00,
829 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
832 static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
834 { ICE_IPV4_OFOS, 14 },
836 { ICE_PROTOCOL_LAST, 0 },
839 static const u8 dummy_ipv4_ah_pkt[] = {
840 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
841 0x00, 0x00, 0x00, 0x00,
842 0x00, 0x00, 0x00, 0x00,
845 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
846 0x00, 0x00, 0x40, 0x00,
847 0x40, 0x33, 0x00, 0x00,
848 0x00, 0x00, 0x00, 0x00,
849 0x00, 0x00, 0x00, 0x00,
851 0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
852 0x00, 0x00, 0x00, 0x00,
853 0x00, 0x00, 0x00, 0x00,
854 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
857 static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
859 { ICE_IPV6_OFOS, 14 },
861 { ICE_PROTOCOL_LAST, 0 },
864 static const u8 dummy_ipv6_ah_pkt[] = {
865 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
866 0x00, 0x00, 0x00, 0x00,
867 0x00, 0x00, 0x00, 0x00,
870 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
871 0x00, 0x0c, 0x33, 0x00, /* Next header AH */
872 0x00, 0x00, 0x00, 0x00,
873 0x00, 0x00, 0x00, 0x00,
874 0x00, 0x00, 0x00, 0x00,
875 0x00, 0x00, 0x00, 0x00,
876 0x00, 0x00, 0x00, 0x00,
877 0x00, 0x00, 0x00, 0x00,
878 0x00, 0x00, 0x00, 0x00,
879 0x00, 0x00, 0x00, 0x00,
881 0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
882 0x00, 0x00, 0x00, 0x00,
883 0x00, 0x00, 0x00, 0x00,
884 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
887 static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
889 { ICE_IPV4_OFOS, 14 },
890 { ICE_UDP_ILOS, 34 },
892 { ICE_PROTOCOL_LAST, 0 },
895 static const u8 dummy_ipv4_nat_pkt[] = {
896 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
897 0x00, 0x00, 0x00, 0x00,
898 0x00, 0x00, 0x00, 0x00,
901 0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_IL 14 */
902 0x00, 0x00, 0x40, 0x00,
903 0x40, 0x11, 0x00, 0x00,
904 0x00, 0x00, 0x00, 0x00,
905 0x00, 0x00, 0x00, 0x00,
907 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34 */
908 0x00, 0x00, 0x00, 0x00,
910 0x00, 0x00, 0x00, 0x00,
911 0x00, 0x00, 0x00, 0x00,
912 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
915 static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
917 { ICE_IPV6_OFOS, 14 },
918 { ICE_UDP_ILOS, 54 },
920 { ICE_PROTOCOL_LAST, 0 },
923 static const u8 dummy_ipv6_nat_pkt[] = {
924 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
925 0x00, 0x00, 0x00, 0x00,
926 0x00, 0x00, 0x00, 0x00,
929 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
930 0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */
931 0x00, 0x00, 0x00, 0x00,
932 0x00, 0x00, 0x00, 0x00,
933 0x00, 0x00, 0x00, 0x00,
934 0x00, 0x00, 0x00, 0x00,
935 0x00, 0x00, 0x00, 0x00,
936 0x00, 0x00, 0x00, 0x00,
937 0x00, 0x00, 0x00, 0x00,
938 0x00, 0x00, 0x00, 0x00,
940 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54 */
941 0x00, 0x00, 0x00, 0x00,
943 0x00, 0x00, 0x00, 0x00,
944 0x00, 0x00, 0x00, 0x00,
945 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
949 static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
951 { ICE_IPV4_OFOS, 14 },
953 { ICE_PROTOCOL_LAST, 0 },
956 static const u8 dummy_ipv4_l2tpv3_pkt[] = {
957 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
958 0x00, 0x00, 0x00, 0x00,
959 0x00, 0x00, 0x00, 0x00,
962 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
963 0x00, 0x00, 0x40, 0x00,
964 0x40, 0x73, 0x00, 0x00,
965 0x00, 0x00, 0x00, 0x00,
966 0x00, 0x00, 0x00, 0x00,
968 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
969 0x00, 0x00, 0x00, 0x00,
970 0x00, 0x00, 0x00, 0x00,
971 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
974 static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
976 { ICE_IPV6_OFOS, 14 },
978 { ICE_PROTOCOL_LAST, 0 },
981 static const u8 dummy_ipv6_l2tpv3_pkt[] = {
982 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
983 0x00, 0x00, 0x00, 0x00,
984 0x00, 0x00, 0x00, 0x00,
987 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
988 0x00, 0x0c, 0x73, 0x40,
989 0x00, 0x00, 0x00, 0x00,
990 0x00, 0x00, 0x00, 0x00,
991 0x00, 0x00, 0x00, 0x00,
992 0x00, 0x00, 0x00, 0x00,
993 0x00, 0x00, 0x00, 0x00,
994 0x00, 0x00, 0x00, 0x00,
995 0x00, 0x00, 0x00, 0x00,
996 0x00, 0x00, 0x00, 0x00,
998 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
999 0x00, 0x00, 0x00, 0x00,
1000 0x00, 0x00, 0x00, 0x00,
1001 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1004 /* recipe_to_profile[rid]: bitmap of profile IDs associated with recipe rid */
1005 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
1006 ICE_MAX_NUM_PROFILES);
1008 /* profile_to_recipe[prof]: bitmap of recipe IDs associated with profile prof */
1009 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
1010 ICE_MAX_NUM_RECIPES);
/* Forward declaration: refreshes the cached recipe-to-profile association
 * bitmaps from FW (called lazily; see the refresh_required handling below).
 */
1012 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
1015 * ice_collect_result_idx - copy result index values
1016 * @buf: buffer that contains the result index
1017 * @recp: the recipe struct to copy data into
1019 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
1020 struct ice_sw_recipe *recp)
1022 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1023 ice_set_bit(buf->content.result_indx &
1024 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
1028 * ice_get_tun_type_for_recipe - get tunnel type for the recipe
1029 * @rid: recipe ID that we are populating
1031 static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid)
1033 u8 vxlan_profile[12] = {10, 11, 12, 16, 17, 18, 22, 23, 24, 25, 26, 27};
1034 u8 gre_profile[12] = {13, 14, 15, 19, 20, 21, 28, 29, 30, 31, 32, 33};
1035 u8 pppoe_profile[7] = {34, 35, 36, 37, 38, 39, 40};
1036 u8 non_tun_profile[6] = {4, 5, 6, 7, 8, 9};
1037 enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
1038 u16 i, j, profile_num = 0;
1039 bool non_tun_valid = false;
1040 bool pppoe_valid = false;
1041 bool vxlan_valid = false;
1042 bool gre_valid = false;
1043 bool gtp_valid = false;
1044 bool flag_valid = false;
1046 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1047 if (!ice_is_bit_set(recipe_to_profile[rid], j))
1052 for (i = 0; i < 12; i++) {
1053 if (gre_profile[i] == j)
1057 for (i = 0; i < 12; i++) {
1058 if (vxlan_profile[i] == j)
1062 for (i = 0; i < 7; i++) {
1063 if (pppoe_profile[i] == j)
1067 for (i = 0; i < 6; i++) {
1068 if (non_tun_profile[i] == j)
1069 non_tun_valid = true;
1072 if (j >= ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER &&
1073 j <= ICE_PROFID_IPV6_GTPU_IPV6_OTHER)
1076 if (j >= ICE_PROFID_IPV4_ESP &&
1077 j <= ICE_PROFID_IPV6_PFCP_SESSION)
1081 if (!non_tun_valid && vxlan_valid)
1082 tun_type = ICE_SW_TUN_VXLAN;
1083 else if (!non_tun_valid && gre_valid)
1084 tun_type = ICE_SW_TUN_NVGRE;
1085 else if (!non_tun_valid && pppoe_valid)
1086 tun_type = ICE_SW_TUN_PPPOE;
1087 else if (!non_tun_valid && gtp_valid)
1088 tun_type = ICE_SW_TUN_GTP;
1089 else if (non_tun_valid &&
1090 (vxlan_valid || gre_valid || gtp_valid || pppoe_valid))
1091 tun_type = ICE_SW_TUN_AND_NON_TUN;
1092 else if (non_tun_valid && !vxlan_valid && !gre_valid && !gtp_valid &&
1094 tun_type = ICE_NON_TUN;
1096 if (profile_num > 1 && tun_type == ICE_SW_TUN_PPPOE) {
1097 i = ice_is_bit_set(recipe_to_profile[rid],
1098 ICE_PROFID_PPPOE_IPV4_OTHER);
1099 j = ice_is_bit_set(recipe_to_profile[rid],
1100 ICE_PROFID_PPPOE_IPV6_OTHER);
1102 tun_type = ICE_SW_TUN_PPPOE_IPV4;
1104 tun_type = ICE_SW_TUN_PPPOE_IPV6;
1107 if (profile_num == 1 && (flag_valid || non_tun_valid || pppoe_valid)) {
1108 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1109 if (ice_is_bit_set(recipe_to_profile[rid], j)) {
1111 case ICE_PROFID_IPV4_TCP:
1112 tun_type = ICE_SW_IPV4_TCP;
1114 case ICE_PROFID_IPV4_UDP:
1115 tun_type = ICE_SW_IPV4_UDP;
1117 case ICE_PROFID_IPV6_TCP:
1118 tun_type = ICE_SW_IPV6_TCP;
1120 case ICE_PROFID_IPV6_UDP:
1121 tun_type = ICE_SW_IPV6_UDP;
1123 case ICE_PROFID_PPPOE_PAY:
1124 tun_type = ICE_SW_TUN_PPPOE_PAY;
1126 case ICE_PROFID_PPPOE_IPV4_TCP:
1127 tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1129 case ICE_PROFID_PPPOE_IPV4_UDP:
1130 tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1132 case ICE_PROFID_PPPOE_IPV4_OTHER:
1133 tun_type = ICE_SW_TUN_PPPOE_IPV4;
1135 case ICE_PROFID_PPPOE_IPV6_TCP:
1136 tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1138 case ICE_PROFID_PPPOE_IPV6_UDP:
1139 tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1141 case ICE_PROFID_PPPOE_IPV6_OTHER:
1142 tun_type = ICE_SW_TUN_PPPOE_IPV6;
1144 case ICE_PROFID_IPV4_ESP:
1145 tun_type = ICE_SW_TUN_IPV4_ESP;
1147 case ICE_PROFID_IPV6_ESP:
1148 tun_type = ICE_SW_TUN_IPV6_ESP;
1150 case ICE_PROFID_IPV4_AH:
1151 tun_type = ICE_SW_TUN_IPV4_AH;
1153 case ICE_PROFID_IPV6_AH:
1154 tun_type = ICE_SW_TUN_IPV6_AH;
1156 case ICE_PROFID_IPV4_NAT_T:
1157 tun_type = ICE_SW_TUN_IPV4_NAT_T;
1159 case ICE_PROFID_IPV6_NAT_T:
1160 tun_type = ICE_SW_TUN_IPV6_NAT_T;
1162 case ICE_PROFID_IPV4_PFCP_NODE:
1164 ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1166 case ICE_PROFID_IPV6_PFCP_NODE:
1168 ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1170 case ICE_PROFID_IPV4_PFCP_SESSION:
1172 ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1174 case ICE_PROFID_IPV6_PFCP_SESSION:
1176 ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1178 case ICE_PROFID_MAC_IPV4_L2TPV3:
1179 tun_type = ICE_SW_TUN_IPV4_L2TPV3;
1181 case ICE_PROFID_MAC_IPV6_L2TPV3:
1182 tun_type = ICE_SW_TUN_IPV6_L2TPV3;
1197 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
1198 * @hw: pointer to hardware structure
1199 * @recps: struct that we need to populate
1200 * @rid: recipe ID that we are populating
1201 * @refresh_required: true if we should get recipe to profile mapping from FW
1203 * This function is used to populate all the necessary entries into our
1204 * bookkeeping so that we have a current list of all the recipes that are
1205 * programmed in the firmware.
1207 static enum ice_status
1208 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
1209 bool *refresh_required)
1211 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
1212 struct ice_aqc_recipe_data_elem *tmp;
1213 u16 num_recps = ICE_MAX_NUM_RECIPES;
1214 struct ice_prot_lkup_ext *lkup_exts;
1215 enum ice_status status;
1219 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
1221 /* we need a buffer big enough to accommodate all the recipes */
1222 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
1223 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
1225 return ICE_ERR_NO_MEMORY;
1227 tmp[0].recipe_indx = rid;
1228 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
1229 /* non-zero status meaning recipe doesn't exist */
1233 /* Get recipe to profile map so that we can get the fv from lkups that
1234 * we read for a recipe from FW. Since we want to minimize the number of
1235 * times we make this FW call, just make one call and cache the copy
1236 * until a new recipe is added. This operation is only required the
1237 * first time to get the changes from FW. Then to search existing
1238 * entries we don't need to update the cache again until another recipe
1241 if (*refresh_required) {
1242 ice_get_recp_to_prof_map(hw);
1243 *refresh_required = false;
1246 /* Start populating all the entries for recps[rid] based on lkups from
1247 * firmware. Note that we are only creating the root recipe in our
1250 lkup_exts = &recps[rid].lkup_exts;
1252 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
1253 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
1254 struct ice_recp_grp_entry *rg_entry;
1255 u8 i, prof, idx, prot = 0;
1259 rg_entry = (struct ice_recp_grp_entry *)
1260 ice_malloc(hw, sizeof(*rg_entry));
1262 status = ICE_ERR_NO_MEMORY;
1266 idx = root_bufs.recipe_indx;
1267 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
1269 /* Mark all result indices in this chain */
1270 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1271 ice_set_bit(root_bufs.content.result_indx &
1272 ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
1274 /* get the first profile that is associated with rid */
1275 prof = ice_find_first_bit(recipe_to_profile[idx],
1276 ICE_MAX_NUM_PROFILES);
1277 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
1278 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
1280 rg_entry->fv_idx[i] = lkup_indx;
1281 rg_entry->fv_mask[i] =
1282 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
1284 /* If the recipe is a chained recipe then all its
1285 * child recipe's result will have a result index.
1286 * To fill fv_words we should not use those result
1287 * index, we only need the protocol ids and offsets.
1288 * We will skip all the fv_idx which stores result
1289 * index in them. We also need to skip any fv_idx which
1290 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
1291 * valid offset value.
1293 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
1294 rg_entry->fv_idx[i]) ||
1295 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
1296 rg_entry->fv_idx[i] == 0)
1299 ice_find_prot_off(hw, ICE_BLK_SW, prof,
1300 rg_entry->fv_idx[i], &prot, &off);
1301 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
1302 lkup_exts->fv_words[fv_word_idx].off = off;
1303 lkup_exts->field_mask[fv_word_idx] =
1304 rg_entry->fv_mask[i];
1307 /* populate rg_list with the data from the child entry of this
1310 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
1312 /* Propagate some data to the recipe database */
1313 recps[idx].is_root = !!is_root;
1314 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1315 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
1316 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
1317 recps[idx].chain_idx = root_bufs.content.result_indx &
1318 ~ICE_AQ_RECIPE_RESULT_EN;
1319 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
1321 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
1327 /* Only do the following for root recipes entries */
1328 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
1329 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
1330 recps[idx].root_rid = root_bufs.content.rid &
1331 ~ICE_AQ_RECIPE_ID_IS_ROOT;
1332 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1335 /* Complete initialization of the root recipe entry */
1336 lkup_exts->n_val_words = fv_word_idx;
1337 recps[rid].big_recp = (num_recps > 1);
1338 recps[rid].n_grp_count = (u8)num_recps;
1339 recps[rid].tun_type = ice_get_tun_type_for_recipe(rid);
1340 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
1341 ice_memdup(hw, tmp, recps[rid].n_grp_count *
1342 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
1343 if (!recps[rid].root_buf)
1346 /* Copy result indexes */
1347 ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
1348 recps[rid].recp_created = true;
1356 * ice_get_recp_to_prof_map - updates recipe to profile mapping
1357 * @hw: pointer to hardware structure
1359 * This function is used to populate recipe_to_profile matrix where index to
1360 * this array is the recipe ID and the element is the mapping of which profiles
1361 * is this recipe mapped to.
1363 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
1365 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
1368 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
1371 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
1372 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
1373 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
1375 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
1376 ICE_MAX_NUM_RECIPES);
1377 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
1378 if (ice_is_bit_set(r_bitmap, j))
1379 ice_set_bit(i, recipe_to_profile[j]);
1384 * ice_init_def_sw_recp - initialize the recipe book keeping tables
1385 * @hw: pointer to the HW struct
1386 * @recp_list: pointer to sw recipe list
1388 * Allocate memory for the entire recipe table and initialize the structures/
1389 * entries corresponding to basic recipes.
1392 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
1394 struct ice_sw_recipe *recps;
1397 recps = (struct ice_sw_recipe *)
1398 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
1400 return ICE_ERR_NO_MEMORY;
1402 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
1403 recps[i].root_rid = i;
1404 INIT_LIST_HEAD(&recps[i].filt_rules);
1405 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
1406 INIT_LIST_HEAD(&recps[i].rg_list);
1407 ice_init_lock(&recps[i].filt_rule_lock);
1416 * ice_aq_get_sw_cfg - get switch configuration
1417 * @hw: pointer to the hardware structure
1418 * @buf: pointer to the result buffer
1419 * @buf_size: length of the buffer available for response
1420 * @req_desc: pointer to requested descriptor
1421 * @num_elems: pointer to number of elements
1422 * @cd: pointer to command details structure or NULL
1424 * Get switch configuration (0x0200) to be placed in buf.
1425 * This admin command returns information such as initial VSI/port number
1426 * and switch ID it belongs to.
1428 * NOTE: *req_desc is both an input/output parameter.
1429 * The caller of this function first calls this function with *request_desc set
1430 * to 0. If the response from f/w has *req_desc set to 0, all the switch
1431 * configuration information has been returned; if non-zero (meaning not all
1432 * the information was returned), the caller should call this function again
1433 * with *req_desc set to the previous value returned by f/w to get the
1434 * next block of switch configuration information.
1436 * *num_elems is output only parameter. This reflects the number of elements
1437 * in response buffer. The caller of this function to use *num_elems while
1438 * parsing the response buffer.
1440 static enum ice_status
1441 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
1442 u16 buf_size, u16 *req_desc, u16 *num_elems,
1443 struct ice_sq_cd *cd)
1445 struct ice_aqc_get_sw_cfg *cmd;
1446 struct ice_aq_desc desc;
1447 enum ice_status status;
1449 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
1450 cmd = &desc.params.get_sw_conf;
1451 cmd->element = CPU_TO_LE16(*req_desc);
1453 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1455 *req_desc = LE16_TO_CPU(cmd->element);
1456 *num_elems = LE16_TO_CPU(cmd->num_elems);
1463 * ice_alloc_sw - allocate resources specific to switch
1464 * @hw: pointer to the HW struct
1465 * @ena_stats: true to turn on VEB stats
1466 * @shared_res: true for shared resource, false for dedicated resource
1467 * @sw_id: switch ID returned
1468 * @counter_id: VEB counter ID returned
1470 * allocates switch resources (SWID and VEB counter) (0x0208)
1473 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
1476 struct ice_aqc_alloc_free_res_elem *sw_buf;
1477 struct ice_aqc_res_elem *sw_ele;
1478 enum ice_status status;
1481 buf_len = ice_struct_size(sw_buf, elem, 1);
1482 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1484 return ICE_ERR_NO_MEMORY;
1486 /* Prepare buffer for switch ID.
1487 * The number of resource entries in buffer is passed as 1 since only a
1488 * single switch/VEB instance is allocated, and hence a single sw_id
1491 sw_buf->num_elems = CPU_TO_LE16(1);
1493 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
1494 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
1495 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
1497 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1498 ice_aqc_opc_alloc_res, NULL);
1501 goto ice_alloc_sw_exit;
1503 sw_ele = &sw_buf->elem[0];
1504 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
1507 /* Prepare buffer for VEB Counter */
1508 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
1509 struct ice_aqc_alloc_free_res_elem *counter_buf;
1510 struct ice_aqc_res_elem *counter_ele;
1512 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1513 ice_malloc(hw, buf_len);
1515 status = ICE_ERR_NO_MEMORY;
1516 goto ice_alloc_sw_exit;
1519 /* The number of resource entries in buffer is passed as 1 since
1520 * only a single switch/VEB instance is allocated, and hence a
1521 * single VEB counter is requested.
1523 counter_buf->num_elems = CPU_TO_LE16(1);
1524 counter_buf->res_type =
1525 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
1526 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
1527 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1531 ice_free(hw, counter_buf);
1532 goto ice_alloc_sw_exit;
1534 counter_ele = &counter_buf->elem[0];
1535 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
1536 ice_free(hw, counter_buf);
1540 ice_free(hw, sw_buf);
1545 * ice_free_sw - free resources specific to switch
1546 * @hw: pointer to the HW struct
1547 * @sw_id: switch ID returned
1548 * @counter_id: VEB counter ID returned
1550 * free switch resources (SWID and VEB counter) (0x0209)
1552 * NOTE: This function frees multiple resources. It continues
1553 * releasing other resources even after it encounters error.
1554 * The error code returned is the last error it encountered.
1556 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
1558 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
1559 enum ice_status status, ret_status;
1562 buf_len = ice_struct_size(sw_buf, elem, 1);
1563 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1565 return ICE_ERR_NO_MEMORY;
1567 /* Prepare buffer to free for switch ID res.
1568 * The number of resource entries in buffer is passed as 1 since only a
1569 * single switch/VEB instance is freed, and hence a single sw_id
1572 sw_buf->num_elems = CPU_TO_LE16(1);
1573 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
1574 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
1576 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1577 ice_aqc_opc_free_res, NULL);
1580 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1582 /* Prepare buffer to free for VEB Counter resource */
1583 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1584 ice_malloc(hw, buf_len);
1586 ice_free(hw, sw_buf);
1587 return ICE_ERR_NO_MEMORY;
1590 /* The number of resource entries in buffer is passed as 1 since only a
1591 * single switch/VEB instance is freed, and hence a single VEB counter
1594 counter_buf->num_elems = CPU_TO_LE16(1);
1595 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
1596 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
1598 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1599 ice_aqc_opc_free_res, NULL);
1601 ice_debug(hw, ICE_DBG_SW,
1602 "VEB counter resource could not be freed\n");
1603 ret_status = status;
1606 ice_free(hw, counter_buf);
1607 ice_free(hw, sw_buf);
1613 * @hw: pointer to the HW struct
1614 * @vsi_ctx: pointer to a VSI context struct
1615 * @cd: pointer to command details structure or NULL
1617 * Add a VSI context to the hardware (0x0210)
1620 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1621 struct ice_sq_cd *cd)
1623 struct ice_aqc_add_update_free_vsi_resp *res;
1624 struct ice_aqc_add_get_update_free_vsi *cmd;
1625 struct ice_aq_desc desc;
1626 enum ice_status status;
1628 cmd = &desc.params.vsi_cmd;
1629 res = &desc.params.add_update_free_vsi_res;
1631 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
1633 if (!vsi_ctx->alloc_from_pool)
1634 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
1635 ICE_AQ_VSI_IS_VALID);
1637 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
1639 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1641 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1642 sizeof(vsi_ctx->info), cd);
1645 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1646 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
1647 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
1655 * @hw: pointer to the HW struct
1656 * @vsi_ctx: pointer to a VSI context struct
1657 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1658 * @cd: pointer to command details structure or NULL
1660 * Free VSI context info from hardware (0x0213)
1663 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1664 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1666 struct ice_aqc_add_update_free_vsi_resp *resp;
1667 struct ice_aqc_add_get_update_free_vsi *cmd;
1668 struct ice_aq_desc desc;
1669 enum ice_status status;
1671 cmd = &desc.params.vsi_cmd;
1672 resp = &desc.params.add_update_free_vsi_res;
1674 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1676 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1678 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
1680 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1682 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1683 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1691 * @hw: pointer to the HW struct
1692 * @vsi_ctx: pointer to a VSI context struct
1693 * @cd: pointer to command details structure or NULL
1695 * Update VSI context in the hardware (0x0211)
1698 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1699 struct ice_sq_cd *cd)
1701 struct ice_aqc_add_update_free_vsi_resp *resp;
1702 struct ice_aqc_add_get_update_free_vsi *cmd;
1703 struct ice_aq_desc desc;
1704 enum ice_status status;
1706 cmd = &desc.params.vsi_cmd;
1707 resp = &desc.params.add_update_free_vsi_res;
1709 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1711 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1713 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1715 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1716 sizeof(vsi_ctx->info), cd);
1719 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1720 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1727 * ice_is_vsi_valid - check whether the VSI is valid or not
1728 * @hw: pointer to the HW struct
1729 * @vsi_handle: VSI handle
1731 * check whether the VSI is valid or not
1733 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1735 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1739 * ice_get_hw_vsi_num - return the HW VSI number
1740 * @hw: pointer to the HW struct
1741 * @vsi_handle: VSI handle
1743 * return the HW VSI number
1744 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1746 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1748 return hw->vsi_ctx[vsi_handle]->vsi_num;
1752 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1753 * @hw: pointer to the HW struct
1754 * @vsi_handle: VSI handle
1756 * return the VSI context entry for a given VSI handle
1758 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1760 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1764 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1765 * @hw: pointer to the HW struct
1766 * @vsi_handle: VSI handle
1767 * @vsi: VSI context pointer
1769 * save the VSI context entry for a given VSI handle
1772 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1774 hw->vsi_ctx[vsi_handle] = vsi;
1778 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1779 * @hw: pointer to the HW struct
1780 * @vsi_handle: VSI handle
1782 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1784 struct ice_vsi_ctx *vsi;
1787 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1790 ice_for_each_traffic_class(i) {
1791 if (vsi->lan_q_ctx[i]) {
1792 ice_free(hw, vsi->lan_q_ctx[i]);
1793 vsi->lan_q_ctx[i] = NULL;
1799 * ice_clear_vsi_ctx - clear the VSI context entry
1800 * @hw: pointer to the HW struct
1801 * @vsi_handle: VSI handle
1803 * clear the VSI context entry
1805 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1807 struct ice_vsi_ctx *vsi;
1809 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1811 ice_clear_vsi_q_ctx(hw, vsi_handle);
1813 hw->vsi_ctx[vsi_handle] = NULL;
1818 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1819 * @hw: pointer to the HW struct
1821 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1825 for (i = 0; i < ICE_MAX_VSI; i++)
1826 ice_clear_vsi_ctx(hw, i);
1830 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1831 * @hw: pointer to the HW struct
1832 * @vsi_handle: unique VSI handle provided by drivers
1833 * @vsi_ctx: pointer to a VSI context struct
1834 * @cd: pointer to command details structure or NULL
1836 * Add a VSI context to the hardware also add it into the VSI handle list.
1837 * If this function gets called after reset for existing VSIs then update
1838 * with the new HW VSI number in the corresponding VSI handle list entry.
1841 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1842 struct ice_sq_cd *cd)
1844 struct ice_vsi_ctx *tmp_vsi_ctx;
1845 enum ice_status status;
1847 if (vsi_handle >= ICE_MAX_VSI)
1848 return ICE_ERR_PARAM;
1849 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1852 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1854 /* Create a new VSI context */
1855 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1856 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
1858 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1859 return ICE_ERR_NO_MEMORY;
1861 *tmp_vsi_ctx = *vsi_ctx;
1863 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1865 /* update with new HW VSI num */
1866 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1873 * ice_free_vsi- free VSI context from hardware and VSI handle list
1874 * @hw: pointer to the HW struct
1875 * @vsi_handle: unique VSI handle
1876 * @vsi_ctx: pointer to a VSI context struct
1877 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1878 * @cd: pointer to command details structure or NULL
1880 * Free VSI context info from hardware as well as from VSI handle list
1883 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1884 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1886 enum ice_status status;
1888 if (!ice_is_vsi_valid(hw, vsi_handle))
1889 return ICE_ERR_PARAM;
1890 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1891 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1893 ice_clear_vsi_ctx(hw, vsi_handle);
1899 * @hw: pointer to the HW struct
1900 * @vsi_handle: unique VSI handle
1901 * @vsi_ctx: pointer to a VSI context struct
1902 * @cd: pointer to command details structure or NULL
1904 * Update VSI context in the hardware
1907 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1908 struct ice_sq_cd *cd)
1910 if (!ice_is_vsi_valid(hw, vsi_handle))
1911 return ICE_ERR_PARAM;
1912 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1913 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1917 * ice_aq_get_vsi_params
1918 * @hw: pointer to the HW struct
1919 * @vsi_ctx: pointer to a VSI context struct
1920 * @cd: pointer to command details structure or NULL
1922 * Get VSI context info from hardware (0x0212)
1925 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1926 struct ice_sq_cd *cd)
1928 struct ice_aqc_add_get_update_free_vsi *cmd;
1929 struct ice_aqc_get_vsi_resp *resp;
1930 struct ice_aq_desc desc;
1931 enum ice_status status;
1933 cmd = &desc.params.vsi_cmd;
1934 resp = &desc.params.get_vsi_resp;
1936 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1938 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1940 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1941 sizeof(vsi_ctx->info), cd);
1943 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1945 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1946 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1953 * ice_aq_add_update_mir_rule - add/update a mirror rule
1954 * @hw: pointer to the HW struct
1955 * @rule_type: Rule Type
1956 * @dest_vsi: VSI number to which packets will be mirrored
1957 * @count: length of the list
1958 * @mr_buf: buffer for list of mirrored VSI numbers
1959 * @cd: pointer to command details structure or NULL
1962 * Add/Update Mirror Rule (0x260).
1965 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1966 u16 count, struct ice_mir_rule_buf *mr_buf,
1967 struct ice_sq_cd *cd, u16 *rule_id)
1969 struct ice_aqc_add_update_mir_rule *cmd;
1970 struct ice_aq_desc desc;
1971 enum ice_status status;
1972 __le16 *mr_list = NULL;
1975 switch (rule_type) {
1976 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1977 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1978 /* Make sure count and mr_buf are set for these rule_types */
1979 if (!(count && mr_buf))
1980 return ICE_ERR_PARAM;
1982 buf_size = count * sizeof(__le16);
1983 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1985 return ICE_ERR_NO_MEMORY;
1987 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1988 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1989 /* Make sure count and mr_buf are not set for these
1992 if (count || mr_buf)
1993 return ICE_ERR_PARAM;
1996 ice_debug(hw, ICE_DBG_SW,
1997 "Error due to unsupported rule_type %u\n", rule_type);
1998 return ICE_ERR_OUT_OF_RANGE;
2001 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
2003 /* Pre-process 'mr_buf' items for add/update of virtual port
2004 * ingress/egress mirroring (but not physical port ingress/egress
2010 for (i = 0; i < count; i++) {
2013 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
2015 /* Validate specified VSI number, make sure it is less
2016 * than ICE_MAX_VSI, if not return with error.
2018 if (id >= ICE_MAX_VSI) {
2019 ice_debug(hw, ICE_DBG_SW,
2020 "Error VSI index (%u) out-of-range\n",
2022 ice_free(hw, mr_list);
2023 return ICE_ERR_OUT_OF_RANGE;
2026 /* add VSI to mirror rule */
2029 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
2030 else /* remove VSI from mirror rule */
2031 mr_list[i] = CPU_TO_LE16(id);
2035 cmd = &desc.params.add_update_rule;
2036 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
2037 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
2038 ICE_AQC_RULE_ID_VALID_M);
2039 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
2040 cmd->num_entries = CPU_TO_LE16(count);
2041 cmd->dest = CPU_TO_LE16(dest_vsi);
2043 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
2045 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
2047 ice_free(hw, mr_list);
2053 * ice_aq_delete_mir_rule - delete a mirror rule
2054 * @hw: pointer to the HW struct
2055 * @rule_id: Mirror rule ID (to be deleted)
2056 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
2057 * otherwise it is returned to the shared pool
2058 * @cd: pointer to command details structure or NULL
2060 * Delete Mirror Rule (0x261).
2063 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
2064 struct ice_sq_cd *cd)
2066 struct ice_aqc_delete_mir_rule *cmd;
2067 struct ice_aq_desc desc;
2069 /* rule_id should be in the range 0...63 */
2070 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
2071 return ICE_ERR_OUT_OF_RANGE;
2073 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
2075 cmd = &desc.params.del_rule;
2076 rule_id |= ICE_AQC_RULE_ID_VALID_M;
2077 cmd->rule_id = CPU_TO_LE16(rule_id);
2080 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
2082 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2086 * ice_aq_alloc_free_vsi_list
2087 * @hw: pointer to the HW struct
2088 * @vsi_list_id: VSI list ID returned or used for lookup
2089 * @lkup_type: switch rule filter lookup type
2090 * @opc: switch rules population command type - pass in the command opcode
2092 * allocates or free a VSI list resource
2094 static enum ice_status
2095 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
2096 enum ice_sw_lkup_type lkup_type,
2097 enum ice_adminq_opc opc)
2099 struct ice_aqc_alloc_free_res_elem *sw_buf;
2100 struct ice_aqc_res_elem *vsi_ele;
2101 enum ice_status status;
2104 buf_len = ice_struct_size(sw_buf, elem, 1);
2105 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2107 return ICE_ERR_NO_MEMORY;
2108 sw_buf->num_elems = CPU_TO_LE16(1);
2110 if (lkup_type == ICE_SW_LKUP_MAC ||
2111 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2112 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2113 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2114 lkup_type == ICE_SW_LKUP_PROMISC ||
2115 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2116 lkup_type == ICE_SW_LKUP_LAST) {
2117 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
2118 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
2120 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
2122 status = ICE_ERR_PARAM;
2123 goto ice_aq_alloc_free_vsi_list_exit;
2126 if (opc == ice_aqc_opc_free_res)
2127 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
2129 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
2131 goto ice_aq_alloc_free_vsi_list_exit;
2133 if (opc == ice_aqc_opc_alloc_res) {
2134 vsi_ele = &sw_buf->elem[0];
2135 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
2138 ice_aq_alloc_free_vsi_list_exit:
2139 ice_free(hw, sw_buf);
2144 * ice_aq_set_storm_ctrl - Sets storm control configuration
2145 * @hw: pointer to the HW struct
2146 * @bcast_thresh: represents the upper threshold for broadcast storm control
2147 * @mcast_thresh: represents the upper threshold for multicast storm control
2148 * @ctl_bitmask: storm control control knobs
2150 * Sets the storm control configuration (0x0280)
2153 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
2156 struct ice_aqc_storm_cfg *cmd;
2157 struct ice_aq_desc desc;
2159 cmd = &desc.params.storm_conf;
2161 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
2163 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
2164 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
2165 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
2167 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2171 * ice_aq_get_storm_ctrl - gets storm control configuration
2172 * @hw: pointer to the HW struct
2173 * @bcast_thresh: represents the upper threshold for broadcast storm control
2174 * @mcast_thresh: represents the upper threshold for multicast storm control
2175 * @ctl_bitmask: storm control control knobs
2177 * Gets the storm control configuration (0x0281)
2180 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
2183 enum ice_status status;
2184 struct ice_aq_desc desc;
2186 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
2188 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2190 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
2193 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
2196 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
2199 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
2206 * ice_aq_sw_rules - add/update/remove switch rules
2207 * @hw: pointer to the HW struct
2208 * @rule_list: pointer to switch rule population list
2209 * @rule_list_sz: total size of the rule list in bytes
2210 * @num_rules: number of switch rules in the rule_list
2211 * @opc: switch rules population command type - pass in the command opcode
2212 * @cd: pointer to command details structure or NULL
2214 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
2216 static enum ice_status
2217 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
2218 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2220 struct ice_aq_desc desc;
2221 enum ice_status status;
2223 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2225 if (opc != ice_aqc_opc_add_sw_rules &&
2226 opc != ice_aqc_opc_update_sw_rules &&
2227 opc != ice_aqc_opc_remove_sw_rules)
2228 return ICE_ERR_PARAM;
2230 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2232 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2233 desc.params.sw_rules.num_rules_fltr_entry_index =
2234 CPU_TO_LE16(num_rules);
2235 status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
2236 if (opc != ice_aqc_opc_add_sw_rules &&
2237 hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
2238 status = ICE_ERR_DOES_NOT_EXIST;
2244 * ice_aq_add_recipe - add switch recipe
2245 * @hw: pointer to the HW struct
2246 * @s_recipe_list: pointer to switch rule population list
2247 * @num_recipes: number of switch recipes in the list
2248 * @cd: pointer to command details structure or NULL
2253 ice_aq_add_recipe(struct ice_hw *hw,
2254 struct ice_aqc_recipe_data_elem *s_recipe_list,
2255 u16 num_recipes, struct ice_sq_cd *cd)
2257 struct ice_aqc_add_get_recipe *cmd;
2258 struct ice_aq_desc desc;
2261 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2262 cmd = &desc.params.add_get_recipe;
2263 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
2265 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
2266 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2268 buf_size = num_recipes * sizeof(*s_recipe_list);
2270 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
2274 * ice_aq_get_recipe - get switch recipe
2275 * @hw: pointer to the HW struct
2276 * @s_recipe_list: pointer to switch rule population list
2277 * @num_recipes: pointer to the number of recipes (input and output)
2278 * @recipe_root: root recipe number of recipe(s) to retrieve
2279 * @cd: pointer to command details structure or NULL
2283 * On input, *num_recipes should equal the number of entries in s_recipe_list.
2284 * On output, *num_recipes will equal the number of entries returned in
2287 * The caller must supply enough space in s_recipe_list to hold all possible
2288 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
2291 ice_aq_get_recipe(struct ice_hw *hw,
2292 struct ice_aqc_recipe_data_elem *s_recipe_list,
2293 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
2295 struct ice_aqc_add_get_recipe *cmd;
2296 struct ice_aq_desc desc;
2297 enum ice_status status;
2300 if (*num_recipes != ICE_MAX_NUM_RECIPES)
2301 return ICE_ERR_PARAM;
2303 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2304 cmd = &desc.params.add_get_recipe;
2305 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
2307 cmd->return_index = CPU_TO_LE16(recipe_root);
2308 cmd->num_sub_recipes = 0;
2310 buf_size = *num_recipes * sizeof(*s_recipe_list);
2312 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
2313 /* cppcheck-suppress constArgument */
2314 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
2320 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
2321 * @hw: pointer to the HW struct
2322 * @profile_id: package profile ID to associate the recipe with
2323 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2324 * @cd: pointer to command details structure or NULL
2325 * Recipe to profile association (0x0291)
2328 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2329 struct ice_sq_cd *cd)
2331 struct ice_aqc_recipe_to_profile *cmd;
2332 struct ice_aq_desc desc;
2334 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2335 cmd = &desc.params.recipe_to_profile;
2336 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
2337 cmd->profile_id = CPU_TO_LE16(profile_id);
2338 /* Set the recipe ID bit in the bitmask to let the device know which
2339 * profile we are associating the recipe to
2341 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
2342 ICE_NONDMA_TO_NONDMA);
2344 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2348 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
2349 * @hw: pointer to the HW struct
2350 * @profile_id: package profile ID to associate the recipe with
2351 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2352 * @cd: pointer to command details structure or NULL
2353 * Associate profile ID with given recipe (0x0293)
2356 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2357 struct ice_sq_cd *cd)
2359 struct ice_aqc_recipe_to_profile *cmd;
2360 struct ice_aq_desc desc;
2361 enum ice_status status;
2363 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2364 cmd = &desc.params.recipe_to_profile;
2365 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
2366 cmd->profile_id = CPU_TO_LE16(profile_id);
2368 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2370 ice_memcpy(r_bitmap, cmd->recipe_assoc,
2371 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
2377 * ice_alloc_recipe - add recipe resource
2378 * @hw: pointer to the hardware structure
2379 * @rid: recipe ID returned as response to AQ call
2381 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
2383 struct ice_aqc_alloc_free_res_elem *sw_buf;
2384 enum ice_status status;
2387 buf_len = ice_struct_size(sw_buf, elem, 1);
2388 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2390 return ICE_ERR_NO_MEMORY;
2392 sw_buf->num_elems = CPU_TO_LE16(1);
2393 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
2394 ICE_AQC_RES_TYPE_S) |
2395 ICE_AQC_RES_TYPE_FLAG_SHARED);
2396 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2397 ice_aqc_opc_alloc_res, NULL);
2399 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2400 ice_free(hw, sw_buf);
2405 /* ice_init_port_info - Initialize port_info with switch configuration data
2406 * @pi: pointer to port_info
2407 * @vsi_port_num: VSI number or port number
2408 * @type: Type of switch element (port or VSI)
2409 * @swid: switch ID of the switch the element is attached to
2410 * @pf_vf_num: PF or VF number
2411 * @is_vf: true if the element is a VF, false otherwise
2414 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2415 u16 swid, u16 pf_vf_num, bool is_vf)
2418 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2419 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2421 pi->pf_vf_num = pf_vf_num;
2423 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
2424 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
2427 ice_debug(pi->hw, ICE_DBG_SW,
2428 "incorrect VSI/port type received\n");
2433 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
2434 * @hw: pointer to the hardware structure
2436 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
2438 struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
2439 enum ice_status status;
2446 num_total_ports = 1;
2448 rbuf = (struct ice_aqc_get_sw_cfg_resp_elem *)
2449 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
2452 return ICE_ERR_NO_MEMORY;
2454 /* Multiple calls to ice_aq_get_sw_cfg may be required
2455 * to get all the switch configuration information. The need
2456 * for additional calls is indicated by ice_aq_get_sw_cfg
2457 * writing a non-zero value in req_desc
2460 struct ice_aqc_get_sw_cfg_resp_elem *ele;
2462 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
2463 &req_desc, &num_elems, NULL);
2468 for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
2469 u16 pf_vf_num, swid, vsi_port_num;
2473 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
2474 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
2476 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
2477 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
2479 swid = LE16_TO_CPU(ele->swid);
2481 if (LE16_TO_CPU(ele->pf_vf_num) &
2482 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
2485 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
2486 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
2489 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2490 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
2491 if (j == num_total_ports) {
2492 ice_debug(hw, ICE_DBG_SW,
2493 "more ports than expected\n");
2494 status = ICE_ERR_CFG;
2497 ice_init_port_info(hw->port_info,
2498 vsi_port_num, res_type, swid,
2506 } while (req_desc && !status);
2509 ice_free(hw, (void *)rbuf);
2514 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2515 * @hw: pointer to the hardware structure
2516 * @fi: filter info structure to fill/update
2518 * This helper function populates the lb_en and lan_en elements of the provided
2519 * ice_fltr_info struct using the switch's type and characteristics of the
2520 * switch rule being configured.
2522 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
2524 if ((fi->flag & ICE_FLTR_RX) &&
2525 (fi->fltr_act == ICE_FWD_TO_VSI ||
2526 fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
2527 fi->lkup_type == ICE_SW_LKUP_LAST)
2531 if ((fi->flag & ICE_FLTR_TX) &&
2532 (fi->fltr_act == ICE_FWD_TO_VSI ||
2533 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2534 fi->fltr_act == ICE_FWD_TO_Q ||
2535 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2536 /* Setting LB for prune actions will result in replicated
2537 * packets to the internal switch that will be dropped.
2539 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2542 /* Set lan_en to TRUE if
2543 * 1. The switch is a VEB AND
2545 * 2.1 The lookup is a directional lookup like ethertype,
2546 * promiscuous, ethertype-MAC, promiscuous-VLAN
2547 * and default-port OR
2548 * 2.2 The lookup is VLAN, OR
2549 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2550 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
2554 * The switch is a VEPA.
2556 * In all other cases, the LAN enable has to be set to false.
2559 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2560 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
2561 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2562 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2563 fi->lkup_type == ICE_SW_LKUP_DFLT ||
2564 fi->lkup_type == ICE_SW_LKUP_VLAN ||
2565 (fi->lkup_type == ICE_SW_LKUP_MAC &&
2566 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
2567 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
2568 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
2577 * ice_fill_sw_rule - Helper function to fill switch rule structure
2578 * @hw: pointer to the hardware structure
2579 * @f_info: entry containing packet forwarding information
2580 * @s_rule: switch rule structure to be filled in based on mac_entry
2581 * @opc: switch rules population command type - pass in the command opcode
2584 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
2585 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
2587 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
2595 if (opc == ice_aqc_opc_remove_sw_rules) {
2596 s_rule->pdata.lkup_tx_rx.act = 0;
2597 s_rule->pdata.lkup_tx_rx.index =
2598 CPU_TO_LE16(f_info->fltr_rule_id);
2599 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
2603 eth_hdr_sz = sizeof(dummy_eth_header);
2604 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
2606 /* initialize the ether header with a dummy header */
2607 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
2608 ice_fill_sw_info(hw, f_info);
2610 switch (f_info->fltr_act) {
2611 case ICE_FWD_TO_VSI:
2612 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
2613 ICE_SINGLE_ACT_VSI_ID_M;
2614 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2615 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2616 ICE_SINGLE_ACT_VALID_BIT;
2618 case ICE_FWD_TO_VSI_LIST:
2619 act |= ICE_SINGLE_ACT_VSI_LIST;
2620 act |= (f_info->fwd_id.vsi_list_id <<
2621 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
2622 ICE_SINGLE_ACT_VSI_LIST_ID_M;
2623 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2624 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2625 ICE_SINGLE_ACT_VALID_BIT;
2628 act |= ICE_SINGLE_ACT_TO_Q;
2629 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2630 ICE_SINGLE_ACT_Q_INDEX_M;
2632 case ICE_DROP_PACKET:
2633 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
2634 ICE_SINGLE_ACT_VALID_BIT;
2636 case ICE_FWD_TO_QGRP:
2637 q_rgn = f_info->qgrp_size > 0 ?
2638 (u8)ice_ilog2(f_info->qgrp_size) : 0;
2639 act |= ICE_SINGLE_ACT_TO_Q;
2640 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2641 ICE_SINGLE_ACT_Q_INDEX_M;
2642 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
2643 ICE_SINGLE_ACT_Q_REGION_M;
2650 act |= ICE_SINGLE_ACT_LB_ENABLE;
2652 act |= ICE_SINGLE_ACT_LAN_ENABLE;
2654 switch (f_info->lkup_type) {
2655 case ICE_SW_LKUP_MAC:
2656 daddr = f_info->l_data.mac.mac_addr;
2658 case ICE_SW_LKUP_VLAN:
2659 vlan_id = f_info->l_data.vlan.vlan_id;
2660 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
2661 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
2662 act |= ICE_SINGLE_ACT_PRUNE;
2663 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
2666 case ICE_SW_LKUP_ETHERTYPE_MAC:
2667 daddr = f_info->l_data.ethertype_mac.mac_addr;
2669 case ICE_SW_LKUP_ETHERTYPE:
2670 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2671 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
2673 case ICE_SW_LKUP_MAC_VLAN:
2674 daddr = f_info->l_data.mac_vlan.mac_addr;
2675 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2677 case ICE_SW_LKUP_PROMISC_VLAN:
2678 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2680 case ICE_SW_LKUP_PROMISC:
2681 daddr = f_info->l_data.mac_vlan.mac_addr;
2687 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
2688 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
2689 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
2691 /* Recipe set depending on lookup type */
2692 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
2693 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
2694 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2697 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
2698 ICE_NONDMA_TO_NONDMA);
2700 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
2701 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
2702 *off = CPU_TO_BE16(vlan_id);
2705 /* Create the switch rule with the final dummy Ethernet header */
2706 if (opc != ice_aqc_opc_update_sw_rules)
2707 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
2711 * ice_add_marker_act
2712 * @hw: pointer to the hardware structure
2713 * @m_ent: the management entry for which sw marker needs to be added
2714 * @sw_marker: sw marker to tag the Rx descriptor with
2715 * @l_id: large action resource ID
2717 * Create a large action to hold software marker and update the switch rule
2718 * entry pointed by m_ent with newly created large action
2720 static enum ice_status
2721 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2722 u16 sw_marker, u16 l_id)
2724 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
2725 /* For software marker we need 3 large actions
2726 * 1. FWD action: FWD TO VSI or VSI LIST
2727 * 2. GENERIC VALUE action to hold the profile ID
2728 * 3. GENERIC VALUE action to hold the software marker ID
2730 const u16 num_lg_acts = 3;
2731 enum ice_status status;
2737 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2738 return ICE_ERR_PARAM;
2740 /* Create two back-to-back switch rules and submit them to the HW using
2741 * one memory buffer:
2745 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2746 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2747 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2749 return ICE_ERR_NO_MEMORY;
2751 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2753 /* Fill in the first switch rule i.e. large action */
2754 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2755 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2756 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2758 /* First action VSI forwarding or VSI list forwarding depending on how
2761 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2762 m_ent->fltr_info.fwd_id.hw_vsi_id;
2764 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2765 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2766 ICE_LG_ACT_VSI_LIST_ID_M;
2767 if (m_ent->vsi_count > 1)
2768 act |= ICE_LG_ACT_VSI_LIST;
2769 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2771 /* Second action descriptor type */
2772 act = ICE_LG_ACT_GENERIC;
2774 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2775 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2777 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2778 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2780 /* Third action Marker value */
2781 act |= ICE_LG_ACT_GENERIC;
2782 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2783 ICE_LG_ACT_GENERIC_VALUE_M;
2785 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
2787 /* call the fill switch rule to fill the lookup Tx Rx structure */
2788 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2789 ice_aqc_opc_update_sw_rules);
2791 /* Update the action to point to the large action ID */
2792 rx_tx->pdata.lkup_tx_rx.act =
2793 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
2794 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2795 ICE_SINGLE_ACT_PTR_VAL_M));
2797 /* Use the filter rule ID of the previously created rule with single
2798 * act. Once the update happens, hardware will treat this as large
2801 rx_tx->pdata.lkup_tx_rx.index =
2802 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
2804 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2805 ice_aqc_opc_update_sw_rules, NULL);
2807 m_ent->lg_act_idx = l_id;
2808 m_ent->sw_marker_id = sw_marker;
2811 ice_free(hw, lg_act);
2816 * ice_add_counter_act - add/update filter rule with counter action
2817 * @hw: pointer to the hardware structure
2818 * @m_ent: the management entry for which counter needs to be added
2819 * @counter_id: VLAN counter ID returned as part of allocate resource
2820 * @l_id: large action resource ID
2822 static enum ice_status
2823 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2824 u16 counter_id, u16 l_id)
2826 struct ice_aqc_sw_rules_elem *lg_act;
2827 struct ice_aqc_sw_rules_elem *rx_tx;
2828 enum ice_status status;
2829 /* 2 actions will be added while adding a large action counter */
2830 const int num_acts = 2;
2837 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2838 return ICE_ERR_PARAM;
2840 /* Create two back-to-back switch rules and submit them to the HW using
2841 * one memory buffer:
2845 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2846 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2847 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
2850 return ICE_ERR_NO_MEMORY;
2852 rx_tx = (struct ice_aqc_sw_rules_elem *)
2853 ((u8 *)lg_act + lg_act_size);
2855 /* Fill in the first switch rule i.e. large action */
2856 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2857 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2858 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2860 /* First action VSI forwarding or VSI list forwarding depending on how
2863 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2864 m_ent->fltr_info.fwd_id.hw_vsi_id;
2866 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2867 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2868 ICE_LG_ACT_VSI_LIST_ID_M;
2869 if (m_ent->vsi_count > 1)
2870 act |= ICE_LG_ACT_VSI_LIST;
2871 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2873 /* Second action counter ID */
2874 act = ICE_LG_ACT_STAT_COUNT;
2875 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2876 ICE_LG_ACT_STAT_COUNT_M;
2877 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2879 /* call the fill switch rule to fill the lookup Tx Rx structure */
2880 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2881 ice_aqc_opc_update_sw_rules);
2883 act = ICE_SINGLE_ACT_PTR;
2884 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2885 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2887 /* Use the filter rule ID of the previously created rule with single
2888 * act. Once the update happens, hardware will treat this as large
2891 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2892 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
2894 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2895 ice_aqc_opc_update_sw_rules, NULL);
2897 m_ent->lg_act_idx = l_id;
2898 m_ent->counter_index = counter_id;
2901 ice_free(hw, lg_act);
2906 * ice_create_vsi_list_map
2907 * @hw: pointer to the hardware structure
2908 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2909 * @num_vsi: number of VSI handles in the array
2910 * @vsi_list_id: VSI list ID generated as part of allocate resource
2912 * Helper function to create a new entry of VSI list ID to VSI mapping
2913 * using the given VSI list ID
2915 static struct ice_vsi_list_map_info *
2916 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2919 struct ice_switch_info *sw = hw->switch_info;
2920 struct ice_vsi_list_map_info *v_map;
2923 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2928 v_map->vsi_list_id = vsi_list_id;
2930 for (i = 0; i < num_vsi; i++)
2931 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
2933 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2938 * ice_update_vsi_list_rule
2939 * @hw: pointer to the hardware structure
2940 * @vsi_handle_arr: array of VSI handles to form a VSI list
2941 * @num_vsi: number of VSI handles in the array
2942 * @vsi_list_id: VSI list ID generated as part of allocate resource
2943 * @remove: Boolean value to indicate if this is a remove action
2944 * @opc: switch rules population command type - pass in the command opcode
2945 * @lkup_type: lookup type of the filter
2947 * Call AQ command to add a new switch rule or update existing switch rule
2948 * using the given VSI list ID
2950 static enum ice_status
2951 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2952 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2953 enum ice_sw_lkup_type lkup_type)
2955 struct ice_aqc_sw_rules_elem *s_rule;
2956 enum ice_status status;
2962 return ICE_ERR_PARAM;
2964 if (lkup_type == ICE_SW_LKUP_MAC ||
2965 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2966 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2967 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2968 lkup_type == ICE_SW_LKUP_PROMISC ||
2969 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2970 lkup_type == ICE_SW_LKUP_LAST)
2971 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2972 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2973 else if (lkup_type == ICE_SW_LKUP_VLAN)
2974 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2975 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2977 return ICE_ERR_PARAM;
2979 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2980 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2982 return ICE_ERR_NO_MEMORY;
2983 for (i = 0; i < num_vsi; i++) {
2984 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2985 status = ICE_ERR_PARAM;
2988 /* AQ call requires hw_vsi_id(s) */
2989 s_rule->pdata.vsi_list.vsi[i] =
2990 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2993 s_rule->type = CPU_TO_LE16(rule_type);
2994 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2995 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2997 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
3000 ice_free(hw, s_rule);
3005 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
3006 * @hw: pointer to the HW struct
3007 * @vsi_handle_arr: array of VSI handles to form a VSI list
3008 * @num_vsi: number of VSI handles in the array
3009 * @vsi_list_id: stores the ID of the VSI list to be created
3010 * @lkup_type: switch rule filter's lookup type
3012 static enum ice_status
3013 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3014 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
3016 enum ice_status status;
3018 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
3019 ice_aqc_opc_alloc_res);
3023 /* Update the newly created VSI list to include the specified VSIs */
3024 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
3025 *vsi_list_id, false,
3026 ice_aqc_opc_add_sw_rules, lkup_type);
3030 * ice_create_pkt_fwd_rule
3031 * @hw: pointer to the hardware structure
3032 * @recp_list: corresponding filter management list
3033 * @f_entry: entry containing packet forwarding information
3035 * Create switch rule with given filter information and add an entry
3036 * to the corresponding filter management list to track this switch rule
3039 static enum ice_status
3040 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3041 struct ice_fltr_list_entry *f_entry)
3043 struct ice_fltr_mgmt_list_entry *fm_entry;
3044 struct ice_aqc_sw_rules_elem *s_rule;
3045 enum ice_status status;
3047 s_rule = (struct ice_aqc_sw_rules_elem *)
3048 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
3050 return ICE_ERR_NO_MEMORY;
3051 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3052 ice_malloc(hw, sizeof(*fm_entry));
3054 status = ICE_ERR_NO_MEMORY;
3055 goto ice_create_pkt_fwd_rule_exit;
3058 fm_entry->fltr_info = f_entry->fltr_info;
3060 /* Initialize all the fields for the management entry */
3061 fm_entry->vsi_count = 1;
3062 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
3063 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
3064 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
3066 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
3067 ice_aqc_opc_add_sw_rules);
3069 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3070 ice_aqc_opc_add_sw_rules, NULL);
3072 ice_free(hw, fm_entry);
3073 goto ice_create_pkt_fwd_rule_exit;
3076 f_entry->fltr_info.fltr_rule_id =
3077 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3078 fm_entry->fltr_info.fltr_rule_id =
3079 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3081 /* The book keeping entries will get removed when base driver
3082 * calls remove filter AQ command
3084 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
3086 ice_create_pkt_fwd_rule_exit:
3087 ice_free(hw, s_rule);
3092 * ice_update_pkt_fwd_rule
3093 * @hw: pointer to the hardware structure
3094 * @f_info: filter information for switch rule
3096 * Call AQ command to update a previously created switch rule with a
3099 static enum ice_status
3100 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
3102 struct ice_aqc_sw_rules_elem *s_rule;
3103 enum ice_status status;
3105 s_rule = (struct ice_aqc_sw_rules_elem *)
3106 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
3108 return ICE_ERR_NO_MEMORY;
3110 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
3112 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
3114 /* Update switch rule with new rule set to forward VSI list */
3115 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3116 ice_aqc_opc_update_sw_rules, NULL);
3118 ice_free(hw, s_rule);
3123 * ice_update_sw_rule_bridge_mode
3124 * @hw: pointer to the HW struct
3126 * Updates unicast switch filter rules based on VEB/VEPA mode
3128 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
3130 struct ice_switch_info *sw = hw->switch_info;
3131 struct ice_fltr_mgmt_list_entry *fm_entry;
3132 enum ice_status status = ICE_SUCCESS;
3133 struct LIST_HEAD_TYPE *rule_head;
3134 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3136 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3137 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3139 ice_acquire_lock(rule_lock);
3140 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
3142 struct ice_fltr_info *fi = &fm_entry->fltr_info;
3143 u8 *addr = fi->l_data.mac.mac_addr;
3145 /* Update unicast Tx rules to reflect the selected
3148 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
3149 (fi->fltr_act == ICE_FWD_TO_VSI ||
3150 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
3151 fi->fltr_act == ICE_FWD_TO_Q ||
3152 fi->fltr_act == ICE_FWD_TO_QGRP)) {
3153 status = ice_update_pkt_fwd_rule(hw, fi);
3159 ice_release_lock(rule_lock);
3165 * ice_add_update_vsi_list
3166 * @hw: pointer to the hardware structure
3167 * @m_entry: pointer to current filter management list entry
3168 * @cur_fltr: filter information from the book keeping entry
3169 * @new_fltr: filter information with the new VSI to be added
3171 * Call AQ command to add or update previously created VSI list with new VSI.
3173 * Helper function to do book keeping associated with adding filter information
3174 * The algorithm to do the book keeping is described below :
3175 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
3176 * if only one VSI has been added till now
3177 * Allocate a new VSI list and add two VSIs
3178 * to this list using switch rule command
3179 * Update the previously created switch rule with the
3180 * newly created VSI list ID
3181 * if a VSI list was previously created
3182 * Add the new VSI to the previously created VSI list set
3183 * using the update switch rule command
3185 static enum ice_status
3186 ice_add_update_vsi_list(struct ice_hw *hw,
3187 struct ice_fltr_mgmt_list_entry *m_entry,
3188 struct ice_fltr_info *cur_fltr,
3189 struct ice_fltr_info *new_fltr)
3191 enum ice_status status = ICE_SUCCESS;
3192 u16 vsi_list_id = 0;
3194 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
3195 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
3196 return ICE_ERR_NOT_IMPL;
3198 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
3199 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
3200 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
3201 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
3202 return ICE_ERR_NOT_IMPL;
3204 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
3205 /* Only one entry existed in the mapping and it was not already
3206 * a part of a VSI list. So, create a VSI list with the old and
3209 struct ice_fltr_info tmp_fltr;
3210 u16 vsi_handle_arr[2];
3212 /* A rule already exists with the new VSI being added */
3213 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
3214 return ICE_ERR_ALREADY_EXISTS;
3216 vsi_handle_arr[0] = cur_fltr->vsi_handle;
3217 vsi_handle_arr[1] = new_fltr->vsi_handle;
3218 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3220 new_fltr->lkup_type);
3224 tmp_fltr = *new_fltr;
3225 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
3226 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3227 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3228 /* Update the previous switch rule of "MAC forward to VSI" to
3229 * "MAC fwd to VSI list"
3231 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3235 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
3236 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3237 m_entry->vsi_list_info =
3238 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3241 /* If this entry was large action then the large action needs
3242 * to be updated to point to FWD to VSI list
3244 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
3246 ice_add_marker_act(hw, m_entry,
3247 m_entry->sw_marker_id,
3248 m_entry->lg_act_idx);
3250 u16 vsi_handle = new_fltr->vsi_handle;
3251 enum ice_adminq_opc opcode;
3253 if (!m_entry->vsi_list_info)
3256 /* A rule already exists with the new VSI being added */
3257 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
3260 /* Update the previously created VSI list set with
3261 * the new VSI ID passed in
3263 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
3264 opcode = ice_aqc_opc_update_sw_rules;
3266 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
3267 vsi_list_id, false, opcode,
3268 new_fltr->lkup_type);
3269 /* update VSI list mapping info with new VSI ID */
3271 ice_set_bit(vsi_handle,
3272 m_entry->vsi_list_info->vsi_map);
3275 m_entry->vsi_count++;
3280 * ice_find_rule_entry - Search a rule entry
3281 * @list_head: head of rule list
3282 * @f_info: rule information
3284 * Helper function to search for a given rule entry
3285 * Returns pointer to entry storing the rule if found
3287 static struct ice_fltr_mgmt_list_entry *
3288 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
3289 struct ice_fltr_info *f_info)
3291 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
3293 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3295 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3296 sizeof(f_info->l_data)) &&
3297 f_info->flag == list_itr->fltr_info.flag) {
3306 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
3307 * @recp_list: VSI lists needs to be searched
3308 * @vsi_handle: VSI handle to be found in VSI list
3309 * @vsi_list_id: VSI list ID found containing vsi_handle
3311 * Helper function to search a VSI list with single entry containing given VSI
3312 * handle element. This can be extended further to search VSI list with more
3313 * than 1 vsi_count. Returns pointer to VSI list entry if found.
3315 static struct ice_vsi_list_map_info *
3316 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
3319 struct ice_vsi_list_map_info *map_info = NULL;
3320 struct LIST_HEAD_TYPE *list_head;
3322 list_head = &recp_list->filt_rules;
3323 if (recp_list->adv_rule) {
3324 struct ice_adv_fltr_mgmt_list_entry *list_itr;
3326 LIST_FOR_EACH_ENTRY(list_itr, list_head,
3327 ice_adv_fltr_mgmt_list_entry,
3329 if (list_itr->vsi_list_info) {
3330 map_info = list_itr->vsi_list_info;
3331 if (ice_is_bit_set(map_info->vsi_map,
3333 *vsi_list_id = map_info->vsi_list_id;
3339 struct ice_fltr_mgmt_list_entry *list_itr;
3341 LIST_FOR_EACH_ENTRY(list_itr, list_head,
3342 ice_fltr_mgmt_list_entry,
3344 if (list_itr->vsi_count == 1 &&
3345 list_itr->vsi_list_info) {
3346 map_info = list_itr->vsi_list_info;
3347 if (ice_is_bit_set(map_info->vsi_map,
3349 *vsi_list_id = map_info->vsi_list_id;
3359 * ice_add_rule_internal - add rule for a given lookup type
3360 * @hw: pointer to the hardware structure
3361 * @recp_list: recipe list for which rule has to be added
3362 * @lport: logic port number on which function add rule
3363 * @f_entry: structure containing MAC forwarding information
3365 * Adds or updates the rule lists for a given recipe
3367 static enum ice_status
3368 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3369 u8 lport, struct ice_fltr_list_entry *f_entry)
3371 struct ice_fltr_info *new_fltr, *cur_fltr;
3372 struct ice_fltr_mgmt_list_entry *m_entry;
3373 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3374 enum ice_status status = ICE_SUCCESS;
3376 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3377 return ICE_ERR_PARAM;
3379 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
3380 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
3381 f_entry->fltr_info.fwd_id.hw_vsi_id =
3382 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3384 rule_lock = &recp_list->filt_rule_lock;
3386 ice_acquire_lock(rule_lock);
3387 new_fltr = &f_entry->fltr_info;
3388 if (new_fltr->flag & ICE_FLTR_RX)
3389 new_fltr->src = lport;
3390 else if (new_fltr->flag & ICE_FLTR_TX)
3392 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3394 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
3396 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3397 goto exit_add_rule_internal;
3400 cur_fltr = &m_entry->fltr_info;
3401 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
3403 exit_add_rule_internal:
3404 ice_release_lock(rule_lock);
3409 * ice_remove_vsi_list_rule
3410 * @hw: pointer to the hardware structure
3411 * @vsi_list_id: VSI list ID generated as part of allocate resource
3412 * @lkup_type: switch rule filter lookup type
3414 * The VSI list should be emptied before this function is called to remove the
3417 static enum ice_status
3418 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
3419 enum ice_sw_lkup_type lkup_type)
3421 /* Free the vsi_list resource that we allocated. It is assumed that the
3422 * list is empty at this point.
3424 return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
3425 ice_aqc_opc_free_res);
3429 * ice_rem_update_vsi_list
3430 * @hw: pointer to the hardware structure
3431 * @vsi_handle: VSI handle of the VSI to remove
3432 * @fm_list: filter management entry for which the VSI list management needs to
3435 static enum ice_status
3436 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
3437 struct ice_fltr_mgmt_list_entry *fm_list)
/* Removes vsi_handle from the VSI list referenced by fm_list, then either
 * converts the rule back to a direct FWD_TO_VSI action or frees the list
 * once it is no longer needed.
 * NOTE(review): several lines (braces, the vsi_list_id/rem_vsi_handle
 * declarations, and some status checks) appear to be missing from this
 * excerpt of the file.
 */
3439 enum ice_sw_lkup_type lkup_type;
3440 enum ice_status status = ICE_SUCCESS;
/* Only list-based forwarding rules that still track a VSI can be updated */
3443 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
3444 fm_list->vsi_count == 0)
3445 return ICE_ERR_PARAM;
3447 /* A rule with the VSI being removed does not exist */
3448 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
3449 return ICE_ERR_DOES_NOT_EXIST;
3451 lkup_type = fm_list->fltr_info.lkup_type;
3452 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
/* Ask FW to drop this VSI from the list (remove flag set to true) */
3453 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
3454 ice_aqc_opc_update_sw_rules,
/* Mirror the FW-side update in the driver's book-keeping */
3459 fm_list->vsi_count--;
3460 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
/* One VSI left and not a VLAN rule: collapse the rule back to a direct
 * FWD_TO_VSI action targeting the remaining VSI.
 */
3462 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
3463 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
3464 struct ice_vsi_list_map_info *vsi_list_info =
3465 fm_list->vsi_list_info;
/* The sole remaining VSI is the first (only) bit set in the map */
3468 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
3470 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
3471 return ICE_ERR_OUT_OF_RANGE;
3473 /* Make sure VSI list is empty before removing it below */
3474 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
3476 ice_aqc_opc_update_sw_rules,
/* Rewrite the switch rule in HW to forward directly to the VSI */
3481 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
3482 tmp_fltr_info.fwd_id.hw_vsi_id =
3483 ice_get_hw_vsi_num(hw, rem_vsi_handle);
3484 tmp_fltr_info.vsi_handle = rem_vsi_handle;
3485 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
3487 ice_debug(hw, ICE_DBG_SW,
3488 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
3489 tmp_fltr_info.fwd_id.hw_vsi_id, status);
/* Keep the stored filter info in sync with the rule now in HW */
3493 fm_list->fltr_info = tmp_fltr_info;
/* The list itself is no longer used: one VSI left for non-VLAN rules
 * (converted above), or empty for VLAN rules (per the comment in
 * ice_add_vlan_internal, VLAN pruning rules always use a VSI list).
 */
3496 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
3497 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
3498 struct ice_vsi_list_map_info *vsi_list_info =
3499 fm_list->vsi_list_info;
3501 /* Remove the VSI list since it is no longer used */
3502 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
3504 ice_debug(hw, ICE_DBG_SW,
3505 "Failed to remove VSI list %d, error %d\n",
3506 vsi_list_id, status);
/* Drop the map entry and clear the now-dangling pointer */
3510 LIST_DEL(&vsi_list_info->list_entry);
3511 ice_free(hw, vsi_list_info);
3512 fm_list->vsi_list_info = NULL;
3519 * ice_remove_rule_internal - Remove a filter rule of a given type
3521 * @hw: pointer to the hardware structure
3522 * @recp_list: recipe list for which the rule needs to removed
3523 * @f_entry: rule entry containing filter information
3525 static enum ice_status
3526 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3527 struct ice_fltr_list_entry *f_entry)
/* Removes one filter rule belonging to recp_list. For rules forwarding to
 * a VSI list, the VSI is first removed from the list; the HW rule itself
 * is only deleted when no VSI remains referenced by it.
 * NOTE(review): braces, the vsi_handle declaration and several
 * status/remove_rule checks appear to be missing from this excerpt.
 */
3529 struct ice_fltr_mgmt_list_entry *list_elem;
3530 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3531 enum ice_status status = ICE_SUCCESS;
3532 bool remove_rule = false;
/* Validate the caller's VSI handle and translate it to a HW VSI number */
3535 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3536 return ICE_ERR_PARAM;
3537 f_entry->fltr_info.fwd_id.hw_vsi_id =
3538 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3540 rule_lock = &recp_list->filt_rule_lock;
3541 ice_acquire_lock(rule_lock);
/* Locate the driver's book-keeping entry for this filter */
3542 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
3543 &f_entry->fltr_info);
3545 status = ICE_ERR_DOES_NOT_EXIST;
3549 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
3551 } else if (!list_elem->vsi_list_info) {
/* A FWD_TO_VSI_LIST rule must carry a valid vsi_list_info */
3552 status = ICE_ERR_DOES_NOT_EXIST;
3554 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
3555 /* a ref_cnt > 1 indicates that the vsi_list is being
3556 * shared by multiple rules. Decrement the ref_cnt and
3557 * remove this rule, but do not modify the list, as it
3558 * is in-use by other rules.
3560 list_elem->vsi_list_info->ref_cnt--;
3563 /* a ref_cnt of 1 indicates the vsi_list is only used
3564 * by one rule. However, the original removal request is only
3565 * for a single VSI. Update the vsi_list first, and only
3566 * remove the rule if there are no further VSIs in this list.
3568 vsi_handle = f_entry->fltr_info.vsi_handle;
3569 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
3572 /* if VSI count goes to zero after updating the VSI list */
3573 if (list_elem->vsi_count == 0)
3578 /* Remove the lookup rule */
3579 struct ice_aqc_sw_rules_elem *s_rule;
3581 s_rule = (struct ice_aqc_sw_rules_elem *)
3582 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
3584 status = ICE_ERR_NO_MEMORY;
/* Build the remove-rule descriptor and send it to FW */
3588 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3589 ice_aqc_opc_remove_sw_rules);
3591 status = ice_aq_sw_rules(hw, s_rule,
3592 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
3593 ice_aqc_opc_remove_sw_rules, NULL);
3595 /* Remove a book keeping from the list */
3596 ice_free(hw, s_rule);
/* Unlink and free the book-keeping entry while the lock is held */
3601 LIST_DEL(&list_elem->list_entry);
3602 ice_free(hw, list_elem);
3605 ice_release_lock(rule_lock);
3610 * ice_aq_get_res_alloc - get allocated resources
3611 * @hw: pointer to the HW struct
3612 * @num_entries: pointer to u16 to store the number of resource entries returned
3613 * @buf: pointer to buffer
3614 * @buf_size: size of buf
3615 * @cd: pointer to command details structure or NULL
3617 * The caller-supplied buffer must be large enough to store the resource
3618 * information for all resource types. Each resource type is an
3619 * ice_aqc_get_res_resp_elem structure.
3622 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
3623 struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
3624 struct ice_sq_cd *cd)
3626 struct ice_aqc_get_res_alloc *resp;
3627 enum ice_status status;
3628 struct ice_aq_desc desc;
3631 return ICE_ERR_BAD_PTR;
3633 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
3634 return ICE_ERR_INVAL_SIZE;
3636 resp = &desc.params.get_res;
3638 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
3639 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3641 if (!status && num_entries)
3642 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
3648 * ice_aq_get_res_descs - get allocated resource descriptors
3649 * @hw: pointer to the hardware structure
3650 * @num_entries: number of resource entries in buffer
3651 * @buf: structure to hold response data buffer
3652 * @buf_size: size of buffer
3653 * @res_type: resource type
3654 * @res_shared: is resource shared
3655 * @desc_id: input - first desc ID to start; output - next desc ID
3656 * @cd: pointer to command details structure or NULL
3659 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
3660 struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
3661 bool res_shared, u16 *desc_id, struct ice_sq_cd *cd)
3663 struct ice_aqc_get_allocd_res_desc *cmd;
3664 struct ice_aq_desc desc;
3665 enum ice_status status;
3667 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3669 cmd = &desc.params.get_res_desc;
3672 return ICE_ERR_PARAM;
3674 if (buf_size != (num_entries * sizeof(*buf)))
3675 return ICE_ERR_PARAM;
3677 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
3679 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
3680 ICE_AQC_RES_TYPE_M) | (res_shared ?
3681 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
3682 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
3684 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3686 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
3692 * ice_add_mac_rule - Add a MAC address based filter rule
3693 * @hw: pointer to the hardware structure
3694 * @m_list: list of MAC addresses and forwarding information
3695 * @sw: pointer to switch info struct for which function add rule
3696 * @lport: logic port number on which function add rule
3698 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
3699 * multiple unicast addresses, the function assumes that all the
3700 * addresses are unique in a given add_mac call. It doesn't
3701 * check for duplicates in this case, removing duplicates from a given
3702 * list should be taken care of in the caller of this function.
3704 static enum ice_status
3705 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
3706 struct ice_switch_info *sw, u8 lport)
/* Adds MAC-based filter rules from m_list. Multicast (and shared-unicast)
 * entries go through the generic ice_add_rule_internal() path one at a
 * time; exclusive unicast entries are batched into a single bulk
 * add-switch-rules AQ sequence.
 * NOTE(review): braces, the vsi_handle/hw_vsi_id/elem_sent declarations,
 * some loop continuation lines, counters (num_unicast updates) and status
 * checks appear to be missing from this excerpt.
 */
3708 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
3709 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3710 struct ice_fltr_list_entry *m_list_itr;
3711 struct LIST_HEAD_TYPE *rule_head;
3712 u16 total_elem_left, s_rule_size;
3713 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3714 enum ice_status status = ICE_SUCCESS;
3715 u16 num_unicast = 0;
3719 rule_lock = &recp_list->filt_rule_lock;
3720 rule_head = &recp_list->filt_rules;
/* First pass: validate every entry and route multicast/shared-unicast
 * entries through the generic add path.
 */
3722 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3724 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3728 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3729 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3730 if (!ice_is_vsi_valid(hw, vsi_handle))
3731 return ICE_ERR_PARAM;
3732 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3733 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3734 /* update the src in case it is VSI num */
3735 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3736 return ICE_ERR_PARAM;
3737 m_list_itr->fltr_info.src = hw_vsi_id;
3738 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3739 IS_ZERO_ETHER_ADDR(add))
3740 return ICE_ERR_PARAM;
3741 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3742 /* Don't overwrite the unicast address */
3743 ice_acquire_lock(rule_lock);
3744 if (ice_find_rule_entry(rule_head,
3745 &m_list_itr->fltr_info)) {
3746 ice_release_lock(rule_lock);
3747 return ICE_ERR_ALREADY_EXISTS;
3749 ice_release_lock(rule_lock);
3751 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3752 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3753 m_list_itr->status =
3754 ice_add_rule_internal(hw, recp_list, lport,
3756 if (m_list_itr->status)
3757 return m_list_itr->status;
/* Lock is held from here until ice_add_mac_exit */
3761 ice_acquire_lock(rule_lock);
3762 /* Exit if no suitable entries were found for adding bulk switch rule */
3764 status = ICE_SUCCESS;
3765 goto ice_add_mac_exit;
3768 /* Allocate switch rule buffer for the bulk update for unicast */
3769 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3770 s_rule = (struct ice_aqc_sw_rules_elem *)
3771 ice_calloc(hw, num_unicast, s_rule_size);
3773 status = ICE_ERR_NO_MEMORY;
3774 goto ice_add_mac_exit;
/* Second pass: serialize one rule descriptor per unicast entry */
3778 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3780 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3781 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3783 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3784 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3785 ice_aqc_opc_add_sw_rules);
3786 r_iter = (struct ice_aqc_sw_rules_elem *)
3787 ((u8 *)r_iter + s_rule_size);
3791 /* Call AQ bulk switch rule update for all unicast addresses */
3793 /* Call AQ switch rule in AQ_MAX chunk */
3794 for (total_elem_left = num_unicast; total_elem_left > 0;
3795 total_elem_left -= elem_sent) {
3796 struct ice_aqc_sw_rules_elem *entry = r_iter;
/* Limit each AQ call to what fits in ICE_AQ_MAX_BUF_LEN */
3798 elem_sent = MIN_T(u8, total_elem_left,
3799 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
3800 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3801 elem_sent, ice_aqc_opc_add_sw_rules,
3804 goto ice_add_mac_exit;
3805 r_iter = (struct ice_aqc_sw_rules_elem *)
3806 ((u8 *)r_iter + (elem_sent * s_rule_size));
3809 /* Fill up rule ID based on the value returned from FW */
/* Third pass: record FW-assigned rule IDs and create book-keeping
 * entries for each unicast filter.
 */
3811 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3813 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3814 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3815 struct ice_fltr_mgmt_list_entry *fm_entry;
3817 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3818 f_info->fltr_rule_id =
3819 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3820 f_info->fltr_act = ICE_FWD_TO_VSI;
3821 /* Create an entry to track this MAC address */
3822 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3823 ice_malloc(hw, sizeof(*fm_entry));
3825 status = ICE_ERR_NO_MEMORY;
3826 goto ice_add_mac_exit;
3828 fm_entry->fltr_info = *f_info;
3829 fm_entry->vsi_count = 1;
3830 /* The book keeping entries will get removed when
3831 * base driver calls remove filter AQ command
3834 LIST_ADD(&fm_entry->list_entry, rule_head);
3835 r_iter = (struct ice_aqc_sw_rules_elem *)
3836 ((u8 *)r_iter + s_rule_size);
3841 ice_release_lock(rule_lock);
3843 ice_free(hw, s_rule);
3848 * ice_add_mac - Add a MAC address based filter rule
3849 * @hw: pointer to the hardware structure
3850 * @m_list: list of MAC addresses and forwarding information
3852 * Function add MAC rule for logical port from HW struct
3854 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3857 return ICE_ERR_PARAM;
3859 return ice_add_mac_rule(hw, m_list, hw->switch_info,
3860 hw->port_info->lport);
3864 * ice_add_vlan_internal - Add one VLAN based filter rule
3865 * @hw: pointer to the hardware structure
3866 * @recp_list: recipe list for which rule has to be added
3867 * @f_entry: filter entry containing one VLAN information
3869 static enum ice_status
3870 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3871 struct ice_fltr_list_entry *f_entry)
/* Adds one VLAN-based filter rule. VLAN rules always forward to a VSI
 * list: a new rule either reuses an existing single-VSI list, creates a
 * fresh one, or (when the existing list is shared) builds a new two-VSI
 * list and re-points the rule at it.
 * NOTE(review): braces, goto/exit labels, some status checks and several
 * call-argument continuation lines appear to be missing from this excerpt.
 */
3873 struct ice_fltr_mgmt_list_entry *v_list_itr;
3874 struct ice_fltr_info *new_fltr, *cur_fltr;
3875 enum ice_sw_lkup_type lkup_type;
3876 u16 vsi_list_id = 0, vsi_handle;
3877 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3878 enum ice_status status = ICE_SUCCESS;
3880 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3881 return ICE_ERR_PARAM;
3883 f_entry->fltr_info.fwd_id.hw_vsi_id =
3884 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3885 new_fltr = &f_entry->fltr_info;
3887 /* VLAN ID should only be 12 bits */
3888 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3889 return ICE_ERR_PARAM;
3891 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3892 return ICE_ERR_PARAM;
3894 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3895 lkup_type = new_fltr->lkup_type;
3896 vsi_handle = new_fltr->vsi_handle;
3897 rule_lock = &recp_list->filt_rule_lock;
3898 ice_acquire_lock(rule_lock);
/* Does a rule for this VLAN already exist? */
3899 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
3901 struct ice_vsi_list_map_info *map_info = NULL;
3903 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3904 /* All VLAN pruning rules use a VSI list. Check if
3905 * there is already a VSI list containing VSI that we
3906 * want to add. If found, use the same vsi_list_id for
3907 * this new VLAN rule or else create a new list.
3909 map_info = ice_find_vsi_list_entry(recp_list,
3913 status = ice_create_vsi_list_rule(hw,
3921 /* Convert the action to forwarding to a VSI list. */
3922 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3923 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
/* Create the rule in HW, then re-look-up its book-keeping entry */
3926 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3928 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
3931 status = ICE_ERR_DOES_NOT_EXIST;
3934 /* reuse VSI list for new rule and increment ref_cnt */
3936 v_list_itr->vsi_list_info = map_info;
3937 map_info->ref_cnt++;
3939 v_list_itr->vsi_list_info =
3940 ice_create_vsi_list_map(hw, &vsi_handle,
3944 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3945 /* Update existing VSI list to add new VSI ID only if it used
3948 cur_fltr = &v_list_itr->fltr_info;
3949 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3952 /* If VLAN rule exists and VSI list being used by this rule is
3953 * referenced by more than 1 VLAN rule. Then create a new VSI
3954 * list appending previous VSI with new VSI and update existing
3955 * VLAN rule to point to new VSI list ID
3957 struct ice_fltr_info tmp_fltr;
3958 u16 vsi_handle_arr[2];
3961 /* Current implementation only supports reusing VSI list with
3962 * one VSI count. We should never hit below condition
3964 if (v_list_itr->vsi_count > 1 &&
3965 v_list_itr->vsi_list_info->ref_cnt > 1) {
3966 ice_debug(hw, ICE_DBG_SW,
3967 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3968 status = ICE_ERR_CFG;
/* The old shared list holds exactly one VSI; find its handle */
3973 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3976 /* A rule already exists with the new VSI being added */
3977 if (cur_handle == vsi_handle) {
3978 status = ICE_ERR_ALREADY_EXISTS;
/* Build a new two-entry list: the existing VSI plus the new one */
3982 vsi_handle_arr[0] = cur_handle;
3983 vsi_handle_arr[1] = vsi_handle;
3984 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3985 &vsi_list_id, lkup_type);
3989 tmp_fltr = v_list_itr->fltr_info;
3990 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3991 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3992 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3993 /* Update the previous switch rule to a new VSI list which
3994 * includes current VSI that is requested
3996 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
4000 /* before overriding VSI list map info. decrement ref_cnt of
4003 v_list_itr->vsi_list_info->ref_cnt--;
4005 /* now update to newly created list */
4006 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
4007 v_list_itr->vsi_list_info =
4008 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4010 v_list_itr->vsi_count++;
4014 ice_release_lock(rule_lock);
4019 * ice_add_vlan_rule - Add VLAN based filter rule
4020 * @hw: pointer to the hardware structure
4021 * @v_list: list of VLAN entries and forwarding information
4022 * @sw: pointer to switch info struct for which function add rule
4024 static enum ice_status
4025 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4026 struct ice_switch_info *sw)
4028 struct ice_fltr_list_entry *v_list_itr;
4029 struct ice_sw_recipe *recp_list;
4031 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
4032 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
4034 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
4035 return ICE_ERR_PARAM;
4036 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
4037 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
4039 if (v_list_itr->status)
4040 return v_list_itr->status;
4046 * ice_add_vlan - Add a VLAN based filter rule
4047 * @hw: pointer to the hardware structure
4048 * @v_list: list of VLAN and forwarding information
4050 * Function add VLAN rule for logical port from HW struct
4052 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4055 return ICE_ERR_PARAM;
4057 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
4061 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
4062 * @hw: pointer to the hardware structure
4063 * @mv_list: list of MAC and VLAN filters
4064 * @sw: pointer to switch info struct for which function add rule
4065 * @lport: logic port number on which function add rule
4067 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
4068 * pruning bits enabled, then it is the responsibility of the caller to make
4069 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
4070 * VLAN won't be received on that VSI otherwise.
4072 static enum ice_status
4073 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
4074 struct ice_switch_info *sw, u8 lport)
4076 struct ice_fltr_list_entry *mv_list_itr;
4077 struct ice_sw_recipe *recp_list;
4079 if (!mv_list || !hw)
4080 return ICE_ERR_PARAM;
4082 recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
4083 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
4085 enum ice_sw_lkup_type l_type =
4086 mv_list_itr->fltr_info.lkup_type;
4088 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4089 return ICE_ERR_PARAM;
4090 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
4091 mv_list_itr->status =
4092 ice_add_rule_internal(hw, recp_list, lport,
4094 if (mv_list_itr->status)
4095 return mv_list_itr->status;
4101 * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
4102 * @hw: pointer to the hardware structure
4103 * @mv_list: list of MAC VLAN addresses and forwarding information
4105 * Function add MAC VLAN rule for logical port from HW struct
4108 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4110 if (!mv_list || !hw)
4111 return ICE_ERR_PARAM;
4113 return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
4114 hw->port_info->lport);
4118 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
4119 * @hw: pointer to the hardware structure
4120 * @em_list: list of ether type MAC filter, MAC is optional
4121 * @sw: pointer to switch info struct for which function add rule
4122 * @lport: logic port number on which function add rule
4124 * This function requires the caller to populate the entries in
4125 * the filter list with the necessary fields (including flags to
4126 * indicate Tx or Rx rules).
4128 static enum ice_status
4129 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
4130 struct ice_switch_info *sw, u8 lport)
4132 struct ice_fltr_list_entry *em_list_itr;
4134 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
4136 struct ice_sw_recipe *recp_list;
4137 enum ice_sw_lkup_type l_type;
4139 l_type = em_list_itr->fltr_info.lkup_type;
4140 recp_list = &sw->recp_list[l_type];
4142 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
4143 l_type != ICE_SW_LKUP_ETHERTYPE)
4144 return ICE_ERR_PARAM;
4146 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
4149 if (em_list_itr->status)
4150 return em_list_itr->status;
4156 * ice_add_eth_mac - Add a ethertype based filter rule
4157 * @hw: pointer to the hardware structure
4158 * @em_list: list of ethertype and forwarding information
4160 * Function add ethertype rule for logical port from HW struct
4163 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
4165 if (!em_list || !hw)
4166 return ICE_ERR_PARAM;
4168 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
4169 hw->port_info->lport);
4173 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
4174 * @hw: pointer to the hardware structure
4175 * @em_list: list of ethertype or ethertype MAC entries
4176 * @sw: pointer to switch info struct for which function add rule
4178 static enum ice_status
4179 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
4180 struct ice_switch_info *sw)
4182 struct ice_fltr_list_entry *em_list_itr, *tmp;
4184 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
4186 struct ice_sw_recipe *recp_list;
4187 enum ice_sw_lkup_type l_type;
4189 l_type = em_list_itr->fltr_info.lkup_type;
4191 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
4192 l_type != ICE_SW_LKUP_ETHERTYPE)
4193 return ICE_ERR_PARAM;
4195 recp_list = &sw->recp_list[l_type];
4196 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4198 if (em_list_itr->status)
4199 return em_list_itr->status;
4205 * ice_remove_eth_mac - remove a ethertype based filter rule
4206 * @hw: pointer to the hardware structure
4207 * @em_list: list of ethertype and forwarding information
4211 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
4213 if (!em_list || !hw)
4214 return ICE_ERR_PARAM;
4216 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
4220 * ice_rem_sw_rule_info
4221 * @hw: pointer to the hardware structure
4222 * @rule_head: pointer to the switch list structure that we want to delete
4225 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
4227 if (!LIST_EMPTY(rule_head)) {
4228 struct ice_fltr_mgmt_list_entry *entry;
4229 struct ice_fltr_mgmt_list_entry *tmp;
4231 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
4232 ice_fltr_mgmt_list_entry, list_entry) {
4233 LIST_DEL(&entry->list_entry);
4234 ice_free(hw, entry);
4240 * ice_rem_adv_rule_info
4241 * @hw: pointer to the hardware structure
4242 * @rule_head: pointer to the switch list structure that we want to delete
4245 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
4247 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
4248 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
4250 if (LIST_EMPTY(rule_head))
4253 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
4254 ice_adv_fltr_mgmt_list_entry, list_entry) {
4255 LIST_DEL(&lst_itr->list_entry);
4256 ice_free(hw, lst_itr->lkups);
4257 ice_free(hw, lst_itr);
4262 * ice_rem_all_sw_rules_info
4263 * @hw: pointer to the hardware structure
4265 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
4267 struct ice_switch_info *sw = hw->switch_info;
4270 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4271 struct LIST_HEAD_TYPE *rule_head;
4273 rule_head = &sw->recp_list[i].filt_rules;
4274 if (!sw->recp_list[i].adv_rule)
4275 ice_rem_sw_rule_info(hw, rule_head);
4277 ice_rem_adv_rule_info(hw, rule_head);
4278 if (sw->recp_list[i].adv_rule &&
4279 LIST_EMPTY(&sw->recp_list[i].filt_rules))
4280 sw->recp_list[i].adv_rule = false;
4285 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
4286 * @pi: pointer to the port_info structure
4287 * @vsi_handle: VSI handle to set as default
4288 * @set: true to add the above mentioned switch rule, false to remove it
4289 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
4291 * add filter rule to set/unset given VSI as default VSI for the switch
4292 * (represented by swid)
4295 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
/* Adds (set == true) or removes a switch rule that makes vsi_handle the
 * default VSI for the given direction, and records the resulting rule ID
 * and VSI number in the port_info.
 * NOTE(review): the return type line, braces, the hw_vsi_id/s_rule_size
 * declarations, the else branches and a goto/exit label appear to be
 * missing from this excerpt.
 */
4298 struct ice_aqc_sw_rules_elem *s_rule;
4299 struct ice_fltr_info f_info;
4300 struct ice_hw *hw = pi->hw;
4301 enum ice_adminq_opc opcode;
4302 enum ice_status status;
4306 if (!ice_is_vsi_valid(hw, vsi_handle))
4307 return ICE_ERR_PARAM;
4308 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
/* Adding needs the full dummy-ethernet-header rule; removal does not */
4310 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
4311 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
4312 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
4314 return ICE_ERR_NO_MEMORY;
4316 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
/* Build the default-VSI lookup for the requested direction */
4318 f_info.lkup_type = ICE_SW_LKUP_DFLT;
4319 f_info.flag = direction;
4320 f_info.fltr_act = ICE_FWD_TO_VSI;
4321 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
4323 if (f_info.flag & ICE_FLTR_RX) {
4324 f_info.src = pi->lport;
4325 f_info.src_id = ICE_SRC_ID_LPORT;
/* when removing, reuse the rule ID that was recorded at add time */
4327 f_info.fltr_rule_id =
4328 pi->dflt_rx_vsi_rule_id;
4329 } else if (f_info.flag & ICE_FLTR_TX) {
4330 f_info.src_id = ICE_SRC_ID_VSI;
4331 f_info.src = hw_vsi_id;
4333 f_info.fltr_rule_id =
4334 pi->dflt_tx_vsi_rule_id;
4338 opcode = ice_aqc_opc_add_sw_rules;
4340 opcode = ice_aqc_opc_remove_sw_rules;
4342 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
4344 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
4345 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
/* On successful add, remember the FW-assigned rule index per direction */
4348 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4350 if (f_info.flag & ICE_FLTR_TX) {
4351 pi->dflt_tx_vsi_num = hw_vsi_id;
4352 pi->dflt_tx_vsi_rule_id = index;
4353 } else if (f_info.flag & ICE_FLTR_RX) {
4354 pi->dflt_rx_vsi_num = hw_vsi_id;
4355 pi->dflt_rx_vsi_rule_id = index;
/* On successful remove, invalidate the recorded defaults */
4358 if (f_info.flag & ICE_FLTR_TX) {
4359 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
4360 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
4361 } else if (f_info.flag & ICE_FLTR_RX) {
4362 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
4363 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
4368 ice_free(hw, s_rule);
4373 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
4374 * @list_head: head of rule list
4375 * @f_info: rule information
4377 * Helper function to search for a unicast rule entry - this is to be used
4378 * to remove unicast MAC filter that is not shared with other VSIs on the
4381 * Returns pointer to entry storing the rule if found
4383 static struct ice_fltr_mgmt_list_entry *
4384 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
4385 struct ice_fltr_info *f_info)
4387 struct ice_fltr_mgmt_list_entry *list_itr;
4389 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4391 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4392 sizeof(f_info->l_data)) &&
4393 f_info->fwd_id.hw_vsi_id ==
4394 list_itr->fltr_info.fwd_id.hw_vsi_id &&
4395 f_info->flag == list_itr->fltr_info.flag)
4402 * ice_remove_mac_rule - remove a MAC based filter rule
4403 * @hw: pointer to the hardware structure
4404 * @m_list: list of MAC addresses and forwarding information
4405 * @recp_list: list from which function remove MAC address
4407 * This function removes either a MAC filter rule or a specific VSI from a
4408 * VSI list for a multicast MAC address.
4410 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
4411 * ice_add_mac. Caller should be aware that this call will only work if all
4412 * the entries passed into m_list were added previously. It will not attempt to
4413 * do a partial remove of entries that were found.
4415 static enum ice_status
4416 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4417 struct ice_sw_recipe *recp_list)
/* Removes each MAC filter in m_list (or one VSI from a shared multicast
 * rule's VSI list). Exclusive unicast filters are only removed when this
 * PF actually owns them.
 * NOTE(review): braces, the vsi_handle declaration, the m_list NULL check
 * and a loop continuation line appear to be missing from this excerpt.
 */
4419 struct ice_fltr_list_entry *list_itr, *tmp;
4420 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4423 return ICE_ERR_PARAM;
4425 rule_lock = &recp_list->filt_rule_lock;
/* Safe iteration: entries may be unlinked while removing */
4426 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
4428 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
4429 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
4432 if (l_type != ICE_SW_LKUP_MAC)
4433 return ICE_ERR_PARAM;
4435 vsi_handle = list_itr->fltr_info.vsi_handle;
4436 if (!ice_is_vsi_valid(hw, vsi_handle))
4437 return ICE_ERR_PARAM;
4439 list_itr->fltr_info.fwd_id.hw_vsi_id =
4440 ice_get_hw_vsi_num(hw, vsi_handle);
4441 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4442 /* Don't remove the unicast address that belongs to
4443 * another VSI on the switch, since it is not being
4446 ice_acquire_lock(rule_lock);
4447 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
4448 &list_itr->fltr_info)) {
4449 ice_release_lock(rule_lock);
4450 return ICE_ERR_DOES_NOT_EXIST;
4452 ice_release_lock(rule_lock);
4454 list_itr->status = ice_remove_rule_internal(hw, recp_list,
4456 if (list_itr->status)
4457 return list_itr->status;
4463 * ice_remove_mac - remove a MAC address based filter rule
4464 * @hw: pointer to the hardware structure
4465 * @m_list: list of MAC addresses and forwarding information
4468 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4470 struct ice_sw_recipe *recp_list;
4472 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4473 return ice_remove_mac_rule(hw, m_list, recp_list);
4477 * ice_remove_vlan_rule - Remove VLAN based filter rule
4478 * @hw: pointer to the hardware structure
4479 * @v_list: list of VLAN entries and forwarding information
4480 * @recp_list: list from which function remove VLAN
4482 static enum ice_status
4483 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4484 struct ice_sw_recipe *recp_list)
4486 struct ice_fltr_list_entry *v_list_itr, *tmp;
4488 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4490 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4492 if (l_type != ICE_SW_LKUP_VLAN)
4493 return ICE_ERR_PARAM;
4494 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4496 if (v_list_itr->status)
4497 return v_list_itr->status;
4503 * ice_remove_vlan - remove a VLAN address based filter rule
4504 * @hw: pointer to the hardware structure
4505 * @v_list: list of VLAN and forwarding information
4509 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4511 struct ice_sw_recipe *recp_list;
4514 return ICE_ERR_PARAM;
4516 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
4517 return ice_remove_vlan_rule(hw, v_list, recp_list);
4521 * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
4522 * @hw: pointer to the hardware structure
4523 * @v_list: list of MAC VLAN entries and forwarding information
4524 * @recp_list: list from which function remove MAC VLAN
4526 static enum ice_status
4527 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4528 struct ice_sw_recipe *recp_list)
4530 struct ice_fltr_list_entry *v_list_itr, *tmp;
4532 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4533 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4535 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4537 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4538 return ICE_ERR_PARAM;
4539 v_list_itr->status =
4540 ice_remove_rule_internal(hw, recp_list,
4542 if (v_list_itr->status)
4543 return v_list_itr->status;
4549 * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
4550 * @hw: pointer to the hardware structure
4551 * @mv_list: list of MAC VLAN and forwarding information
4554 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4556 struct ice_sw_recipe *recp_list;
4558 if (!mv_list || !hw)
4559 return ICE_ERR_PARAM;
4561 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4562 return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
4566 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
4567 * @fm_entry: filter entry to inspect
4568 * @vsi_handle: VSI handle to compare with filter info
/* Returns true when the filter either forwards directly to this VSI
 * (ICE_FWD_TO_VSI with a matching vsi_handle) or forwards to a VSI list
 * whose vsi_map bitmap has this VSI's bit set.
 */
4571 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
4573 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
4574 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
4575 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
4576 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
4581 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
4582 * @hw: pointer to the hardware structure
4583 * @vsi_handle: VSI handle to remove filters from
4584 * @vsi_list_head: pointer to the list to add entry to
4585 * @fi: pointer to fltr_info of filter entry to copy & add
4587 * Helper function, used when creating a list of filters to remove from
4588 * a specific VSI. The entry added to vsi_list_head is a COPY of the
4589 * original filter entry, with the exception of fltr_info.fltr_act and
4590 * fltr_info.fwd_id fields. These are set such that later logic can
4591 * extract which VSI to remove the fltr from, and pass on that information.
4593 static enum ice_status
4594 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4595 struct LIST_HEAD_TYPE *vsi_list_head,
4596 struct ice_fltr_info *fi)
4598 struct ice_fltr_list_entry *tmp;
4600 /* this memory is freed up in the caller function
4601 * once filters for this VSI are removed
4603 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
/* Allocation failed (the NULL check itself is elided in this listing). */
4605 return ICE_ERR_NO_MEMORY;
/* Start from a byte copy of the caller's filter info... */
4607 tmp->fltr_info = *fi;
4609 /* Overwrite these fields to indicate which VSI to remove filter from,
4610 * so find and remove logic can extract the information from the
4611 * list entries. Note that original entries will still have proper
4614 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
4615 tmp->fltr_info.vsi_handle = vsi_handle;
4616 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
/* Ownership of tmp transfers to vsi_list_head; the caller frees it. */
4618 LIST_ADD(&tmp->list_entry, vsi_list_head);
4624 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4625 * @hw: pointer to the hardware structure
4626 * @vsi_handle: VSI handle to remove filters from
4627 * @lkup_list_head: pointer to the list that has certain lookup type filters
4628 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4630 * Locates all filters in lkup_list_head that are used by the given VSI,
4631 * and adds COPIES of those entries to vsi_list_head (intended to be used
4632 * to remove the listed filters).
4633 * Note that this means all entries in vsi_list_head must be explicitly
4634 * deallocated by the caller when done with list.
4636 static enum ice_status
4637 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4638 struct LIST_HEAD_TYPE *lkup_list_head,
4639 struct LIST_HEAD_TYPE *vsi_list_head)
4641 struct ice_fltr_mgmt_list_entry *fm_entry;
4642 enum ice_status status = ICE_SUCCESS;
4644 /* check to make sure VSI ID is valid and within boundary */
4645 if (!ice_is_vsi_valid(hw, vsi_handle))
4646 return ICE_ERR_PARAM;
4648 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
4649 ice_fltr_mgmt_list_entry, list_entry) {
4650 struct ice_fltr_info *fi;
4652 fi = &fm_entry->fltr_info;
/* Skip entries this VSI does not use (the `continue` is elided here). */
4653 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
/* Copy the matching entry onto the caller's removal list; on failure
 * the loop terminates and `status` is propagated (tail elided).
 */
4656 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4665 * ice_determine_promisc_mask
4666 * @fi: filter info to parse
4668 * Helper function to determine which ICE_PROMISC_ mask corresponds
4669 * to given filter into.
4671 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4673 u16 vid = fi->l_data.mac_vlan.vlan_id;
4674 u8 *macaddr = fi->l_data.mac.mac_addr;
4675 bool is_tx_fltr = false;
4676 u8 promisc_mask = 0;
/* Direction: a TX-flagged filter maps to the *_TX promisc bits
 * (the assignment to is_tx_fltr is elided in this listing).
 */
4678 if (fi->flag == ICE_FLTR_TX)
/* Classify by destination MAC: broadcast, multicast, or unicast. */
4681 if (IS_BROADCAST_ETHER_ADDR(macaddr))
4682 promisc_mask |= is_tx_fltr ?
4683 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4684 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
4685 promisc_mask |= is_tx_fltr ?
4686 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4687 else if (IS_UNICAST_ETHER_ADDR(macaddr))
4688 promisc_mask |= is_tx_fltr ?
4689 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
/* VLAN promisc bit — the guarding condition is elided here; presumably
 * based on vid and/or a PROMISC_VLAN lookup type. Confirm upstream.
 */
4691 promisc_mask |= is_tx_fltr ?
4692 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4694 return promisc_mask;
4698 * _ice_get_vsi_promisc - get promiscuous mode of given VSI
4699 * @hw: pointer to the hardware structure
4700 * @vsi_handle: VSI handle to retrieve info from
4701 * @promisc_mask: pointer to mask to be filled in
4702 * @vid: VLAN ID of promisc VLAN VSI
4703 * @sw: pointer to switch info struct for which function add rule
4705 static enum ice_status
4706 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4707 u16 *vid, struct ice_switch_info *sw)
4709 struct ice_fltr_mgmt_list_entry *itr;
4710 struct LIST_HEAD_TYPE *rule_head;
4711 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4713 if (!ice_is_vsi_valid(hw, vsi_handle))
4714 return ICE_ERR_PARAM;
/* Accumulate ICE_PROMISC_* bits from every ICE_SW_LKUP_PROMISC rule
 * that applies to this VSI, under the recipe's rule lock.
 * (Initialization of *vid / *promisc_mask is elided in this listing.)
 */
4718 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
4719 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
4721 ice_acquire_lock(rule_lock);
4722 LIST_FOR_EACH_ENTRY(itr, rule_head,
4723 ice_fltr_mgmt_list_entry, list_entry) {
4724 /* Continue if this filter doesn't apply to this VSI or the
4725 * VSI ID is not in the VSI map for this filter
4727 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4730 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4732 ice_release_lock(rule_lock);
4738 * ice_get_vsi_promisc - get promiscuous mode of given VSI
4739 * @hw: pointer to the hardware structure
4740 * @vsi_handle: VSI handle to retrieve info from
4741 * @promisc_mask: pointer to mask to be filled in
4742 * @vid: VLAN ID of promisc VLAN VSI
/* Public wrapper: queries the PF's own switch_info. */
4745 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4748 return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
4749 vid, hw->switch_info);
4753 * _ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4754 * @hw: pointer to the hardware structure
4755 * @vsi_handle: VSI handle to retrieve info from
4756 * @promisc_mask: pointer to mask to be filled in
4757 * @vid: VLAN ID of promisc VLAN VSI
4758 * @sw: pointer to switch info struct for which function add rule
4760 static enum ice_status
4761 _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4762 u16 *vid, struct ice_switch_info *sw)
4764 struct ice_fltr_mgmt_list_entry *itr;
4765 struct LIST_HEAD_TYPE *rule_head;
4766 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4768 if (!ice_is_vsi_valid(hw, vsi_handle))
4769 return ICE_ERR_PARAM;
/* Same walk as _ice_get_vsi_promisc(), but over the PROMISC_VLAN
 * recipe's rule list.
 */
4773 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
4774 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
4776 ice_acquire_lock(rule_lock);
4777 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
4779 /* Continue if this filter doesn't apply to this VSI or the
4780 * VSI ID is not in the VSI map for this filter
4782 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4785 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4787 ice_release_lock(rule_lock);
4793 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4794 * @hw: pointer to the hardware structure
4795 * @vsi_handle: VSI handle to retrieve info from
4796 * @promisc_mask: pointer to mask to be filled in
4797 * @vid: VLAN ID of promisc VLAN VSI
/* Public wrapper: queries the PF's own switch_info. */
4800 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4803 return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
4804 vid, hw->switch_info);
4808 * ice_remove_promisc - Remove promisc based filter rules
4809 * @hw: pointer to the hardware structure
4810 * @recp_id: recipe ID for which the rule needs to removed
4811 * @v_list: list of promisc entries
4813 static enum ice_status
4814 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
4815 struct LIST_HEAD_TYPE *v_list)
4817 struct ice_fltr_list_entry *v_list_itr, *tmp;
4818 struct ice_sw_recipe *recp_list;
4820 recp_list = &hw->switch_info->recp_list[recp_id];
/* Remove each entry; per-entry status is recorded on the entry and the
 * first failure is returned immediately (success return is elided).
 */
4821 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4823 v_list_itr->status =
4824 ice_remove_rule_internal(hw, recp_list, v_list_itr);
4825 if (v_list_itr->status)
4826 return v_list_itr->status;
4832 * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
4833 * @hw: pointer to the hardware structure
4834 * @vsi_handle: VSI handle to clear mode
4835 * @promisc_mask: mask of promiscuous config bits to clear
4836 * @vid: VLAN ID to clear VLAN promiscuous
4837 * @sw: pointer to switch info struct for which function add rule
4839 static enum ice_status
4840 _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4841 u16 vid, struct ice_switch_info *sw)
4843 struct ice_fltr_list_entry *fm_entry, *tmp;
4844 struct LIST_HEAD_TYPE remove_list_head;
4845 struct ice_fltr_mgmt_list_entry *itr;
4846 struct LIST_HEAD_TYPE *rule_head;
4847 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4848 enum ice_status status = ICE_SUCCESS;
4851 if (!ice_is_vsi_valid(hw, vsi_handle))
4852 return ICE_ERR_PARAM;
/* VLAN-direction bits are stored under the PROMISC_VLAN recipe; plain
 * unicast/multicast/broadcast bits under the PROMISC recipe.
 */
4854 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
4855 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4857 recipe_id = ICE_SW_LKUP_PROMISC;
4859 rule_head = &sw->recp_list[recipe_id].filt_rules;
4860 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4862 INIT_LIST_HEAD(&remove_list_head);
/* Pass 1 (under lock): collect COPIES of matching rules to remove. */
4864 ice_acquire_lock(rule_lock);
4865 LIST_FOR_EACH_ENTRY(itr, rule_head,
4866 ice_fltr_mgmt_list_entry, list_entry) {
4867 struct ice_fltr_info *fltr_info;
4868 u8 fltr_promisc_mask = 0;
4870 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4872 fltr_info = &itr->fltr_info;
/* For VLAN promisc rules, only clear rules matching the given VID. */
4874 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4875 vid != fltr_info->l_data.mac_vlan.vlan_id)
4878 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4880 /* Skip if filter is not completely specified by given mask */
4881 if (fltr_promisc_mask & ~promisc_mask)
4884 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
/* On copy failure, drop the lock and free whatever was collected. */
4888 ice_release_lock(rule_lock);
4889 goto free_fltr_list;
4892 ice_release_lock(rule_lock);
/* Pass 2 (lock released): actually remove the collected rules. */
4894 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
/* Free the copies made by ice_add_entry_to_vsi_fltr_list(). */
4897 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4898 ice_fltr_list_entry, list_entry) {
4899 LIST_DEL(&fm_entry->list_entry);
4900 ice_free(hw, fm_entry);
4907 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
4908 * @hw: pointer to the hardware structure
4909 * @vsi_handle: VSI handle to clear mode
4910 * @promisc_mask: mask of promiscuous config bits to clear
4911 * @vid: VLAN ID to clear VLAN promiscuous
/* Public wrapper: clears promisc mode(s) in the PF's own switch_info. */
4914 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
4915 u8 promisc_mask, u16 vid)
4917 return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
4918 vid, hw->switch_info);
4922 * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4923 * @hw: pointer to the hardware structure
4924 * @vsi_handle: VSI handle to configure
4925 * @promisc_mask: mask of promiscuous config bits
4926 * @vid: VLAN ID to set VLAN promiscuous
4927 * @lport: logical port number to configure promisc mode
4928 * @sw: pointer to switch info struct for which function add rule
4930 static enum ice_status
4931 _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4932 u16 vid, u8 lport, struct ice_switch_info *sw)
4934 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4935 struct ice_fltr_list_entry f_list_entry;
4936 struct ice_fltr_info new_fltr;
4937 enum ice_status status = ICE_SUCCESS;
4943 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4945 if (!ice_is_vsi_valid(hw, vsi_handle))
4946 return ICE_ERR_PARAM;
4947 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4949 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
/* VLAN-direction bits require the PROMISC_VLAN lookup keyed by vid. */
4951 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4952 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4953 new_fltr.l_data.mac_vlan.vlan_id = vid;
4954 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4956 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4957 recipe_id = ICE_SW_LKUP_PROMISC;
4960 /* Separate filters must be set for each direction/packet type
4961 * combination, so we will loop over the mask value, store the
4962 * individual type, and clear it out in the input mask as it
4965 while (promisc_mask) {
4966 struct ice_sw_recipe *recp_list;
/* Consume exactly one U/M/B cast bit per iteration; TX variants also
 * set a tx-direction flag (those assignments are elided in this
 * listing).
 */
4972 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4973 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4974 pkt_type = UCAST_FLTR;
4975 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4976 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4977 pkt_type = UCAST_FLTR;
4979 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4980 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4981 pkt_type = MCAST_FLTR;
4982 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4983 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4984 pkt_type = MCAST_FLTR;
4986 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4987 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4988 pkt_type = BCAST_FLTR;
4989 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4990 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4991 pkt_type = BCAST_FLTR;
4995 /* Check for VLAN promiscuous flag */
4996 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4997 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4998 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4999 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
5003 /* Set filter DA based on packet type */
5004 mac_addr = new_fltr.l_data.mac.mac_addr;
5005 if (pkt_type == BCAST_FLTR) {
5006 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
5007 } else if (pkt_type == MCAST_FLTR ||
5008 pkt_type == UCAST_FLTR) {
5009 /* Use the dummy ether header DA */
5010 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
5011 ICE_NONDMA_TO_NONDMA);
5012 if (pkt_type == MCAST_FLTR)
5013 mac_addr[0] |= 0x1; /* Set multicast bit */
5016 /* Need to reset this to zero for all iterations */
/* TX filters use the VSI as source; RX filters use the port.
 * (The branch condition is elided in this listing.)
 */
5019 new_fltr.flag |= ICE_FLTR_TX;
5020 new_fltr.src = hw_vsi_id;
5022 new_fltr.flag |= ICE_FLTR_RX;
5023 new_fltr.src = lport;
5026 new_fltr.fltr_act = ICE_FWD_TO_VSI;
5027 new_fltr.vsi_handle = vsi_handle;
5028 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
5029 f_list_entry.fltr_info = new_fltr;
5030 recp_list = &sw->recp_list[recipe_id];
5032 status = ice_add_rule_internal(hw, recp_list, lport,
5034 if (status != ICE_SUCCESS)
5035 goto set_promisc_exit;
5043 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5044 * @hw: pointer to the hardware structure
5045 * @vsi_handle: VSI handle to configure
5046 * @promisc_mask: mask of promiscuous config bits
5047 * @vid: VLAN ID to set VLAN promiscuous
/* Public wrapper: uses the PF's own lport and switch_info. */
5050 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5053 return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
5054 hw->port_info->lport,
5059 * _ice_set_vlan_vsi_promisc
5060 * @hw: pointer to the hardware structure
5061 * @vsi_handle: VSI handle to configure
5062 * @promisc_mask: mask of promiscuous config bits
5063 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5064 * @lport: logical port number to configure promisc mode
5065 * @sw: pointer to switch info struct for which function add rule
5067 * Configure VSI with all associated VLANs to given promiscuous mode(s)
5069 static enum ice_status
5070 _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5071 bool rm_vlan_promisc, u8 lport,
5072 struct ice_switch_info *sw)
5074 struct ice_fltr_list_entry *list_itr, *tmp;
5075 struct LIST_HEAD_TYPE vsi_list_head;
5076 struct LIST_HEAD_TYPE *vlan_head;
5077 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
5078 enum ice_status status;
/* Snapshot (copy) all VLAN rules used by this VSI under the VLAN
 * recipe's lock, then apply set/clear per VLAN without the lock held.
 */
5081 INIT_LIST_HEAD(&vsi_list_head);
5082 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
5083 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
5084 ice_acquire_lock(vlan_lock);
5085 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
5087 ice_release_lock(vlan_lock);
5089 goto free_fltr_list;
5091 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
5093 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
/* rm_vlan_promisc selects clear vs. set for each associated VLAN. */
5094 if (rm_vlan_promisc)
5095 status = _ice_clear_vsi_promisc(hw, vsi_handle,
5099 status = _ice_set_vsi_promisc(hw, vsi_handle,
5100 promisc_mask, vlan_id,
/* Free the snapshot copies regardless of success or failure. */
5107 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
5108 ice_fltr_list_entry, list_entry) {
5109 LIST_DEL(&list_itr->list_entry);
5110 ice_free(hw, list_itr);
5116 * ice_set_vlan_vsi_promisc
5117 * @hw: pointer to the hardware structure
5118 * @vsi_handle: VSI handle to configure
5119 * @promisc_mask: mask of promiscuous config bits
5120 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5122 * Configure VSI with all associated VLANs to given promiscuous mode(s)
/* Public wrapper: uses the PF's own lport and switch_info. */
5125 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5126 bool rm_vlan_promisc)
5128 return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
5129 rm_vlan_promisc, hw->port_info->lport,
5134 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
5135 * @hw: pointer to the hardware structure
5136 * @vsi_handle: VSI handle to remove filters from
5137 * @recp_list: recipe list from which function remove fltr
5138 * @lkup: switch rule filter lookup type
5141 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
5142 struct ice_sw_recipe *recp_list,
5143 enum ice_sw_lkup_type lkup)
5145 struct ice_fltr_list_entry *fm_entry;
5146 struct LIST_HEAD_TYPE remove_list_head;
5147 struct LIST_HEAD_TYPE *rule_head;
5148 struct ice_fltr_list_entry *tmp;
5149 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5150 enum ice_status status;
/* Copy (under lock) all rules of this lookup type used by the VSI,
 * then dispatch the removal to the type-specific helper.
 */
5152 INIT_LIST_HEAD(&remove_list_head);
5153 rule_lock = &recp_list[lkup].filt_rule_lock;
5154 rule_head = &recp_list[lkup].filt_rules;
5155 ice_acquire_lock(rule_lock);
5156 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
5158 ice_release_lock(rule_lock);
/* switch (lkup) — the switch statement line is elided in this listing */
5163 case ICE_SW_LKUP_MAC:
5164 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
5166 case ICE_SW_LKUP_VLAN:
5167 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
5169 case ICE_SW_LKUP_PROMISC:
5170 case ICE_SW_LKUP_PROMISC_VLAN:
/* lkup value doubles as the recipe ID for the promisc recipes */
5171 ice_remove_promisc(hw, lkup, &remove_list_head);
5173 case ICE_SW_LKUP_MAC_VLAN:
5174 ice_remove_mac_vlan(hw, &remove_list_head);
5176 case ICE_SW_LKUP_ETHERTYPE:
5177 case ICE_SW_LKUP_ETHERTYPE_MAC:
5178 ice_remove_eth_mac(hw, &remove_list_head);
5180 case ICE_SW_LKUP_DFLT:
5181 ice_debug(hw, ICE_DBG_SW,
5182 "Remove filters for this lookup type hasn't been implemented yet\n");
5184 case ICE_SW_LKUP_LAST:
5185 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
/* Free the copies collected by ice_add_to_vsi_fltr_list(). */
5189 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
5190 ice_fltr_list_entry, list_entry) {
5191 LIST_DEL(&fm_entry->list_entry);
5192 ice_free(hw, fm_entry);
5197 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
5198 * @hw: pointer to the hardware structure
5199 * @vsi_handle: VSI handle to remove filters from
5200 * @sw: pointer to switch info struct
/* Removes the VSI's filters for every supported lookup type, one
 * ice_remove_vsi_lkup_fltr() call per type.
 */
5203 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
5204 struct ice_switch_info *sw)
5206 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
5208 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5209 sw->recp_list, ICE_SW_LKUP_MAC);
5210 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5211 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
5212 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5213 sw->recp_list, ICE_SW_LKUP_PROMISC);
5214 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5215 sw->recp_list, ICE_SW_LKUP_VLAN);
5216 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5217 sw->recp_list, ICE_SW_LKUP_DFLT);
5218 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5219 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
5220 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5221 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
5222 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5223 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
5227 * ice_remove_vsi_fltr - Remove all filters for a VSI
5228 * @hw: pointer to the hardware structure
5229 * @vsi_handle: VSI handle to remove filters from
/* Public wrapper: removes all of the VSI's filters from the PF's own
 * switch_info.
 */
5231 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
5233 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
5237 * ice_alloc_res_cntr - allocating resource counter
5238 * @hw: pointer to the hardware structure
5239 * @type: type of resource
5240 * @alloc_shared: if set it is shared else dedicated
5241 * @num_items: number of entries requested for FD resource type
5242 * @counter_id: counter index returned by AQ call
5245 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
5248 struct ice_aqc_alloc_free_res_elem *buf;
5249 enum ice_status status;
5252 /* Allocate resource */
5253 buf_len = ice_struct_size(buf, elem, 1);
5254 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
/* Allocation failed (the NULL check itself is elided in this listing). */
5256 return ICE_ERR_NO_MEMORY;
5258 buf->num_elems = CPU_TO_LE16(num_items);
5259 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
5260 ICE_AQC_RES_TYPE_M) | alloc_shared);
/* Ask firmware to allocate the resource via the admin queue. */
5262 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
5263 ice_aqc_opc_alloc_res, NULL);
/* On success, FW returns the allocated counter index in sw_resp.
 * (Error branch and the ice_free(hw, buf) cleanup are elided here.)
 */
5267 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
5275 * ice_free_res_cntr - free resource counter
5276 * @hw: pointer to the hardware structure
5277 * @type: type of resource
5278 * @alloc_shared: if set it is shared else dedicated
5279 * @num_items: number of entries to be freed for FD resource type
5280 * @counter_id: counter ID resource which needs to be freed
5283 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
5286 struct ice_aqc_alloc_free_res_elem *buf;
5287 enum ice_status status;
5291 buf_len = ice_struct_size(buf, elem, 1);
5292 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
/* Allocation failed (the NULL check itself is elided in this listing). */
5294 return ICE_ERR_NO_MEMORY;
5296 buf->num_elems = CPU_TO_LE16(num_items);
5297 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
5298 ICE_AQC_RES_TYPE_M) | alloc_shared);
5299 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
/* Ask firmware to release the resource via the admin queue. */
5301 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
5302 ice_aqc_opc_free_res, NULL);
/* Failure is logged only; buf cleanup is elided in this listing. */
5304 ice_debug(hw, ICE_DBG_SW,
5305 "counter resource could not be freed\n");
5312 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
5313 * @hw: pointer to the hardware structure
5314 * @counter_id: returns counter index
/* Convenience wrapper: one dedicated VLAN counter. */
5316 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
5318 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
5319 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
5324 * ice_free_vlan_res_counter - Free counter resource for VLAN type
5325 * @hw: pointer to the hardware structure
5326 * @counter_id: counter index to be freed
/* Convenience wrapper: releases one dedicated VLAN counter. */
5328 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
5330 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
5331 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
5336 * ice_alloc_res_lg_act - add large action resource
5337 * @hw: pointer to the hardware structure
5338 * @l_id: large action ID to fill it in
5339 * @num_acts: number of actions to hold with a large action entry
5341 static enum ice_status
5342 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
5344 struct ice_aqc_alloc_free_res_elem *sw_buf;
5345 enum ice_status status;
/* Only 1..ICE_MAX_LG_ACT actions fit a large-action entry. */
5348 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
5349 return ICE_ERR_PARAM;
5351 /* Allocate resource for large action */
5352 buf_len = ice_struct_size(sw_buf, elem, 1);
5353 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
/* Allocation failed (the NULL check itself is elided in this listing). */
5355 return ICE_ERR_NO_MEMORY;
5357 sw_buf->num_elems = CPU_TO_LE16(1);
5359 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
5360 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
5361 * If num_acts is greater than 2, then use
5362 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
5363 * The num_acts cannot exceed 4. This was ensured at the
5364 * beginning of the function.
/* NOTE(review): the comment above says WIDE_TABLE_3 for num_acts == 2,
 * but the code below selects WIDE_TABLE_2 — confirm which is intended
 * against the admin queue resource-type definitions.
 */
5367 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
5368 else if (num_acts == 2)
5369 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
5371 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
5373 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
5374 ice_aqc_opc_alloc_res, NULL);
/* On success, FW returns the large-action table index in sw_resp. */
5376 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
5378 ice_free(hw, sw_buf);
5383 * ice_add_mac_with_sw_marker - add filter with sw marker
5384 * @hw: pointer to the hardware structure
5385 * @f_info: filter info structure containing the MAC filter information
5386 * @sw_marker: sw marker to tag the Rx descriptor with
5389 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
5392 struct ice_fltr_mgmt_list_entry *m_entry;
5393 struct ice_fltr_list_entry fl_info;
5394 struct ice_sw_recipe *recp_list;
5395 struct LIST_HEAD_TYPE l_head;
5396 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5397 enum ice_status ret;
/* Only MAC lookup filters forwarding to a VSI, with a valid marker and
 * VSI handle, may carry a software marker.
 */
5401 if (f_info->fltr_act != ICE_FWD_TO_VSI)
5402 return ICE_ERR_PARAM;
5404 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5405 return ICE_ERR_PARAM;
5407 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
5408 return ICE_ERR_PARAM;
5410 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5411 return ICE_ERR_PARAM;
5412 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5414 /* Add filter if it doesn't exist so then the adding of large
5415 * action always results in update
/* fl_info lives on the stack; it is only a transport for the add call. */
5418 INIT_LIST_HEAD(&l_head);
5419 fl_info.fltr_info = *f_info;
5420 LIST_ADD(&fl_info.list_entry, &l_head);
5422 entry_exists = false;
5423 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5424 hw->port_info->lport);
/* "Already exists" is fine — remember it so cleanup won't remove a
 * pre-existing rule.
 */
5425 if (ret == ICE_ERR_ALREADY_EXISTS)
5426 entry_exists = true;
5430 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5431 rule_lock = &recp_list->filt_rule_lock;
5432 ice_acquire_lock(rule_lock);
5433 /* Get the book keeping entry for the filter */
5434 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5438 /* If counter action was enabled for this rule then don't enable
5439 * sw marker large action
5441 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5442 ret = ICE_ERR_PARAM;
5446 /* if same marker was added before */
5447 if (m_entry->sw_marker_id == sw_marker) {
5448 ret = ICE_ERR_ALREADY_EXISTS;
5452 /* Allocate a hardware table entry to hold large act. Three actions
5453 * for marker based large action
5455 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
5459 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5462 /* Update the switch rule to add the marker action */
5463 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
5465 ice_release_lock(rule_lock);
/* Exit/cleanup path (label elided in this listing): */
5470 ice_release_lock(rule_lock);
5471 /* only remove entry if it did not exist previously */
5473 ret = ice_remove_mac(hw, &l_head);
5479 * ice_add_mac_with_counter - add filter with counter enabled
5480 * @hw: pointer to the hardware structure
5481 * @f_info: pointer to filter info structure containing the MAC filter
/* Same flow as ice_add_mac_with_sw_marker(), but attaches a VLAN
 * counter instead of a software marker to the MAC rule.
 */
5485 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
5487 struct ice_fltr_mgmt_list_entry *m_entry;
5488 struct ice_fltr_list_entry fl_info;
5489 struct ice_sw_recipe *recp_list;
5490 struct LIST_HEAD_TYPE l_head;
5491 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5492 enum ice_status ret;
5497 if (f_info->fltr_act != ICE_FWD_TO_VSI)
5498 return ICE_ERR_PARAM;
5500 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5501 return ICE_ERR_PARAM;
5503 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5504 return ICE_ERR_PARAM;
5505 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5506 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5508 entry_exist = false;
5510 rule_lock = &recp_list->filt_rule_lock;
5512 /* Add filter if it doesn't exist so then the adding of large
5513 * action always results in update
5515 INIT_LIST_HEAD(&l_head);
5517 fl_info.fltr_info = *f_info;
5518 LIST_ADD(&fl_info.list_entry, &l_head);
5520 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5521 hw->port_info->lport);
/* "Already exists" is fine — remember it so cleanup won't remove a
 * pre-existing rule (the assignment itself is elided here).
 */
5522 if (ret == ICE_ERR_ALREADY_EXISTS)
5527 ice_acquire_lock(rule_lock);
5528 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info)
5530 ret = ICE_ERR_BAD_PTR;
5534 /* Don't enable counter for a filter for which sw marker was enabled */
5535 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
5536 ret = ICE_ERR_PARAM;
5540 /* If a counter was already enabled then don't need to add again */
5541 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5542 ret = ICE_ERR_ALREADY_EXISTS;
5546 /* Allocate a hardware table entry to VLAN counter */
5547 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
5551 /* Allocate a hardware table entry to hold large act. Two actions for
5552 * counter based large action
5554 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
5558 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5561 /* Update the switch rule to add the counter action */
5562 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
5564 ice_release_lock(rule_lock);
/* Exit/cleanup path (label elided in this listing): */
5569 ice_release_lock(rule_lock);
5570 /* only remove entry if it did not exist previously */
5572 ret = ice_remove_mac(hw, &l_head);
5577 /* This is mapping table entry that maps every word within a given protocol
5578 * structure to the real byte offset as per the specification of that
5580 * for example dst address is 3 words in ethertype header and corresponding
5581 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
5582 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
5583 * matching entry describing its field. This needs to be updated if new
5584 * structure is added to that union.
/* Each row lists, per protocol, the byte offset of every 16-bit lookup
 * word within that protocol's header.
 */
5586 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
5587 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
5588 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
5589 { ICE_ETYPE_OL, { 0 } },
5590 { ICE_VLAN_OFOS, { 0, 2 } },
5591 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
5592 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
5593 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
5594 26, 28, 30, 32, 34, 36, 38 } },
5595 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
5596 26, 28, 30, 32, 34, 36, 38 } },
5597 { ICE_TCP_IL, { 0, 2 } },
5598 { ICE_UDP_OF, { 0, 2 } },
5599 { ICE_UDP_ILOS, { 0, 2 } },
5600 { ICE_SCTP_IL, { 0, 2 } },
5601 { ICE_VXLAN, { 8, 10, 12, 14 } },
5602 { ICE_GENEVE, { 8, 10, 12, 14 } },
5603 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
5604 { ICE_NVGRE, { 0, 2, 4, 6 } },
5605 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
5606 { ICE_PPPOE, { 0, 2, 4, 6 } },
5607 { ICE_PFCP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
5608 { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
5609 { ICE_ESP, { 0, 2, 4, 6 } },
5610 { ICE_AH, { 0, 2, 4, 6, 8, 10 } },
5611 { ICE_NAT_T, { 8, 10, 12, 14 } },
5614 /* The following table describes preferred grouping of recipes.
5615 * If a recipe that needs to be programmed is a superset or matches one of the
5616 * following combinations, then the recipe needs to be chained as per the
/* Maps each software protocol type to the hardware protocol ID used in
 * field vectors. Note the tunnel protocols (VXLAN, GENEVE, VXLAN-GPE,
 * GTP) share the outer-UDP hardware ID, NVGRE uses the GRE ID, and
 * PFCP/NAT-T use the inner-UDP ID, as listed below.
 */
5620 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
5621 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
5622 { ICE_MAC_IL, ICE_MAC_IL_HW },
5623 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
5624 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
5625 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
5626 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
5627 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
5628 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
5629 { ICE_TCP_IL, ICE_TCP_IL_HW },
5630 { ICE_UDP_OF, ICE_UDP_OF_HW },
5631 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
5632 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
5633 { ICE_VXLAN, ICE_UDP_OF_HW },
5634 { ICE_GENEVE, ICE_UDP_OF_HW },
5635 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
5636 { ICE_NVGRE, ICE_GRE_OF_HW },
5637 { ICE_GTP, ICE_UDP_OF_HW },
5638 { ICE_PPPOE, ICE_PPPOE_HW },
5639 { ICE_PFCP, ICE_UDP_ILOS_HW },
5640 { ICE_L2TPV3, ICE_L2TPV3_HW },
5641 { ICE_ESP, ICE_ESP_HW },
5642 { ICE_AH, ICE_AH_HW },
5643 { ICE_NAT_T, ICE_UDP_ILOS_HW },
5647 * ice_find_recp - find a recipe
5648 * @hw: pointer to the hardware structure
5649 * @lkup_exts: extension sequence to match
5651 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
5653 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
5654 enum ice_sw_tunnel_type tun_type)
5656 bool refresh_required = true;
5657 struct ice_sw_recipe *recp;
5660 /* Walk through existing recipes to find a match */
5661 recp = hw->switch_info->recp_list;
5662 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5663 /* If recipe was not created for this ID, in SW bookkeeping,
5664 * check if FW has an entry for this recipe. If the FW has an
5665 * entry update it in our SW bookkeeping and continue with the
5668 if (!recp[i].recp_created)
5669 if (ice_get_recp_frm_fw(hw,
5670 hw->switch_info->recp_list, i,
5674 /* Skip inverse action recipes */
5675 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
5676 ICE_AQ_RECIPE_ACT_INV_ACT)
5679 /* if number of words we are looking for match */
5680 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
5681 struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
5682 struct ice_fv_word *be = lkup_exts->fv_words;
5683 u16 *cr = recp[i].lkup_exts.field_mask;
5684 u16 *de = lkup_exts->field_mask;
5688 /* ar, cr, and qr are related to the recipe words, while
5689 * be, de, and pe are related to the lookup words
/* For every requested lookup word, search the recipe's words for one
 * with the same offset and protocol ID (the field-mask comparison
 * using cr/de is elided in this listing).
 */
5691 for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
5692 for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
5694 if (ar[qr].off == be[pe].off &&
5695 ar[qr].prot_id == be[pe].prot_id &&
5697 /* Found the "pe"th word in the
5702 /* After walking through all the words in the
5703 * "i"th recipe if "p"th word was not found then
5704 * this recipe is not what we are looking for.
5705 * So break out from this loop and try the next
5708 if (qr >= recp[i].lkup_exts.n_val_words) {
5713 /* If for "i"th recipe the found was never set to false
5714 * then it means we found our match
/* Tunnel type must also match for the recipe to be reused. */
5716 if (tun_type == recp[i].tun_type && found)
5717 return i; /* Return the recipe ID */
/* No recipe matched all lookup words and the tunnel type. */
5720 return ICE_MAX_NUM_RECIPES;
5724 * ice_prot_type_to_id - get protocol ID from protocol type
5725 * @type: protocol type
5726 * @id: pointer to variable that will receive the ID
5728 * Returns true if found, false otherwise
5730 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
/* Linear scan of the SW-to-HW protocol translation table */
5734 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
5735 if (ice_prot_id_tbl[i].type == type) {
5736 *id = ice_prot_id_tbl[i].protocol_id;
5743 * ice_fill_valid_words - calculate valid words
5744 * @rule: advanced rule with lookup information
5745 * @lkup_exts: byte offset extractions of the words that are valid
5747 * calculate valid words in a lookup rule using mask value
 *
 * Appends one protocol/offset/mask triple to @lkup_exts for every non-zero
 * 16-bit word in the rule's match mask. Returns the number of words added.
5750 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
5751 struct ice_prot_lkup_ext *lkup_exts)
5753 u8 j, word, prot_id, ret_val;
/* Unknown protocol type: nothing to extract */
5755 if (!ice_prot_type_to_id(rule->type, &prot_id))
5757 word = lkup_exts->n_val_words;
/* Scan the match-mask union 16 bits at a time; each non-zero word is a
 * field the caller wants matched.
 */
5760 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
5761 if (((u16 *)&rule->m_u)[j] &&
5762 rule->type < ARRAY_SIZE(ice_prot_ext)) {
5763 /* No more space to accommodate */
5764 if (word >= ICE_MAX_CHAIN_WORDS)
5766 lkup_exts->fv_words[word].off =
5767 ice_prot_ext[rule->type].offs[j];
5768 lkup_exts->fv_words[word].prot_id =
5769 ice_prot_id_tbl[rule->type].protocol_id;
5770 lkup_exts->field_mask[word] =
5771 BE16_TO_CPU(((_FORCE_ __be16 *)&rule->m_u)[j]);
/* Number of words added = new count minus old count; must be computed
 * before n_val_words is updated below.
 */
5775 ret_val = word - lkup_exts->n_val_words;
5776 lkup_exts->n_val_words = word;
5782 * ice_create_first_fit_recp_def - Create a recipe grouping
5783 * @hw: pointer to the hardware structure
5784 * @lkup_exts: an array of protocol header extractions
5785 * @rg_list: pointer to a list that stores new recipe groups
5786 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
5788 * Using first fit algorithm, take all the words that are still not done
5789 * and start grouping them in 4-word groups. Each group makes up one
5792 static enum ice_status
5793 ice_create_first_fit_recp_def(struct ice_hw *hw,
5794 struct ice_prot_lkup_ext *lkup_exts,
5795 struct LIST_HEAD_TYPE *rg_list,
5798 struct ice_pref_recipe_group *grp = NULL;
/* Profile-only rules have zero match words; still emit one empty group so
 * a recipe gets created.
 */
5803 if (!lkup_exts->n_val_words) {
5804 struct ice_recp_grp_entry *entry;
5806 entry = (struct ice_recp_grp_entry *)
5807 ice_malloc(hw, sizeof(*entry));
5809 return ICE_ERR_NO_MEMORY;
5810 LIST_ADD(&entry->l_entry, rg_list);
5811 grp = &entry->r_group;
5813 grp->n_val_pairs = 0;
5816 /* Walk through every word in the rule to check if it is not done. If so
5817 * then this word needs to be part of a new recipe.
/* First-fit: open a new group whenever the current one is full
 * (ICE_NUM_WORDS_RECIPE pairs) or none exists yet.
 */
5819 for (j = 0; j < lkup_exts->n_val_words; j++)
5820 if (!ice_is_bit_set(lkup_exts->done, j)) {
5822 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
5823 struct ice_recp_grp_entry *entry;
5825 entry = (struct ice_recp_grp_entry *)
5826 ice_malloc(hw, sizeof(*entry));
5828 return ICE_ERR_NO_MEMORY;
5829 LIST_ADD(&entry->l_entry, rg_list);
5830 grp = &entry->r_group;
/* Copy the protocol/offset pair and its mask into the group */
5834 grp->pairs[grp->n_val_pairs].prot_id =
5835 lkup_exts->fv_words[j].prot_id;
5836 grp->pairs[grp->n_val_pairs].off =
5837 lkup_exts->fv_words[j].off;
5838 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
5846 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
5847 * @hw: pointer to the hardware structure
5848 * @fv_list: field vector with the extraction sequence information
5849 * @rg_list: recipe groupings with protocol-offset pairs
5851 * Helper function to fill in the field vector indices for protocol-offset
5852 * pairs. These indexes are then ultimately programmed into a recipe.
5854 static enum ice_status
5855 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
5856 struct LIST_HEAD_TYPE *rg_list)
5858 struct ice_sw_fv_list_entry *fv;
5859 struct ice_recp_grp_entry *rg;
5860 struct ice_fv_word *fv_ext;
/* Nothing to resolve against; treat as success (empty FV list) */
5862 if (LIST_EMPTY(fv_list))
/* Only the first field vector is consulted; all profiles in the list are
 * expected to extract the needed words at the same indices.
 */
5865 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
5866 fv_ext = fv->fv_ptr->ew;
5868 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
5871 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
5872 struct ice_fv_word *pr;
5877 pr = &rg->r_group.pairs[i];
5878 mask = rg->r_group.mask[i];
/* Search the extraction-sequence words (fvw of them in the
 * switch block) for this protocol/offset pair.
 */
5880 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
5881 if (fv_ext[j].prot_id == pr->prot_id &&
5882 fv_ext[j].off == pr->off) {
5885 /* Store index of field vector */
5887 rg->fv_mask[i] = mask;
5891 /* Protocol/offset could not be found, caller gave an
5895 return ICE_ERR_PARAM;
5903 * ice_find_free_recp_res_idx - find free result indexes for recipe
5904 * @hw: pointer to hardware structure
5905 * @profiles: bitmap of profiles that will be associated with the new recipe
5906 * @free_idx: pointer to variable to receive the free index bitmap
5908 * The algorithm used here is:
5909 * 1. When creating a new recipe, create a set P which contains all
5910 * Profiles that will be associated with our new recipe
5912 * 2. For each Profile p in set P:
5913 * a. Add all recipes associated with Profile p into set R
5914 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
5915 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
5916 * i. Or just assume they all have the same possible indexes:
5918 * i.e., PossibleIndexes = 0x0000F00000000000
5920 * 3. For each Recipe r in set R:
5921 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
5922 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
5924 * FreeIndexes will contain the bits indicating the indexes free for use,
5925 * then the code needs to update the recipe[r].used_result_idx_bits to
5926 * indicate which indexes were selected for use by this recipe.
 *
 * Returns the number of free result indexes (bits set in @free_idx).
5929 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
5930 ice_bitmap_t *free_idx)
5932 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5933 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5934 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
5938 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5939 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5940 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
5941 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
/* Start with every FV word index considered possible */
5943 for (count = 0; count < ICE_MAX_FV_WORDS; count++)
5944 ice_set_bit(count, possible_idx);
5946 /* For each profile we are going to associate the recipe with, add the
5947 * recipes that are associated with that profile. This will give us
5948 * the set of recipes that our recipe may collide with. Also, determine
5949 * what possible result indexes are usable given this set of profiles.
5952 while (ICE_MAX_NUM_PROFILES >
5953 (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
5954 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
5955 ICE_MAX_NUM_RECIPES);
5956 ice_and_bitmap(possible_idx, possible_idx,
5957 hw->switch_info->prof_res_bm[bit],
5962 /* For each recipe that our new recipe may collide with, determine
5963 * which indexes have been used.
5965 for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
5966 if (ice_is_bit_set(recipes, bit)) {
5967 ice_or_bitmap(used_idx, used_idx,
5968 hw->switch_info->recp_list[bit].res_idxs,
/* free = possible XOR used (used is always a subset of possible) */
5972 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
5974 /* return number of free indexes */
5977 while (ICE_MAX_FV_WORDS >
5978 (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
5987 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
5988 * @hw: pointer to hardware structure
5989 * @rm: recipe management list entry
5990 * @profiles: bitmap of profiles that will be associated.
 *
 * Allocates one hardware recipe per group in rm->rg_list, programs the
 * lookup indices/masks from the group's field-vector data, chains multiple
 * recipes through result indexes plus a final root recipe when more than
 * one group is needed, submits everything via ice_aq_add_recipe(), and then
 * mirrors the created recipes into SW bookkeeping (hw->switch_info).
5992 static enum ice_status
5993 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
5994 ice_bitmap_t *profiles)
5996 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5997 struct ice_aqc_recipe_data_elem *tmp;
5998 struct ice_aqc_recipe_data_elem *buf;
5999 struct ice_recp_grp_entry *entry;
6000 enum ice_status status;
6006 /* When more than one recipe are required, another recipe is needed to
6007 * chain them together. Matching a tunnel metadata ID takes up one of
6008 * the match fields in the chaining recipe reducing the number of
6009 * chained recipes by one.
6011 /* check number of free result indices */
6012 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6013 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
6015 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
6016 free_res_idx, rm->n_grp_count);
/* A chained (multi-group) recipe needs one free result index per group */
6018 if (rm->n_grp_count > 1) {
6019 if (rm->n_grp_count > free_res_idx)
6020 return ICE_ERR_MAX_LIMIT;
6025 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
6026 return ICE_ERR_MAX_LIMIT;
/* Scratch buffer used to fetch an existing recipe template from FW */
6028 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
6029 ICE_MAX_NUM_RECIPES,
6032 return ICE_ERR_NO_MEMORY;
6034 buf = (struct ice_aqc_recipe_data_elem *)
6035 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
6037 status = ICE_ERR_NO_MEMORY;
6041 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
6042 recipe_count = ICE_MAX_NUM_RECIPES;
/* Read back an existing recipe (MAC lookup) to use as a template */
6043 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
6045 if (status || recipe_count == 0)
6048 /* Allocate the recipe resources, and configure them according to the
6049 * match fields from protocol headers and extracted field vectors.
6051 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
6052 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
6055 status = ice_alloc_recipe(hw, &entry->rid);
6059 /* Clear the result index of the located recipe, as this will be
6060 * updated, if needed, later in the recipe creation process.
6062 tmp[0].content.result_indx = 0;
6064 buf[recps] = tmp[0];
6065 buf[recps].recipe_indx = (u8)entry->rid;
6066 /* if the recipe is a non-root recipe RID should be programmed
6067 * as 0 for the rules to be applied correctly.
6069 buf[recps].content.rid = 0;
6070 ice_memset(&buf[recps].content.lkup_indx, 0,
6071 sizeof(buf[recps].content.lkup_indx),
6074 /* All recipes use look-up index 0 to match switch ID. */
6075 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
6076 buf[recps].content.mask[0] =
6077 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
6078 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
6081 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
6082 buf[recps].content.lkup_indx[i] = 0x80;
6083 buf[recps].content.mask[i] = 0;
/* Program the real lookup indices/masks resolved earlier by
 * ice_fill_fv_word_index(); slot 0 is reserved for switch ID.
 */
6086 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
6087 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
6088 buf[recps].content.mask[i + 1] =
6089 CPU_TO_LE16(entry->fv_mask[i]);
6092 if (rm->n_grp_count > 1) {
6093 /* Checks to see if there really is a valid result index
6096 if (chain_idx >= ICE_MAX_FV_WORDS) {
6097 ice_debug(hw, ICE_DBG_SW,
6098 "No chain index available\n");
6099 status = ICE_ERR_MAX_LIMIT;
/* Consume one free result index for this non-root recipe so the
 * chaining (root) recipe can match on its outcome.
 */
6103 entry->chain_idx = chain_idx;
6104 buf[recps].content.result_indx =
6105 ICE_AQ_RECIPE_RESULT_EN |
6106 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
6107 ICE_AQ_RECIPE_RESULT_DATA_M);
6108 ice_clear_bit(chain_idx, result_idx_bm);
6109 chain_idx = ice_find_first_bit(result_idx_bm,
6113 /* fill recipe dependencies */
6114 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
6115 ICE_MAX_NUM_RECIPES);
6116 ice_set_bit(buf[recps].recipe_indx,
6117 (ice_bitmap_t *)buf[recps].recipe_bitmap);
6118 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
/* Single-group case: the one recipe is also the root recipe */
6122 if (rm->n_grp_count == 1) {
6123 rm->root_rid = buf[0].recipe_indx;
6124 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
6125 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
6126 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
6127 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
6128 sizeof(buf[0].recipe_bitmap),
6129 ICE_NONDMA_TO_NONDMA);
6131 status = ICE_ERR_BAD_PTR;
6134 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
6135 * the recipe which is getting created if specified
6136 * by user. Usually any advanced switch filter, which results
6137 * into new extraction sequence, ended up creating a new recipe
6138 * of type ROOT and usually recipes are associated with profiles
6139 * Switch rule referreing newly created recipe, needs to have
6140 * either/or 'fwd' or 'join' priority, otherwise switch rule
6141 * evaluation will not happen correctly. In other words, if
6142 * switch rule to be evaluated on priority basis, then recipe
6143 * needs to have priority, otherwise it will be evaluated last.
6145 buf[0].content.act_ctrl_fwd_priority = rm->priority;
6147 struct ice_recp_grp_entry *last_chain_entry;
6150 /* Allocate the last recipe that will chain the outcomes of the
6151 * other recipes together
6153 status = ice_alloc_recipe(hw, &rid);
6157 buf[recps].recipe_indx = (u8)rid;
6158 buf[recps].content.rid = (u8)rid;
6159 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
6160 /* the new entry created should also be part of rg_list to
6161 * make sure we have complete recipe
6163 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
6164 sizeof(*last_chain_entry));
6165 if (!last_chain_entry) {
6166 status = ICE_ERR_NO_MEMORY;
6169 last_chain_entry->rid = rid;
6170 ice_memset(&buf[recps].content.lkup_indx, 0,
6171 sizeof(buf[recps].content.lkup_indx),
6173 /* All recipes use look-up index 0 to match switch ID. */
6174 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
6175 buf[recps].content.mask[0] =
6176 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
6177 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
6178 buf[recps].content.lkup_indx[i] =
6179 ICE_AQ_RECIPE_LKUP_IGNORE;
6180 buf[recps].content.mask[i] = 0;
6184 /* update r_bitmap with the recp that is used for chaining */
6185 ice_set_bit(rid, rm->r_bitmap);
6186 /* this is the recipe that chains all the other recipes so it
6187 * should not have a chaining ID to indicate the same
6189 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
/* Root recipe matches the result index produced by each chained
 * sub-recipe with a full 0xFFFF mask.
 */
6190 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
6192 last_chain_entry->fv_idx[i] = entry->chain_idx;
6193 buf[recps].content.lkup_indx[i] = entry->chain_idx;
6194 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
6195 ice_set_bit(entry->rid, rm->r_bitmap);
6197 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
6198 if (sizeof(buf[recps].recipe_bitmap) >=
6199 sizeof(rm->r_bitmap)) {
6200 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
6201 sizeof(buf[recps].recipe_bitmap),
6202 ICE_NONDMA_TO_NONDMA);
6204 status = ICE_ERR_BAD_PTR;
6207 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
6210 rm->root_rid = (u8)rid;
/* Programming recipes requires the change lock */
6212 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
6216 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
6217 ice_release_change_lock(hw);
6221 /* Every recipe that just got created add it to the recipe
6224 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
6225 struct ice_switch_info *sw = hw->switch_info;
6226 bool is_root, idx_found = false;
6227 struct ice_sw_recipe *recp;
6228 u16 idx, buf_idx = 0;
6230 /* find buffer index for copying some data */
6231 for (idx = 0; idx < rm->n_grp_count; idx++)
6232 if (buf[idx].recipe_indx == entry->rid) {
6238 status = ICE_ERR_OUT_OF_RANGE;
6242 recp = &sw->recp_list[entry->rid];
6243 is_root = (rm->root_rid == entry->rid);
6244 recp->is_root = is_root;
6246 recp->root_rid = entry->rid;
6247 recp->big_recp = (is_root && rm->n_grp_count > 1);
6249 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
6250 entry->r_group.n_val_pairs *
6251 sizeof(struct ice_fv_word),
6252 ICE_NONDMA_TO_NONDMA);
6254 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
6255 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
6257 /* Copy non-result fv index values and masks to recipe. This
6258 * call will also update the result recipe bitmask.
6260 ice_collect_result_idx(&buf[buf_idx], recp);
6262 /* for non-root recipes, also copy to the root, this allows
6263 * easier matching of a complete chained recipe
6266 ice_collect_result_idx(&buf[buf_idx],
6267 &sw->recp_list[rm->root_rid]);
6269 recp->n_ext_words = entry->r_group.n_val_pairs;
6270 recp->chain_idx = entry->chain_idx;
6271 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
6272 recp->n_grp_count = rm->n_grp_count;
6273 recp->tun_type = rm->tun_type;
6274 recp->recp_created = true;
6288 * ice_create_recipe_group - creates recipe group
6289 * @hw: pointer to hardware structure
6290 * @rm: recipe management list entry
6291 * @lkup_exts: lookup elements
 *
 * Packs the lookup words into first-fit groups (rm->rg_list), records the
 * group count, and copies the extraction words/masks into @rm.
6293 static enum ice_status
6294 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
6295 struct ice_prot_lkup_ext *lkup_exts)
6297 enum ice_status status;
6300 rm->n_grp_count = 0;
6302 /* Create recipes for words that are marked not done by packing them
6305 status = ice_create_first_fit_recp_def(hw, lkup_exts,
6306 &rm->rg_list, &recp_count);
6308 rm->n_grp_count += recp_count;
6309 rm->n_ext_words = lkup_exts->n_val_words;
/* Keep a copy of the words and masks for later recipe matching */
6310 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
6311 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
6312 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
6313 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
6320 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
6321 * @hw: pointer to hardware structure
6322 * @lkups: lookup elements or match criteria for the advanced recipe, one
6323 * structure per protocol header
6324 * @lkups_cnt: number of protocols
6325 * @bm: bitmap of field vectors to consider
6326 * @fv_list: pointer to a list that holds the returned field vectors
6328 static enum ice_status
6329 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6330 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
6332 enum ice_status status;
/* Build the list of HW protocol IDs referenced by the lookups */
6339 prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
6341 return ICE_ERR_NO_MEMORY;
6343 for (i = 0; i < lkups_cnt; i++)
6344 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
6345 status = ICE_ERR_CFG;
6349 /* Find field vectors that include all specified protocol types */
6350 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
/* prot_ids is only needed for the query above; free it on all paths */
6353 ice_free(hw, prot_ids);
6358 * ice_tun_type_match_word - determine if tun type needs a match mask
6359 * @tun_type: tunnel type
6360 * @mask: mask to be used for the tunnel
 *
 * Returns true and writes the metadata flag mask to @mask when the tunnel
 * type requires matching the tunnel bit in packet metadata.
6362 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
6365 case ICE_SW_TUN_VXLAN_GPE:
6366 case ICE_SW_TUN_GENEVE:
6367 case ICE_SW_TUN_VXLAN:
6368 case ICE_SW_TUN_NVGRE:
6369 case ICE_SW_TUN_UDP:
6370 case ICE_ALL_TUNNELS:
6371 *mask = ICE_TUN_FLAG_MASK;
/* VLAN-tagged tunnels must not match the VLAN bit of the flag word */
6374 case ICE_SW_TUN_GENEVE_VLAN:
6375 case ICE_SW_TUN_VXLAN_VLAN:
6376 *mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
6386 * ice_add_special_words - Add words that are not protocols, such as metadata
6387 * @rinfo: other information regarding the rule e.g. priority and action info
6388 * @lkup_exts: lookup word structure
 *
 * Returns ICE_ERR_MAX_LIMIT if the lookup word array is already full.
6390 static enum ice_status
6391 ice_add_special_words(struct ice_adv_rule_info *rinfo,
6392 struct ice_prot_lkup_ext *lkup_exts)
6396 /* If this is a tunneled packet, then add recipe index to match the
6397 * tunnel bit in the packet metadata flags.
6399 if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
6400 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
6401 u8 word = lkup_exts->n_val_words++;
/* Metadata words use the metadata HW protocol ID and the
 * tunnel-flag MDID offset rather than a packet offset.
 */
6403 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
6404 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
6405 lkup_exts->field_mask[word] = mask;
6407 return ICE_ERR_MAX_LIMIT;
6414 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
6415 * @hw: pointer to hardware structure
6416 * @rinfo: other information regarding the rule e.g. priority and action info
6417 * @bm: pointer to memory for returning the bitmap of field vectors
 *
 * Two styles of cases below: profile-class tunnel types set a prof_type and
 * fall through to ice_get_sw_fv_bitmap(); fixed-profile tunnel types set
 * specific ICE_PROFID_* bits in @bm directly.
6420 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
6423 enum ice_prof_type prof_type;
6425 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
6427 switch (rinfo->tun_type) {
6429 prof_type = ICE_PROF_NON_TUN;
6431 case ICE_ALL_TUNNELS:
6432 prof_type = ICE_PROF_TUN_ALL;
6434 case ICE_SW_TUN_VXLAN_GPE:
6435 case ICE_SW_TUN_GENEVE:
6436 case ICE_SW_TUN_GENEVE_VLAN:
6437 case ICE_SW_TUN_VXLAN:
6438 case ICE_SW_TUN_VXLAN_VLAN:
6439 case ICE_SW_TUN_UDP:
6440 case ICE_SW_TUN_GTP:
6441 prof_type = ICE_PROF_TUN_UDP;
6443 case ICE_SW_TUN_NVGRE:
6444 prof_type = ICE_PROF_TUN_GRE;
6446 case ICE_SW_TUN_PPPOE:
6447 prof_type = ICE_PROF_TUN_PPPOE;
6449 case ICE_SW_TUN_PPPOE_PAY:
6450 ice_set_bit(ICE_PROFID_PPPOE_PAY, bm);
6452 case ICE_SW_TUN_PPPOE_IPV4:
6453 ice_set_bit(ICE_PROFID_PPPOE_IPV4_OTHER, bm);
6454 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
6455 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
6457 case ICE_SW_TUN_PPPOE_IPV4_TCP:
6458 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
6460 case ICE_SW_TUN_PPPOE_IPV4_UDP:
6461 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
6463 case ICE_SW_TUN_PPPOE_IPV6:
6464 ice_set_bit(ICE_PROFID_PPPOE_IPV6_OTHER, bm);
6465 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
6466 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
6468 case ICE_SW_TUN_PPPOE_IPV6_TCP:
6469 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
6471 case ICE_SW_TUN_PPPOE_IPV6_UDP:
6472 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
6474 case ICE_SW_TUN_PROFID_IPV6_ESP:
6475 case ICE_SW_TUN_IPV6_ESP:
6476 ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
6478 case ICE_SW_TUN_PROFID_IPV6_AH:
6479 case ICE_SW_TUN_IPV6_AH:
6480 ice_set_bit(ICE_PROFID_IPV6_AH, bm);
6482 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6483 case ICE_SW_TUN_IPV6_L2TPV3:
6484 ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
6486 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6487 case ICE_SW_TUN_IPV6_NAT_T:
6488 ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
6490 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6491 ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
6493 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6494 ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
6496 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6497 ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
6499 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6500 ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
6502 case ICE_SW_TUN_IPV4_NAT_T:
6503 ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
6505 case ICE_SW_TUN_IPV4_L2TPV3:
6506 ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
6508 case ICE_SW_TUN_IPV4_ESP:
6509 ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
6511 case ICE_SW_TUN_IPV4_AH:
6512 ice_set_bit(ICE_PROFID_IPV4_AH, bm);
6514 case ICE_SW_IPV4_TCP:
6515 ice_set_bit(ICE_PROFID_IPV4_TCP, bm);
6517 case ICE_SW_IPV4_UDP:
6518 ice_set_bit(ICE_PROFID_IPV4_UDP, bm);
6520 case ICE_SW_IPV6_TCP:
6521 ice_set_bit(ICE_PROFID_IPV6_TCP, bm);
6523 case ICE_SW_IPV6_UDP:
6524 ice_set_bit(ICE_PROFID_IPV6_UDP, bm);
6526 case ICE_SW_TUN_AND_NON_TUN:
/* Default: consider all profiles */
6528 prof_type = ICE_PROF_ALL;
/* Expand the profile class chosen above into a bitmap of FV indices */
6532 ice_get_sw_fv_bitmap(hw, prof_type, bm);
6536 * ice_is_prof_rule - determine if rule type is a profile rule
6537 * @type: the rule type
6539 * if the rule type is a profile rule, that means that there no field value
6540 * match required, in this case just a profile hit is required.
6542 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
/* All ICE_SW_TUN_PROFID_* types match on profile hit alone */
6545 case ICE_SW_TUN_PROFID_IPV6_ESP:
6546 case ICE_SW_TUN_PROFID_IPV6_AH:
6547 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6548 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6549 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6550 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6551 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6552 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6562 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
6563 * @hw: pointer to hardware structure
6564 * @lkups: lookup elements or match criteria for the advanced recipe, one
6565 * structure per protocol header
6566 * @lkups_cnt: number of protocols
6567 * @rinfo: other information regarding the rule e.g. priority and action info
6568 * @rid: return the recipe ID of the recipe created
 *
 * Top-level recipe creation flow: extract valid match words from @lkups,
 * find compatible field vectors, add metadata words, group the words into
 * recipes, reuse an existing matching recipe if one exists, otherwise
 * program a new one and associate it with all compatible profiles.
6570 static enum ice_status
6571 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6572 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
6574 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
6575 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
6576 struct ice_prot_lkup_ext *lkup_exts;
6577 struct ice_recp_grp_entry *r_entry;
6578 struct ice_sw_fv_list_entry *fvit;
6579 struct ice_recp_grp_entry *r_tmp;
6580 struct ice_sw_fv_list_entry *tmp;
6581 enum ice_status status = ICE_SUCCESS;
6582 struct ice_sw_recipe *rm;
/* Zero lookups is only legal for profile-hit-only rule types */
6585 if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
6586 return ICE_ERR_PARAM;
6588 lkup_exts = (struct ice_prot_lkup_ext *)
6589 ice_malloc(hw, sizeof(*lkup_exts))
6591 return ICE_ERR_NO_MEMORY;
6593 /* Determine the number of words to be matched and if it exceeds a
6594 * recipe's restrictions
6596 for (i = 0; i < lkups_cnt; i++) {
6599 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
6600 status = ICE_ERR_CFG;
6601 goto err_free_lkup_exts;
6604 count = ice_fill_valid_words(&lkups[i], lkup_exts);
6606 status = ICE_ERR_CFG;
6607 goto err_free_lkup_exts;
6611 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
6613 status = ICE_ERR_NO_MEMORY;
6614 goto err_free_lkup_exts;
6617 /* Get field vectors that contain fields extracted from all the protocol
6618 * headers being programmed.
6620 INIT_LIST_HEAD(&rm->fv_list);
6621 INIT_LIST_HEAD(&rm->rg_list);
6623 /* Get bitmap of field vectors (profiles) that are compatible with the
6624 * rule request; only these will be searched in the subsequent call to
6627 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
6629 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
6633 /* Create any special protocol/offset pairs, such as looking at tunnel
6634 * bits by extracting metadata
6636 status = ice_add_special_words(rinfo, lkup_exts);
6638 goto err_free_lkup_exts;
6640 /* Group match words into recipes using preferred recipe grouping
6643 status = ice_create_recipe_group(hw, rm, lkup_exts);
6647 /* set the recipe priority if specified */
6648 rm->priority = (u8)rinfo->priority;
6650 /* Find offsets from the field vector. Pick the first one for all the
6653 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
6657 /* An empty FV list means to use all the profiles returned in the
6660 if (LIST_EMPTY(&rm->fv_list)) {
6663 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++)
6664 if (ice_is_bit_set(fv_bitmap, j)) {
6665 struct ice_sw_fv_list_entry *fvl;
6667 fvl = (struct ice_sw_fv_list_entry *)
6668 ice_malloc(hw, sizeof(*fvl));
6672 fvl->profile_id = j;
6673 LIST_ADD(&fvl->list_entry, &rm->fv_list);
6677 /* get bitmap of all profiles the recipe will be associated with */
6678 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
6679 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
6681 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
6682 ice_set_bit((u16)fvit->profile_id, profiles);
6685 /* Look for a recipe which matches our requested fv / mask list */
6686 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
6687 if (*rid < ICE_MAX_NUM_RECIPES)
6688 /* Success if found a recipe that match the existing criteria */
6691 rm->tun_type = rinfo->tun_type;
6692 /* Recipe we need does not exist, add a recipe */
6693 status = ice_add_sw_recipe(hw, rm, profiles);
6697 /* Associate all the recipes created with all the profiles in the
6698 * common field vector.
6700 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
6702 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Read-modify-write the profile's recipe association bitmap */
6705 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
6706 (u8 *)r_bitmap, NULL);
6710 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
6711 ICE_MAX_NUM_RECIPES);
6712 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
6716 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
6719 ice_release_change_lock(hw);
6724 /* Update profile to recipe bitmap array */
6725 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
6726 ICE_MAX_NUM_RECIPES);
6728 /* Update recipe to profile bitmap array */
6729 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
6730 if (ice_is_bit_set(r_bitmap, j))
6731 ice_set_bit((u16)fvit->profile_id,
6732 recipe_to_profile[j]);
6735 *rid = rm->root_rid;
/* Cache the lookup extractions so ice_find_recp() can match later */
6736 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
6737 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
/* Cleanup: free the recipe group entries and FV list entries */
6739 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
6740 ice_recp_grp_entry, l_entry) {
6741 LIST_DEL(&r_entry->l_entry);
6742 ice_free(hw, r_entry);
6745 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
6747 LIST_DEL(&fvit->list_entry);
6752 ice_free(hw, rm->root_buf);
6757 ice_free(hw, lkup_exts);
6763 * ice_find_dummy_packet - find dummy packet by tunnel type
6765 * @lkups: lookup elements or match criteria for the advanced recipe, one
6766 * structure per protocol header
6767 * @lkups_cnt: number of protocols
6768 * @tun_type: tunnel type from the match criteria
6769 * @pkt: dummy packet to fill according to filter match criteria
6770 * @pkt_len: packet length of dummy packet
6771 * @offsets: pointer to receive the pointer to the offsets for the packet
/* NOTE(review): this chunk appears to be a lossy extract -- each line carries
 * a stray original-line-number prefix and several statements (the flag
 * assignments in the classification chain, closing braces, early returns)
 * are missing from this view.  Code is kept byte-identical; comments only.
 */
6774 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6775 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
6777 const struct ice_dummy_pkt_offsets **offsets)
6779 bool tcp = false, udp = false, ipv6 = false, vlan = false;
/* Classify the caller's match criteria in one pass; the flags set here
 * (tcp/udp/ipv6/vlan and, presumably, gre/pppoe_ipv6 -- the assignment
 * statements are not visible in this extract) select the dummy packet
 * template below.
 */
6783 for (i = 0; i < lkups_cnt; i++) {
6784 if (lkups[i].type == ICE_UDP_ILOS)
6786 else if (lkups[i].type == ICE_TCP_IL)
6788 else if (lkups[i].type == ICE_IPV6_OFOS)
6790 else if (lkups[i].type == ICE_VLAN_OFOS)
6792 else if (lkups[i].type == ICE_IPV4_OFOS &&
6793 lkups[i].h_u.ipv4_hdr.protocol ==
6794 ICE_IPV4_NVGRE_PROTO_ID &&
6795 lkups[i].m_u.ipv4_hdr.protocol ==
6798 else if (lkups[i].type == ICE_PPPOE &&
6799 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
6800 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
6801 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
6804 else if (lkups[i].type == ICE_ETYPE_OL &&
6805 lkups[i].h_u.ethertype.ethtype_id ==
6806 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
6807 lkups[i].m_u.ethertype.ethtype_id ==
6810 else if (lkups[i].type == ICE_IPV4_IL &&
6811 lkups[i].h_u.ipv4_hdr.protocol ==
6813 lkups[i].m_u.ipv4_hdr.protocol ==
/* Fixed tunnel types take priority over the protocol flags: each case
 * returns the matching template, its length, and its offset table.
 */
6818 if (tun_type == ICE_SW_TUN_IPV4_ESP) {
6819 *pkt = dummy_ipv4_esp_pkt;
6820 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
6821 *offsets = dummy_ipv4_esp_packet_offsets;
6825 if (tun_type == ICE_SW_TUN_IPV6_ESP) {
6826 *pkt = dummy_ipv6_esp_pkt;
6827 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
6828 *offsets = dummy_ipv6_esp_packet_offsets;
6832 if (tun_type == ICE_SW_TUN_IPV4_AH) {
6833 *pkt = dummy_ipv4_ah_pkt;
6834 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
6835 *offsets = dummy_ipv4_ah_packet_offsets;
6839 if (tun_type == ICE_SW_TUN_IPV6_AH) {
6840 *pkt = dummy_ipv6_ah_pkt;
6841 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
6842 *offsets = dummy_ipv6_ah_packet_offsets;
6846 if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
6847 *pkt = dummy_ipv4_nat_pkt;
6848 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
6849 *offsets = dummy_ipv4_nat_packet_offsets;
6853 if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
6854 *pkt = dummy_ipv6_nat_pkt;
6855 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
6856 *offsets = dummy_ipv6_nat_packet_offsets;
6860 if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
6861 *pkt = dummy_ipv4_l2tpv3_pkt;
6862 *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
6863 *offsets = dummy_ipv4_l2tpv3_packet_offsets;
6867 if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
6868 *pkt = dummy_ipv6_l2tpv3_pkt;
6869 *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
6870 *offsets = dummy_ipv6_l2tpv3_packet_offsets;
6874 if (tun_type == ICE_SW_TUN_GTP) {
6875 *pkt = dummy_udp_gtp_packet;
6876 *pkt_len = sizeof(dummy_udp_gtp_packet);
6877 *offsets = dummy_udp_gtp_packet_offsets;
/* PPPoE selection depends on the inner-protocol flags gathered above */
6881 if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
6882 *pkt = dummy_pppoe_ipv6_packet;
6883 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
6884 *offsets = dummy_pppoe_packet_offsets;
6886 } else if (tun_type == ICE_SW_TUN_PPPOE ||
6887 tun_type == ICE_SW_TUN_PPPOE_PAY) {
6888 *pkt = dummy_pppoe_ipv4_packet;
6889 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
6890 *offsets = dummy_pppoe_packet_offsets;
6894 if (tun_type == ICE_SW_TUN_PPPOE_IPV4) {
6895 *pkt = dummy_pppoe_ipv4_packet;
6896 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
6897 *offsets = dummy_pppoe_packet_ipv4_offsets;
6901 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_TCP) {
6902 *pkt = dummy_pppoe_ipv4_tcp_packet;
6903 *pkt_len = sizeof(dummy_pppoe_ipv4_tcp_packet);
6904 *offsets = dummy_pppoe_ipv4_tcp_packet_offsets;
6908 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_UDP) {
6909 *pkt = dummy_pppoe_ipv4_udp_packet;
6910 *pkt_len = sizeof(dummy_pppoe_ipv4_udp_packet);
6911 *offsets = dummy_pppoe_ipv4_udp_packet_offsets;
6915 if (tun_type == ICE_SW_TUN_PPPOE_IPV6) {
6916 *pkt = dummy_pppoe_ipv6_packet;
6917 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
6918 *offsets = dummy_pppoe_packet_ipv6_offsets;
6922 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_TCP) {
6923 *pkt = dummy_pppoe_ipv6_tcp_packet;
6924 *pkt_len = sizeof(dummy_pppoe_ipv6_tcp_packet);
6925 *offsets = dummy_pppoe_packet_ipv6_tcp_offsets;
6929 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_UDP) {
6930 *pkt = dummy_pppoe_ipv6_udp_packet;
6931 *pkt_len = sizeof(dummy_pppoe_ipv6_udp_packet);
6932 *offsets = dummy_pppoe_packet_ipv6_udp_offsets;
6936 if (tun_type == ICE_SW_IPV4_TCP) {
6937 *pkt = dummy_tcp_packet;
6938 *pkt_len = sizeof(dummy_tcp_packet);
6939 *offsets = dummy_tcp_packet_offsets;
6943 if (tun_type == ICE_SW_IPV4_UDP) {
6944 *pkt = dummy_udp_packet;
6945 *pkt_len = sizeof(dummy_udp_packet);
6946 *offsets = dummy_udp_packet_offsets;
6950 if (tun_type == ICE_SW_IPV6_TCP) {
6951 *pkt = dummy_tcp_ipv6_packet;
6952 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
6953 *offsets = dummy_tcp_ipv6_packet_offsets;
6957 if (tun_type == ICE_SW_IPV6_UDP) {
6958 *pkt = dummy_udp_ipv6_packet;
6959 *pkt_len = sizeof(dummy_udp_ipv6_packet);
6960 *offsets = dummy_udp_ipv6_packet_offsets;
6964 if (tun_type == ICE_ALL_TUNNELS) {
6965 *pkt = dummy_gre_udp_packet;
6966 *pkt_len = sizeof(dummy_gre_udp_packet);
6967 *offsets = dummy_gre_udp_packet_offsets;
/* GRE/NVGRE: inner-TCP template when tcp was detected, else inner-UDP
 * (the intervening condition lines are missing from this extract).
 */
6971 if (tun_type == ICE_SW_TUN_NVGRE || gre) {
6973 *pkt = dummy_gre_tcp_packet;
6974 *pkt_len = sizeof(dummy_gre_tcp_packet);
6975 *offsets = dummy_gre_tcp_packet_offsets;
6979 *pkt = dummy_gre_udp_packet;
6980 *pkt_len = sizeof(dummy_gre_udp_packet);
6981 *offsets = dummy_gre_udp_packet_offsets;
6985 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
6986 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
6987 tun_type == ICE_SW_TUN_GENEVE_VLAN ||
6988 tun_type == ICE_SW_TUN_VXLAN_VLAN) {
6990 *pkt = dummy_udp_tun_tcp_packet;
6991 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
6992 *offsets = dummy_udp_tun_tcp_packet_offsets;
6996 *pkt = dummy_udp_tun_udp_packet;
6997 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
6998 *offsets = dummy_udp_tun_udp_packet_offsets;
/* Non-tunnel fall-through: pick by udp/tcp/ipv6/vlan flag combination */
7004 *pkt = dummy_vlan_udp_packet;
7005 *pkt_len = sizeof(dummy_vlan_udp_packet);
7006 *offsets = dummy_vlan_udp_packet_offsets;
7009 *pkt = dummy_udp_packet;
7010 *pkt_len = sizeof(dummy_udp_packet);
7011 *offsets = dummy_udp_packet_offsets;
7013 } else if (udp && ipv6) {
7015 *pkt = dummy_vlan_udp_ipv6_packet;
7016 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
7017 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
7020 *pkt = dummy_udp_ipv6_packet;
7021 *pkt_len = sizeof(dummy_udp_ipv6_packet);
7022 *offsets = dummy_udp_ipv6_packet_offsets;
7024 } else if ((tcp && ipv6) || ipv6) {
7026 *pkt = dummy_vlan_tcp_ipv6_packet;
7027 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
7028 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
7031 *pkt = dummy_tcp_ipv6_packet;
7032 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
7033 *offsets = dummy_tcp_ipv6_packet_offsets;
7038 *pkt = dummy_vlan_tcp_packet;
7039 *pkt_len = sizeof(dummy_vlan_tcp_packet);
7040 *offsets = dummy_vlan_tcp_packet_offsets;
7042 *pkt = dummy_tcp_packet;
7043 *pkt_len = sizeof(dummy_tcp_packet);
7044 *offsets = dummy_tcp_packet_offsets;
7049 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
7051 * @lkups: lookup elements or match criteria for the advanced recipe, one
7052 * structure per protocol header
7053 * @lkups_cnt: number of protocols
7054 * @s_rule: stores rule information from the match criteria
7055 * @dummy_pkt: dummy packet to fill according to filter match criteria
7056 * @pkt_len: packet length of dummy packet
7057 * @offsets: offset info for the dummy packet
/* NOTE(review): lossy extract -- case labels of the switch and several
 * braces/returns are missing from this view.  Code kept byte-identical.
 * Copies the selected dummy packet into the rule buffer, then overlays the
 * caller's header values, masked word-by-word, at each protocol's offset.
 */
7059 static enum ice_status
7060 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7061 struct ice_aqc_sw_rules_elem *s_rule,
7062 const u8 *dummy_pkt, u16 pkt_len,
7063 const struct ice_dummy_pkt_offsets *offsets)
7068 /* Start with a packet with a pre-defined/dummy content. Then, fill
7069 * in the header values to be looked up or matched.
7071 pkt = s_rule->pdata.lkup_tx_rx.hdr;
7073 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
7075 for (i = 0; i < lkups_cnt; i++) {
7076 enum ice_protocol_type type;
7077 u16 offset = 0, len = 0, j;
7080 /* find the start of this layer; it should be found since this
7081 * was already checked when searching for the dummy packet
7083 type = lkups[i].type;
7084 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
7085 if (type == offsets[j].type) {
7086 offset = offsets[j].offset;
7091 /* this should never happen in a correct calling sequence */
7093 return ICE_ERR_PARAM;
/* Header length per protocol type; unknown types are rejected.
 * (case labels not visible in this extract)
 */
7095 switch (lkups[i].type) {
7098 len = sizeof(struct ice_ether_hdr);
7101 len = sizeof(struct ice_ethtype_hdr);
7104 len = sizeof(struct ice_vlan_hdr);
7108 len = sizeof(struct ice_ipv4_hdr);
7112 len = sizeof(struct ice_ipv6_hdr);
7117 len = sizeof(struct ice_l4_hdr);
7120 len = sizeof(struct ice_sctp_hdr);
7123 len = sizeof(struct ice_nvgre);
7128 len = sizeof(struct ice_udp_tnl_hdr);
7132 len = sizeof(struct ice_udp_gtp_hdr);
7135 len = sizeof(struct ice_pppoe_hdr);
7138 len = sizeof(struct ice_esp_hdr);
7141 len = sizeof(struct ice_nat_t_hdr);
7144 len = sizeof(struct ice_ah_hdr);
7147 len = sizeof(struct ice_l2tpv3_sess_hdr);
7150 return ICE_ERR_PARAM;
7153 /* the length should be a word multiple */
7154 if (len % ICE_BYTES_PER_WORD)
7157 /* We have the offset to the header start, the length, the
7158 * caller's header values and mask. Use this information to
7159 * copy the data into the dummy packet appropriately based on
7160 * the mask. Note that we need to only write the bits as
7161 * indicated by the mask to make sure we don't improperly write
7162 * over any significant packet data.
7164 for (j = 0; j < len / sizeof(u16); j++)
7165 if (((u16 *)&lkups[i].m_u)[j])
7166 ((u16 *)(pkt + offset))[j] =
7167 (((u16 *)(pkt + offset))[j] &
7168 ~((u16 *)&lkups[i].m_u)[j]) |
7169 (((u16 *)&lkups[i].h_u)[j] &
7170 ((u16 *)&lkups[i].m_u)[j]);
7173 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
7179 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
7180 * @hw: pointer to the hardware structure
7181 * @tun_type: tunnel type
7182 * @pkt: dummy packet to fill in
7183 * @offsets: offset info for the dummy packet
/* NOTE(review): lossy extract -- the switch statement opener, returns, and
 * closing braces are not visible here; code kept byte-identical.
 * Looks up the currently open VXLAN/GENEVE tunnel port and patches it into
 * the dummy packet's outer UDP destination port.
 */
7185 static enum ice_status
7186 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
7187 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
7192 case ICE_SW_TUN_AND_NON_TUN:
7193 case ICE_SW_TUN_VXLAN_GPE:
7194 case ICE_SW_TUN_VXLAN:
7195 case ICE_SW_TUN_VXLAN_VLAN:
7196 case ICE_SW_TUN_UDP:
7197 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
7201 case ICE_SW_TUN_GENEVE:
7202 case ICE_SW_TUN_GENEVE_VLAN:
7203 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
7208 /* Nothing needs to be done for this tunnel type */
7212 /* Find the outer UDP protocol header and insert the port number */
7213 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
7214 if (offsets[i].type == ICE_UDP_OF) {
7215 struct ice_l4_hdr *hdr;
7218 offset = offsets[i].offset;
7219 hdr = (struct ice_l4_hdr *)&pkt[offset];
7220 hdr->dst_port = CPU_TO_BE16(open_port);
7230 * ice_find_adv_rule_entry - Search a rule entry
7231 * @hw: pointer to the hardware structure
7232 * @lkups: lookup elements or match criteria for the advanced recipe, one
7233 * structure per protocol header
7234 * @lkups_cnt: number of protocols
7235 * @recp_id: recipe ID for which we are finding the rule
7236 * @rinfo: other information regarding the rule e.g. priority and action info
7238 * Helper function to search for a given advance rule entry
7239 * Returns pointer to entry storing the rule if found
/* NOTE(review): lossy extract -- the memcmp size argument, `continue`
 * statements, and the final return are missing from this view; code kept
 * byte-identical.  Walks the recipe's filter list looking for an entry with
 * identical lookups and matching flag/tunnel-type rule info.
 */
7241 static struct ice_adv_fltr_mgmt_list_entry *
7242 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7243 u16 lkups_cnt, u16 recp_id,
7244 struct ice_adv_rule_info *rinfo)
7246 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7247 struct ice_switch_info *sw = hw->switch_info;
7250 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
7251 ice_adv_fltr_mgmt_list_entry, list_entry) {
7252 bool lkups_matched = true;
7254 if (lkups_cnt != list_itr->lkups_cnt)
7256 for (i = 0; i < list_itr->lkups_cnt; i++)
7257 if (memcmp(&list_itr->lkups[i], &lkups[i],
7259 lkups_matched = false;
7262 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
7263 rinfo->tun_type == list_itr->rule_info.tun_type &&
7271 * ice_adv_add_update_vsi_list
7272 * @hw: pointer to the hardware structure
7273 * @m_entry: pointer to current adv filter management list entry
7274 * @cur_fltr: filter information from the book keeping entry
7275 * @new_fltr: filter information with the new VSI to be added
7277 * Call AQ command to add or update previously created VSI list with new VSI.
7279 * Helper function to do book keeping associated with adding filter information
7280 * The algorithm to do the bookkeeping is described below:
7281 * When a VSI needs to subscribe to a given advanced filter
7282 * if only one VSI has been added till now
7283 * Allocate a new VSI list and add two VSIs
7284 * to this list using switch rule command
7285 * Update the previously created switch rule with the
7286 * newly created VSI list ID
7287 * if a VSI list was previously created
7288 * Add the new VSI to the previously created VSI list set
7289 * using the update switch rule command
/* NOTE(review): lossy extract -- some error-check branches and closing
 * braces are missing from this view; code kept byte-identical.
 * Subscribes an additional VSI to an existing advanced rule: either converts
 * a single-VSI forward rule into a VSI-list rule, or appends the new VSI to
 * the already-existing VSI list.
 */
7291 static enum ice_status
7292 ice_adv_add_update_vsi_list(struct ice_hw *hw,
7293 struct ice_adv_fltr_mgmt_list_entry *m_entry,
7294 struct ice_adv_rule_info *cur_fltr,
7295 struct ice_adv_rule_info *new_fltr)
7297 enum ice_status status;
7298 u16 vsi_list_id = 0;
/* Queue/queue-group/drop actions cannot be merged into a VSI list */
7300 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
7301 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
7302 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
7303 return ICE_ERR_NOT_IMPL;
7305 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
7306 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
7307 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
7308 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
7309 return ICE_ERR_NOT_IMPL;
7311 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
7312 /* Only one entry existed in the mapping and it was not already
7313 * a part of a VSI list. So, create a VSI list with the old and
7316 struct ice_fltr_info tmp_fltr;
7317 u16 vsi_handle_arr[2];
7319 /* A rule already exists with the new VSI being added */
7320 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
7321 new_fltr->sw_act.fwd_id.hw_vsi_id)
7322 return ICE_ERR_ALREADY_EXISTS;
7324 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
7325 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
7326 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
7332 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
7333 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
7334 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
7335 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
7336 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
7337 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
7339 /* Update the previous switch rule of "forward to VSI" to
7342 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
7346 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
7347 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
7348 m_entry->vsi_list_info =
7349 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
7352 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
7354 if (!m_entry->vsi_list_info)
7357 /* A rule already exists with the new VSI being added */
7358 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
7361 /* Update the previously created VSI list set with
7362 * the new VSI ID passed in
7364 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
7366 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
7368 ice_aqc_opc_update_sw_rules,
7370 /* update VSI list mapping info with new VSI ID */
7372 ice_set_bit(vsi_handle,
7373 m_entry->vsi_list_info->vsi_map);
7376 m_entry->vsi_count++;
7381 * ice_add_adv_rule - helper function to create an advanced switch rule
7382 * @hw: pointer to the hardware structure
7383 * @lkups: information on the words that needs to be looked up. All words
7384 * together makes one recipe
7385 * @lkups_cnt: num of entries in the lkups array
7386 * @rinfo: other information related to the rule that needs to be programmed
7387 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
7388 * ignored in case of error.
7390 * This function can program only 1 rule at a time. The lkups is used to
7391 * describe all the words that form the "lookup" portion of the recipe.
7392 * These words can span multiple protocols. Callers to this function need to
7393 * pass in a list of protocol headers with lookup information along and mask
7394 * that determines which words are valid from the given protocol header.
7395 * rinfo describes other information related to this rule such as forwarding
7396 * IDs, priority of this rule, etc.
/* NOTE(review): lossy extract -- several declarations (word_cnt, act, q_rgn,
 * prof_rule), case labels, braces, and intermediate statements are missing
 * from this view; code kept byte-identical, comments only.
 * Programs one advanced switch rule: validates lookups, finds/creates the
 * recipe, builds the dummy packet, issues the add-switch-rules AQ command,
 * and records the rule in the driver's bookkeeping list.
 */
7399 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7400 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
7401 struct ice_rule_query_data *added_entry)
7403 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
7404 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
7405 const struct ice_dummy_pkt_offsets *pkt_offsets;
7406 struct ice_aqc_sw_rules_elem *s_rule = NULL;
7407 struct LIST_HEAD_TYPE *rule_head;
7408 struct ice_switch_info *sw;
7409 enum ice_status status;
7410 const u8 *pkt = NULL;
7416 /* Initialize profile to result index bitmap */
7417 if (!hw->switch_info->prof_res_bm_init) {
7418 hw->switch_info->prof_res_bm_init = 1;
7419 ice_init_prof_result_bm(hw);
7422 prof_rule = ice_is_prof_rule(rinfo->tun_type);
7423 if (!prof_rule && !lkups_cnt)
7424 return ICE_ERR_PARAM;
7426 /* get # of words we need to match */
7428 for (i = 0; i < lkups_cnt; i++) {
7431 ptr = (u16 *)&lkups[i].m_u;
7432 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
/* Profile rules may match zero words; normal rules must match at least
 * one and no more than the chaining limit.
 */
7438 if (word_cnt > ICE_MAX_CHAIN_WORDS)
7439 return ICE_ERR_PARAM;
7441 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
7442 return ICE_ERR_PARAM;
7445 /* make sure that we can locate a dummy packet */
7446 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
7449 status = ICE_ERR_PARAM;
7450 goto err_ice_add_adv_rule;
7453 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
7454 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
7455 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
7456 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
7459 vsi_handle = rinfo->sw_act.vsi_handle;
7460 if (!ice_is_vsi_valid(hw, vsi_handle))
7461 return ICE_ERR_PARAM;
7463 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
7464 rinfo->sw_act.fwd_id.hw_vsi_id =
7465 ice_get_hw_vsi_num(hw, vsi_handle);
7466 if (rinfo->sw_act.flag & ICE_FLTR_TX)
7467 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
7469 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
7472 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
7474 /* we have to add VSI to VSI_LIST and increment vsi_count.
7475 * Also Update VSI list so that we can change forwarding rule
7476 * if the rule already exists, we will check if it exists with
7477 * same vsi_id, if not then add it to the VSI list if it already
7478 * exists if not then create a VSI list and add the existing VSI
7479 * ID and the new VSI ID to the list
7480 * We will add that VSI to the list
7482 status = ice_adv_add_update_vsi_list(hw, m_entry,
7483 &m_entry->rule_info,
7486 added_entry->rid = rid;
7487 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
7488 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
7492 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
7493 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
7495 return ICE_ERR_NO_MEMORY;
/* Encode the action word for the AQ rule (case labels not visible) */
7496 act |= ICE_SINGLE_ACT_LAN_ENABLE;
7497 switch (rinfo->sw_act.fltr_act) {
7498 case ICE_FWD_TO_VSI:
7499 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
7500 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
7501 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
7504 act |= ICE_SINGLE_ACT_TO_Q;
7505 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
7506 ICE_SINGLE_ACT_Q_INDEX_M;
7508 case ICE_FWD_TO_QGRP:
7509 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
7510 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
7511 act |= ICE_SINGLE_ACT_TO_Q;
7512 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
7513 ICE_SINGLE_ACT_Q_INDEX_M;
7514 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
7515 ICE_SINGLE_ACT_Q_REGION_M;
7517 case ICE_DROP_PACKET:
7518 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
7519 ICE_SINGLE_ACT_VALID_BIT;
7522 status = ICE_ERR_CFG;
7523 goto err_ice_add_adv_rule;
7526 /* set the rule LOOKUP type based on caller specified 'RX'
7527 * instead of hardcoding it to be either LOOKUP_TX/RX
7529 * for 'RX' set the source to be the port number
7530 * for 'TX' set the source to be the source HW VSI number (determined
7534 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
7535 s_rule->pdata.lkup_tx_rx.src =
7536 CPU_TO_LE16(hw->port_info->lport);
7538 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
7539 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
7542 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
7543 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
7545 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
7546 pkt_len, pkt_offsets);
7548 goto err_ice_add_adv_rule;
7550 if (rinfo->tun_type != ICE_NON_TUN &&
7551 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
7552 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
7553 s_rule->pdata.lkup_tx_rx.hdr,
7556 goto err_ice_add_adv_rule;
7559 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
7560 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
7563 goto err_ice_add_adv_rule;
7564 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
7565 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
7567 status = ICE_ERR_NO_MEMORY;
7568 goto err_ice_add_adv_rule;
7571 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
7572 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
7573 ICE_NONDMA_TO_NONDMA);
7574 if (!adv_fltr->lkups && !prof_rule) {
7575 status = ICE_ERR_NO_MEMORY;
7576 goto err_ice_add_adv_rule;
7579 adv_fltr->lkups_cnt = lkups_cnt;
7580 adv_fltr->rule_info = *rinfo;
7581 adv_fltr->rule_info.fltr_rule_id =
7582 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
7583 sw = hw->switch_info;
7584 sw->recp_list[rid].adv_rule = true;
7585 rule_head = &sw->recp_list[rid].filt_rules;
7587 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
7588 adv_fltr->vsi_count = 1;
7590 /* Add rule entry to book keeping list */
7591 LIST_ADD(&adv_fltr->list_entry, rule_head);
7593 added_entry->rid = rid;
7594 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
7595 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* Unified cleanup: free bookkeeping on failure; s_rule always freed */
7597 err_ice_add_adv_rule:
7598 if (status && adv_fltr) {
7599 ice_free(hw, adv_fltr->lkups);
7600 ice_free(hw, adv_fltr);
7603 ice_free(hw, s_rule);
7609 * ice_adv_rem_update_vsi_list
7610 * @hw: pointer to the hardware structure
7611 * @vsi_handle: VSI handle of the VSI to remove
7612 * @fm_list: filter management entry for which the VSI list management needs to
/* NOTE(review): lossy extract -- some declarations, status checks, and
 * braces are missing from this view; code kept byte-identical.
 * Removes one VSI from an advanced rule's VSI list; if only one subscriber
 * remains, converts the rule back to a plain forward-to-VSI rule and
 * deletes the now-unused VSI list.
 */
7615 static enum ice_status
7616 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
7617 struct ice_adv_fltr_mgmt_list_entry *fm_list)
7619 struct ice_vsi_list_map_info *vsi_list_info;
7620 enum ice_sw_lkup_type lkup_type;
7621 enum ice_status status;
7624 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
7625 fm_list->vsi_count == 0)
7626 return ICE_ERR_PARAM;
7628 /* A rule with the VSI being removed does not exist */
7629 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
7630 return ICE_ERR_DOES_NOT_EXIST;
7632 lkup_type = ICE_SW_LKUP_LAST;
7633 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
7634 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
7635 ice_aqc_opc_update_sw_rules,
7640 fm_list->vsi_count--;
7641 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
7642 vsi_list_info = fm_list->vsi_list_info;
7643 if (fm_list->vsi_count == 1) {
7644 struct ice_fltr_info tmp_fltr;
7647 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
7649 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
7650 return ICE_ERR_OUT_OF_RANGE;
7652 /* Make sure VSI list is empty before removing it below */
7653 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
7655 ice_aqc_opc_update_sw_rules,
7660 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
7661 tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
7662 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
7663 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
7664 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
7665 tmp_fltr.fwd_id.hw_vsi_id =
7666 ice_get_hw_vsi_num(hw, rem_vsi_handle);
7667 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
7668 ice_get_hw_vsi_num(hw, rem_vsi_handle);
7669 fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
7671 /* Update the previous switch rule of "MAC forward to VSI" to
7672 * "MAC fwd to VSI list"
7674 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
7676 ice_debug(hw, ICE_DBG_SW,
7677 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
7678 tmp_fltr.fwd_id.hw_vsi_id, status);
7681 fm_list->vsi_list_info->ref_cnt--;
7683 /* Remove the VSI list since it is no longer used */
7684 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
7686 ice_debug(hw, ICE_DBG_SW,
7687 "Failed to remove VSI list %d, error %d\n",
7688 vsi_list_id, status);
7692 LIST_DEL(&vsi_list_info->list_entry);
7693 ice_free(hw, vsi_list_info);
7694 fm_list->vsi_list_info = NULL;
7701 * ice_rem_adv_rule - removes existing advanced switch rule
7702 * @hw: pointer to the hardware structure
7703 * @lkups: information on the words that needs to be looked up. All words
7704 * together makes one recipe
7705 * @lkups_cnt: num of entries in the lkups array
7706 * @rinfo: pointer to the rule information for the rule
7708 * This function can be used to remove 1 rule at a time. The lkups is
7709 * used to describe all the words that forms the "lookup" portion of the
7710 * rule. These words can span multiple protocols. Callers to this function
7711 * need to pass in a list of protocol headers with lookup information along
7712 * and mask that determines which words are valid from the given protocol
7713 * header. rinfo describes other information related to this rule such as
7714 * forwarding IDs, priority of this rule, etc.
/* NOTE(review): lossy extract -- the return type line, some branch bodies,
 * braces, and the final return are missing from this view; code kept
 * byte-identical.  Locates the recipe and rule entry matching the given
 * lookups, detaches the VSI (or the whole rule) and, when no subscriber
 * remains, removes the switch rule via AQ and frees the bookkeeping entry.
 */
7717 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7718 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
7720 struct ice_adv_fltr_mgmt_list_entry *list_elem;
7721 struct ice_prot_lkup_ext lkup_exts;
7722 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
7723 enum ice_status status = ICE_SUCCESS;
7724 bool remove_rule = false;
7725 u16 i, rid, vsi_handle;
7727 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
7728 for (i = 0; i < lkups_cnt; i++) {
7731 if (lkups[i].type >= ICE_PROTOCOL_LAST)
7734 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
7739 /* Create any special protocol/offset pairs, such as looking at tunnel
7740 * bits by extracting metadata
7742 status = ice_add_special_words(rinfo, &lkup_exts);
7746 rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
7747 /* If did not find a recipe that match the existing criteria */
7748 if (rid == ICE_MAX_NUM_RECIPES)
7749 return ICE_ERR_PARAM;
7751 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
7752 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
7753 /* the rule is already removed */
7756 ice_acquire_lock(rule_lock);
7757 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
7759 } else if (list_elem->vsi_count > 1) {
7760 remove_rule = false;
7761 vsi_handle = rinfo->sw_act.vsi_handle;
7762 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
7764 vsi_handle = rinfo->sw_act.vsi_handle;
7765 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
7767 ice_release_lock(rule_lock);
7770 if (list_elem->vsi_count == 0)
7773 ice_release_lock(rule_lock);
7775 struct ice_aqc_sw_rules_elem *s_rule;
7778 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
7780 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
7783 return ICE_ERR_NO_MEMORY;
7784 s_rule->pdata.lkup_tx_rx.act = 0;
7785 s_rule->pdata.lkup_tx_rx.index =
7786 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
7787 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
7788 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
7790 ice_aqc_opc_remove_sw_rules, NULL);
/* ICE_ERR_DOES_NOT_EXIST is tolerated: the HW rule is already gone,
 * so driver bookkeeping is cleaned up either way.
 */
7791 if (status == ICE_SUCCESS || status == ICE_ERR_DOES_NOT_EXIST) {
7792 struct ice_switch_info *sw = hw->switch_info;
7794 ice_acquire_lock(rule_lock);
7795 LIST_DEL(&list_elem->list_entry);
7796 ice_free(hw, list_elem->lkups);
7797 ice_free(hw, list_elem);
7798 ice_release_lock(rule_lock);
7799 if (LIST_EMPTY(&sw->recp_list[rid].filt_rules))
7800 sw->recp_list[rid].adv_rule = false;
7802 ice_free(hw, s_rule);
7808 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
7809 * @hw: pointer to the hardware structure
7810 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
7812 * This function is used to remove 1 rule at a time. The removal is based on
7813 * the remove_entry parameter. This function will remove rule for a given
7814 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
/* NOTE(review): lossy extract -- the return type line and the loop's
 * closing brace are missing from this view; code kept byte-identical.
 * Resolves a rule_id within the given recipe's filter list and delegates
 * the actual removal to ice_rem_adv_rule() with the caller's VSI handle.
 */
7817 ice_rem_adv_rule_by_id(struct ice_hw *hw,
7818 struct ice_rule_query_data *remove_entry)
7820 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7821 struct LIST_HEAD_TYPE *list_head;
7822 struct ice_adv_rule_info rinfo;
7823 struct ice_switch_info *sw;
7825 sw = hw->switch_info;
7826 if (!sw->recp_list[remove_entry->rid].recp_created)
7827 return ICE_ERR_PARAM;
7828 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
7829 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
7831 if (list_itr->rule_info.fltr_rule_id ==
7832 remove_entry->rule_id) {
7833 rinfo = list_itr->rule_info;
7834 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
7835 return ice_rem_adv_rule(hw, list_itr->lkups,
7836 list_itr->lkups_cnt, &rinfo);
7839 /* either list is empty or unable to find rule */
7840 return ICE_ERR_DOES_NOT_EXIST;
7844 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
7846 * @hw: pointer to the hardware structure
7847 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
7849 * This function is used to remove all the rules for a given VSI and as soon
7850 * as removing a rule fails, it will return immediately with the error code,
7851 * else it will return ICE_SUCCESS
/* NOTE(review): lossy extract -- `continue` statements, status check, and
 * closing braces are missing from this view; code kept byte-identical.
 * Iterates every created recipe with advanced rules and removes each rule
 * that the given VSI subscribes to; stops at the first removal failure.
 */
7853 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
7855 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7856 struct ice_vsi_list_map_info *map_info;
7857 struct LIST_HEAD_TYPE *list_head;
7858 struct ice_adv_rule_info rinfo;
7859 struct ice_switch_info *sw;
7860 enum ice_status status;
7861 u16 vsi_list_id = 0;
7864 sw = hw->switch_info;
7865 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
7866 if (!sw->recp_list[rid].recp_created)
7868 if (!sw->recp_list[rid].adv_rule)
7870 list_head = &sw->recp_list[rid].filt_rules;
7872 LIST_FOR_EACH_ENTRY(list_itr, list_head,
7873 ice_adv_fltr_mgmt_list_entry, list_entry) {
7874 map_info = ice_find_vsi_list_entry(&sw->recp_list[rid],
7879 rinfo = list_itr->rule_info;
7880 rinfo.sw_act.vsi_handle = vsi_handle;
7881 status = ice_rem_adv_rule(hw, list_itr->lkups,
7882 list_itr->lkups_cnt, &rinfo);
7892 * ice_replay_fltr - Replay all the filters stored by a specific list head
7893 * @hw: pointer to the hardware structure
7894 * @list_head: list for which filters needs to be replayed
7895 * @recp_id: Recipe ID for which rules need to be replayed
/* NOTE(review): lossy extract -- the while/loop construct around the
 * per-VSI replay, some declarations, and braces are missing from this view;
 * code kept byte-identical.  Re-adds every filter from the given list after
 * moving the entries to a temporary head (so re-adding does not collide
 * with the existing bookkeeping).
 */
7897 static enum ice_status
7898 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
7900 struct ice_fltr_mgmt_list_entry *itr;
7901 enum ice_status status = ICE_SUCCESS;
7902 struct ice_sw_recipe *recp_list;
7903 u8 lport = hw->port_info->lport;
7904 struct LIST_HEAD_TYPE l_head;
7906 if (LIST_EMPTY(list_head))
7909 recp_list = &hw->switch_info->recp_list[recp_id];
7910 /* Move entries from the given list_head to a temporary l_head so that
7911 * they can be replayed. Otherwise when trying to re-add the same
7912 * filter, the function will return already exists
7914 LIST_REPLACE_INIT(list_head, &l_head);
7916 /* Mark the given list_head empty by reinitializing it so filters
7917 * could be added again by *handler
7919 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
7921 struct ice_fltr_list_entry f_entry;
7923 f_entry.fltr_info = itr->fltr_info;
7924 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
7925 status = ice_add_rule_internal(hw, recp_list, lport,
7927 if (status != ICE_SUCCESS)
7932 /* Add a filter per VSI separately */
7937 ice_find_first_bit(itr->vsi_list_info->vsi_map,
7939 if (!ice_is_vsi_valid(hw, vsi_handle))
7942 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
7943 f_entry.fltr_info.vsi_handle = vsi_handle;
7944 f_entry.fltr_info.fwd_id.hw_vsi_id =
7945 ice_get_hw_vsi_num(hw, vsi_handle);
7946 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
7947 if (recp_id == ICE_SW_LKUP_VLAN)
7948 status = ice_add_vlan_internal(hw, recp_list,
7951 status = ice_add_rule_internal(hw, recp_list,
7954 if (status != ICE_SUCCESS)
7959 /* Clear the filter management list */
7960 ice_rem_sw_rule_info(hw, &l_head);
7965 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
7966 * @hw: pointer to the hardware structure
7968 * NOTE: This function does not clean up partially added filters on error.
7969 * It is up to caller of the function to issue a reset or fail early.
/* NOTE(review): lossy extract -- the loop's closing brace, break/return,
 * and the declaration of `i` are missing from this view; code kept
 * byte-identical.  Replays every recipe's filter list, stopping at the
 * first failure.
 */
7971 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
7973 struct ice_switch_info *sw = hw->switch_info;
7974 enum ice_status status = ICE_SUCCESS;
7977 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7978 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
7980 status = ice_replay_fltr(hw, i, head);
7981 if (status != ICE_SUCCESS)
7988 * ice_replay_vsi_fltr - Replay filters for requested VSI
7989 * @hw: pointer to the hardware structure
7990 * @pi: pointer to port information structure
7991 * @sw: pointer to switch info struct for which function replays filters
7992 * @vsi_handle: driver VSI handle
7993 * @recp_id: Recipe ID for which rules need to be replayed
7994 * @list_head: list for which filters need to be replayed
7996 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
7997 * It is required to pass valid VSI handle.
7999 static enum ice_status
8000 ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
8001 struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
8002 struct LIST_HEAD_TYPE *list_head)
8004 struct ice_fltr_mgmt_list_entry *itr;
8005 enum ice_status status = ICE_SUCCESS;
8006 struct ice_sw_recipe *recp_list;
/* Empty list - nothing to replay for this recipe */
8009 if (LIST_EMPTY(list_head))
8011 recp_list = &sw->recp_list[recp_id];
8012 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
8014 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
8016 struct ice_fltr_list_entry f_entry;
/* Work on a copy so the stored bookkeeping entry stays intact */
8018 f_entry.fltr_info = itr->fltr_info;
/* A non-VLAN rule owned solely by this VSI can be re-added as-is */
8019 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
8020 itr->fltr_info.vsi_handle == vsi_handle) {
8021 /* update the src in case it is VSI num */
8022 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
8023 f_entry.fltr_info.src = hw_vsi_id;
8024 status = ice_add_rule_internal(hw, recp_list,
8027 if (status != ICE_SUCCESS)
/* Skip shared rules that do not reference this VSI */
8031 if (!itr->vsi_list_info ||
8032 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
8034 /* Clearing it so that the logic can add it back */
8035 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
8036 f_entry.fltr_info.vsi_handle = vsi_handle;
/* Replayed per-VSI rules always forward to this VSI */
8037 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
8038 /* update the src in case it is VSI num */
8039 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
8040 f_entry.fltr_info.src = hw_vsi_id;
/* VLAN lookups need dedicated handling (VLAN ID bookkeeping) */
8041 if (recp_id == ICE_SW_LKUP_VLAN)
8042 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
8044 status = ice_add_rule_internal(hw, recp_list,
8047 if (status != ICE_SUCCESS)
8056 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
8057 * @hw: pointer to the hardware structure
8058 * @vsi_handle: driver VSI handle
8058 * @list_head: list for which filters need to be replayed
8060 * Replay the advanced rule for the given VSI.
8062 static enum ice_status
8063 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
8064 struct LIST_HEAD_TYPE *list_head)
8066 struct ice_rule_query_data added_entry = { 0 };
8067 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
8068 enum ice_status status = ICE_SUCCESS;
/* Empty list - no advanced rules to replay */
8070 if (LIST_EMPTY(list_head))
8072 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
8074 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
8075 u16 lk_cnt = adv_fltr->lkups_cnt;
/* Only replay rules whose switch action targets this VSI */
8077 if (vsi_handle != rinfo->sw_act.vsi_handle)
8079 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
8088 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
8089 * @hw: pointer to the hardware structure
8090 * @pi: pointer to port information structure
8091 * @vsi_handle: driver VSI handle
8093 * Replays filters for requested VSI via vsi_handle.
8096 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
8099 struct ice_switch_info *sw = hw->switch_info;
8100 enum ice_status status;
8103 /* Update the recipes that were created */
8104 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8105 struct LIST_HEAD_TYPE *head;
8107 head = &sw->recp_list[i].filt_replay_rules;
/* Regular and advanced rules are replayed by different helpers */
8108 if (!sw->recp_list[i].adv_rule)
8109 status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
8112 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
/* Stop on the first recipe whose replay fails */
8113 if (status != ICE_SUCCESS)
8121 * ice_rm_sw_replay_rule_info - helper function to delete filter replay rules
8122 * @hw: pointer to the HW struct
8123 * @sw: pointer to switch info struct for which function removes filters
8125 * Deletes the filter replay rules for given switch
8127 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
8134 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8135 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
8136 struct LIST_HEAD_TYPE *l_head;
8138 l_head = &sw->recp_list[i].filt_replay_rules;
/* Regular and advanced rules are freed by different helpers */
8139 if (!sw->recp_list[i].adv_rule)
8140 ice_rem_sw_rule_info(hw, l_head);
8142 ice_rem_adv_rule_info(hw, l_head);
8148 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
8149 * @hw: pointer to the HW struct
8151 * Deletes the filter replay rules.
8153 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
/* Convenience wrapper: operate on the HW's own switch_info */
8155 ice_rm_sw_replay_rule_info(hw, hw->switch_info);