1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020 Intel Corporation
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
/* Byte offsets into the dummy Ethernet header below, plus protocol-ID /
 * EtherType constants used when building switch filter rules.
 */
#define ICE_ETH_DA_OFFSET 0 /* destination MAC starts at byte 0 */
#define ICE_ETH_ETHTYPE_OFFSET 12 /* EtherType follows DA + SA */
#define ICE_ETH_VLAN_TCI_OFFSET 14 /* VLAN TCI when an 802.1Q tag is present */
#define ICE_MAX_VLAN_ID 0xFFF /* VLAN ID is a 12-bit field */
#define ICE_IPV4_NVGRE_PROTO_ID 0x002F /* IPv4 protocol number for GRE */
#define ICE_PPP_IPV6_PROTO_ID 0x0057 /* PPP protocol field value for IPv6 */
#define ICE_IPV6_ETHER_ID 0x86DD /* EtherType for IPv6 */
#define ICE_TCP_PROTO_ID 0x06 /* IP protocol number for TCP */
/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
 * struct to configure any switch filter rules.
 * {DA (6 bytes), SA(6 bytes),
 * Ether type (2 bytes for header without VLAN tag) OR
 * VLAN tag (4 bytes for header with VLAN tag) }
 *
 * Word on Hardcoded values
 * byte 0 = 0x2: to identify it as locally administered DA MAC
 * byte 6 = 0x2: to identify it as locally administered SA MAC
 * byte 12 = 0x81 & byte 13 = 0x00:
 * In case of VLAN filter first two bytes defines ether type (0x8100)
 * and remaining two bytes are placeholder for programming a given VLAN ID
 * In case of Ether type filter it is treated as header without VLAN tag
 * and byte 12 and 13 is used to program a given Ether type instead
 */
/* NOTE(review): the initializer continues past what is shown here (remaining
 * bytes elided in this view) — confirm against the full source.
 */
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* Describes one protocol header within a dummy packet: the protocol type and
 * the byte offset where that header begins.  Offset tables built from these
 * entries are terminated by an ICE_PROTOCOL_LAST element.
 */
struct ice_dummy_pkt_offsets {
	enum ice_protocol_type type;
	u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
/* Offset table for dummy_gre_tcp_packet below.
 * NOTE(review): several entries are elided in this view — confirm against
 * the full source.
 */
static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy MAC + IPv4 (protocol 0x2F = GRE) + NVGRE + inner MAC/IPv4/TCP
 * packet template (listing partially elided in this view).
 */
static const u8 dummy_gre_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00, /* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
/* Offset table for dummy_gre_udp_packet below (entries partially elided). */
static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy MAC + IPv4 (protocol 0x2F = GRE) + NVGRE + inner MAC/IPv4/UDP
 * packet template (listing partially elided in this view).
 */
static const u8 dummy_gre_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00, /* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
	0x00, 0x08, 0x00, 0x00,
/* Offset table for dummy_udp_tun_tcp_packet (entries partially elided). */
static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_VXLAN_GPE, 42 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy MAC + IPv4 + UDP (dst port 0x12b5 = 4789, VXLAN) + inner
 * MAC/IPv4/TCP packet template (listing partially elided in this view).
 */
static const u8 dummy_udp_tun_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00, /* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
	0x00, 0x46, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
/* Offset table for dummy_udp_tun_udp_packet (entries partially elided). */
static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_VXLAN_GPE, 42 },
	{ ICE_UDP_ILOS, 84 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy MAC + IPv4 + UDP (dst port 0x12b5 = 4789, VXLAN) + inner
 * MAC/IPv4/UDP packet template (listing partially elided in this view).
 */
static const u8 dummy_udp_tun_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00, /* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
	0x00, 0x3a, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
	0x00, 0x08, 0x00, 0x00,
/* offset info for MAC + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_UDP_ILOS, 34 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + IPv4 + UDP (terminator elided in this view) */
static const u8 dummy_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00, /* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14 },
	{ ICE_IPV4_OFOS, 18 },
	{ ICE_UDP_ILOS, 38 },
	{ ICE_PROTOCOL_LAST, 0 },

/* C-tag (801.1Q), IPv4:UDP dummy packet (terminator elided in this view) */
static const u8 dummy_vlan_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, /* ICE_ETYPE_OL 12 */

	0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + IPv4 + TCP (terminator elided in this view) */
static const u8 dummy_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00, /* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14 },
	{ ICE_IPV4_OFOS, 18 },
	{ ICE_PROTOCOL_LAST, 0 },

/* C-tag (801.1Q), IPv4:TCP dummy packet (terminator elided in this view) */
static const u8 dummy_vlan_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, /* ICE_ETYPE_OL 12 */

	0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Offset table for dummy_tcp_ipv6_packet (entries partially elided). */
static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV6_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy MAC + IPv6 + TCP packet (terminator elided in this view) */
static const u8 dummy_tcp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD, /* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 (comment said 40; offset table says 14) */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* C-tag (802.1Q): IPv6 + TCP */
static const struct ice_dummy_pkt_offsets
dummy_vlan_tcp_ipv6_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14 },
	{ ICE_IPV6_OFOS, 18 },
	{ ICE_PROTOCOL_LAST, 0 },

/* C-tag (802.1Q), IPv6 + TCP dummy packet (terminator elided in this view) */
static const u8 dummy_vlan_tcp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, /* ICE_ETYPE_OL 12 */

	0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Offset table for dummy_udp_ipv6_packet (entries partially elided). */
static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV6_OFOS, 14 },
	{ ICE_UDP_ILOS, 54 },
	{ ICE_PROTOCOL_LAST, 0 },

/* IPv6 + UDP dummy packet (terminator elided in this view) */
static const u8 dummy_udp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD, /* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 (comment said 40; offset table says 14) */
	0x00, 0x10, 0x11, 0x00, /* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
	0x00, 0x10, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* C-tag (802.1Q): IPv6 + UDP */
static const struct ice_dummy_pkt_offsets
dummy_vlan_udp_ipv6_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14 },
	{ ICE_IPV6_OFOS, 18 },
	{ ICE_UDP_ILOS, 58 },
	{ ICE_PROTOCOL_LAST, 0 },

/* C-tag (802.1Q), IPv6 + UDP dummy packet (terminator elided in this view) */
static const u8 dummy_vlan_udp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, /* ICE_ETYPE_OL 12 */

	0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
	0x00, 0x08, 0x11, 0x00, /* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Offset table for dummy_udp_gtp_packet (entries partially elided). */
static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy MAC + IPv4 + UDP (dst port 0x0868 = 2152, GTP-U) + GTP packet
 * template (listing partially elided in this view).
 */
static const u8 dummy_udp_gtp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
	0x00, 0x1c, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
	0x00, 0x00, 0x00, 0x00,
/* Offset table for a bare PPPoE dummy packet (entries partially elided). */
static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14},
	{ ICE_PROTOCOL_LAST, 0 },

/* Offset table for dummy_pppoe_ipv4_packet below. */
static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv4_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14},
	{ ICE_IPV4_OFOS, 26 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy MAC + VLAN + PPPoE (EtherType 0x8864) + PPP (0x0021 = IPv4) + IPv4
 * packet template (listing partially elided in this view).
 */
static const u8 dummy_pppoe_ipv4_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, /* ICE_ETYPE_OL 12 */

	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */

	0x00, 0x21, /* PPP Link Layer 24 */

	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Offset table for dummy_pppoe_ipv4_tcp_packet below.
 * NOTE(review): the storage-class line preceding this declaration is elided
 * in this view — confirm it is declared static const in the full source.
 */
struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_tcp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14},
	{ ICE_IPV4_OFOS, 26 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy MAC + VLAN + PPPoE + PPP (0x0021 = IPv4) + IPv4 + TCP packet
 * template (listing partially elided in this view).
 */
static const u8 dummy_pppoe_ipv4_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, /* ICE_ETYPE_OL 12 */

	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */

	0x00, 0x21, /* PPP Link Layer 24 */

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 26 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 46 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Offset table for dummy_pppoe_ipv4_udp_packet below.
 * NOTE(review): the storage-class line preceding this declaration is elided
 * in this view — confirm it is declared static const in the full source.
 */
struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_udp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14},
	{ ICE_IPV4_OFOS, 26 },
	{ ICE_UDP_ILOS, 46 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy MAC + VLAN + PPPoE + PPP (0x0021 = IPv4) + IPv4 + UDP packet
 * template (listing partially elided in this view).
 */
static const u8 dummy_pppoe_ipv4_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, /* ICE_ETYPE_OL 12 */

	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */

	0x00, 0x21, /* PPP Link Layer 24 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 26 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 46 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Offset table for dummy_pppoe_ipv6_packet below (entries partially elided). */
static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14},
	{ ICE_IPV6_OFOS, 26 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy MAC + VLAN + PPPoE + PPP (0x0057 = IPv6) + IPv6 packet template
 * (listing partially elided in this view).
 */
static const u8 dummy_pppoe_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, /* ICE_ETYPE_OL 12 */

	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */

	0x00, 0x57, /* PPP Link Layer 24 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
	0x00, 0x00, 0x3b, 0x00, /* Next header 0x3b = No Next Header */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Offset table for dummy_pppoe_ipv6_tcp_packet below.
 * NOTE(review): the storage-class line preceding this declaration is elided
 * in this view — confirm it is declared static const in the full source.
 */
struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_tcp_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14},
	{ ICE_IPV6_OFOS, 26 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy MAC + VLAN + PPPoE + PPP (0x0057 = IPv6) + IPv6 + TCP packet
 * template (listing partially elided in this view).
 */
static const u8 dummy_pppoe_ipv6_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, /* ICE_ETYPE_OL 12 */

	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */

	0x00, 0x57, /* PPP Link Layer 24 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 66 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Offset table for dummy_pppoe_ipv6_udp_packet below.
 * NOTE(review): the storage-class line preceding this declaration is elided
 * in this view — confirm it is declared static const in the full source.
 */
struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_udp_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14},
	{ ICE_IPV6_OFOS, 26 },
	{ ICE_UDP_ILOS, 66 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy MAC + VLAN + PPPoE + PPP (0x0057 = IPv6) + IPv6 + UDP packet
 * template (listing partially elided in this view).
 */
static const u8 dummy_pppoe_ipv6_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, /* ICE_ETYPE_OL 12 */

	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */

	0x00, 0x57, /* PPP Link Layer 24 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
	0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 66 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Offset table for dummy_ipv4_esp_pkt (entries partially elided). */
static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy MAC + IPv4 (protocol 0x32 = ESP) + ESP packet template
 * (listing partially elided in this view).
 */
static const u8 dummy_ipv4_esp_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x32, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, /* 2 bytes for 4 bytes alignment */

/* Offset table for dummy_ipv6_esp_pkt (entries partially elided). */
static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
	{ ICE_IPV6_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy MAC + IPv6 (next header 0x32 = ESP) + ESP packet template
 * (listing partially elided in this view).
 */
static const u8 dummy_ipv6_esp_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x08, 0x32, 0x00, /* Next header ESP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Offset table for dummy_ipv4_ah_pkt (entries partially elided). */
static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy MAC + IPv4 (protocol 0x33 = AH) + AH packet template
 * (listing partially elided in this view).
 */
static const u8 dummy_ipv4_ah_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x33, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, /* 2 bytes for 4 bytes alignment */

/* Offset table for dummy_ipv6_ah_pkt (entries partially elided). */
static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
	{ ICE_IPV6_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy MAC + IPv6 (next header 0x33 = AH) + AH packet template
 * (listing partially elided in this view).
 */
static const u8 dummy_ipv6_ah_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x0c, 0x33, 0x00, /* Next header AH */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Offset table for dummy_ipv4_nat_pkt (entries partially elided). */
static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_UDP_ILOS, 34 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy MAC + IPv4 + UDP (dst port 0x1194 = 4500, IPsec NAT traversal)
 * packet template (listing partially elided in this view).
 */
static const u8 dummy_ipv4_nat_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_IL 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, /* 2 bytes for 4 bytes alignment */

/* Offset table for dummy_ipv6_nat_pkt (entries partially elided). */
static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
	{ ICE_IPV6_OFOS, 14 },
	{ ICE_UDP_ILOS, 54 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy MAC + IPv6 + UDP (dst port 0x1194 = 4500, IPsec NAT traversal)
 * packet template (listing partially elided in this view).
 */
static const u8 dummy_ipv6_nat_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Offset table for dummy_ipv4_l2tpv3_pkt (entries partially elided). */
static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy MAC + IPv4 (protocol 0x73 = L2TPv3) + L2TPv3 packet template
 * (listing partially elided in this view).
 */
static const u8 dummy_ipv4_l2tpv3_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x73, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, /* 2 bytes for 4 bytes alignment */

/* Offset table for dummy_ipv6_l2tpv3_pkt (entries partially elided). */
static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
	{ ICE_IPV6_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy MAC + IPv6 (next header 0x73 = L2TPv3) + L2TPv3 packet template
 * (listing partially elided in this view).
 */
static const u8 dummy_ipv6_l2tpv3_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
	0x00, 0x0c, 0x73, 0x40,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* this is a recipe to profile association bitmap: bit j of
 * recipe_to_profile[r] is set when recipe r is associated with profile j.
 */
static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
			  ICE_MAX_NUM_PROFILES);

/* this is a profile to recipe association bitmap: the reverse mapping of
 * recipe_to_profile above.
 */
static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
			  ICE_MAX_NUM_RECIPES);

/* Forward declaration: refreshes the two association bitmaps above from FW. */
static void ice_get_recp_to_prof_map(struct ice_hw *hw);
/**
 * ice_collect_result_idx - copy result index values
 * @buf: buffer that contains the result index
 * @recp: the recipe struct to copy data into
 *
 * If the result-enable bit is set in the recipe content, record the result
 * index (with the enable bit masked off) in the recipe's res_idxs bitmap.
 */
static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
				   struct ice_sw_recipe *recp)
	if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
		ice_set_bit(buf->content.result_indx &
			    ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
/**
 * ice_get_tun_type_for_recipe - get tunnel type for the recipe
 * @rid: recipe ID that we are populating
 *
 * Scans the profiles associated with @rid (via recipe_to_profile) and
 * classifies the recipe's tunnel type from the hard-coded profile ID
 * groupings below.
 *
 * NOTE(review): interior lines (continue/break statements, some flag
 * assignments and the closing return) are elided in this view — confirm
 * against the full source.
 */
static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid)
	u8 vxlan_profile[12] = {10, 11, 12, 16, 17, 18, 22, 23, 24, 25, 26, 27};
	u8 gre_profile[12] = {13, 14, 15, 19, 20, 21, 28, 29, 30, 31, 32, 33};
	u8 pppoe_profile[7] = {34, 35, 36, 37, 38, 39, 40};
	u8 non_tun_profile[6] = {4, 5, 6, 7, 8, 9};
	enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
	u16 i, j, profile_num = 0;
	bool non_tun_valid = false;
	bool pppoe_valid = false;
	bool vxlan_valid = false;
	bool gre_valid = false;
	bool gtp_valid = false;
	bool flag_valid = false;

	/* Walk every profile bound to this recipe and note which families
	 * (GRE, VXLAN, PPPoE, non-tunnel, GTP, ESP..PFCP flags) it touches.
	 */
	for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
		if (!ice_is_bit_set(recipe_to_profile[rid], j))

		for (i = 0; i < 12; i++) {
			if (gre_profile[i] == j)

		for (i = 0; i < 12; i++) {
			if (vxlan_profile[i] == j)

		for (i = 0; i < 7; i++) {
			if (pppoe_profile[i] == j)

		for (i = 0; i < 6; i++) {
			if (non_tun_profile[i] == j)
				non_tun_valid = true;

		if (j >= ICE_PROFID_IPV4_GTPC_TEID &&
		    j <= ICE_PROFID_IPV6_GTPU_IPV6_OTHER)

		if (j >= ICE_PROFID_IPV4_ESP &&
		    j <= ICE_PROFID_IPV6_PFCP_SESSION)

	/* Pick the tunnel type from the families seen above. */
	if (!non_tun_valid && vxlan_valid)
		tun_type = ICE_SW_TUN_VXLAN;
	else if (!non_tun_valid && gre_valid)
		tun_type = ICE_SW_TUN_NVGRE;
	else if (!non_tun_valid && pppoe_valid)
		tun_type = ICE_SW_TUN_PPPOE;
	else if (!non_tun_valid && gtp_valid)
		tun_type = ICE_SW_TUN_GTP;
	else if (non_tun_valid &&
		 (vxlan_valid || gre_valid || gtp_valid || pppoe_valid))
		tun_type = ICE_SW_TUN_AND_NON_TUN;
	else if (non_tun_valid && !vxlan_valid && !gre_valid && !gtp_valid &&
		tun_type = ICE_NON_TUN;

	/* Narrow PPPoE to IPv4-only / IPv6-only when only one is bound. */
	if (profile_num > 1 && tun_type == ICE_SW_TUN_PPPOE) {
		i = ice_is_bit_set(recipe_to_profile[rid],
				   ICE_PROFID_PPPOE_IPV4_OTHER);
		j = ice_is_bit_set(recipe_to_profile[rid],
				   ICE_PROFID_PPPOE_IPV6_OTHER);
			tun_type = ICE_SW_TUN_PPPOE_IPV4;
			tun_type = ICE_SW_TUN_PPPOE_IPV6;

	/* Exactly one profile: map the single profile ID to its tunnel type. */
	if (profile_num == 1 && (flag_valid || non_tun_valid || pppoe_valid)) {
		for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
			if (ice_is_bit_set(recipe_to_profile[rid], j)) {
				case ICE_PROFID_IPV4_TCP:
					tun_type = ICE_SW_IPV4_TCP;
				case ICE_PROFID_IPV4_UDP:
					tun_type = ICE_SW_IPV4_UDP;
				case ICE_PROFID_IPV6_TCP:
					tun_type = ICE_SW_IPV6_TCP;
				case ICE_PROFID_IPV6_UDP:
					tun_type = ICE_SW_IPV6_UDP;
				case ICE_PROFID_PPPOE_PAY:
					tun_type = ICE_SW_TUN_PPPOE_PAY;
				case ICE_PROFID_PPPOE_IPV4_TCP:
					tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
				case ICE_PROFID_PPPOE_IPV4_UDP:
					tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
				case ICE_PROFID_PPPOE_IPV4_OTHER:
					tun_type = ICE_SW_TUN_PPPOE_IPV4;
				case ICE_PROFID_PPPOE_IPV6_TCP:
					tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
				case ICE_PROFID_PPPOE_IPV6_UDP:
					/* NOTE(review): IPv6 UDP profile maps
					 * to the IPv4 UDP tunnel type — looks
					 * like a copy/paste bug; presumably
					 * should be ICE_SW_TUN_PPPOE_IPV6_UDP.
					 * Confirm and fix.
					 */
					tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
				case ICE_PROFID_PPPOE_IPV6_OTHER:
					tun_type = ICE_SW_TUN_PPPOE_IPV6;
				case ICE_PROFID_IPV4_ESP:
					tun_type = ICE_SW_TUN_IPV4_ESP;
				case ICE_PROFID_IPV6_ESP:
					tun_type = ICE_SW_TUN_IPV6_ESP;
				case ICE_PROFID_IPV4_AH:
					tun_type = ICE_SW_TUN_IPV4_AH;
				case ICE_PROFID_IPV6_AH:
					tun_type = ICE_SW_TUN_IPV6_AH;
				case ICE_PROFID_IPV4_NAT_T:
					tun_type = ICE_SW_TUN_IPV4_NAT_T;
				case ICE_PROFID_IPV6_NAT_T:
					tun_type = ICE_SW_TUN_IPV6_NAT_T;
				case ICE_PROFID_IPV4_PFCP_NODE:
					ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
				case ICE_PROFID_IPV6_PFCP_NODE:
					ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
				case ICE_PROFID_IPV4_PFCP_SESSION:
					ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
				case ICE_PROFID_IPV6_PFCP_SESSION:
					ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
				case ICE_PROFID_MAC_IPV4_L2TPV3:
					tun_type = ICE_SW_TUN_IPV4_L2TPV3;
				case ICE_PROFID_MAC_IPV6_L2TPV3:
					tun_type = ICE_SW_TUN_IPV6_L2TPV3;
1197 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
1198 * @hw: pointer to hardware structure
1199 * @recps: struct that we need to populate
1200 * @rid: recipe ID that we are populating
1201 * @refresh_required: true if we should get recipe to profile mapping from FW
1203 * This function is used to populate all the necessary entries into our
1204 * bookkeeping so that we have a current list of all the recipes that are
1205 * programmed in the firmware.
1207 static enum ice_status
1208 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
1209 bool *refresh_required)
1211 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
1212 struct ice_aqc_recipe_data_elem *tmp;
1213 u16 num_recps = ICE_MAX_NUM_RECIPES;
1214 struct ice_prot_lkup_ext *lkup_exts;
1215 enum ice_status status;
1219 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
1221 /* we need a buffer big enough to accommodate all the recipes */
1222 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
1223 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
1225 return ICE_ERR_NO_MEMORY;
1227 tmp[0].recipe_indx = rid;
1228 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
1229 /* non-zero status meaning recipe doesn't exist */
1233 /* Get recipe to profile map so that we can get the fv from lkups that
1234 * we read for a recipe from FW. Since we want to minimize the number of
1235 * times we make this FW call, just make one call and cache the copy
1236 * until a new recipe is added. This operation is only required the
1237 * first time to get the changes from FW. Then to search existing
1238 * entries we don't need to update the cache again until another recipe
1241 if (*refresh_required) {
1242 ice_get_recp_to_prof_map(hw);
1243 *refresh_required = false;
1246 /* Start populating all the entries for recps[rid] based on lkups from
1247 * firmware. Note that we are only creating the root recipe in our
1250 lkup_exts = &recps[rid].lkup_exts;
1252 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
1253 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
1254 struct ice_recp_grp_entry *rg_entry;
1255 u8 i, prof, idx, prot = 0;
1259 rg_entry = (struct ice_recp_grp_entry *)
1260 ice_malloc(hw, sizeof(*rg_entry));
1262 status = ICE_ERR_NO_MEMORY;
1266 idx = root_bufs.recipe_indx;
1267 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
1269 /* Mark all result indices in this chain */
1270 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1271 ice_set_bit(root_bufs.content.result_indx &
1272 ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
1274 /* get the first profile that is associated with rid */
1275 prof = ice_find_first_bit(recipe_to_profile[idx],
1276 ICE_MAX_NUM_PROFILES);
1277 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
1278 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
1280 rg_entry->fv_idx[i] = lkup_indx;
1281 rg_entry->fv_mask[i] =
1282 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
1284 /* If the recipe is a chained recipe then all its
1285 * child recipe's result will have a result index.
1286 * To fill fv_words we should not use those result
1287 * index, we only need the protocol ids and offsets.
1288 * We will skip all the fv_idx which stores result
1289 * index in them. We also need to skip any fv_idx which
1290 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
1291 * valid offset value.
1293 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
1294 rg_entry->fv_idx[i]) ||
1295 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
1296 rg_entry->fv_idx[i] == 0)
1299 ice_find_prot_off(hw, ICE_BLK_SW, prof,
1300 rg_entry->fv_idx[i], &prot, &off);
1301 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
1302 lkup_exts->fv_words[fv_word_idx].off = off;
1303 lkup_exts->field_mask[fv_word_idx] =
1304 rg_entry->fv_mask[i];
1307 /* populate rg_list with the data from the child entry of this
1310 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
1312 /* Propagate some data to the recipe database */
1313 recps[idx].is_root = !!is_root;
1314 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1315 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
1316 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
1317 recps[idx].chain_idx = root_bufs.content.result_indx &
1318 ~ICE_AQ_RECIPE_RESULT_EN;
1319 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
1321 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
1327 /* Only do the following for root recipes entries */
1328 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
1329 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
1330 recps[idx].root_rid = root_bufs.content.rid &
1331 ~ICE_AQ_RECIPE_ID_IS_ROOT;
1332 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1335 /* Complete initialization of the root recipe entry */
1336 lkup_exts->n_val_words = fv_word_idx;
1337 recps[rid].big_recp = (num_recps > 1);
1338 recps[rid].n_grp_count = (u8)num_recps;
1339 recps[rid].tun_type = ice_get_tun_type_for_recipe(rid);
1340 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
1341 ice_memdup(hw, tmp, recps[rid].n_grp_count *
1342 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
1343 if (!recps[rid].root_buf)
1346 /* Copy result indexes */
1347 ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
1348 recps[rid].recp_created = true;
1356 * ice_get_recp_to_prof_map - updates recipe to profile mapping
1357 * @hw: pointer to hardware structure
1359 * This function is used to populate the recipe_to_profile matrix, where the
1360 * index into this array is the recipe ID and the element is the set of
1361 * profiles that this recipe is mapped to.
1363 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
1365 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Query FW once per profile and mirror the association into both
 * lookup directions (profile -> recipes and recipe -> profiles).
 */
1368 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
/* Clear cached state for this profile before refreshing from FW */
1371 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
1372 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* On AQ failure the profile's maps simply stay cleared */
1373 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
1375 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
1376 ICE_MAX_NUM_RECIPES);
/* Build the reverse map: for every recipe bit set, record that
 * profile i uses that recipe.
 */
1377 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
1378 if (ice_is_bit_set(r_bitmap, j))
1379 ice_set_bit(i, recipe_to_profile[j]);
1384 * ice_init_def_sw_recp - initialize the recipe book keeping tables
1385 * @hw: pointer to the HW struct
1386 * @recp_list: pointer to sw recipe list
1388 * Allocate memory for the entire recipe table and initialize the structures/
1389 * entries corresponding to basic recipes.
1392 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
1394 struct ice_sw_recipe *recps;
/* One zeroed entry per possible recipe so unused fields start clean */
1397 recps = (struct ice_sw_recipe *)
1398 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
1400 return ICE_ERR_NO_MEMORY;
/* Seed each entry with its own index and empty rule/group lists */
1402 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
1403 recps[i].root_rid = i;
1404 INIT_LIST_HEAD(&recps[i].filt_rules);
1405 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
1406 INIT_LIST_HEAD(&recps[i].rg_list);
/* Per-recipe lock serializing access to the filter rule list */
1407 ice_init_lock(&recps[i].filt_rule_lock);
1416 * ice_aq_get_sw_cfg - get switch configuration
1417 * @hw: pointer to the hardware structure
1418 * @buf: pointer to the result buffer
1419 * @buf_size: length of the buffer available for response
1420 * @req_desc: pointer to requested descriptor
1421 * @num_elems: pointer to number of elements
1422 * @cd: pointer to command details structure or NULL
1424 * Get switch configuration (0x0200) to be placed in 'buff'.
1425 * This admin command returns information such as initial VSI/port number
1426 * and switch ID it belongs to.
1428 * NOTE: *req_desc is both an input/output parameter.
1429 * The caller of this function first calls this function with *request_desc set
1430 * to 0. If the response from f/w has *req_desc set to 0, all the switch
1431 * configuration information has been returned; if non-zero (meaning not all
1432 * the information was returned), the caller should call this function again
1433 * with *req_desc set to the previous value returned by f/w to get the
1434 * next block of switch configuration information.
1436 * *num_elems is an output-only parameter. It reflects the number of elements
1437 * in the response buffer. The caller of this function should use *num_elems
1438 * while parsing the response buffer.
1440 static enum ice_status
1441 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
1442 u16 buf_size, u16 *req_desc, u16 *num_elems,
1443 struct ice_sq_cd *cd)
1445 struct ice_aqc_get_sw_cfg *cmd;
1446 enum ice_status status;
1447 struct ice_aq_desc desc;
1449 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
1450 cmd = &desc.params.get_sw_conf;
/* Resume token from the previous call (0 on the first call) */
1451 cmd->element = CPU_TO_LE16(*req_desc);
1453 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* Hand FW's continuation token and element count back to the caller */
1455 *req_desc = LE16_TO_CPU(cmd->element);
1456 *num_elems = LE16_TO_CPU(cmd->num_elems);
1463 * ice_alloc_sw - allocate resources specific to switch
1464 * @hw: pointer to the HW struct
1465 * @ena_stats: true to turn on VEB stats
1466 * @shared_res: true for shared resource, false for dedicated resource
1467 * @sw_id: switch ID returned
1468 * @counter_id: VEB counter ID returned
1470 * allocates switch resources (SWID and VEB counter) (0x0208)
1473 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
1476 struct ice_aqc_alloc_free_res_elem *sw_buf;
1477 struct ice_aqc_res_elem *sw_ele;
1478 enum ice_status status;
1481 buf_len = sizeof(*sw_buf);
1482 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1483 ice_malloc(hw, buf_len);
1485 return ICE_ERR_NO_MEMORY;
1487 /* Prepare buffer for switch ID.
1488 * The number of resource entries in buffer is passed as 1 since only a
1489 * single switch/VEB instance is allocated, and hence a single sw_id
1492 sw_buf->num_elems = CPU_TO_LE16(1);
/* shared_res selects whether the SWID is shared or dedicated */
1494 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
1495 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
1496 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
1498 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1499 ice_aqc_opc_alloc_res, NULL);
1502 goto ice_alloc_sw_exit;
/* Return the SWID that firmware handed back */
1504 sw_ele = &sw_buf->elem[0];
1505 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
/* Second allocation: a dedicated VEB counter (presumably only when
 * ena_stats is set -- the guarding condition is above this branch).
 */
1508 /* Prepare buffer for VEB Counter */
1509 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
1510 struct ice_aqc_alloc_free_res_elem *counter_buf;
1511 struct ice_aqc_res_elem *counter_ele;
1513 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1514 ice_malloc(hw, buf_len);
1516 status = ICE_ERR_NO_MEMORY;
1517 goto ice_alloc_sw_exit;
1520 /* The number of resource entries in buffer is passed as 1 since
1521 * only a single switch/VEB instance is allocated, and hence a
1522 * single VEB counter is requested.
1524 counter_buf->num_elems = CPU_TO_LE16(1);
1525 counter_buf->res_type =
1526 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
1527 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
1528 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
/* Counter allocation failed: drop its buffer and unwind */
1532 ice_free(hw, counter_buf);
1533 goto ice_alloc_sw_exit;
1535 counter_ele = &counter_buf->elem[0];
1536 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
1537 ice_free(hw, counter_buf);
1541 ice_free(hw, sw_buf);
1546 * ice_free_sw - free resources specific to switch
1547 * @hw: pointer to the HW struct
1548 * @sw_id: switch ID returned
1549 * @counter_id: VEB counter ID returned
1551 * free switch resources (SWID and VEB counter) (0x0209)
1553 * NOTE: This function frees multiple resources. It continues
1554 * releasing other resources even after it encounters error.
1555 * The error code returned is the last error it encountered.
1557 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
1559 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
1560 enum ice_status status, ret_status;
1563 buf_len = sizeof(*sw_buf);
1564 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1565 ice_malloc(hw, buf_len);
1567 return ICE_ERR_NO_MEMORY;
1569 /* Prepare buffer to free for switch ID res.
1570 * The number of resource entries in buffer is passed as 1 since only a
1571 * single switch/VEB instance is freed, and hence a single sw_id
1574 sw_buf->num_elems = CPU_TO_LE16(1);
1575 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
1576 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
/* Keep going even if this free fails; ret_status remembers the error */
1578 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1579 ice_aqc_opc_free_res, NULL);
1582 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1584 /* Prepare buffer to free for VEB Counter resource */
1585 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1586 ice_malloc(hw, buf_len);
1588 ice_free(hw, sw_buf);
1589 return ICE_ERR_NO_MEMORY;
1592 /* The number of resource entries in buffer is passed as 1 since only a
1593 * single switch/VEB instance is freed, and hence a single VEB counter
1596 counter_buf->num_elems = CPU_TO_LE16(1);
1597 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
1598 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
1600 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1601 ice_aqc_opc_free_res, NULL);
/* Per the NOTE above: the last error encountered wins */
1603 ice_debug(hw, ICE_DBG_SW,
1604 "VEB counter resource could not be freed\n");
1605 ret_status = status;
1608 ice_free(hw, counter_buf);
1609 ice_free(hw, sw_buf);
1615 * @hw: pointer to the HW struct
1616 * @vsi_ctx: pointer to a VSI context struct
1617 * @cd: pointer to command details structure or NULL
1619 * Add a VSI context to the hardware (0x0210)
1622 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1623 struct ice_sq_cd *cd)
1625 struct ice_aqc_add_update_free_vsi_resp *res;
1626 struct ice_aqc_add_get_update_free_vsi *cmd;
1627 struct ice_aq_desc desc;
1628 enum ice_status status;
1630 cmd = &desc.params.vsi_cmd;
1631 res = &desc.params.add_update_free_vsi_res;
1633 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
/* If the caller pre-selected a VSI number (not allocating from the
 * pool), pass it to FW with the IS_VALID flag set.
 */
1635 if (!vsi_ctx->alloc_from_pool)
1636 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
1637 ICE_AQ_VSI_IS_VALID);
1639 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
/* Command carries a read buffer (the VSI context info section) */
1641 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1643 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1644 sizeof(vsi_ctx->info), cd);
/* Record the FW-assigned VSI number and pool occupancy counters */
1647 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1648 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
1649 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
1657 * @hw: pointer to the HW struct
1658 * @vsi_ctx: pointer to a VSI context struct
1659 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1660 * @cd: pointer to command details structure or NULL
1662 * Free VSI context info from hardware (0x0213)
1665 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1666 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1668 struct ice_aqc_add_update_free_vsi_resp *resp;
1669 struct ice_aqc_add_get_update_free_vsi *cmd;
1670 struct ice_aq_desc desc;
1671 enum ice_status status;
1673 cmd = &desc.params.vsi_cmd;
1674 resp = &desc.params.add_update_free_vsi_res;
1676 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1678 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* NOTE(review): presumably guarded by keep_vsi_alloc just above --
 * KEEP_ALLOC keeps the VSI allocation with this PF.
 */
1680 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
1682 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/* Report remaining pool occupancy back through the context */
1684 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1685 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1693 * @hw: pointer to the HW struct
1694 * @vsi_ctx: pointer to a VSI context struct
1695 * @cd: pointer to command details structure or NULL
1697 * Update VSI context in the hardware (0x0211)
1700 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1701 struct ice_sq_cd *cd)
1703 struct ice_aqc_add_update_free_vsi_resp *resp;
1704 struct ice_aqc_add_get_update_free_vsi *cmd;
1705 struct ice_aq_desc desc;
1706 enum ice_status status;
1708 cmd = &desc.params.vsi_cmd;
1709 resp = &desc.params.add_update_free_vsi_res;
1711 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
/* Target an existing FW VSI; IS_VALID marks vsi_num as meaningful */
1713 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* Command carries a read buffer (the VSI context info section) */
1715 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1717 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1718 sizeof(vsi_ctx->info), cd);
/* Report pool occupancy back through the context */
1721 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1722 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1729 * ice_is_vsi_valid - check whether the VSI is valid or not
1730 * @hw: pointer to the HW struct
1731 * @vsi_handle: VSI handle
1733 * check whether the VSI is valid or not
1735 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1737 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1741 * ice_get_hw_vsi_num - return the HW VSI number
1742 * @hw: pointer to the HW struct
1743 * @vsi_handle: VSI handle
1745 * return the HW VSI number
1746 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1748 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1750 return hw->vsi_ctx[vsi_handle]->vsi_num;
1754 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1755 * @hw: pointer to the HW struct
1756 * @vsi_handle: VSI handle
1758 * return the VSI context entry for a given VSI handle
1760 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1762 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1766 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1767 * @hw: pointer to the HW struct
1768 * @vsi_handle: VSI handle
1769 * @vsi: VSI context pointer
1771 * save the VSI context entry for a given VSI handle
1774 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1776 hw->vsi_ctx[vsi_handle] = vsi;
1780 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1781 * @hw: pointer to the HW struct
1782 * @vsi_handle: VSI handle
1784 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1786 struct ice_vsi_ctx *vsi;
1789 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* Release the per-TC LAN queue context arrays and NULL the slots so
 * a later clear cannot double-free them.
 */
1792 ice_for_each_traffic_class(i) {
1793 if (vsi->lan_q_ctx[i]) {
1794 ice_free(hw, vsi->lan_q_ctx[i]);
1795 vsi->lan_q_ctx[i] = NULL;
1801 * ice_clear_vsi_ctx - clear the VSI context entry
1802 * @hw: pointer to the HW struct
1803 * @vsi_handle: VSI handle
1805 * clear the VSI context entry
1807 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1809 struct ice_vsi_ctx *vsi;
1811 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* Drop the queue contexts first, then detach the entry so the handle
 * reads as invalid afterwards.
 */
1813 ice_clear_vsi_q_ctx(hw, vsi_handle);
1815 hw->vsi_ctx[vsi_handle] = NULL;
1820 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1821 * @hw: pointer to the HW struct
1823 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1827 for (i = 0; i < ICE_MAX_VSI; i++)
1828 ice_clear_vsi_ctx(hw, i);
1832 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1833 * @hw: pointer to the HW struct
1834 * @vsi_handle: unique VSI handle provided by drivers
1835 * @vsi_ctx: pointer to a VSI context struct
1836 * @cd: pointer to command details structure or NULL
1838 * Add a VSI context to the hardware also add it into the VSI handle list.
1839 * If this function gets called after reset for existing VSIs then update
1840 * with the new HW VSI number in the corresponding VSI handle list entry.
1843 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1844 struct ice_sq_cd *cd)
1846 struct ice_vsi_ctx *tmp_vsi_ctx;
1847 enum ice_status status;
1849 if (vsi_handle >= ICE_MAX_VSI)
1850 return ICE_ERR_PARAM;
1851 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1854 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1856 /* Create a new VSI context */
1857 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1858 ice_malloc(hw, sizeof(*tmp_vsi_ctx))
/* Allocation failed: undo the FW-side add before bailing out */
1860 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1861 return ICE_ERR_NO_MEMORY;
/* Cache a private copy of the caller's context for this handle */
1863 *tmp_vsi_ctx = *vsi_ctx;
1865 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
/* Handle already existed (e.g. after reset): just refresh vsi_num */
1867 /* update with new HW VSI num */
1868 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1875 * ice_free_vsi- free VSI context from hardware and VSI handle list
1876 * @hw: pointer to the HW struct
1877 * @vsi_handle: unique VSI handle
1878 * @vsi_ctx: pointer to a VSI context struct
1879 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1880 * @cd: pointer to command details structure or NULL
1882 * Free VSI context info from hardware as well as from VSI handle list
1885 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1886 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1888 enum ice_status status;
1890 if (!ice_is_vsi_valid(hw, vsi_handle))
1891 return ICE_ERR_PARAM;
/* Resolve the software handle to the FW VSI number before freeing */
1892 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1893 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
/* Drop the locally cached context for the handle as well */
1895 ice_clear_vsi_ctx(hw, vsi_handle);
1901 * @hw: pointer to the HW struct
1902 * @vsi_handle: unique VSI handle
1903 * @vsi_ctx: pointer to a VSI context struct
1904 * @cd: pointer to command details structure or NULL
1906 * Update VSI context in the hardware
1909 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1910 struct ice_sq_cd *cd)
1912 if (!ice_is_vsi_valid(hw, vsi_handle))
1913 return ICE_ERR_PARAM;
1914 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1915 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1919 * ice_aq_get_vsi_params
1920 * @hw: pointer to the HW struct
1921 * @vsi_ctx: pointer to a VSI context struct
1922 * @cd: pointer to command details structure or NULL
1924 * Get VSI context info from hardware (0x0212)
1927 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1928 struct ice_sq_cd *cd)
1930 struct ice_aqc_add_get_update_free_vsi *cmd;
1931 struct ice_aqc_get_vsi_resp *resp;
1932 struct ice_aq_desc desc;
1933 enum ice_status status;
1935 cmd = &desc.params.vsi_cmd;
1936 resp = &desc.params.get_vsi_resp;
1938 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
/* Query an existing FW VSI; IS_VALID marks vsi_num as meaningful */
1940 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* FW fills vsi_ctx->info with the current context contents */
1942 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1943 sizeof(vsi_ctx->info), cd);
/* Copy the confirmed VSI number and pool occupancy back out */
1945 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1947 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1948 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1955 * ice_aq_add_update_mir_rule - add/update a mirror rule
1956 * @hw: pointer to the HW struct
1957 * @rule_type: Rule Type
1958 * @dest_vsi: VSI number to which packets will be mirrored
1959 * @count: length of the list
1960 * @mr_buf: buffer for list of mirrored VSI numbers
1961 * @cd: pointer to command details structure or NULL
1964 * Add/Update Mirror Rule (0x260).
1967 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1968 u16 count, struct ice_mir_rule_buf *mr_buf,
1969 struct ice_sq_cd *cd, u16 *rule_id)
1971 struct ice_aqc_add_update_mir_rule *cmd;
1972 struct ice_aq_desc desc;
1973 enum ice_status status;
1974 __le16 *mr_list = NULL;
/* Per-rule-type argument validation; virtual-port rules carry a list
 * of mirrored VSIs, physical-port rules must not.
 */
1977 switch (rule_type) {
1978 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1979 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1980 /* Make sure count and mr_buf are set for these rule_types */
1981 if (!(count && mr_buf))
1982 return ICE_ERR_PARAM;
1984 buf_size = count * sizeof(__le16);
1985 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1987 return ICE_ERR_NO_MEMORY;
1989 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1990 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1991 /* Make sure count and mr_buf are not set for these
1994 if (count || mr_buf)
1995 return ICE_ERR_PARAM;
1998 ice_debug(hw, ICE_DBG_SW,
1999 "Error due to unsupported rule_type %u\n", rule_type);
2000 return ICE_ERR_OUT_OF_RANGE;
2003 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
2005 /* Pre-process 'mr_buf' items for add/update of virtual port
2006 * ingress/egress mirroring (but not physical port ingress/egress
2012 for (i = 0; i < count; i++) {
2015 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
2017 /* Validate specified VSI number, make sure it is less
2018 * than ICE_MAX_VSI, if not return with error.
2020 if (id >= ICE_MAX_VSI) {
2021 ice_debug(hw, ICE_DBG_SW,
2022 "Error VSI index (%u) out-of-range\n",
/* Free the list before bailing so the error path does not leak */
2024 ice_free(hw, mr_list);
2025 return ICE_ERR_OUT_OF_RANGE;
2028 /* add VSI to mirror rule */
2031 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
2032 else /* remove VSI from mirror rule */
2033 mr_list[i] = CPU_TO_LE16(id);
/* A caller-supplied rule ID means "update existing rule" */
2037 cmd = &desc.params.add_update_rule;
2038 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
2039 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
2040 ICE_AQC_RULE_ID_VALID_M);
2041 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
2042 cmd->num_entries = CPU_TO_LE16(count);
2043 cmd->dest = CPU_TO_LE16(dest_vsi);
2045 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
/* Hand the FW-assigned (or confirmed) rule ID back to the caller */
2047 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
2049 ice_free(hw, mr_list);
2055 * ice_aq_delete_mir_rule - delete a mirror rule
2056 * @hw: pointer to the HW struct
2057 * @rule_id: Mirror rule ID (to be deleted)
2058 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
2059 * otherwise it is returned to the shared pool
2060 * @cd: pointer to command details structure or NULL
2062 * Delete Mirror Rule (0x261).
2065 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
2066 struct ice_sq_cd *cd)
2068 struct ice_aqc_delete_mir_rule *cmd;
2069 struct ice_aq_desc desc;
2071 /* rule_id should be in the range 0...63 */
2072 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
2073 return ICE_ERR_OUT_OF_RANGE;
2075 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
2077 cmd = &desc.params.del_rule;
/* FW requires the VALID bit set alongside the rule ID */
2078 rule_id |= ICE_AQC_RULE_ID_VALID_M;
2079 cmd->rule_id = CPU_TO_LE16(rule_id);
/* NOTE(review): presumably guarded by keep_allocd just above --
 * KEEP_ALLOCD keeps the resource with this PF instead of returning
 * it to the shared pool.
 */
2082 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
2084 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2088 * ice_aq_alloc_free_vsi_list
2089 * @hw: pointer to the HW struct
2090 * @vsi_list_id: VSI list ID returned or used for lookup
2091 * @lkup_type: switch rule filter lookup type
2092 * @opc: switch rules population command type - pass in the command opcode
2094 * allocates or free a VSI list resource
2096 static enum ice_status
2097 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
2098 enum ice_sw_lkup_type lkup_type,
2099 enum ice_adminq_opc opc)
2101 struct ice_aqc_alloc_free_res_elem *sw_buf;
2102 struct ice_aqc_res_elem *vsi_ele;
2103 enum ice_status status;
2106 buf_len = sizeof(*sw_buf);
2107 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
2108 ice_malloc(hw, buf_len);
2110 return ICE_ERR_NO_MEMORY;
2111 sw_buf->num_elems = CPU_TO_LE16(1);
/* Lookup type selects the VSI list resource flavor: replication for
 * MAC/ethertype/promisc style lookups, pruning for VLAN.
 */
2113 if (lkup_type == ICE_SW_LKUP_MAC ||
2114 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2115 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2116 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2117 lkup_type == ICE_SW_LKUP_PROMISC ||
2118 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2119 lkup_type == ICE_SW_LKUP_LAST) {
2120 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
2121 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
2123 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
/* Any other lookup type is not backed by a VSI list */
2125 status = ICE_ERR_PARAM;
2126 goto ice_aq_alloc_free_vsi_list_exit;
/* On free, the caller supplies the list ID to release */
2129 if (opc == ice_aqc_opc_free_res)
2130 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
2132 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
2134 goto ice_aq_alloc_free_vsi_list_exit;
/* On alloc, FW returns the new list ID in the element */
2136 if (opc == ice_aqc_opc_alloc_res) {
2137 vsi_ele = &sw_buf->elem[0];
2138 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
2141 ice_aq_alloc_free_vsi_list_exit:
2142 ice_free(hw, sw_buf);
2147 * ice_aq_set_storm_ctrl - Sets storm control configuration
2148 * @hw: pointer to the HW struct
2149 * @bcast_thresh: represents the upper threshold for broadcast storm control
2150 * @mcast_thresh: represents the upper threshold for multicast storm control
2151 * @ctl_bitmask: storm control control knobs
2153 * Sets the storm control configuration (0x0280)
2156 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
2159 struct ice_aqc_storm_cfg *cmd;
2160 struct ice_aq_desc desc;
2162 cmd = &desc.params.storm_conf;
2164 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
2166 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
2167 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
2168 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
2170 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2174 * ice_aq_get_storm_ctrl - gets storm control configuration
2175 * @hw: pointer to the HW struct
2176 * @bcast_thresh: represents the upper threshold for broadcast storm control
2177 * @mcast_thresh: represents the upper threshold for multicast storm control
2178 * @ctl_bitmask: storm control control knobs
2180 * Gets the storm control configuration (0x0281)
2183 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
2186 enum ice_status status;
2187 struct ice_aq_desc desc;
2189 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
/* Direct command: response is carried in the descriptor itself */
2191 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2193 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
/* Output pointers receive the masked threshold values and the raw
 * control bitmask on success.
 */
2196 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
2199 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
2202 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
2209 * ice_aq_sw_rules - add/update/remove switch rules
2210 * @hw: pointer to the HW struct
2211 * @rule_list: pointer to switch rule population list
2212 * @rule_list_sz: total size of the rule list in bytes
2213 * @num_rules: number of switch rules in the rule_list
2214 * @opc: switch rules population command type - pass in the command opcode
2215 * @cd: pointer to command details structure or NULL
2217 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
2219 static enum ice_status
2220 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
2221 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2223 struct ice_aq_desc desc;
2224 enum ice_status status;
2226 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
/* Only the three switch-rule opcodes are legal here */
2228 if (opc != ice_aqc_opc_add_sw_rules &&
2229 opc != ice_aqc_opc_update_sw_rules &&
2230 opc != ice_aqc_opc_remove_sw_rules)
2231 return ICE_ERR_PARAM;
2233 ice_fill_dflt_direct_cmd_desc(&desc, opc);
/* Command carries a read buffer (the rule list) */
2235 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2236 desc.params.sw_rules.num_rules_fltr_entry_index =
2237 CPU_TO_LE16(num_rules);
2238 status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
/* For update/remove, FW's ENOENT maps to "rule does not exist" */
2239 if (opc != ice_aqc_opc_add_sw_rules &&
2240 hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
2241 status = ICE_ERR_DOES_NOT_EXIST;
2247 * ice_aq_add_recipe - add switch recipe
2248 * @hw: pointer to the HW struct
2249 * @s_recipe_list: pointer to switch rule population list
2250 * @num_recipes: number of switch recipes in the list
2251 * @cd: pointer to command details structure or NULL
2256 ice_aq_add_recipe(struct ice_hw *hw,
2257 struct ice_aqc_recipe_data_elem *s_recipe_list,
2258 u16 num_recipes, struct ice_sq_cd *cd)
2260 struct ice_aqc_add_get_recipe *cmd;
2261 struct ice_aq_desc desc;
2264 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2265 cmd = &desc.params.add_get_recipe;
2266 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
2268 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
/* Command carries a read buffer (the recipe list) */
2269 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2271 buf_size = num_recipes * sizeof(*s_recipe_list);
2273 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
2277 * ice_aq_get_recipe - get switch recipe
2278 * @hw: pointer to the HW struct
2279 * @s_recipe_list: pointer to switch rule population list
2280 * @num_recipes: pointer to the number of recipes (input and output)
2281 * @recipe_root: root recipe number of recipe(s) to retrieve
2282 * @cd: pointer to command details structure or NULL
2286 * On input, *num_recipes should equal the number of entries in s_recipe_list.
2287 * On output, *num_recipes will equal the number of entries returned in
2290 * The caller must supply enough space in s_recipe_list to hold all possible
2291 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
2294 ice_aq_get_recipe(struct ice_hw *hw,
2295 struct ice_aqc_recipe_data_elem *s_recipe_list,
2296 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
2298 struct ice_aqc_add_get_recipe *cmd;
2299 struct ice_aq_desc desc;
2300 enum ice_status status;
/* Enforce the documented contract: the caller must supply a buffer
 * big enough for every possible recipe.
 */
2303 if (*num_recipes != ICE_MAX_NUM_RECIPES)
2304 return ICE_ERR_PARAM;
2306 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2307 cmd = &desc.params.add_get_recipe;
2308 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
2310 cmd->return_index = CPU_TO_LE16(recipe_root);
2311 cmd->num_sub_recipes = 0;
2313 buf_size = *num_recipes * sizeof(*s_recipe_list);
2315 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
/* Report how many entries FW actually returned */
2316 /* cppcheck-suppress constArgument */
2317 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
2323 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
2324 * @hw: pointer to the HW struct
2325 * @profile_id: package profile ID to associate the recipe with
2326 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2327 * @cd: pointer to command details structure or NULL
2328 * Recipe to profile association (0x0291)
2331 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2332 struct ice_sq_cd *cd)
2334 struct ice_aqc_recipe_to_profile *cmd;
2335 struct ice_aq_desc desc;
2337 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2338 cmd = &desc.params.recipe_to_profile;
2339 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
2340 cmd->profile_id = CPU_TO_LE16(profile_id);
2341 /* Set the recipe ID bit in the bitmask to let the device know which
2342 * profile we are associating the recipe to
/* r_bitmap is copied verbatim into the command's association field */
2344 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
2345 ICE_NONDMA_TO_NONDMA);
2347 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2351 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
2352 * @hw: pointer to the HW struct
2353 * @profile_id: package profile ID to associate the recipe with
2354 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2355 * @cd: pointer to command details structure or NULL
2356 * Get the recipes associated with the given profile ID (0x0293)
2359 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2360 struct ice_sq_cd *cd)
2362 struct ice_aqc_recipe_to_profile *cmd;
2363 struct ice_aq_desc desc;
2364 enum ice_status status;
2366 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2367 cmd = &desc.params.recipe_to_profile;
2368 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
2369 cmd->profile_id = CPU_TO_LE16(profile_id);
2371 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/* On success FW has filled recipe_assoc; copy it out to the caller */
2373 ice_memcpy(r_bitmap, cmd->recipe_assoc,
2374 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
2380 * ice_alloc_recipe - add recipe resource
2381 * @hw: pointer to the hardware structure
2382 * @rid: recipe ID returned as response to AQ call
2384 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
/* Allocates one shared switch-recipe resource from firmware via the
 * alloc/free resource AQ command and returns its ID through *rid.
 * NOTE(review): the "buf_len" declaration, the NULL check guarding the
 * ICE_ERR_NO_MEMORY return, and the success check guarding the *rid read
 * appear elided by the extraction — verify against upstream.
 */
2386 struct ice_aqc_alloc_free_res_elem *sw_buf;
2387 enum ice_status status;
2390 buf_len = sizeof(*sw_buf);
2391 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2393 return ICE_ERR_NO_MEMORY;
2395 sw_buf->num_elems = CPU_TO_LE16(1);
/* request a recipe-type resource, shared across PFs/VFs */
2396 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
2397 ICE_AQC_RES_TYPE_S) |
2398 ICE_AQC_RES_TYPE_FLAG_SHARED);
2399 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2400 ice_aqc_opc_alloc_res, NULL);
/* firmware returns the allocated recipe ID in the first element */
2402 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2403 ice_free(hw, sw_buf);
2408 /* ice_init_port_info - Initialize port_info with switch configuration data
2409 * @pi: pointer to port_info
2410 * @vsi_port_num: VSI number or port number
2411 * @type: Type of switch element (port or VSI)
2412 * @swid: switch ID of the switch the element is attached to
2413 * @pf_vf_num: PF or VF number
2414 * @is_vf: true if the element is a VF, false otherwise
2417 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2418 u16 swid, u16 pf_vf_num, bool is_vf)
/* Populates a port_info structure from one get-switch-config response
 * element; only the physical-port element type is handled here, anything
 * else logs a debug message.
 * NOTE(review): the "switch (type)" statement, swid/is_vf assignments,
 * "break;" and "default:" labels appear elided by the extraction.
 */
2421 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2422 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2424 pi->pf_vf_num = pf_vf_num;
/* no default Tx/Rx VSI is known at init time — mark both invalid */
2426 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
2427 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
2430 ice_debug(pi->hw, ICE_DBG_SW,
2431 "incorrect VSI/port type received\n");
2436 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
2437 * @hw: pointer to the hardware structure
2439 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
/* Fetches the initial switch configuration from firmware, iterating the
 * get-switch-config AQ command until req_desc comes back zero, and
 * initializes hw->port_info for each physical/virtual port element found.
 * NOTE(review): several declarations (req_desc, num_elems, i, j, res_type,
 * the is_vf flag and the do { } loop opener) appear elided by the
 * extraction — verify against upstream before relying on exact structure.
 */
2441 struct ice_aqc_get_sw_cfg_resp *rbuf;
2442 enum ice_status status;
/* this driver instance expects exactly one port element */
2449 num_total_ports = 1;
2451 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
2452 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
2455 return ICE_ERR_NO_MEMORY;
2457 /* Multiple calls to ice_aq_get_sw_cfg may be required
2458 * to get all the switch configuration information. The need
2459 * for additional calls is indicated by ice_aq_get_sw_cfg
2460 * writing a non-zero value in req_desc
2463 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
2464 &req_desc, &num_elems, NULL);
2469 for (i = 0; i < num_elems; i++) {
2470 struct ice_aqc_get_sw_cfg_resp_elem *ele;
2471 u16 pf_vf_num, swid, vsi_port_num;
2475 ele = rbuf[i].elements;
/* low bits carry the VSI/port number; high bits carry the element type */
2476 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
2477 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
2479 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
2480 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
2482 swid = LE16_TO_CPU(ele->swid);
2484 if (LE16_TO_CPU(ele->pf_vf_num) &
2485 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
2488 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
2489 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
2492 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2493 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
/* j counts ports consumed so far; refuse extras beyond num_total_ports */
2494 if (j == num_total_ports) {
2495 ice_debug(hw, ICE_DBG_SW,
2496 "more ports than expected\n");
2497 status = ICE_ERR_CFG;
2500 ice_init_port_info(hw->port_info,
2501 vsi_port_num, res_type, swid,
/* keep requesting until firmware clears req_desc or an error occurs */
2509 } while (req_desc && !status);
2512 ice_free(hw, (void *)rbuf);
2517 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2518 * @hw: pointer to the hardware structure
2519 * @fi: filter info structure to fill/update
2521 * This helper function populates the lb_en and lan_en elements of the provided
2522 * ice_fltr_info struct using the switch's type and characteristics of the
2523 * switch rule being configured.
2525 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
/* Derives the lb_en (loopback enable) and lan_en (LAN enable) bits of a
 * filter from its direction flag, action, lookup type and, for MAC-based
 * lookups, whether the address is unicast.
 * NOTE(review): assignments to fi->lb_en / fi->lan_en and some closing
 * braces appear elided by the extraction — the conditions below select
 * which of those flags get set.
 */
2527 if ((fi->flag & ICE_FLTR_RX) &&
2528 (fi->fltr_act == ICE_FWD_TO_VSI ||
2529 fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
2530 fi->lkup_type == ICE_SW_LKUP_LAST)
2534 if ((fi->flag & ICE_FLTR_TX) &&
2535 (fi->fltr_act == ICE_FWD_TO_VSI ||
2536 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2537 fi->fltr_act == ICE_FWD_TO_Q ||
2538 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2539 /* Setting LB for prune actions will result in replicated
2540 * packets to the internal switch that will be dropped.
2542 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2545 /* Set lan_en to TRUE if
2546 * 1. The switch is a VEB AND
2548 * 2.1 The lookup is a directional lookup like ethertype,
2549 * promiscuous, ethertype-MAC, promiscuous-VLAN
2550 * and default-port OR
2551 * 2.2 The lookup is VLAN, OR
2552 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2553 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
2557 * The switch is a VEPA.
2559 * In all other cases, the LAN enable has to be set to false.
2562 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2563 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
2564 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2565 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2566 fi->lkup_type == ICE_SW_LKUP_DFLT ||
2567 fi->lkup_type == ICE_SW_LKUP_VLAN ||
2568 (fi->lkup_type == ICE_SW_LKUP_MAC &&
2569 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
2570 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
2571 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
2580 * ice_fill_sw_rule - Helper function to fill switch rule structure
2581 * @hw: pointer to the hardware structure
2582 * @f_info: entry containing packet forwarding information
2583 * @s_rule: switch rule structure to be filled in based on mac_entry
2584 * @opc: switch rules population command type - pass in the command opcode
2587 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
2588 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
/* Translates a generic filter description (f_info) into a lookup Tx/Rx
 * switch-rule AQ element: builds the action word, patches the dummy
 * Ethernet header with DA / ethertype / VLAN TCI as required by the lookup
 * type, and sets the rule's type, recipe, source and header length.
 */
/* sentinel: one past the max valid VLAN ID means "no VLAN to program" */
2590 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
/* remove requests only need the rule index; no action or header is built */
2598 if (opc == ice_aqc_opc_remove_sw_rules) {
2599 s_rule->pdata.lkup_tx_rx.act = 0;
2600 s_rule->pdata.lkup_tx_rx.index =
2601 CPU_TO_LE16(f_info->fltr_rule_id);
2602 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
2606 eth_hdr_sz = sizeof(dummy_eth_header);
2607 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
2609 /* initialize the ether header with a dummy header */
2610 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
/* compute lb_en/lan_en before encoding them into the action word below */
2611 ice_fill_sw_info(hw, f_info);
2613 switch (f_info->fltr_act) {
2614 case ICE_FWD_TO_VSI:
2615 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
2616 ICE_SINGLE_ACT_VSI_ID_M;
/* VLAN lookups use prune semantics instead of VSI forwarding (see below) */
2617 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2618 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2619 ICE_SINGLE_ACT_VALID_BIT;
2621 case ICE_FWD_TO_VSI_LIST:
2622 act |= ICE_SINGLE_ACT_VSI_LIST;
2623 act |= (f_info->fwd_id.vsi_list_id <<
2624 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
2625 ICE_SINGLE_ACT_VSI_LIST_ID_M;
2626 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2627 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2628 ICE_SINGLE_ACT_VALID_BIT;
2631 act |= ICE_SINGLE_ACT_TO_Q;
2632 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2633 ICE_SINGLE_ACT_Q_INDEX_M;
2635 case ICE_DROP_PACKET:
2636 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
2637 ICE_SINGLE_ACT_VALID_BIT;
2639 case ICE_FWD_TO_QGRP:
/* queue-group size is encoded as log2 of the group size */
2640 q_rgn = f_info->qgrp_size > 0 ?
2641 (u8)ice_ilog2(f_info->qgrp_size) : 0;
2642 act |= ICE_SINGLE_ACT_TO_Q;
2643 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2644 ICE_SINGLE_ACT_Q_INDEX_M;
2645 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
2646 ICE_SINGLE_ACT_Q_REGION_M;
2653 act |= ICE_SINGLE_ACT_LB_ENABLE;
2655 act |= ICE_SINGLE_ACT_LAN_ENABLE;
/* capture the match data (DA / VLAN / ethertype) for this lookup type */
2657 switch (f_info->lkup_type) {
2658 case ICE_SW_LKUP_MAC:
2659 daddr = f_info->l_data.mac.mac_addr;
2661 case ICE_SW_LKUP_VLAN:
2662 vlan_id = f_info->l_data.vlan.vlan_id;
2663 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
2664 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
2665 act |= ICE_SINGLE_ACT_PRUNE;
2666 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
2669 case ICE_SW_LKUP_ETHERTYPE_MAC:
2670 daddr = f_info->l_data.ethertype_mac.mac_addr;
/* fallthrough to program the ethertype is presumed — confirm upstream */
2672 case ICE_SW_LKUP_ETHERTYPE:
2673 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2674 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
2676 case ICE_SW_LKUP_MAC_VLAN:
2677 daddr = f_info->l_data.mac_vlan.mac_addr;
2678 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2680 case ICE_SW_LKUP_PROMISC_VLAN:
2681 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2683 case ICE_SW_LKUP_PROMISC:
2684 daddr = f_info->l_data.mac_vlan.mac_addr;
2690 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
2691 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
2692 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
2694 /* Recipe set depending on lookup type */
2695 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
2696 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
2697 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2700 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
2701 ICE_NONDMA_TO_NONDMA);
/* only write a TCI if a valid VLAN ID (<= ICE_MAX_VLAN_ID) was captured */
2703 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
2704 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
2705 *off = CPU_TO_BE16(vlan_id);
2708 /* Create the switch rule with the final dummy Ethernet header */
/* updates keep the previously programmed header length untouched */
2709 if (opc != ice_aqc_opc_update_sw_rules)
2710 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
2714 * ice_add_marker_act
2715 * @hw: pointer to the hardware structure
2716 * @m_ent: the management entry for which sw marker needs to be added
2717 * @sw_marker: sw marker to tag the Rx descriptor with
2718 * @l_id: large action resource ID
2720 * Create a large action to hold software marker and update the switch rule
2721 * entry pointed by m_ent with newly created large action
2723 static enum ice_status
2724 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2725 u16 sw_marker, u16 l_id)
/* Builds a 3-action large-action rule (forward + profile generic value +
 * software marker generic value) and rewrites the existing lookup rule to
 * point at it, so matched Rx descriptors get tagged with sw_marker.
 */
2727 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
2728 /* For software marker we need 3 large actions
2729 * 1. FWD action: FWD TO VSI or VSI LIST
2730 * 2. GENERIC VALUE action to hold the profile ID
2731 * 3. GENERIC VALUE action to hold the software marker ID
2733 const u16 num_lg_acts = 3;
2734 enum ice_status status;
/* markers are only supported on plain MAC lookups */
2740 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2741 return ICE_ERR_PARAM;
2743 /* Create two back-to-back switch rules and submit them to the HW using
2744 * one memory buffer:
2748 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2749 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2750 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2752 return ICE_ERR_NO_MEMORY;
/* second rule sits immediately after the large action in the same buffer */
2754 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2756 /* Fill in the first switch rule i.e. large action */
2757 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2758 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2759 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2761 /* First action VSI forwarding or VSI list forwarding depending on how
/* multiple subscribed VSIs means the entry already uses a VSI list */
2764 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2765 m_ent->fltr_info.fwd_id.hw_vsi_id;
2767 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2768 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2769 ICE_LG_ACT_VSI_LIST_ID_M;
2770 if (m_ent->vsi_count > 1)
2771 act |= ICE_LG_ACT_VSI_LIST;
2772 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2774 /* Second action descriptor type */
2775 act = ICE_LG_ACT_GENERIC;
2777 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2778 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2780 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2781 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2783 /* Third action Marker value */
2784 act |= ICE_LG_ACT_GENERIC;
2785 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2786 ICE_LG_ACT_GENERIC_VALUE_M;
2788 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
2790 /* call the fill switch rule to fill the lookup Tx Rx structure */
2791 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2792 ice_aqc_opc_update_sw_rules);
2794 /* Update the action to point to the large action ID */
2795 rx_tx->pdata.lkup_tx_rx.act =
2796 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
2797 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2798 ICE_SINGLE_ACT_PTR_VAL_M));
2800 /* Use the filter rule ID of the previously created rule with single
2801 * act. Once the update happens, hardware will treat this as large
2804 rx_tx->pdata.lkup_tx_rx.index =
2805 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
/* submit both rules (large action + updated lookup) in one AQ call */
2807 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2808 ice_aqc_opc_update_sw_rules, NULL);
/* book-keeping presumably guarded by !status upstream — lines elided here */
2810 m_ent->lg_act_idx = l_id;
2811 m_ent->sw_marker_id = sw_marker;
2814 ice_free(hw, lg_act);
2819 * ice_add_counter_act - add/update filter rule with counter action
2820 * @hw: pointer to the hardware structure
2821 * @m_ent: the management entry for which counter needs to be added
2822 * @counter_id: VLAN counter ID returned as part of allocate resource
2823 * @l_id: large action resource ID
2825 static enum ice_status
2826 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2827 u16 counter_id, u16 l_id)
/* Builds a 2-action large-action rule (forward + statistics counter) and
 * rewrites the existing lookup rule to point at it, so matched packets
 * increment the given VLAN counter. Mirrors ice_add_marker_act structurally.
 */
2829 struct ice_aqc_sw_rules_elem *lg_act;
2830 struct ice_aqc_sw_rules_elem *rx_tx;
2831 enum ice_status status;
2832 /* 2 actions will be added while adding a large action counter */
2833 const int num_acts = 2;
/* counters are only supported on plain MAC lookups */
2840 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2841 return ICE_ERR_PARAM;
2843 /* Create two back-to-back switch rules and submit them to the HW using
2844 * one memory buffer:
2848 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2849 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2850 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
2853 return ICE_ERR_NO_MEMORY;
2855 rx_tx = (struct ice_aqc_sw_rules_elem *)
2856 ((u8 *)lg_act + lg_act_size);
2858 /* Fill in the first switch rule i.e. large action */
2859 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2860 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2861 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2863 /* First action VSI forwarding or VSI list forwarding depending on how
2866 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2867 m_ent->fltr_info.fwd_id.hw_vsi_id;
2869 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2870 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2871 ICE_LG_ACT_VSI_LIST_ID_M;
2872 if (m_ent->vsi_count > 1)
2873 act |= ICE_LG_ACT_VSI_LIST;
2874 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2876 /* Second action counter ID */
2877 act = ICE_LG_ACT_STAT_COUNT;
2878 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2879 ICE_LG_ACT_STAT_COUNT_M;
2880 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2882 /* call the fill switch rule to fill the lookup Tx Rx structure */
2883 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2884 ice_aqc_opc_update_sw_rules);
/* redirect the single-act rule to the large action just built */
2886 act = ICE_SINGLE_ACT_PTR;
2887 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2888 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2890 /* Use the filter rule ID of the previously created rule with single
2891 * act. Once the update happens, hardware will treat this as large
2894 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2895 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
2897 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2898 ice_aqc_opc_update_sw_rules, NULL);
/* book-keeping presumably guarded by !status upstream — lines elided here */
2900 m_ent->lg_act_idx = l_id;
2901 m_ent->counter_index = counter_id;
2904 ice_free(hw, lg_act);
2909 * ice_create_vsi_list_map
2910 * @hw: pointer to the hardware structure
2911 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2912 * @num_vsi: number of VSI handles in the array
2913 * @vsi_list_id: VSI list ID generated as part of allocate resource
2915 * Helper function to create a new entry of VSI list ID to VSI mapping
2916 * using the given VSI list ID
2918 static struct ice_vsi_list_map_info *
2919 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
/* Allocates a vsi_list_id -> VSI-handle-bitmap entry, records each handle
 * from vsi_handle_arr in the bitmap, and links the entry into the switch's
 * vsi_list_map_head list. Returns the new map entry (NULL check on the
 * ice_calloc result appears elided by the extraction).
 */
2922 struct ice_switch_info *sw = hw->switch_info;
2923 struct ice_vsi_list_map_info *v_map;
2926 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2931 v_map->vsi_list_id = vsi_list_id;
2933 for (i = 0; i < num_vsi; i++)
2934 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
2936 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2941 * ice_update_vsi_list_rule
2942 * @hw: pointer to the hardware structure
2943 * @vsi_handle_arr: array of VSI handles to form a VSI list
2944 * @num_vsi: number of VSI handles in the array
2945 * @vsi_list_id: VSI list ID generated as part of allocate resource
2946 * @remove: Boolean value to indicate if this is a remove action
2947 * @opc: switch rules population command type - pass in the command opcode
2948 * @lkup_type: lookup type of the filter
2950 * Call AQ command to add a new switch rule or update existing switch rule
2951 * using the given VSI list ID
2953 static enum ice_status
2954 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2955 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2956 enum ice_sw_lkup_type lkup_type)
/* Adds VSIs to (or clears them from) a firmware VSI list via a single
 * switch-rules AQ call; the rule type depends on whether the lookup uses a
 * plain VSI list or (for VLAN) a prune list.
 */
2958 struct ice_aqc_sw_rules_elem *s_rule;
2959 enum ice_status status;
/* guard against an empty handle array (num_vsi check partially elided) */
2965 return ICE_ERR_PARAM;
2967 if (lkup_type == ICE_SW_LKUP_MAC ||
2968 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2969 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2970 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2971 lkup_type == ICE_SW_LKUP_PROMISC ||
2972 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2973 lkup_type == ICE_SW_LKUP_LAST)
2974 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2975 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2976 else if (lkup_type == ICE_SW_LKUP_VLAN)
/* VLAN membership is expressed through egress/ingress prune lists */
2977 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2978 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2980 return ICE_ERR_PARAM;
2982 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2983 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2985 return ICE_ERR_NO_MEMORY;
2986 for (i = 0; i < num_vsi; i++) {
2987 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2988 status = ICE_ERR_PARAM;
2991 /* AQ call requires hw_vsi_id(s) */
2992 s_rule->pdata.vsi_list.vsi[i] =
2993 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2996 s_rule->type = CPU_TO_LE16(rule_type);
2997 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2998 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
3000 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
3003 ice_free(hw, s_rule);
3008 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
3009 * @hw: pointer to the HW struct
3010 * @vsi_handle_arr: array of VSI handles to form a VSI list
3011 * @num_vsi: number of VSI handles in the array
3012 * @vsi_list_id: stores the ID of the VSI list to be created
3013 * @lkup_type: switch rule filter's lookup type
3015 static enum ice_status
3016 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3017 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
/* Two-step helper: allocate a VSI list resource from firmware, then
 * populate it with the given VSI handles via an add-switch-rules call.
 * *vsi_list_id receives the newly allocated list ID.
 */
3019 enum ice_status status;
3021 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
3022 ice_aqc_opc_alloc_res);
/* early-return on allocation failure appears elided by the extraction */
3026 /* Update the newly created VSI list to include the specified VSIs */
3027 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
3028 *vsi_list_id, false,
3029 ice_aqc_opc_add_sw_rules, lkup_type);
3033 * ice_create_pkt_fwd_rule
3034 * @hw: pointer to the hardware structure
3035 * @recp_list: corresponding filter management list
3036 * @f_entry: entry containing packet forwarding information
3038 * Create switch rule with given filter information and add an entry
3039 * to the corresponding filter management list to track this switch rule
3042 static enum ice_status
3043 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3044 struct ice_fltr_list_entry *f_entry)
/* Programs a single forwarding rule into hardware and, on success, adds a
 * management entry tracking it to recp_list->filt_rules. The rule ID that
 * firmware assigns is copied back into both the caller's entry and the new
 * management entry.
 */
3046 struct ice_fltr_mgmt_list_entry *fm_entry;
3047 struct ice_aqc_sw_rules_elem *s_rule;
3048 enum ice_status status;
3050 s_rule = (struct ice_aqc_sw_rules_elem *)
3051 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
3053 return ICE_ERR_NO_MEMORY;
3054 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3055 ice_malloc(hw, sizeof(*fm_entry));
3057 status = ICE_ERR_NO_MEMORY;
3058 goto ice_create_pkt_fwd_rule_exit;
3061 fm_entry->fltr_info = f_entry->fltr_info;
3063 /* Initialize all the fields for the management entry */
/* one subscriber so far; marker/counter large actions not yet attached */
3064 fm_entry->vsi_count = 1;
3065 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
3066 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
3067 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
3069 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
3070 ice_aqc_opc_add_sw_rules);
3072 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3073 ice_aqc_opc_add_sw_rules, NULL);
/* on AQ failure the management entry is discarded, not listed */
3075 ice_free(hw, fm_entry);
3076 goto ice_create_pkt_fwd_rule_exit;
3079 f_entry->fltr_info.fltr_rule_id =
3080 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3081 fm_entry->fltr_info.fltr_rule_id =
3082 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3084 /* The book keeping entries will get removed when base driver
3085 * calls remove filter AQ command
3087 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
3089 ice_create_pkt_fwd_rule_exit:
3090 ice_free(hw, s_rule);
3095 * ice_update_pkt_fwd_rule
3096 * @hw: pointer to the hardware structure
3097 * @f_info: filter information for switch rule
3099 * Call AQ command to update a previously created switch rule with a
3102 static enum ice_status
3103 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
/* Re-sends an existing forwarding rule (identified by f_info->fltr_rule_id)
 * with a freshly built rule body, typically to retarget it at a VSI list.
 */
3105 struct ice_aqc_sw_rules_elem *s_rule;
3106 enum ice_status status;
3108 s_rule = (struct ice_aqc_sw_rules_elem *)
3109 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
3111 return ICE_ERR_NO_MEMORY;
3113 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
/* target the already-programmed rule rather than creating a new one */
3115 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
3117 /* Update switch rule with new rule set to forward VSI list */
3118 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3119 ice_aqc_opc_update_sw_rules, NULL);
3121 ice_free(hw, s_rule);
3126 * ice_update_sw_rule_bridge_mode
3127 * @hw: pointer to the HW struct
3129 * Updates unicast switch filter rules based on VEB/VEPA mode
3131 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
/* Walks the tracked MAC filter rules under the rule lock and re-programs
 * every unicast Tx forwarding rule so its lan_en/lb_en reflect the current
 * VEB/VEPA bridge mode (ice_fill_sw_rule re-derives those bits).
 */
3133 struct ice_switch_info *sw = hw->switch_info;
3134 struct ice_fltr_mgmt_list_entry *fm_entry;
3135 enum ice_status status = ICE_SUCCESS;
3136 struct LIST_HEAD_TYPE *rule_head;
3137 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3139 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3140 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3142 ice_acquire_lock(rule_lock);
3143 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
3145 struct ice_fltr_info *fi = &fm_entry->fltr_info;
3146 u8 *addr = fi->l_data.mac.mac_addr;
3148 /* Update unicast Tx rules to reflect the selected
/* only unicast Tx rules with a forwarding action need re-programming */
3151 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
3152 (fi->fltr_act == ICE_FWD_TO_VSI ||
3153 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
3154 fi->fltr_act == ICE_FWD_TO_Q ||
3155 fi->fltr_act == ICE_FWD_TO_QGRP)) {
3156 status = ice_update_pkt_fwd_rule(hw, fi);
3162 ice_release_lock(rule_lock);
3168 * ice_add_update_vsi_list
3169 * @hw: pointer to the hardware structure
3170 * @m_entry: pointer to current filter management list entry
3171 * @cur_fltr: filter information from the book keeping entry
3172 * @new_fltr: filter information with the new VSI to be added
3174 * Call AQ command to add or update previously created VSI list with new VSI.
3176 * Helper function to do book keeping associated with adding filter information
3177 * The algorithm to do the book keeping is described below :
3178 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
3179 * if only one VSI has been added till now
3180 * Allocate a new VSI list and add two VSIs
3181 * to this list using switch rule command
3182 * Update the previously created switch rule with the
3183 * newly created VSI list ID
3184 * if a VSI list was previously created
3185 * Add the new VSI to the previously created VSI list set
3186 * using the update switch rule command
3188 static enum ice_status
3189 ice_add_update_vsi_list(struct ice_hw *hw,
3190 struct ice_fltr_mgmt_list_entry *m_entry,
3191 struct ice_fltr_info *cur_fltr,
3192 struct ice_fltr_info *new_fltr)
/* Subscribes an additional VSI to an existing filter: on the second
 * subscriber a 2-entry VSI list is created and the rule is retargeted to
 * it; thereafter new VSIs are appended to the existing list.
 */
3194 enum ice_status status = ICE_SUCCESS;
3195 u16 vsi_list_id = 0;
/* queue/queue-group destinations cannot be aggregated into a VSI list */
3197 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
3198 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
3199 return ICE_ERR_NOT_IMPL;
3201 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
3202 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
3203 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
3204 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
3205 return ICE_ERR_NOT_IMPL;
3207 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
3208 /* Only one entry existed in the mapping and it was not already
3209 * a part of a VSI list. So, create a VSI list with the old and
3212 struct ice_fltr_info tmp_fltr;
3213 u16 vsi_handle_arr[2];
3215 /* A rule already exists with the new VSI being added */
3216 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
3217 return ICE_ERR_ALREADY_EXISTS;
3219 vsi_handle_arr[0] = cur_fltr->vsi_handle;
3220 vsi_handle_arr[1] = new_fltr->vsi_handle;
3221 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3223 new_fltr->lkup_type);
3227 tmp_fltr = *new_fltr;
3228 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
3229 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3230 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3231 /* Update the previous switch rule of "MAC forward to VSI" to
3232 * "MAC fwd to VSI list"
3234 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* keep the book-keeping entry in sync with what hardware now holds */
3238 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
3239 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3240 m_entry->vsi_list_info =
3241 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3244 /* If this entry was large action then the large action needs
3245 * to be updated to point to FWD to VSI list
3247 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
3249 ice_add_marker_act(hw, m_entry,
3250 m_entry->sw_marker_id,
3251 m_entry->lg_act_idx)
3253 u16 vsi_handle = new_fltr->vsi_handle;
3254 enum ice_adminq_opc opcode;
3256 if (!m_entry->vsi_list_info)
3259 /* A rule already exists with the new VSI being added */
3260 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
3263 /* Update the previously created VSI list set with
3264 * the new VSI ID passed in
3266 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
3267 opcode = ice_aqc_opc_update_sw_rules;
3269 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
3270 vsi_list_id, false, opcode,
3271 new_fltr->lkup_type);
3272 /* update VSI list mapping info with new VSI ID */
3274 ice_set_bit(vsi_handle,
3275 m_entry->vsi_list_info->vsi_map);
/* success path bumps the subscriber count for this filter */
3278 m_entry->vsi_count++;
3283 * ice_find_rule_entry - Search a rule entry
3284 * @list_head: head of rule list
3285 * @f_info: rule information
3287 * Helper function to search for a given rule entry
3288 * Returns pointer to entry storing the rule if found
3290 static struct ice_fltr_mgmt_list_entry *
3291 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
3292 struct ice_fltr_info *f_info)
/* Linear search of a filter-management list for an entry whose lookup
 * data (entire l_data union) and direction flag both match f_info;
 * returns the entry or NULL if not found.
 */
3294 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
3296 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
/* whole-union memcmp: unused l_data bytes must be zeroed by callers */
3298 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3299 sizeof(f_info->l_data)) &&
3300 f_info->flag == list_itr->fltr_info.flag) {
3309 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
3310 * @recp_list: VSI lists needs to be searched
3311 * @vsi_handle: VSI handle to be found in VSI list
3312 * @vsi_list_id: VSI list ID found containing vsi_handle
3314 * Helper function to search a VSI list with single entry containing given VSI
3315 * handle element. This can be extended further to search VSI list with more
3316 * than 1 vsi_count. Returns pointer to VSI list entry if found.
3318 static struct ice_vsi_list_map_info *
3319 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
/* Searches the recipe's filter rules for a VSI list containing vsi_handle,
 * handling both advanced-rule and legacy-rule list layouts; on a match
 * *vsi_list_id is set and the map entry returned (NULL otherwise).
 */
3322 struct ice_vsi_list_map_info *map_info = NULL;
3323 struct LIST_HEAD_TYPE *list_head;
3325 list_head = &recp_list->filt_rules;
/* advanced recipes store entries as ice_adv_fltr_mgmt_list_entry */
3326 if (recp_list->adv_rule) {
3327 struct ice_adv_fltr_mgmt_list_entry *list_itr;
3329 LIST_FOR_EACH_ENTRY(list_itr, list_head,
3330 ice_adv_fltr_mgmt_list_entry,
3332 if (list_itr->vsi_list_info) {
3333 map_info = list_itr->vsi_list_info;
3334 if (ice_is_bit_set(map_info->vsi_map,
3336 *vsi_list_id = map_info->vsi_list_id;
3342 struct ice_fltr_mgmt_list_entry *list_itr;
3344 LIST_FOR_EACH_ENTRY(list_itr, list_head,
3345 ice_fltr_mgmt_list_entry,
/* legacy path additionally requires a single-VSI list (vsi_count == 1) */
3347 if (list_itr->vsi_count == 1 &&
3348 list_itr->vsi_list_info) {
3349 map_info = list_itr->vsi_list_info;
3350 if (ice_is_bit_set(map_info->vsi_map,
3352 *vsi_list_id = map_info->vsi_list_id;
3362 * ice_add_rule_internal - add rule for a given lookup type
3363 * @hw: pointer to the hardware structure
3364 * @recp_list: recipe list for which rule has to be added
3365 * @lport: logic port number on which function add rule
3366 * @f_entry: structure containing MAC forwarding information
3368 * Adds or updates the rule lists for a given recipe
3370 static enum ice_status
3371 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3372 u8 lport, struct ice_fltr_list_entry *f_entry)
/* Adds a filter for a recipe under the recipe's rule lock: if no matching
 * rule exists yet a new forwarding rule is programmed, otherwise the new
 * VSI is folded into the existing rule's VSI list.
 */
3374 struct ice_fltr_info *new_fltr, *cur_fltr;
3375 struct ice_fltr_mgmt_list_entry *m_entry;
3376 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3377 enum ice_status status = ICE_SUCCESS;
3379 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3380 return ICE_ERR_PARAM;
3382 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
3383 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
3384 f_entry->fltr_info.fwd_id.hw_vsi_id =
3385 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3387 rule_lock = &recp_list->filt_rule_lock;
3389 ice_acquire_lock(rule_lock);
3390 new_fltr = &f_entry->fltr_info;
/* Rx rules match on the logical port; Tx rules match on the source VSI */
3391 if (new_fltr->flag & ICE_FLTR_RX)
3392 new_fltr->src = lport;
3393 else if (new_fltr->flag & ICE_FLTR_TX)
3395 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3397 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
3399 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3400 goto exit_add_rule_internal;
3403 cur_fltr = &m_entry->fltr_info;
3404 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
3406 exit_add_rule_internal:
3407 ice_release_lock(rule_lock);
3412 * ice_remove_vsi_list_rule
3413 * @hw: pointer to the hardware structure
3414 * @vsi_list_id: VSI list ID generated as part of allocate resource
3415 * @lkup_type: switch rule filter lookup type
3417 * The VSI list should be emptied before this function is called to remove the
3420 static enum ice_status
3421 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
3422 enum ice_sw_lkup_type lkup_type)
/* Releases a firmware VSI list resource; the caller must have removed all
 * member VSIs first (this only frees the resource, it does not empty it).
 */
3424 /* Free the vsi_list resource that we allocated. It is assumed that the
3425 * list is empty at this point.
3427 return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
3428 ice_aqc_opc_free_res);
3432 * ice_rem_update_vsi_list
3433 * @hw: pointer to the hardware structure
3434 * @vsi_handle: VSI handle of the VSI to remove
3435 * @fm_list: filter management entry for which the VSI list management needs to
3438 static enum ice_status
3439 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
3440 struct ice_fltr_mgmt_list_entry *fm_list)
/* Unsubscribes one VSI from a shared filter: removes it from the firmware
 * VSI list, and when only one member remains (non-VLAN) collapses the rule
 * back to a direct FWD_TO_VSI and frees the now-unneeded VSI list.
 */
3442 enum ice_sw_lkup_type lkup_type;
3443 enum ice_status status = ICE_SUCCESS;
3446 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
3447 fm_list->vsi_count == 0)
3448 return ICE_ERR_PARAM;
3450 /* A rule with the VSI being removed does not exist */
3451 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
3452 return ICE_ERR_DOES_NOT_EXIST;
3454 lkup_type = fm_list->fltr_info.lkup_type;
3455 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
/* "true" = remove this VSI from the firmware-side list */
3456 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
3457 ice_aqc_opc_update_sw_rules,
3462 fm_list->vsi_count--;
3463 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
/* one member left (non-VLAN): demote the rule to plain FWD_TO_VSI */
3465 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
3466 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
3467 struct ice_vsi_list_map_info *vsi_list_info =
3468 fm_list->vsi_list_info;
3471 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
3473 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
3474 return ICE_ERR_OUT_OF_RANGE;
3476 /* Make sure VSI list is empty before removing it below */
3477 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
3479 ice_aqc_opc_update_sw_rules,
3484 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
3485 tmp_fltr_info.fwd_id.hw_vsi_id =
3486 ice_get_hw_vsi_num(hw, rem_vsi_handle);
3487 tmp_fltr_info.vsi_handle = rem_vsi_handle;
3488 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
3490 ice_debug(hw, ICE_DBG_SW,
3491 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
3492 tmp_fltr_info.fwd_id.hw_vsi_id, status);
/* keep the book-keeping entry describing the demoted rule */
3496 fm_list->fltr_info = tmp_fltr_info;
/* VSI list no longer referenced: free the firmware resource and the map */
3499 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
3500 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
3501 struct ice_vsi_list_map_info *vsi_list_info =
3502 fm_list->vsi_list_info;
3504 /* Remove the VSI list since it is no longer used */
3505 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
3507 ice_debug(hw, ICE_DBG_SW,
3508 "Failed to remove VSI list %d, error %d\n",
3509 vsi_list_id, status);
3513 LIST_DEL(&vsi_list_info->list_entry);
3514 ice_free(hw, vsi_list_info);
3515 fm_list->vsi_list_info = NULL;
3522 * ice_remove_rule_internal - Remove a filter rule of a given type
3524 * @hw: pointer to the hardware structure
3525 * @recp_list: recipe list for which the rule needs to removed
3526 * @f_entry: rule entry containing filter information
/* Looks the rule up in recp_list->filt_rules under filt_rule_lock and either
 * removes it from hardware outright, decrements a shared VSI list's ref_cnt,
 * or delegates per-VSI removal to ice_rem_update_vsi_list(). When the rule is
 * fully gone it sends a remove_sw_rules AQ command and frees the book-keeping
 * entry. Returns ICE_ERR_DOES_NOT_EXIST if the rule was never added.
 */
3528 static enum ice_status
3529 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3530 struct ice_fltr_list_entry *f_entry)
3532 struct ice_fltr_mgmt_list_entry *list_elem;
3533 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3534 enum ice_status status = ICE_SUCCESS;
3535 bool remove_rule = false;
3538 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3539 return ICE_ERR_PARAM;
/* Translate software VSI handle to the HW VSI number used in rules */
3540 f_entry->fltr_info.fwd_id.hw_vsi_id =
3541 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3543 rule_lock = &recp_list->filt_rule_lock;
3544 ice_acquire_lock(rule_lock);
3545 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
3546 &f_entry->fltr_info);
3548 status = ICE_ERR_DOES_NOT_EXIST;
3552 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
3554 } else if (!list_elem->vsi_list_info) {
3555 status = ICE_ERR_DOES_NOT_EXIST;
3557 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
3558 /* a ref_cnt > 1 indicates that the vsi_list is being
3559 * shared by multiple rules. Decrement the ref_cnt and
3560 * remove this rule, but do not modify the list, as it
3561 * is in-use by other rules.
3563 list_elem->vsi_list_info->ref_cnt--;
3566 /* a ref_cnt of 1 indicates the vsi_list is only used
3567 * by one rule. However, the original removal request is only
3568 * for a single VSI. Update the vsi_list first, and only
3569 * remove the rule if there are no further VSIs in this list.
3571 vsi_handle = f_entry->fltr_info.vsi_handle;
3572 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
3575 /* if VSI count goes to zero after updating the VSI list */
3576 if (list_elem->vsi_count == 0)
3581 /* Remove the lookup rule */
3582 struct ice_aqc_sw_rules_elem *s_rule;
3584 s_rule = (struct ice_aqc_sw_rules_elem *)
3585 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
3587 status = ICE_ERR_NO_MEMORY;
3591 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3592 ice_aqc_opc_remove_sw_rules);
3594 status = ice_aq_sw_rules(hw, s_rule,
3595 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
3596 ice_aqc_opc_remove_sw_rules, NULL);
3598 /* Remove a book keeping from the list */
3599 ice_free(hw, s_rule);
3604 LIST_DEL(&list_elem->list_entry);
3605 ice_free(hw, list_elem);
3608 ice_release_lock(rule_lock);
3613 * ice_aq_get_res_alloc - get allocated resources
3614 * @hw: pointer to the HW struct
3615 * @num_entries: pointer to u16 to store the number of resource entries returned
3616 * @buf: pointer to user-supplied buffer
3617 * @buf_size: size of buff
3618 * @cd: pointer to command details structure or NULL
3620 * The user-supplied buffer must be large enough to store the resource
3621 * information for all resource types. Each resource type is an
3622 * ice_aqc_get_res_resp_data_elem structure.
/* Thin wrapper around the get_res_alloc admin queue command; on success the
 * response element count is copied back only when num_entries is non-NULL.
 */
3625 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
3626 u16 buf_size, struct ice_sq_cd *cd)
3628 struct ice_aqc_get_res_alloc *resp;
3629 enum ice_status status;
3630 struct ice_aq_desc desc;
3633 return ICE_ERR_BAD_PTR;
3635 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
3636 return ICE_ERR_INVAL_SIZE;
3638 resp = &desc.params.get_res;
3640 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
3641 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* num_entries is optional; firmware count is little-endian in the desc */
3643 if (!status && num_entries)
3644 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
3650 * ice_aq_get_res_descs - get allocated resource descriptors
3651 * @hw: pointer to the hardware structure
3652 * @num_entries: number of resource entries in buffer
3653 * @buf: Indirect buffer to hold data parameters and response
3654 * @buf_size: size of buffer for indirect commands
3655 * @res_type: resource type
3656 * @res_shared: is resource shared
3657 * @desc_id: input - first desc ID to start; output - next desc ID
3658 * @cd: pointer to command details structure or NULL
/* Issues the get_allocd_res_desc AQ command. buf_size must exactly match
 * num_entries response elements; *desc_id is both the paging cursor in and
 * the continuation cursor out.
 */
3661 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
3662 struct ice_aqc_get_allocd_res_desc_resp *buf,
3663 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
3664 struct ice_sq_cd *cd)
3666 struct ice_aqc_get_allocd_res_desc *cmd;
3667 struct ice_aq_desc desc;
3668 enum ice_status status;
3670 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3672 cmd = &desc.params.get_res_desc;
3675 return ICE_ERR_PARAM;
3677 if (buf_size != (num_entries * sizeof(*buf)))
3678 return ICE_ERR_PARAM;
3680 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
/* Pack the resource type and the shared flag into one LE16 field */
3682 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
3683 ICE_AQC_RES_TYPE_M) | (res_shared ?
3684 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
3685 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
3687 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* Hand back firmware's "next descriptor" so callers can page through */
3689 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
3695 * ice_add_mac_rule - Add a MAC address based filter rule
3696 * @hw: pointer to the hardware structure
3697 * @m_list: list of MAC addresses and forwarding information
3698 * @sw: pointer to switch info struct for which function add rule
3699 * @lport: logic port number on which function add rule
3701 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
3702 * multiple unicast addresses, the function assumes that all the
3703 * addresses are unique in a given add_mac call. It doesn't
3704 * check for duplicates in this case, removing duplicates from a given
3705 * list should be taken care of in the caller of this function.
/* Two-phase add: multicast (and shared-unicast) entries go one-by-one through
 * ice_add_rule_internal(); exclusive unicast entries are batched into a
 * single calloc'd switch-rule buffer and sent via bulk add_sw_rules AQ calls,
 * after which rule IDs returned by FW are copied back and a book-keeping
 * entry is created per address. Per-entry status is stored in each
 * m_list_itr->status; the first failure aborts the whole call.
 */
3707 static enum ice_status
3708 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
3709 struct ice_switch_info *sw, u8 lport)
3711 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
3712 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3713 struct ice_fltr_list_entry *m_list_itr;
3714 struct LIST_HEAD_TYPE *rule_head;
3715 u16 total_elem_left, s_rule_size;
3716 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3717 enum ice_status status = ICE_SUCCESS;
3718 u16 num_unicast = 0;
3722 rule_lock = &recp_list->filt_rule_lock;
3723 rule_head = &recp_list->filt_rules;
/* Pass 1: validate every entry and dispatch non-bulk (multicast /
 * shared-unicast) adds immediately.
 */
3725 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3727 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3731 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3732 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3733 if (!ice_is_vsi_valid(hw, vsi_handle))
3734 return ICE_ERR_PARAM;
3735 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3736 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3737 /* update the src in case it is VSI num */
3738 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3739 return ICE_ERR_PARAM;
3740 m_list_itr->fltr_info.src = hw_vsi_id;
3741 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3742 IS_ZERO_ETHER_ADDR(add))
3743 return ICE_ERR_PARAM;
3744 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3745 /* Don't overwrite the unicast address */
3746 ice_acquire_lock(rule_lock);
3747 if (ice_find_rule_entry(rule_head,
3748 &m_list_itr->fltr_info)) {
3749 ice_release_lock(rule_lock);
3750 return ICE_ERR_ALREADY_EXISTS;
3752 ice_release_lock(rule_lock);
3754 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3755 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3756 m_list_itr->status =
3757 ice_add_rule_internal(hw, recp_list, lport,
3759 if (m_list_itr->status)
3760 return m_list_itr->status;
3764 ice_acquire_lock(rule_lock);
3765 /* Exit if no suitable entries were found for adding bulk switch rule */
3767 status = ICE_SUCCESS;
3768 goto ice_add_mac_exit;
3771 /* Allocate switch rule buffer for the bulk update for unicast */
3772 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3773 s_rule = (struct ice_aqc_sw_rules_elem *)
3774 ice_calloc(hw, num_unicast, s_rule_size);
3776 status = ICE_ERR_NO_MEMORY;
3777 goto ice_add_mac_exit;
/* Pass 2: serialize each exclusive-unicast filter into the bulk buffer */
3781 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3783 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3784 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3786 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3787 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3788 ice_aqc_opc_add_sw_rules);
3789 r_iter = (struct ice_aqc_sw_rules_elem *)
3790 ((u8 *)r_iter + s_rule_size);
3794 /* Call AQ bulk switch rule update for all unicast addresses */
3796 /* Call AQ switch rule in AQ_MAX chunk */
3797 for (total_elem_left = num_unicast; total_elem_left > 0;
3798 total_elem_left -= elem_sent) {
3799 struct ice_aqc_sw_rules_elem *entry = r_iter;
/* Chunk size is limited by the max AQ buffer length */
3801 elem_sent = MIN_T(u8, total_elem_left,
3802 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
3803 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3804 elem_sent, ice_aqc_opc_add_sw_rules,
3807 goto ice_add_mac_exit;
3808 r_iter = (struct ice_aqc_sw_rules_elem *)
3809 ((u8 *)r_iter + (elem_sent * s_rule_size));
3812 /* Fill up rule ID based on the value returned from FW */
3814 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3816 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3817 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3818 struct ice_fltr_mgmt_list_entry *fm_entry;
3820 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3821 f_info->fltr_rule_id =
3822 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3823 f_info->fltr_act = ICE_FWD_TO_VSI;
3824 /* Create an entry to track this MAC address */
3825 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3826 ice_malloc(hw, sizeof(*fm_entry));
3828 status = ICE_ERR_NO_MEMORY;
3829 goto ice_add_mac_exit;
3831 fm_entry->fltr_info = *f_info;
3832 fm_entry->vsi_count = 1;
3833 /* The book keeping entries will get removed when
3834 * base driver calls remove filter AQ command
3837 LIST_ADD(&fm_entry->list_entry, rule_head);
3838 r_iter = (struct ice_aqc_sw_rules_elem *)
3839 ((u8 *)r_iter + s_rule_size);
3844 ice_release_lock(rule_lock);
3846 ice_free(hw, s_rule);
3851 * ice_add_mac - Add a MAC address based filter rule
3852 * @hw: pointer to the hardware structure
3853 * @m_list: list of MAC addresses and forwarding information
3855 * Function add MAC rule for logical port from HW struct
/* Public wrapper: validates inputs then delegates to ice_add_mac_rule()
 * using the HW-owned switch_info and the port_info's logical port.
 */
3857 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3860 return ICE_ERR_PARAM;
3862 return ice_add_mac_rule(hw, m_list, hw->switch_info,
3863 hw->port_info->lport);
3867 * ice_add_vlan_internal - Add one VLAN based filter rule
3868 * @hw: pointer to the hardware structure
3869 * @recp_list: recipe list for which rule has to be added
3870 * @f_entry: filter entry containing one VLAN information
/* VLAN rules always forward via a VSI list. Three cases, handled under
 * filt_rule_lock:
 *  1. no existing rule for this VLAN: reuse a matching VSI list if one exists
 *     (bump ref_cnt) or create a new single-VSI list, then add the rule;
 *  2. rule exists and its VSI list is unshared (ref_cnt == 1): append the VSI
 *     via ice_add_update_vsi_list();
 *  3. rule exists but its list is shared: build a new two-VSI list, repoint
 *     the rule at it, and drop a reference on the old list.
 */
3872 static enum ice_status
3873 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3874 struct ice_fltr_list_entry *f_entry)
3876 struct ice_fltr_mgmt_list_entry *v_list_itr;
3877 struct ice_fltr_info *new_fltr, *cur_fltr;
3878 enum ice_sw_lkup_type lkup_type;
3879 u16 vsi_list_id = 0, vsi_handle;
3880 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3881 enum ice_status status = ICE_SUCCESS;
3883 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3884 return ICE_ERR_PARAM;
3886 f_entry->fltr_info.fwd_id.hw_vsi_id =
3887 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3888 new_fltr = &f_entry->fltr_info;
3890 /* VLAN ID should only be 12 bits */
3891 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3892 return ICE_ERR_PARAM;
3894 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3895 return ICE_ERR_PARAM;
3897 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3898 lkup_type = new_fltr->lkup_type;
3899 vsi_handle = new_fltr->vsi_handle;
3900 rule_lock = &recp_list->filt_rule_lock;
3901 ice_acquire_lock(rule_lock);
3902 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
3904 struct ice_vsi_list_map_info *map_info = NULL;
3906 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3907 /* All VLAN pruning rules use a VSI list. Check if
3908 * there is already a VSI list containing VSI that we
3909 * want to add. If found, use the same vsi_list_id for
3910 * this new VLAN rule or else create a new list.
3912 map_info = ice_find_vsi_list_entry(recp_list,
3916 status = ice_create_vsi_list_rule(hw,
3924 /* Convert the action to forwarding to a VSI list. */
3925 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3926 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3929 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3931 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
3934 status = ICE_ERR_DOES_NOT_EXIST;
3937 /* reuse VSI list for new rule and increment ref_cnt */
3939 v_list_itr->vsi_list_info = map_info;
3940 map_info->ref_cnt++;
3942 v_list_itr->vsi_list_info =
3943 ice_create_vsi_list_map(hw, &vsi_handle,
3947 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3948 /* Update existing VSI list to add new VSI ID only if it used
3951 cur_fltr = &v_list_itr->fltr_info;
3952 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3955 /* If VLAN rule exists and VSI list being used by this rule is
3956 * referenced by more than 1 VLAN rule. Then create a new VSI
3957 * list appending previous VSI with new VSI and update existing
3958 * VLAN rule to point to new VSI list ID
3960 struct ice_fltr_info tmp_fltr;
3961 u16 vsi_handle_arr[2];
3964 /* Current implementation only supports reusing VSI list with
3965 * one VSI count. We should never hit below condition
3967 if (v_list_itr->vsi_count > 1 &&
3968 v_list_itr->vsi_list_info->ref_cnt > 1) {
3969 ice_debug(hw, ICE_DBG_SW,
3970 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3971 status = ICE_ERR_CFG;
/* The shared list holds exactly one VSI; fetch its handle */
3976 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3979 /* A rule already exists with the new VSI being added */
3980 if (cur_handle == vsi_handle) {
3981 status = ICE_ERR_ALREADY_EXISTS;
3985 vsi_handle_arr[0] = cur_handle;
3986 vsi_handle_arr[1] = vsi_handle;
3987 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3988 &vsi_list_id, lkup_type);
3992 tmp_fltr = v_list_itr->fltr_info;
3993 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3994 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3995 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3996 /* Update the previous switch rule to a new VSI list which
3997 * includes current VSI that is requested
3999 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
4003 /* before overriding VSI list map info. decrement ref_cnt of
4006 v_list_itr->vsi_list_info->ref_cnt--;
4008 /* now update to newly created list */
4009 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
4010 v_list_itr->vsi_list_info =
4011 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4013 v_list_itr->vsi_count++;
4017 ice_release_lock(rule_lock);
4022 * ice_add_vlan_rule - Add VLAN based filter rule
4023 * @hw: pointer to the hardware structure
4024 * @v_list: list of VLAN entries and forwarding information
4025 * @sw: pointer to switch info struct for which function add rule
/* Iterates v_list, forcing the TX flag on each entry and adding it through
 * ice_add_vlan_internal(); stops at the first per-entry failure.
 */
4027 static enum ice_status
4028 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4029 struct ice_switch_info *sw)
4031 struct ice_fltr_list_entry *v_list_itr;
4032 struct ice_sw_recipe *recp_list;
4034 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
4035 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
4037 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
4038 return ICE_ERR_PARAM;
4039 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
4040 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
4042 if (v_list_itr->status)
4043 return v_list_itr->status;
4049 * ice_add_vlan - Add a VLAN based filter rule
4050 * @hw: pointer to the hardware structure
4051 * @v_list: list of VLAN and forwarding information
4053 * Function add VLAN rule for logical port from HW struct
/* Public wrapper: validates inputs then delegates to ice_add_vlan_rule()
 * with the HW-owned switch_info.
 */
4055 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4058 return ICE_ERR_PARAM;
4060 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
4064 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
4065 * @hw: pointer to the hardware structure
4066 * @mv_list: list of MAC and VLAN filters
4067 * @sw: pointer to switch info struct for which function add rule
4068 * @lport: logic port number on which function add rule
4070 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
4071 * pruning bits enabled, then it is the responsibility of the caller to make
4072 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
4073 * VLAN won't be received on that VSI otherwise.
/* Per-entry add through ice_add_rule_internal() on the MAC_VLAN recipe;
 * forces the TX flag and stops at the first per-entry failure.
 */
4075 static enum ice_status
4076 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
4077 struct ice_switch_info *sw, u8 lport)
4079 struct ice_fltr_list_entry *mv_list_itr;
4080 struct ice_sw_recipe *recp_list;
4082 if (!mv_list || !hw)
4083 return ICE_ERR_PARAM;
4085 recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
4086 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
4088 enum ice_sw_lkup_type l_type =
4089 mv_list_itr->fltr_info.lkup_type;
4091 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4092 return ICE_ERR_PARAM;
4093 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
4094 mv_list_itr->status =
4095 ice_add_rule_internal(hw, recp_list, lport,
4097 if (mv_list_itr->status)
4098 return mv_list_itr->status;
4104 * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
4105 * @hw: pointer to the hardware structure
4106 * @mv_list: list of MAC VLAN addresses and forwarding information
4108 * Function add MAC VLAN rule for logical port from HW struct
/* Public wrapper: validates inputs then delegates to ice_add_mac_vlan_rule()
 * with the HW-owned switch_info and the port_info's logical port.
 */
4111 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4113 if (!mv_list || !hw)
4114 return ICE_ERR_PARAM;
4116 return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
4117 hw->port_info->lport);
4121 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
4122 * @hw: pointer to the hardware structure
4123 * @em_list: list of ether type MAC filter, MAC is optional
4124 * @sw: pointer to switch info struct for which function add rule
4125 * @lport: logic port number on which function add rule
4127 * This function requires the caller to populate the entries in
4128 * the filter list with the necessary fields (including flags to
4129 * indicate Tx or Rx rules).
/* Per-entry add; unlike the MAC/VLAN adders the recipe is selected from the
 * entry's own lookup type (ETHERTYPE or ETHERTYPE_MAC) and the caller's
 * Tx/Rx flag is left untouched. Stops at the first per-entry failure.
 */
4131 static enum ice_status
4132 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
4133 struct ice_switch_info *sw, u8 lport)
4135 struct ice_fltr_list_entry *em_list_itr;
4137 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
4139 struct ice_sw_recipe *recp_list;
4140 enum ice_sw_lkup_type l_type;
4142 l_type = em_list_itr->fltr_info.lkup_type;
4143 recp_list = &sw->recp_list[l_type];
4145 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
4146 l_type != ICE_SW_LKUP_ETHERTYPE)
4147 return ICE_ERR_PARAM;
4149 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
4152 if (em_list_itr->status)
4153 return em_list_itr->status;
4159 * ice_add_eth_mac - Add a ethertype based filter rule
4160 * @hw: pointer to the hardware structure
4161 * @em_list: list of ethertype and forwarding information
4163 * Function add ethertype rule for logical port from HW struct
/* Public wrapper: validates inputs then delegates to ice_add_eth_mac_rule()
 * with the HW-owned switch_info and the port_info's logical port.
 */
4166 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
4168 if (!em_list || !hw)
4169 return ICE_ERR_PARAM;
4171 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
4172 hw->port_info->lport);
4176 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
4177 * @hw: pointer to the hardware structure
4178 * @em_list: list of ethertype or ethertype MAC entries
4179 * @sw: pointer to switch info struct for which function add rule
/* Safe-iterates em_list (removal may unlink entries) and removes each rule
 * via ice_remove_rule_internal(); stops at the first per-entry failure.
 */
4181 static enum ice_status
4182 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
4183 struct ice_switch_info *sw)
4185 struct ice_fltr_list_entry *em_list_itr, *tmp;
4187 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
4189 struct ice_sw_recipe *recp_list;
4190 enum ice_sw_lkup_type l_type;
4192 l_type = em_list_itr->fltr_info.lkup_type;
4194 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
4195 l_type != ICE_SW_LKUP_ETHERTYPE)
4196 return ICE_ERR_PARAM;
4198 recp_list = &sw->recp_list[l_type];
4199 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4201 if (em_list_itr->status)
4202 return em_list_itr->status;
4208 * ice_remove_eth_mac - remove a ethertype based filter rule
4209 * @hw: pointer to the hardware structure
4210 * @em_list: list of ethertype and forwarding information
/* Public wrapper: validates inputs then delegates to
 * ice_remove_eth_mac_rule() with the HW-owned switch_info.
 */
4214 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
4216 if (!em_list || !hw)
4217 return ICE_ERR_PARAM;
4219 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
4223 * ice_rem_sw_rule_info
4224 * @hw: pointer to the hardware structure
4225 * @rule_head: pointer to the switch list structure that we want to delete
/* Frees all book-keeping entries on one recipe's filter-rule list. Host-side
 * cleanup only: no AQ commands are issued here.
 */
4228 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
4230 if (!LIST_EMPTY(rule_head)) {
4231 struct ice_fltr_mgmt_list_entry *entry;
4232 struct ice_fltr_mgmt_list_entry *tmp;
4234 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
4235 ice_fltr_mgmt_list_entry, list_entry) {
4236 LIST_DEL(&entry->list_entry);
4237 ice_free(hw, entry);
4243 * ice_rem_adv_rule_info
4244 * @hw: pointer to the hardware structure
4245 * @rule_head: pointer to the switch list structure that we want to delete
/* Frees all advanced-rule book-keeping entries, including each entry's
 * separately allocated lkups array. Host-side cleanup only.
 */
4248 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
4250 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
4251 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
4253 if (LIST_EMPTY(rule_head))
4256 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
4257 ice_adv_fltr_mgmt_list_entry, list_entry) {
4258 LIST_DEL(&lst_itr->list_entry);
/* lkups is a separate allocation owned by the entry */
4259 ice_free(hw, lst_itr->lkups);
4260 ice_free(hw, lst_itr);
4265 * ice_rem_all_sw_rules_info
4266 * @hw: pointer to the hardware structure
/* Walks every recipe and frees its rule book-keeping, dispatching to the
 * basic or advanced cleanup helper based on the recipe's adv_rule flag;
 * the flag itself is cleared once an advanced recipe's list is empty.
 */
4268 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
4270 struct ice_switch_info *sw = hw->switch_info;
4273 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4274 struct LIST_HEAD_TYPE *rule_head;
4276 rule_head = &sw->recp_list[i].filt_rules;
4277 if (!sw->recp_list[i].adv_rule)
4278 ice_rem_sw_rule_info(hw, rule_head);
4280 ice_rem_adv_rule_info(hw, rule_head);
4281 if (sw->recp_list[i].adv_rule &&
4282 LIST_EMPTY(&sw->recp_list[i].filt_rules))
4283 sw->recp_list[i].adv_rule = false;
4288 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
4289 * @pi: pointer to the port_info structure
4290 * @vsi_handle: VSI handle to set as default
4291 * @set: true to add the above mentioned switch rule, false to remove it
4292 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
4294 * add filter rule to set/unset given VSI as default VSI for the switch
4295 * (represented by swid)
/* Builds a single ICE_SW_LKUP_DFLT rule and sends it with either the add or
 * remove opcode. On add, the FW-returned rule index and the HW VSI number
 * are cached in pi (per direction); on remove, those fields are reset to
 * their invalid sentinels. The rule buffer is freed on all paths.
 */
4298 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
4301 struct ice_aqc_sw_rules_elem *s_rule;
4302 struct ice_fltr_info f_info;
4303 struct ice_hw *hw = pi->hw;
4304 enum ice_adminq_opc opcode;
4305 enum ice_status status;
4309 if (!ice_is_vsi_valid(hw, vsi_handle))
4310 return ICE_ERR_PARAM;
4311 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
/* Remove needs only the rule header; add carries a dummy ETH header */
4313 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
4314 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
4315 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
4317 return ICE_ERR_NO_MEMORY;
4319 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
4321 f_info.lkup_type = ICE_SW_LKUP_DFLT;
4322 f_info.flag = direction;
4323 f_info.fltr_act = ICE_FWD_TO_VSI;
4324 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
/* Rx rules are sourced from the port; Tx rules from the VSI itself */
4326 if (f_info.flag & ICE_FLTR_RX) {
4327 f_info.src = pi->lport;
4328 f_info.src_id = ICE_SRC_ID_LPORT;
4330 f_info.fltr_rule_id =
4331 pi->dflt_rx_vsi_rule_id;
4332 } else if (f_info.flag & ICE_FLTR_TX) {
4333 f_info.src_id = ICE_SRC_ID_VSI;
4334 f_info.src = hw_vsi_id;
4336 f_info.fltr_rule_id =
4337 pi->dflt_tx_vsi_rule_id;
4341 opcode = ice_aqc_opc_add_sw_rules;
4343 opcode = ice_aqc_opc_remove_sw_rules;
4345 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
4347 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
4348 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
/* Success: cache (set) or invalidate (clear) the per-direction state */
4351 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4353 if (f_info.flag & ICE_FLTR_TX) {
4354 pi->dflt_tx_vsi_num = hw_vsi_id;
4355 pi->dflt_tx_vsi_rule_id = index;
4356 } else if (f_info.flag & ICE_FLTR_RX) {
4357 pi->dflt_rx_vsi_num = hw_vsi_id;
4358 pi->dflt_rx_vsi_rule_id = index;
4361 if (f_info.flag & ICE_FLTR_TX) {
4362 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
4363 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
4364 } else if (f_info.flag & ICE_FLTR_RX) {
4365 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
4366 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
4371 ice_free(hw, s_rule);
4376 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
4377 * @list_head: head of rule list
4378 * @f_info: rule information
4380 * Helper function to search for a unicast rule entry - this is to be used
4381 * to remove unicast MAC filter that is not shared with other VSIs on the
4384 * Returns pointer to entry storing the rule if found
/* Matches on lookup data, HW VSI ID, and flag — stricter than
 * ice_find_rule_entry in that the destination VSI must match too.
 * NOTE(review): caller is presumed to hold the list's filt_rule_lock.
 */
4386 static struct ice_fltr_mgmt_list_entry *
4387 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
4388 struct ice_fltr_info *f_info)
4390 struct ice_fltr_mgmt_list_entry *list_itr;
4392 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4394 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4395 sizeof(f_info->l_data)) &&
4396 f_info->fwd_id.hw_vsi_id ==
4397 list_itr->fltr_info.fwd_id.hw_vsi_id &&
4398 f_info->flag == list_itr->fltr_info.flag)
4405 * ice_remove_mac_rule - remove a MAC based filter rule
4406 * @hw: pointer to the hardware structure
4407 * @m_list: list of MAC addresses and forwarding information
4408 * @recp_list: list from which function remove MAC address
4410 * This function removes either a MAC filter rule or a specific VSI from a
4411 * VSI list for a multicast MAC address.
4413 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
4414 * ice_add_mac. Caller should be aware that this call will only work if all
4415 * the entries passed into m_list were added previously. It will not attempt to
4416 * do a partial remove of entries that were found.
/* For exclusive unicast addresses the rule must match this exact VSI
 * (ice_find_ucast_rule_entry) before removal, so one VSI cannot tear down
 * another VSI's unicast filter. Stops at the first per-entry failure.
 */
4418 static enum ice_status
4419 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4420 struct ice_sw_recipe *recp_list)
4422 struct ice_fltr_list_entry *list_itr, *tmp;
4423 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4426 return ICE_ERR_PARAM;
4428 rule_lock = &recp_list->filt_rule_lock;
4429 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
4431 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
4432 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
4435 if (l_type != ICE_SW_LKUP_MAC)
4436 return ICE_ERR_PARAM;
4438 vsi_handle = list_itr->fltr_info.vsi_handle;
4439 if (!ice_is_vsi_valid(hw, vsi_handle))
4440 return ICE_ERR_PARAM;
4442 list_itr->fltr_info.fwd_id.hw_vsi_id =
4443 ice_get_hw_vsi_num(hw, vsi_handle);
4444 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4445 /* Don't remove the unicast address that belongs to
4446 * another VSI on the switch, since it is not being
4449 ice_acquire_lock(rule_lock);
4450 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
4451 &list_itr->fltr_info)) {
4452 ice_release_lock(rule_lock);
4453 return ICE_ERR_DOES_NOT_EXIST;
4455 ice_release_lock(rule_lock);
4457 list_itr->status = ice_remove_rule_internal(hw, recp_list,
4459 if (list_itr->status)
4460 return list_itr->status;
4466 * ice_remove_mac - remove a MAC address based filter rule
4467 * @hw: pointer to the hardware structure
4468 * @m_list: list of MAC addresses and forwarding information
/* Public wrapper: resolves the MAC recipe from hw->switch_info and delegates
 * to ice_remove_mac_rule() (which performs the NULL checks).
 */
4471 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4473 struct ice_sw_recipe *recp_list;
4475 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4476 return ice_remove_mac_rule(hw, m_list, recp_list);
4480 * ice_remove_vlan_rule - Remove VLAN based filter rule
4481 * @hw: pointer to the hardware structure
4482 * @v_list: list of VLAN entries and forwarding information
4483 * @recp_list: list from which function remove VLAN
/* Safe-iterates v_list and removes each VLAN rule via
 * ice_remove_rule_internal(); stops at the first per-entry failure.
 */
4485 static enum ice_status
4486 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4487 struct ice_sw_recipe *recp_list)
4489 struct ice_fltr_list_entry *v_list_itr, *tmp;
4491 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4493 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4495 if (l_type != ICE_SW_LKUP_VLAN)
4496 return ICE_ERR_PARAM;
4497 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4499 if (v_list_itr->status)
4500 return v_list_itr->status;
4506 * ice_remove_vlan - remove a VLAN address based filter rule
4507 * @hw: pointer to the hardware structure
4508 * @v_list: list of VLAN and forwarding information
/* Public wrapper: validates inputs, resolves the VLAN recipe, and delegates
 * to ice_remove_vlan_rule().
 */
4512 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4514 struct ice_sw_recipe *recp_list;
4517 return ICE_ERR_PARAM;
4519 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
4520 return ice_remove_vlan_rule(hw, v_list, recp_list);
4524 * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
4525 * @hw: pointer to the hardware structure
4526 * @v_list: list of MAC VLAN entries and forwarding information
4527 * @recp_list: list from which function remove MAC VLAN
/* NOTE(review): the recp_list parameter is dead — it is unconditionally
 * overwritten with hw->switch_info's MAC_VLAN recipe on the first statement
 * below, unlike the sibling ice_remove_vlan_rule/ice_remove_mac_rule which
 * honor the caller's recipe. Either the overwrite or the parameter should
 * go; confirm intent with callers before changing.
 */
4529 static enum ice_status
4530 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4531 struct ice_sw_recipe *recp_list)
4533 struct ice_fltr_list_entry *v_list_itr, *tmp;
4535 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4536 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4538 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4540 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4541 return ICE_ERR_PARAM;
4542 v_list_itr->status =
4543 ice_remove_rule_internal(hw, recp_list,
4545 if (v_list_itr->status)
4546 return v_list_itr->status;
4552 * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
4553 * @hw: pointer to the hardware structure
4554 * @mv_list: list of MAC VLAN and forwarding information
/* Public wrapper: validates inputs, resolves the MAC_VLAN recipe, and
 * delegates to ice_remove_mac_vlan_rule().
 */
4557 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4559 struct ice_sw_recipe *recp_list;
4561 if (!mv_list || !hw)
4562 return ICE_ERR_PARAM;
4564 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4565 return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
4569 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
4570 * @fm_entry: filter entry to inspect
4571 * @vsi_handle: VSI handle to compare with filter info
/* True when the filter forwards directly to this VSI, or forwards to a VSI
 * list whose bitmap contains this VSI handle.
 */
4574 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
4576 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
4577 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
4578 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
4579 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
4584 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
4585 * @hw: pointer to the hardware structure
4586 * @vsi_handle: VSI handle to remove filters from
4587 * @vsi_list_head: pointer to the list to add entry to
4588 * @fi: pointer to fltr_info of filter entry to copy & add
4590 * Helper function, used when creating a list of filters to remove from
4591 * a specific VSI. The entry added to vsi_list_head is a COPY of the
4592 * original filter entry, with the exception of fltr_info.fltr_act and
4593 * fltr_info.fwd_id fields. These are set such that later logic can
4594 * extract which VSI to remove the fltr from, and pass on that information.
4596 static enum ice_status
4597 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4598 struct LIST_HEAD_TYPE *vsi_list_head,
4599 struct ice_fltr_info *fi)
4601 struct ice_fltr_list_entry *tmp;
4603 /* this memory is freed up in the caller function
4604 * once filters for this VSI are removed
4606 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
/* Allocation failure is the only error path in this helper */
4608 return ICE_ERR_NO_MEMORY;
4610 tmp->fltr_info = *fi;
4612 /* Overwrite these fields to indicate which VSI to remove filter from,
4613 * so find and remove logic can extract the information from the
4614 * list entries. Note that original entries will still have proper
4617 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
4618 tmp->fltr_info.vsi_handle = vsi_handle;
4619 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4621 LIST_ADD(&tmp->list_entry, vsi_list_head);
4627 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4628 * @hw: pointer to the hardware structure
4629 * @vsi_handle: VSI handle to remove filters from
4630 * @lkup_list_head: pointer to the list that has certain lookup type filters
4631 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4633 * Locates all filters in lkup_list_head that are used by the given VSI,
4634 * and adds COPIES of those entries to vsi_list_head (intended to be used
4635 * to remove the listed filters).
4636 * Note that this means all entries in vsi_list_head must be explicitly
4637 * deallocated by the caller when done with list.
4639 static enum ice_status
4640 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4641 struct LIST_HEAD_TYPE *lkup_list_head,
4642 struct LIST_HEAD_TYPE *vsi_list_head)
4644 struct ice_fltr_mgmt_list_entry *fm_entry;
4645 enum ice_status status = ICE_SUCCESS;
4647 /* check to make sure VSI ID is valid and within boundary */
4648 if (!ice_is_vsi_valid(hw, vsi_handle))
4649 return ICE_ERR_PARAM;
4651 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
4652 ice_fltr_mgmt_list_entry, list_entry) {
4653 struct ice_fltr_info *fi;
4655 fi = &fm_entry->fltr_info;
/* NOTE(review): "!fi" can never be true here (fi is the address of an
 * embedded member) — the real filter is ice_vsi_uses_fltr().
 */
4656 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
/* Copy matching entries onto the caller's removal list */
4659 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4668 * ice_determine_promisc_mask
4669 * @fi: filter info to parse
4671 * Helper function to determine which ICE_PROMISC_ mask corresponds
4672 * to given filter into.
4674 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4676 u16 vid = fi->l_data.mac_vlan.vlan_id;
4677 u8 *macaddr = fi->l_data.mac.mac_addr;
4678 bool is_tx_fltr = false;
4679 u8 promisc_mask = 0;
/* Direction: TX filters map to *_TX promisc bits, otherwise *_RX */
4681 if (fi->flag == ICE_FLTR_TX)
/* Classify the destination MAC: broadcast, multicast, or unicast */
4684 if (IS_BROADCAST_ETHER_ADDR(macaddr))
4685 promisc_mask |= is_tx_fltr ?
4686 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4687 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
4688 promisc_mask |= is_tx_fltr ?
4689 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4690 else if (IS_UNICAST_ETHER_ADDR(macaddr))
4691 promisc_mask |= is_tx_fltr ?
4692 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
/* VLAN promisc bit is added on top of the MAC classification */
4694 promisc_mask |= is_tx_fltr ?
4695 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4697 return promisc_mask;
4701 * _ice_get_vsi_promisc - get promiscuous mode of given VSI
4702 * @hw: pointer to the hardware structure
4703 * @vsi_handle: VSI handle to retrieve info from
4704 * @promisc_mask: pointer to mask to be filled in
4705 * @vid: VLAN ID of promisc VLAN VSI
4706 * @sw: pointer to switch info struct for which function add rule
4708 static enum ice_status
4709 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4710 u16 *vid, struct ice_switch_info *sw)
4712 struct ice_fltr_mgmt_list_entry *itr;
4713 struct LIST_HEAD_TYPE *rule_head;
4714 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4716 if (!ice_is_vsi_valid(hw, vsi_handle))
4717 return ICE_ERR_PARAM;
/* Scan the PROMISC recipe's rule list under its lock and accumulate
 * the promisc bits of every rule this VSI participates in.
 */
4721 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
4722 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
4724 ice_acquire_lock(rule_lock);
4725 LIST_FOR_EACH_ENTRY(itr, rule_head,
4726 ice_fltr_mgmt_list_entry, list_entry) {
4727 /* Continue if this filter doesn't apply to this VSI or the
4728 * VSI ID is not in the VSI map for this filter
4730 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4733 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4735 ice_release_lock(rule_lock);
4741 * ice_get_vsi_promisc - get promiscuous mode of given VSI
4742 * @hw: pointer to the hardware structure
4743 * @vsi_handle: VSI handle to retrieve info from
4744 * @promisc_mask: pointer to mask to be filled in
4745 * @vid: VLAN ID of promisc VLAN VSI
/* Public wrapper: delegates to _ice_get_vsi_promisc() with the
 * default hw->switch_info.
 */
4748 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4751 return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
4752 vid, hw->switch_info);
4756 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4757 * @hw: pointer to the hardware structure
4758 * @vsi_handle: VSI handle to retrieve info from
4759 * @promisc_mask: pointer to mask to be filled in
4760 * @vid: VLAN ID of promisc VLAN VSI
4761 * @sw: pointer to switch info struct for which function add rule
4763 static enum ice_status
4764 _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4765 u16 *vid, struct ice_switch_info *sw)
4767 struct ice_fltr_mgmt_list_entry *itr;
4768 struct LIST_HEAD_TYPE *rule_head;
4769 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4771 if (!ice_is_vsi_valid(hw, vsi_handle))
4772 return ICE_ERR_PARAM;
/* Same scan as _ice_get_vsi_promisc(), but over the PROMISC_VLAN
 * recipe's rule list.
 */
4776 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
4777 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
4779 ice_acquire_lock(rule_lock);
4780 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
4782 /* Continue if this filter doesn't apply to this VSI or the
4783 * VSI ID is not in the VSI map for this filter
4785 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4788 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4790 ice_release_lock(rule_lock);
4796 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4797 * @hw: pointer to the hardware structure
4798 * @vsi_handle: VSI handle to retrieve info from
4799 * @promisc_mask: pointer to mask to be filled in
4800 * @vid: VLAN ID of promisc VLAN VSI
/* Public wrapper: delegates to _ice_get_vsi_vlan_promisc() with the
 * default hw->switch_info.
 */
4803 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4806 return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
4807 vid, hw->switch_info);
4811 * ice_remove_promisc - Remove promisc based filter rules
4812 * @hw: pointer to the hardware structure
4813 * @recp_id: recipe ID for which the rule needs to removed
4814 * @v_list: list of promisc entries
4816 static enum ice_status
4817 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
4818 struct LIST_HEAD_TYPE *v_list)
4820 struct ice_fltr_list_entry *v_list_itr, *tmp;
4821 struct ice_sw_recipe *recp_list;
4823 recp_list = &hw->switch_info->recp_list[recp_id];
/* Remove each listed rule; bail out on the first failure and record the
 * per-entry status so the caller can see which entry failed.
 */
4824 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4826 v_list_itr->status =
4827 ice_remove_rule_internal(hw, recp_list, v_list_itr);
4828 if (v_list_itr->status)
4829 return v_list_itr->status;
4835 * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
4836 * @hw: pointer to the hardware structure
4837 * @vsi_handle: VSI handle to clear mode
4838 * @promisc_mask: mask of promiscuous config bits to clear
4839 * @vid: VLAN ID to clear VLAN promiscuous
4840 * @sw: pointer to switch info struct for which function add rule
4842 static enum ice_status
4843 _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4844 u16 vid, struct ice_switch_info *sw)
4846 struct ice_fltr_list_entry *fm_entry, *tmp;
4847 struct LIST_HEAD_TYPE remove_list_head;
4848 struct ice_fltr_mgmt_list_entry *itr;
4849 struct LIST_HEAD_TYPE *rule_head;
4850 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4851 enum ice_status status = ICE_SUCCESS;
4854 if (!ice_is_vsi_valid(hw, vsi_handle))
4855 return ICE_ERR_PARAM;
/* VLAN promisc bits live in the PROMISC_VLAN recipe; everything else
 * in the plain PROMISC recipe.
 */
4857 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
4858 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4860 recipe_id = ICE_SW_LKUP_PROMISC;
4862 rule_head = &sw->recp_list[recipe_id].filt_rules;
4863 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4865 INIT_LIST_HEAD(&remove_list_head);
/* Phase 1: under the rule lock, collect COPIES of the rules to remove */
4867 ice_acquire_lock(rule_lock);
4868 LIST_FOR_EACH_ENTRY(itr, rule_head,
4869 ice_fltr_mgmt_list_entry, list_entry) {
4870 struct ice_fltr_info *fltr_info;
4871 u8 fltr_promisc_mask = 0;
4873 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4875 fltr_info = &itr->fltr_info;
/* For VLAN promisc rules, only rules for the requested VID qualify */
4877 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4878 vid != fltr_info->l_data.mac_vlan.vlan_id)
4881 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4883 /* Skip if filter is not completely specified by given mask */
4884 if (fltr_promisc_mask & ~promisc_mask)
4887 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
/* Copy failure: drop the lock and free whatever was collected */
4891 ice_release_lock(rule_lock);
4892 goto free_fltr_list;
4895 ice_release_lock(rule_lock);
/* Phase 2: remove the collected rules outside the lock */
4897 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
/* Free the temporary copies regardless of removal outcome */
4900 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4901 ice_fltr_list_entry, list_entry) {
4902 LIST_DEL(&fm_entry->list_entry);
4903 ice_free(hw, fm_entry);
4910 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
4911 * @hw: pointer to the hardware structure
4912 * @vsi_handle: VSI handle to clear mode
4913 * @promisc_mask: mask of promiscuous config bits to clear
4914 * @vid: VLAN ID to clear VLAN promiscuous
/* Public wrapper: delegates to _ice_clear_vsi_promisc() with the
 * default hw->switch_info.
 */
4917 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
4918 u8 promisc_mask, u16 vid)
4920 return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
4921 vid, hw->switch_info);
4925 * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4926 * @hw: pointer to the hardware structure
4927 * @vsi_handle: VSI handle to configure
4928 * @promisc_mask: mask of promiscuous config bits
4929 * @vid: VLAN ID to set VLAN promiscuous
4930 * @lport: logical port number to configure promisc mode
4931 * @sw: pointer to switch info struct for which function add rule
4933 static enum ice_status
4934 _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4935 u16 vid, u8 lport, struct ice_switch_info *sw)
4937 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4938 struct ice_fltr_list_entry f_list_entry;
4939 struct ice_fltr_info new_fltr;
4940 enum ice_status status = ICE_SUCCESS;
4946 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4948 if (!ice_is_vsi_valid(hw, vsi_handle))
4949 return ICE_ERR_PARAM;
4950 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4952 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
/* VLAN promisc requests go through the PROMISC_VLAN recipe and carry
 * the VLAN ID; everything else uses the plain PROMISC recipe.
 */
4954 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4955 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4956 new_fltr.l_data.mac_vlan.vlan_id = vid;
4957 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4959 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4960 recipe_id = ICE_SW_LKUP_PROMISC;
4963 /* Separate filters must be set for each direction/packet type
4964 * combination, so we will loop over the mask value, store the
4965 * individual type, and clear it out in the input mask as it
4968 while (promisc_mask) {
4969 struct ice_sw_recipe *recp_list;
/* Peel off exactly one direction/packet-type bit per iteration */
4975 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4976 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4977 pkt_type = UCAST_FLTR;
4978 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4979 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4980 pkt_type = UCAST_FLTR;
4982 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4983 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4984 pkt_type = MCAST_FLTR;
4985 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4986 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4987 pkt_type = MCAST_FLTR;
4989 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4990 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4991 pkt_type = BCAST_FLTR;
4992 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4993 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4994 pkt_type = BCAST_FLTR;
4998 /* Check for VLAN promiscuous flag */
4999 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
5000 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
5001 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
5002 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
5006 /* Set filter DA based on packet type */
5007 mac_addr = new_fltr.l_data.mac.mac_addr;
5008 if (pkt_type == BCAST_FLTR) {
/* Broadcast: all-ones destination MAC */
5009 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
5010 } else if (pkt_type == MCAST_FLTR ||
5011 pkt_type == UCAST_FLTR) {
5012 /* Use the dummy ether header DA */
5013 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
5014 ICE_NONDMA_TO_NONDMA);
5015 if (pkt_type == MCAST_FLTR)
5016 mac_addr[0] |= 0x1; /* Set multicast bit */
5019 /* Need to reset this to zero for all iterations */
/* TX filters use the VSI as source; RX filters use the logical port */
5022 new_fltr.flag |= ICE_FLTR_TX;
5023 new_fltr.src = hw_vsi_id;
5025 new_fltr.flag |= ICE_FLTR_RX;
5026 new_fltr.src = lport;
5029 new_fltr.fltr_act = ICE_FWD_TO_VSI;
5030 new_fltr.vsi_handle = vsi_handle;
5031 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
5032 f_list_entry.fltr_info = new_fltr;
5033 recp_list = &sw->recp_list[recipe_id];
5035 status = ice_add_rule_internal(hw, recp_list, lport,
/* Stop adding further rules on the first failure */
5037 if (status != ICE_SUCCESS)
5038 goto set_promisc_exit;
5046 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5047 * @hw: pointer to the hardware structure
5048 * @vsi_handle: VSI handle to configure
5049 * @promisc_mask: mask of promiscuous config bits
5050 * @vid: VLAN ID to set VLAN promiscuous
/* Public wrapper: delegates to _ice_set_vsi_promisc() using the
 * port's lport and the default hw->switch_info.
 */
5053 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5056 return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
5057 hw->port_info->lport,
5062 * _ice_set_vlan_vsi_promisc
5063 * @hw: pointer to the hardware structure
5064 * @vsi_handle: VSI handle to configure
5065 * @promisc_mask: mask of promiscuous config bits
5066 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5067 * @lport: logical port number to configure promisc mode
5068 * @sw: pointer to switch info struct for which function add rule
5070 * Configure VSI with all associated VLANs to given promiscuous mode(s)
5072 static enum ice_status
5073 _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5074 bool rm_vlan_promisc, u8 lport,
5075 struct ice_switch_info *sw)
5077 struct ice_fltr_list_entry *list_itr, *tmp;
5078 struct LIST_HEAD_TYPE vsi_list_head;
5079 struct LIST_HEAD_TYPE *vlan_head;
5080 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
5081 enum ice_status status;
/* Collect (under the VLAN rule lock) every VLAN filter this VSI uses */
5084 INIT_LIST_HEAD(&vsi_list_head);
5085 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
5086 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
5087 ice_acquire_lock(vlan_lock);
5088 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
5090 ice_release_lock(vlan_lock);
5092 goto free_fltr_list;
/* Apply (or clear) the promisc mode for each collected VLAN ID */
5094 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
5096 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
5097 if (rm_vlan_promisc)
5098 status = _ice_clear_vsi_promisc(hw, vsi_handle,
5102 status = _ice_set_vsi_promisc(hw, vsi_handle,
5103 promisc_mask, vlan_id,
/* Free the temporary filter copies built above */
5110 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
5111 ice_fltr_list_entry, list_entry) {
5112 LIST_DEL(&list_itr->list_entry);
5113 ice_free(hw, list_itr);
5119 * ice_set_vlan_vsi_promisc
5120 * @hw: pointer to the hardware structure
5121 * @vsi_handle: VSI handle to configure
5122 * @promisc_mask: mask of promiscuous config bits
5123 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5125 * Configure VSI with all associated VLANs to given promiscuous mode(s)
/* Public wrapper: delegates to _ice_set_vlan_vsi_promisc() using the
 * port's lport and the default hw->switch_info.
 */
5128 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5129 bool rm_vlan_promisc)
5131 return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
5132 rm_vlan_promisc, hw->port_info->lport,
5137 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
5138 * @hw: pointer to the hardware structure
5139 * @vsi_handle: VSI handle to remove filters from
5140 * @recp_list: recipe list from which function remove fltr
5141 * @lkup: switch rule filter lookup type
5144 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
5145 struct ice_sw_recipe *recp_list,
5146 enum ice_sw_lkup_type lkup)
5148 struct ice_fltr_list_entry *fm_entry;
5149 struct LIST_HEAD_TYPE remove_list_head;
5150 struct LIST_HEAD_TYPE *rule_head;
5151 struct ice_fltr_list_entry *tmp;
5152 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5153 enum ice_status status;
/* Build a copy-list of this VSI's filters for the given lookup type */
5155 INIT_LIST_HEAD(&remove_list_head);
5156 rule_lock = &recp_list[lkup].filt_rule_lock;
5157 rule_head = &recp_list[lkup].filt_rules;
5158 ice_acquire_lock(rule_lock);
5159 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
5161 ice_release_lock(rule_lock);
/* Dispatch to the remover that matches the lookup type */
5166 case ICE_SW_LKUP_MAC:
5167 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
5169 case ICE_SW_LKUP_VLAN:
5170 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
5172 case ICE_SW_LKUP_PROMISC:
5173 case ICE_SW_LKUP_PROMISC_VLAN:
/* lkup doubles as the recipe ID for the promisc removers */
5174 ice_remove_promisc(hw, lkup, &remove_list_head);
5176 case ICE_SW_LKUP_MAC_VLAN:
5177 ice_remove_mac_vlan(hw, &remove_list_head);
5179 case ICE_SW_LKUP_ETHERTYPE:
5180 case ICE_SW_LKUP_ETHERTYPE_MAC:
5181 ice_remove_eth_mac(hw, &remove_list_head);
5183 case ICE_SW_LKUP_DFLT:
5184 ice_debug(hw, ICE_DBG_SW,
5185 "Remove filters for this lookup type hasn't been implemented yet\n");
5187 case ICE_SW_LKUP_LAST:
5188 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
/* Free the temporary copies collected above */
5192 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
5193 ice_fltr_list_entry, list_entry) {
5194 LIST_DEL(&fm_entry->list_entry);
5195 ice_free(hw, fm_entry);
5200 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
5201 * @hw: pointer to the hardware structure
5202 * @vsi_handle: VSI handle to remove filters from
5203 * @sw: pointer to switch info struct
5206 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
5207 struct ice_switch_info *sw)
5209 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
/* Sweep every lookup type the VSI may have filters under */
5211 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5212 sw->recp_list, ICE_SW_LKUP_MAC);
5213 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5214 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
5215 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5216 sw->recp_list, ICE_SW_LKUP_PROMISC);
5217 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5218 sw->recp_list, ICE_SW_LKUP_VLAN);
5219 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5220 sw->recp_list, ICE_SW_LKUP_DFLT);
5221 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5222 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
5223 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5224 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
5225 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5226 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
5230 * ice_remove_vsi_fltr - Remove all filters for a VSI
5231 * @hw: pointer to the hardware structure
5232 * @vsi_handle: VSI handle to remove filters from
/* Public wrapper: delegates to ice_remove_vsi_fltr_rule() with the
 * default hw->switch_info.
 */
5234 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
5236 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
5240 * ice_alloc_res_cntr - allocating resource counter
5241 * @hw: pointer to the hardware structure
5242 * @type: type of resource
5243 * @alloc_shared: if set it is shared else dedicated
5244 * @num_items: number of entries requested for FD resource type
5245 * @counter_id: counter index returned by AQ call
5248 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
5251 struct ice_aqc_alloc_free_res_elem *buf;
5252 enum ice_status status;
5255 /* Allocate resource */
5256 buf_len = sizeof(*buf);
5257 buf = (struct ice_aqc_alloc_free_res_elem *)
5258 ice_malloc(hw, buf_len);
5260 return ICE_ERR_NO_MEMORY;
/* Build the admin-queue request: item count plus type/shared flags */
5262 buf->num_elems = CPU_TO_LE16(num_items);
5263 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
5264 ICE_AQC_RES_TYPE_M) | alloc_shared);
5266 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
5267 ice_aqc_opc_alloc_res, NULL);
/* On success firmware returns the allocated counter index */
5271 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
5279 * ice_free_res_cntr - free resource counter
5280 * @hw: pointer to the hardware structure
5281 * @type: type of resource
5282 * @alloc_shared: if set it is shared else dedicated
5283 * @num_items: number of entries to be freed for FD resource type
5284 * @counter_id: counter ID resource which needs to be freed
5287 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
5290 struct ice_aqc_alloc_free_res_elem *buf;
5291 enum ice_status status;
5295 buf_len = sizeof(*buf);
5296 buf = (struct ice_aqc_alloc_free_res_elem *)
5297 ice_malloc(hw, buf_len);
5299 return ICE_ERR_NO_MEMORY;
/* Mirror of ice_alloc_res_cntr(): same request layout, free opcode,
 * and the counter to release passed in sw_resp.
 */
5301 buf->num_elems = CPU_TO_LE16(num_items);
5302 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
5303 ICE_AQC_RES_TYPE_M) | alloc_shared);
5304 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
5306 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
5307 ice_aqc_opc_free_res, NULL);
5309 ice_debug(hw, ICE_DBG_SW,
5310 "counter resource could not be freed\n");
5317 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
5318 * @hw: pointer to the hardware structure
5319 * @counter_id: returns counter index
/* Convenience wrapper: allocate one dedicated VLAN counter */
5321 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
5323 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
5324 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
5329 * ice_free_vlan_res_counter - Free counter resource for VLAN type
5330 * @hw: pointer to the hardware structure
5331 * @counter_id: counter index to be freed
/* Convenience wrapper: release one dedicated VLAN counter */
5333 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
5335 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
5336 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
5341 * ice_alloc_res_lg_act - add large action resource
5342 * @hw: pointer to the hardware structure
5343 * @l_id: large action ID to fill it in
5344 * @num_acts: number of actions to hold with a large action entry
5346 static enum ice_status
5347 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
5349 struct ice_aqc_alloc_free_res_elem *sw_buf;
5350 enum ice_status status;
/* Large actions hold at most ICE_MAX_LG_ACT entries and never zero */
5353 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
5354 return ICE_ERR_PARAM;
5356 /* Allocate resource for large action */
5357 buf_len = sizeof(*sw_buf);
5358 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
5359 ice_malloc(hw, buf_len);
5361 return ICE_ERR_NO_MEMORY;
5363 sw_buf->num_elems = CPU_TO_LE16(1);
5365 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
5366 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
5367 * If num_acts is greater than 2, then use
5368 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
5369 * The num_acts cannot exceed 4. This was ensured at the
5370 * beginning of the function.
/* NOTE(review): this comment previously said WIDE_TABLE_3 for
 * num_acts == 2, which contradicts the code below (WIDE_TABLE_2);
 * confirm WIDE_TABLE_2 is the intended resource type.
 */
5373 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
5374 else if (num_acts == 2)
5375 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
5377 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
5379 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
5380 ice_aqc_opc_alloc_res, NULL);
/* On success firmware returns the large-action table index */
5382 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
5384 ice_free(hw, sw_buf);
5389 * ice_add_mac_with_sw_marker - add filter with sw marker
5390 * @hw: pointer to the hardware structure
5391 * @f_info: filter info structure containing the MAC filter information
5392 * @sw_marker: sw marker to tag the Rx descriptor with
5395 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
5398 struct ice_fltr_mgmt_list_entry *m_entry;
5399 struct ice_fltr_list_entry fl_info;
5400 struct ice_sw_recipe *recp_list;
5401 struct LIST_HEAD_TYPE l_head;
5402 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5403 enum ice_status ret;
/* Only plain MAC filters that forward to a valid VSI, with a real
 * marker ID, are eligible for a SW-marker large action.
 */
5407 if (f_info->fltr_act != ICE_FWD_TO_VSI)
5408 return ICE_ERR_PARAM;
5410 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5411 return ICE_ERR_PARAM;
5413 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
5414 return ICE_ERR_PARAM;
5416 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5417 return ICE_ERR_PARAM;
5418 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5420 /* Add filter if it doesn't exist so then the adding of large
5421 * action always results in update
5424 INIT_LIST_HEAD(&l_head);
5425 fl_info.fltr_info = *f_info;
5426 LIST_ADD(&fl_info.list_entry, &l_head);
5428 entry_exists = false;
5429 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5430 hw->port_info->lport);
/* Remember pre-existence so we don't delete a caller-owned rule later */
5431 if (ret == ICE_ERR_ALREADY_EXISTS)
5432 entry_exists = true;
5436 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5437 rule_lock = &recp_list->filt_rule_lock;
5438 ice_acquire_lock(rule_lock);
5439 /* Get the book keeping entry for the filter */
5440 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5444 /* If counter action was enabled for this rule then don't enable
5445 * sw marker large action
5447 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5448 ret = ICE_ERR_PARAM;
5452 /* if same marker was added before */
5453 if (m_entry->sw_marker_id == sw_marker) {
5454 ret = ICE_ERR_ALREADY_EXISTS;
5458 /* Allocate a hardware table entry to hold large act. Three actions
5459 * for marker based large action
5461 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
5465 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5468 /* Update the switch rule to add the marker action */
5469 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
5471 ice_release_lock(rule_lock);
5476 ice_release_lock(rule_lock);
5477 /* only remove entry if it did not exist previously */
5479 ret = ice_remove_mac(hw, &l_head);
5485 * ice_add_mac_with_counter - add filter with counter enabled
5486 * @hw: pointer to the hardware structure
5487 * @f_info: pointer to filter info structure containing the MAC filter
5491 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
5493 struct ice_fltr_mgmt_list_entry *m_entry;
5494 struct ice_fltr_list_entry fl_info;
5495 struct ice_sw_recipe *recp_list;
5496 struct LIST_HEAD_TYPE l_head;
5497 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5498 enum ice_status ret;
/* Same eligibility rules as ice_add_mac_with_sw_marker(): plain MAC
 * filter forwarding to a valid VSI.
 */
5503 if (f_info->fltr_act != ICE_FWD_TO_VSI)
5504 return ICE_ERR_PARAM;
5506 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5507 return ICE_ERR_PARAM;
5509 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5510 return ICE_ERR_PARAM;
5511 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5512 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5514 entry_exist = false;
5516 rule_lock = &recp_list->filt_rule_lock;
5518 /* Add filter if it doesn't exist so then the adding of large
5519 * action always results in update
5521 INIT_LIST_HEAD(&l_head);
5523 fl_info.fltr_info = *f_info;
5524 LIST_ADD(&fl_info.list_entry, &l_head);
5526 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5527 hw->port_info->lport);
/* A pre-existing rule must not be removed on the error path below */
5528 if (ret == ICE_ERR_ALREADY_EXISTS)
5533 ice_acquire_lock(rule_lock);
5534 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5536 ret = ICE_ERR_BAD_PTR;
5540 /* Don't enable counter for a filter for which sw marker was enabled */
5541 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
5542 ret = ICE_ERR_PARAM;
5546 /* If a counter was already enabled then don't need to add again */
5547 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5548 ret = ICE_ERR_ALREADY_EXISTS;
5552 /* Allocate a hardware table entry to VLAN counter */
5553 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
5557 /* Allocate a hardware table entry to hold large act. Two actions for
5558 * counter based large action
5560 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
5564 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5567 /* Update the switch rule to add the counter action */
5568 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
5570 ice_release_lock(rule_lock);
5575 ice_release_lock(rule_lock);
5576 /* only remove entry if it did not exist previously */
5578 ret = ice_remove_mac(hw, &l_head);
5583 /* This is mapping table entry that maps every word within a given protocol
5584 * structure to the real byte offset as per the specification of that
5586 * for example dst address is 3 words in ethertype header and corresponding
5587 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
5588 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
5589 * matching entry describing its field. This needs to be updated if new
5590 * structure is added to that union.
/* Each row: protocol type -> list of 16-bit word byte-offsets within
 * that protocol's header (tunnel headers like VXLAN/GENEVE start at
 * offset 8, past their outer UDP fields).
 */
5592 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
5593 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
5594 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
5595 { ICE_ETYPE_OL, { 0 } },
5596 { ICE_VLAN_OFOS, { 0, 2 } },
5597 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
5598 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
5599 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
5600 26, 28, 30, 32, 34, 36, 38 } },
5601 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
5602 26, 28, 30, 32, 34, 36, 38 } },
5603 { ICE_TCP_IL, { 0, 2 } },
5604 { ICE_UDP_OF, { 0, 2 } },
5605 { ICE_UDP_ILOS, { 0, 2 } },
5606 { ICE_SCTP_IL, { 0, 2 } },
5607 { ICE_VXLAN, { 8, 10, 12, 14 } },
5608 { ICE_GENEVE, { 8, 10, 12, 14 } },
5609 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
5610 { ICE_NVGRE, { 0, 2, 4, 6 } },
5611 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
5612 { ICE_PPPOE, { 0, 2, 4, 6 } },
5613 { ICE_PFCP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
5614 { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
5615 { ICE_ESP, { 0, 2, 4, 6 } },
5616 { ICE_AH, { 0, 2, 4, 6, 8, 10 } },
5617 { ICE_NAT_T, { 8, 10, 12, 14 } },
5620 /* The following table describes preferred grouping of recipes.
5621 * If a recipe that needs to be programmed is a superset or matches one of the
5622 * following combinations, then the recipe needs to be chained as per the
/* Maps each software protocol type to the hardware protocol ID used
 * when programming recipes. Note that several tunnel types share one
 * HW ID (e.g. VXLAN/GENEVE/GTP all map to the outer-UDP HW ID).
 */
5626 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
5627 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
5628 { ICE_MAC_IL, ICE_MAC_IL_HW },
5629 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
5630 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
5631 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
5632 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
5633 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
5634 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
5635 { ICE_TCP_IL, ICE_TCP_IL_HW },
5636 { ICE_UDP_OF, ICE_UDP_OF_HW },
5637 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
5638 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
5639 { ICE_VXLAN, ICE_UDP_OF_HW },
5640 { ICE_GENEVE, ICE_UDP_OF_HW },
5641 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
5642 { ICE_NVGRE, ICE_GRE_OF_HW },
5643 { ICE_GTP, ICE_UDP_OF_HW },
5644 { ICE_PPPOE, ICE_PPPOE_HW },
5645 { ICE_PFCP, ICE_UDP_ILOS_HW },
5646 { ICE_L2TPV3, ICE_L2TPV3_HW },
5647 { ICE_ESP, ICE_ESP_HW },
5648 { ICE_AH, ICE_AH_HW },
5649 { ICE_NAT_T, ICE_UDP_ILOS_HW },
5653 * ice_find_recp - find a recipe
5654 * @hw: pointer to the hardware structure
5655 * @lkup_exts: extension sequence to match
5657 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
5659 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
5660 enum ice_sw_tunnel_type tun_type)
5662 bool refresh_required = true;
5663 struct ice_sw_recipe *recp;
5666 /* Walk through existing recipes to find a match */
5667 recp = hw->switch_info->recp_list;
5668 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5669 /* If recipe was not created for this ID, in SW bookkeeping,
5670 * check if FW has an entry for this recipe. If the FW has an
5671 * entry update it in our SW bookkeeping and continue with the
5674 if (!recp[i].recp_created)
5675 if (ice_get_recp_frm_fw(hw,
5676 hw->switch_info->recp_list, i,
5680 /* Skip inverse action recipes */
5681 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
5682 ICE_AQ_RECIPE_ACT_INV_ACT)
5685 /* if number of words we are looking for match */
5686 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
5687 struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
5688 struct ice_fv_word *be = lkup_exts->fv_words;
5689 u16 *cr = recp[i].lkup_exts.field_mask;
5690 u16 *de = lkup_exts->field_mask;
5694 /* ar, cr, and qr are related to the recipe words, while
5695 * be, de, and pe are related to the lookup words
/* For every lookup word, search the recipe's words for one with
 * the same offset and protocol ID (order-independent match).
 */
5697 for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
5698 for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
5700 if (ar[qr].off == be[pe].off &&
5701 ar[qr].prot_id == be[pe].prot_id &&
5703 /* Found the "pe"th word in the
5708 /* After walking through all the words in the
5709 * "i"th recipe if "p"th word was not found then
5710 * this recipe is not what we are looking for.
5711 * So break out from this loop and try the next
5714 if (qr >= recp[i].lkup_exts.n_val_words) {
5719 /* If for "i"th recipe the found was never set to false
5720 * then it means we found our match
/* Tunnel type must also match for the recipe to be reusable */
5722 if (tun_type == recp[i].tun_type && found)
5723 return i; /* Return the recipe ID */
5726 return ICE_MAX_NUM_RECIPES;
5730 * ice_prot_type_to_id - get protocol ID from protocol type
5731 * @type: protocol type
5732 * @id: pointer to variable that will receive the ID
5734 * Returns true if found, false otherwise
/* Linear scan of ice_prot_id_tbl translating a SW protocol type to its
 * HW protocol ID; on a hit the ID is written through @id.
 */
5736 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
5740 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
5741 if (ice_prot_id_tbl[i].type == type) {
5742 *id = ice_prot_id_tbl[i].protocol_id;
5749 * ice_fill_valid_words - calculate valid words in a lookup rule
5750 * @rule: advanced rule with lookup information
5751 * @lkup_exts: byte offset extractions of the words that are valid
5753 * calculate valid words in a lookup rule using mask value
/* For every non-zero 16-bit mask word in @rule, append the matching
 * (protocol ID, byte offset, mask) triple to @lkup_exts. Returns the
 * number of words added (delta of n_val_words).
 */
5756 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
5757 struct ice_prot_lkup_ext *lkup_exts)
5759 u8 j, word, prot_id, ret_val;
5761 if (!ice_prot_type_to_id(rule->type, &prot_id))
5764 word = lkup_exts->n_val_words;
/* Walk the rule's mask union one u16 at a time; only words with a
 * non-zero mask participate in matching.
 */
5766 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
5767 if (((u16 *)&rule->m_u)[j] &&
5768 rule->type < ARRAY_SIZE(ice_prot_ext)) {
5769 /* No more space to accommodate */
5770 if (word >= ICE_MAX_CHAIN_WORDS)
5772 lkup_exts->fv_words[word].off =
5773 ice_prot_ext[rule->type].offs[j];
5774 lkup_exts->fv_words[word].prot_id =
5775 ice_prot_id_tbl[rule->type].protocol_id;
5776 lkup_exts->field_mask[word] =
5777 BE16_TO_CPU(((__be16 *)&rule->m_u)[j]);
5781 ret_val = word - lkup_exts->n_val_words;
5782 lkup_exts->n_val_words = word;
5788 * ice_create_first_fit_recp_def - Create a recipe grouping
5789 * @hw: pointer to the hardware structure
5790 * @lkup_exts: an array of protocol header extractions
5791 * @rg_list: pointer to a list that stores new recipe groups
5792 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
5794 * Using first fit algorithm, take all the words that are still not done
5795 * and start grouping them in 4-word groups. Each group makes up one
/* First-fit packing of not-yet-done lookup words into recipe groups of at
 * most ICE_NUM_WORDS_RECIPE protocol/offset pairs; each group is appended
 * to @rg_list as a new ice_recp_grp_entry.
 */
5798 static enum ice_status
5799 ice_create_first_fit_recp_def(struct ice_hw *hw,
5800 struct ice_prot_lkup_ext *lkup_exts,
5801 struct LIST_HEAD_TYPE *rg_list,
5804 struct ice_pref_recipe_group *grp = NULL;
/* Zero valid words: still emit one empty group (profile-only rule). */
5809 if (!lkup_exts->n_val_words) {
5810 struct ice_recp_grp_entry *entry;
5812 entry = (struct ice_recp_grp_entry *)
5813 ice_malloc(hw, sizeof(*entry));
5815 return ICE_ERR_NO_MEMORY;
5816 LIST_ADD(&entry->l_entry, rg_list);
5817 grp = &entry->r_group;
5819 grp->n_val_pairs = 0;
5822 /* Walk through every word in the rule to check if it is not done. If so
5823 * then this word needs to be part of a new recipe.
5825 for (j = 0; j < lkup_exts->n_val_words; j++)
5826 if (!ice_is_bit_set(lkup_exts->done, j)) {
5828 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
5829 struct ice_recp_grp_entry *entry;
5831 entry = (struct ice_recp_grp_entry *)
5832 ice_malloc(hw, sizeof(*entry));
5834 return ICE_ERR_NO_MEMORY;
5835 LIST_ADD(&entry->l_entry, rg_list);
5836 grp = &entry->r_group;
/* Copy this word's protocol/offset pair and mask into the
 * current group.
 */
5840 grp->pairs[grp->n_val_pairs].prot_id =
5841 lkup_exts->fv_words[j].prot_id;
5842 grp->pairs[grp->n_val_pairs].off =
5843 lkup_exts->fv_words[j].off;
5844 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
5852 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
5853 * @hw: pointer to the hardware structure
5854 * @fv_list: field vector with the extraction sequence information
5855 * @rg_list: recipe groupings with protocol-offset pairs
5857 * Helper function to fill in the field vector indices for protocol-offset
5858 * pairs. These indexes are then ultimately programmed into a recipe.
5860 static enum ice_status
5861 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
5862 struct LIST_HEAD_TYPE *rg_list)
5864 struct ice_sw_fv_list_entry *fv;
5865 struct ice_recp_grp_entry *rg;
5866 struct ice_fv_word *fv_ext;
5868 if (LIST_EMPTY(fv_list))
/* Use the first field vector as the reference extraction sequence for
 * resolving all protocol/offset pairs.
 */
5871 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
5872 fv_ext = fv->fv_ptr->ew;
5874 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
5877 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
5878 struct ice_fv_word *pr;
5883 pr = &rg->r_group.pairs[i];
5884 mask = rg->r_group.mask[i];
/* Scan the extraction sequence for a word matching this
 * protocol ID and byte offset.
 */
5886 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
5887 if (fv_ext[j].prot_id == pr->prot_id &&
5888 fv_ext[j].off == pr->off) {
5891 /* Store index of field vector */
5893 rg->fv_mask[i] = mask;
5897 /* Protocol/offset could not be found, caller gave an
5901 return ICE_ERR_PARAM;
5909 * ice_find_free_recp_res_idx - find free result indexes for recipe
5910 * @hw: pointer to hardware structure
5911 * @profiles: bitmap of profiles that will be associated with the new recipe
5912 * @free_idx: pointer to variable to receive the free index bitmap
5914 * The algorithm used here is:
5915 * 1. When creating a new recipe, create a set P which contains all
5916 * Profiles that will be associated with our new recipe
5918 * 2. For each Profile p in set P:
5919 * a. Add all recipes associated with Profile p into set R
5920 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
5921 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
5922 * i. Or just assume they all have the same possible indexes:
5924 * i.e., PossibleIndexes = 0x0000F00000000000
5926 * 3. For each Recipe r in set R:
5927 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
5928 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
5930 * FreeIndexes will contain the bits indicating the indexes free for use,
5931 * then the code needs to update the recipe[r].used_result_idx_bits to
5932 * indicate which indexes were selected for use by this recipe.
5935 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
5936 ice_bitmap_t *free_idx)
5938 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5939 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5940 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
5944 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5945 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5946 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
5947 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
/* Start from the full index space; profiles below intersect it down. */
5949 for (count = 0; count < ICE_MAX_FV_WORDS; count++)
5950 ice_set_bit(count, possible_idx);
5952 /* For each profile we are going to associate the recipe with, add the
5953 * recipes that are associated with that profile. This will give us
5954 * the set of recipes that our recipe may collide with. Also, determine
5955 * what possible result indexes are usable given this set of profiles.
5958 while (ICE_MAX_NUM_PROFILES >
5959 (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
5960 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
5961 ICE_MAX_NUM_RECIPES);
5962 ice_and_bitmap(possible_idx, possible_idx,
5963 hw->switch_info->prof_res_bm[bit],
5968 /* For each recipe that our new recipe may collide with, determine
5969 * which indexes have been used.
5971 for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
5972 if (ice_is_bit_set(recipes, bit)) {
5973 ice_or_bitmap(used_idx, used_idx,
5974 hw->switch_info->recp_list[bit].res_idxs,
/* free = possible XOR used (used is a subset of possible here). */
5978 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
5980 /* return number of free indexes */
5983 while (ICE_MAX_FV_WORDS >
5984 (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
5993 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
5994 * @hw: pointer to hardware structure
5995 * @rm: recipe management list entry
5996 * @match_tun_mask: tunnel mask that needs to be programmed
5997 * @profiles: bitmap of profiles that will be associated.
/* Program the recipe group(s) in @rm into hardware via admin queue, chaining
 * multiple recipes through result indexes when more than one group is needed,
 * and mirror the results into the SW recipe bookkeeping.
 */
5999 static enum ice_status
6000 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
6001 u16 match_tun_mask, ice_bitmap_t *profiles)
6003 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6004 struct ice_aqc_recipe_data_elem *tmp;
6005 struct ice_aqc_recipe_data_elem *buf;
6006 struct ice_recp_grp_entry *entry;
6007 enum ice_status status;
6013 /* When more than one recipe are required, another recipe is needed to
6014 * chain them together. Matching a tunnel metadata ID takes up one of
6015 * the match fields in the chaining recipe reducing the number of
6016 * chained recipes by one.
6018 /* check number of free result indices */
6019 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6020 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
6022 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
6023 free_res_idx, rm->n_grp_count);
6025 if (rm->n_grp_count > 1) {
6026 if (rm->n_grp_count > free_res_idx)
6027 return ICE_ERR_MAX_LIMIT;
6032 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
6033 return ICE_ERR_MAX_LIMIT;
6035 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
6036 ICE_MAX_NUM_RECIPES,
6039 return ICE_ERR_NO_MEMORY;
6041 buf = (struct ice_aqc_recipe_data_elem *)
6042 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
6044 status = ICE_ERR_NO_MEMORY;
6048 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
6049 recipe_count = ICE_MAX_NUM_RECIPES;
6050 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
6052 if (status || recipe_count == 0)
6055 /* Allocate the recipe resources, and configure them according to the
6056 * match fields from protocol headers and extracted field vectors.
6058 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
6059 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
6062 status = ice_alloc_recipe(hw, &entry->rid);
6066 /* Clear the result index of the located recipe, as this will be
6067 * updated, if needed, later in the recipe creation process.
6069 tmp[0].content.result_indx = 0;
6071 buf[recps] = tmp[0];
6072 buf[recps].recipe_indx = (u8)entry->rid;
6073 /* if the recipe is a non-root recipe RID should be programmed
6074 * as 0 for the rules to be applied correctly.
6076 buf[recps].content.rid = 0;
6077 ice_memset(&buf[recps].content.lkup_indx, 0,
6078 sizeof(buf[recps].content.lkup_indx),
6081 /* All recipes use look-up index 0 to match switch ID. */
6082 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
6083 buf[recps].content.mask[0] =
6084 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
6085 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
6088 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
6089 buf[recps].content.lkup_indx[i] = 0x80;
6090 buf[recps].content.mask[i] = 0;
/* Program this group's field-vector indexes/masks into lookup
 * slots 1..n (slot 0 is reserved for switch ID above).
 */
6093 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
6094 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
6095 buf[recps].content.mask[i + 1] =
6096 CPU_TO_LE16(entry->fv_mask[i]);
6099 if (rm->n_grp_count > 1) {
6100 /* Checks to see if there really is a valid result index
6103 if (chain_idx >= ICE_MAX_FV_WORDS) {
6104 ice_debug(hw, ICE_DBG_SW,
6105 "No chain index available\n");
6106 status = ICE_ERR_MAX_LIMIT;
/* Reserve this result index for chaining and advance to the
 * next free one for the following group.
 */
6110 entry->chain_idx = chain_idx;
6111 buf[recps].content.result_indx =
6112 ICE_AQ_RECIPE_RESULT_EN |
6113 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
6114 ICE_AQ_RECIPE_RESULT_DATA_M);
6115 ice_clear_bit(chain_idx, result_idx_bm);
6116 chain_idx = ice_find_first_bit(result_idx_bm,
6120 /* fill recipe dependencies */
6121 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
6122 ICE_MAX_NUM_RECIPES);
6123 ice_set_bit(buf[recps].recipe_indx,
6124 (ice_bitmap_t *)buf[recps].recipe_bitmap);
6125 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
/* Single-group case: the lone recipe is the root; multi-group case
 * (else branch below) allocates an extra chaining root recipe.
 */
6129 if (rm->n_grp_count == 1) {
6130 rm->root_rid = buf[0].recipe_indx;
6131 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
6132 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
6133 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
6134 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
6135 sizeof(buf[0].recipe_bitmap),
6136 ICE_NONDMA_TO_NONDMA);
6138 status = ICE_ERR_BAD_PTR;
6141 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
6142 * the recipe which is getting created if specified
6143 * by user. Usually any advanced switch filter, which results
6144 * into new extraction sequence, ended up creating a new recipe
6145 * of type ROOT and usually recipes are associated with profiles
6146 * Switch rule referreing newly created recipe, needs to have
6147 * either/or 'fwd' or 'join' priority, otherwise switch rule
6148 * evaluation will not happen correctly. In other words, if
6149 * switch rule to be evaluated on priority basis, then recipe
6150 * needs to have priority, otherwise it will be evaluated last.
6152 buf[0].content.act_ctrl_fwd_priority = rm->priority;
6154 struct ice_recp_grp_entry *last_chain_entry;
6157 /* Allocate the last recipe that will chain the outcomes of the
6158 * other recipes together
6160 status = ice_alloc_recipe(hw, &rid);
6164 buf[recps].recipe_indx = (u8)rid;
6165 buf[recps].content.rid = (u8)rid;
6166 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
6167 /* the new entry created should also be part of rg_list to
6168 * make sure we have complete recipe
6170 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
6171 sizeof(*last_chain_entry));
6172 if (!last_chain_entry) {
6173 status = ICE_ERR_NO_MEMORY;
6176 last_chain_entry->rid = rid;
6177 ice_memset(&buf[recps].content.lkup_indx, 0,
6178 sizeof(buf[recps].content.lkup_indx),
6180 /* All recipes use look-up index 0 to match switch ID. */
6181 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
6182 buf[recps].content.mask[0] =
6183 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
6184 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
6185 buf[recps].content.lkup_indx[i] =
6186 ICE_AQ_RECIPE_LKUP_IGNORE;
6187 buf[recps].content.mask[i] = 0;
6191 /* update r_bitmap with the recp that is used for chaining */
6192 ice_set_bit(rid, rm->r_bitmap);
6193 /* this is the recipe that chains all the other recipes so it
6194 * should not have a chaining ID to indicate the same
6196 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
6197 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
6199 last_chain_entry->fv_idx[i] = entry->chain_idx;
6200 buf[recps].content.lkup_indx[i] = entry->chain_idx;
6201 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
6202 ice_set_bit(entry->rid, rm->r_bitmap);
6204 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
6205 if (sizeof(buf[recps].recipe_bitmap) >=
6206 sizeof(rm->r_bitmap)) {
6207 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
6208 sizeof(buf[recps].recipe_bitmap),
6209 ICE_NONDMA_TO_NONDMA);
6211 status = ICE_ERR_BAD_PTR;
6214 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
6216 /* To differentiate among different UDP tunnels, a meta data ID
6219 if (match_tun_mask) {
6220 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
6221 buf[recps].content.mask[i] =
6222 CPU_TO_LE16(match_tun_mask);
6226 rm->root_rid = (u8)rid;
6228 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
6232 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
6233 ice_release_change_lock(hw);
6237 /* Every recipe that just got created add it to the recipe
6240 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
6241 struct ice_switch_info *sw = hw->switch_info;
6242 bool is_root, idx_found = false;
6243 struct ice_sw_recipe *recp;
6244 u16 idx, buf_idx = 0;
6246 /* find buffer index for copying some data */
6247 for (idx = 0; idx < rm->n_grp_count; idx++)
6248 if (buf[idx].recipe_indx == entry->rid) {
6254 status = ICE_ERR_OUT_OF_RANGE;
6258 recp = &sw->recp_list[entry->rid];
6259 is_root = (rm->root_rid == entry->rid);
6260 recp->is_root = is_root;
6262 recp->root_rid = entry->rid;
6263 recp->big_recp = (is_root && rm->n_grp_count > 1);
6265 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
6266 entry->r_group.n_val_pairs *
6267 sizeof(struct ice_fv_word),
6268 ICE_NONDMA_TO_NONDMA);
6270 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
6271 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
6273 /* Copy non-result fv index values and masks to recipe. This
6274 * call will also update the result recipe bitmask.
6276 ice_collect_result_idx(&buf[buf_idx], recp);
6278 /* for non-root recipes, also copy to the root, this allows
6279 * easier matching of a complete chained recipe
6282 ice_collect_result_idx(&buf[buf_idx],
6283 &sw->recp_list[rm->root_rid]);
6285 recp->n_ext_words = entry->r_group.n_val_pairs;
6286 recp->chain_idx = entry->chain_idx;
6287 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
6288 recp->n_grp_count = rm->n_grp_count;
6289 recp->tun_type = rm->tun_type;
6290 recp->recp_created = true;
6304 * ice_create_recipe_group - creates recipe group
6305 * @hw: pointer to hardware structure
6306 * @rm: recipe management list entry
6307 * @lkup_exts: lookup elements
6309 static enum ice_status
6310 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
6311 struct ice_prot_lkup_ext *lkup_exts)
6313 enum ice_status status;
6316 rm->n_grp_count = 0;
6318 /* Create recipes for words that are marked not done by packing them
6321 status = ice_create_first_fit_recp_def(hw, lkup_exts,
6322 &rm->rg_list, &recp_count);
6324 rm->n_grp_count += recp_count;
6325 rm->n_ext_words = lkup_exts->n_val_words;
/* Cache the extraction words and masks on the recipe for later
 * programming and matching.
 */
6326 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
6327 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
6328 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
6329 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
6336 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
6337 * @hw: pointer to hardware structure
6338 * @lkups: lookup elements or match criteria for the advanced recipe, one
6339 * structure per protocol header
6340 * @lkups_cnt: number of protocols
6341 * @bm: bitmap of field vectors to consider
6342 * @fv_list: pointer to a list that holds the returned field vectors
6344 static enum ice_status
6345 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6346 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
6348 enum ice_status status;
/* Build a temporary array of HW protocol IDs, one per lookup element. */
6355 prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
6357 return ICE_ERR_NO_MEMORY;
6359 for (i = 0; i < lkups_cnt; i++)
6360 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
6361 status = ICE_ERR_CFG;
6365 /* Find field vectors that include all specified protocol types */
6366 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
/* Temporary protocol-ID array is no longer needed. */
6369 ice_free(hw, prot_ids);
6374 * ice_tun_type_match_word - determine if tun type needs a match mask
6375 * @tun_type: tunnel type
6376 * @mask: mask to be used for the tunnel
/* Report whether @tun_type requires matching the tunnel-flag metadata word;
 * when it does, *mask receives the flag mask to program (VLAN-tagged tunnel
 * variants exclude the VLAN bits from the mask).
 */
6378 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
6381 case ICE_SW_TUN_VXLAN_GPE:
6382 case ICE_SW_TUN_GENEVE:
6383 case ICE_SW_TUN_VXLAN:
6384 case ICE_SW_TUN_NVGRE:
6385 case ICE_SW_TUN_UDP:
6386 case ICE_ALL_TUNNELS:
6387 *mask = ICE_TUN_FLAG_MASK;
6390 case ICE_SW_TUN_GENEVE_VLAN:
6391 case ICE_SW_TUN_VXLAN_VLAN:
6392 *mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
6402 * ice_add_special_words - Add words that are not protocols, such as metadata
6403 * @rinfo: other information regarding the rule e.g. priority and action info
6404 * @lkup_exts: lookup word structure
6406 static enum ice_status
6407 ice_add_special_words(struct ice_adv_rule_info *rinfo,
6408 struct ice_prot_lkup_ext *lkup_exts)
6412 /* If this is a tunneled packet, then add recipe index to match the
6413 * tunnel bit in the packet metadata flags.
6415 if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
6416 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
/* Append one metadata word carrying the tunnel-flag mask. */
6417 u8 word = lkup_exts->n_val_words++;
6419 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
6420 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
6421 lkup_exts->field_mask[word] = mask;
/* No room left for the metadata word. */
6423 return ICE_ERR_MAX_LIMIT;
6430 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
6431 * @hw: pointer to hardware structure
6432 * @rinfo: other information regarding the rule e.g. priority and action info
6433 * @bm: pointer to memory for returning the bitmap of field vectors
/* Populate @bm with the field-vector (profile) IDs compatible with the rule's
 * tunnel type: either by selecting a profile class (prof_type) resolved via
 * ice_get_sw_fv_bitmap(), or by setting explicit ICE_PROFID_* bits.
 */
6436 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
6439 enum ice_prof_type prof_type;
6441 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
6443 switch (rinfo->tun_type) {
6445 prof_type = ICE_PROF_NON_TUN;
6447 case ICE_ALL_TUNNELS:
6448 prof_type = ICE_PROF_TUN_ALL;
6450 case ICE_SW_TUN_VXLAN_GPE:
6451 case ICE_SW_TUN_GENEVE:
6452 case ICE_SW_TUN_GENEVE_VLAN:
6453 case ICE_SW_TUN_VXLAN:
6454 case ICE_SW_TUN_VXLAN_VLAN:
6455 case ICE_SW_TUN_UDP:
6456 case ICE_SW_TUN_GTP:
6457 prof_type = ICE_PROF_TUN_UDP;
6459 case ICE_SW_TUN_NVGRE:
6460 prof_type = ICE_PROF_TUN_GRE;
6462 case ICE_SW_TUN_PPPOE:
6463 prof_type = ICE_PROF_TUN_PPPOE;
6465 case ICE_SW_TUN_PPPOE_PAY:
6466 ice_set_bit(ICE_PROFID_PPPOE_PAY, bm);
6468 case ICE_SW_TUN_PPPOE_IPV4:
6469 ice_set_bit(ICE_PROFID_PPPOE_IPV4_OTHER, bm);
6470 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
6471 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
6473 case ICE_SW_TUN_PPPOE_IPV4_TCP:
6474 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
6476 case ICE_SW_TUN_PPPOE_IPV4_UDP:
6477 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
6479 case ICE_SW_TUN_PPPOE_IPV6:
6480 ice_set_bit(ICE_PROFID_PPPOE_IPV6_OTHER, bm);
6481 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
6482 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
6484 case ICE_SW_TUN_PPPOE_IPV6_TCP:
6485 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
6487 case ICE_SW_TUN_PPPOE_IPV6_UDP:
6488 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
6490 case ICE_SW_TUN_PROFID_IPV6_ESP:
6491 case ICE_SW_TUN_IPV6_ESP:
6492 ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
6494 case ICE_SW_TUN_PROFID_IPV6_AH:
6495 case ICE_SW_TUN_IPV6_AH:
6496 ice_set_bit(ICE_PROFID_IPV6_AH, bm);
6498 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6499 case ICE_SW_TUN_IPV6_L2TPV3:
6500 ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
6502 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6503 case ICE_SW_TUN_IPV6_NAT_T:
6504 ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
6506 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6507 ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
6509 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6510 ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
6512 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6513 ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
6515 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6516 ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
6518 case ICE_SW_TUN_IPV4_NAT_T:
6519 ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
6521 case ICE_SW_TUN_IPV4_L2TPV3:
6522 ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
6524 case ICE_SW_TUN_IPV4_ESP:
6525 ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
6527 case ICE_SW_TUN_IPV4_AH:
6528 ice_set_bit(ICE_PROFID_IPV4_AH, bm);
6530 case ICE_SW_IPV4_TCP:
6531 ice_set_bit(ICE_PROFID_IPV4_TCP, bm);
6533 case ICE_SW_IPV4_UDP:
6534 ice_set_bit(ICE_PROFID_IPV4_UDP, bm);
6536 case ICE_SW_IPV6_TCP:
6537 ice_set_bit(ICE_PROFID_IPV6_TCP, bm);
6539 case ICE_SW_IPV6_UDP:
6540 ice_set_bit(ICE_PROFID_IPV6_UDP, bm);
6542 case ICE_SW_TUN_AND_NON_TUN:
6544 prof_type = ICE_PROF_ALL;
6548 ice_get_sw_fv_bitmap(hw, prof_type, bm);
6552 * ice_is_prof_rule - determine if rule type is a profile rule
6553 * @type: the rule type
6555 * if the rule type is a profile rule, that means that there no field value
6556 * match required, in this case just a profile hit is required.
/* True for the ICE_SW_TUN_PROFID_* rule types, which match on a profile hit
 * alone rather than on field values.
 */
6558 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
6561 case ICE_SW_TUN_PROFID_IPV6_ESP:
6562 case ICE_SW_TUN_PROFID_IPV6_AH:
6563 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6564 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6565 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6566 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6567 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6568 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6578 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
6579 * @hw: pointer to hardware structure
6580 * @lkups: lookup elements or match criteria for the advanced recipe, one
6581 * structure per protocol header
6582 * @lkups_cnt: number of protocols
6583 * @rinfo: other information regarding the rule e.g. priority and action info
6584 * @rid: return the recipe ID of the recipe created
/* Top-level recipe creation path: extract match words from @lkups, find
 * compatible profiles, reuse an existing matching recipe when possible,
 * otherwise program a new (possibly chained) recipe and update the
 * profile<->recipe association bookkeeping. *rid receives the recipe ID.
 */
6586 static enum ice_status
6587 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6588 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
6590 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
6591 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
6592 struct ice_prot_lkup_ext *lkup_exts;
6593 struct ice_recp_grp_entry *r_entry;
6594 struct ice_sw_fv_list_entry *fvit;
6595 struct ice_recp_grp_entry *r_tmp;
6596 struct ice_sw_fv_list_entry *tmp;
6597 enum ice_status status = ICE_SUCCESS;
6598 struct ice_sw_recipe *rm;
6599 u16 match_tun_mask = 0;
/* Profile-only rules may have zero lookups; everything else needs some. */
6603 if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
6604 return ICE_ERR_PARAM;
6606 lkup_exts = (struct ice_prot_lkup_ext *)
6607 ice_malloc(hw, sizeof(*lkup_exts));
6609 return ICE_ERR_NO_MEMORY;
6611 /* Determine the number of words to be matched and if it exceeds a
6612 * recipe's restrictions
6614 for (i = 0; i < lkups_cnt; i++) {
6617 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
6618 status = ICE_ERR_CFG;
6619 goto err_free_lkup_exts;
6622 count = ice_fill_valid_words(&lkups[i], lkup_exts);
6624 status = ICE_ERR_CFG;
6625 goto err_free_lkup_exts;
6629 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
6631 status = ICE_ERR_NO_MEMORY;
6632 goto err_free_lkup_exts;
6635 /* Get field vectors that contain fields extracted from all the protocol
6636 * headers being programmed.
6638 INIT_LIST_HEAD(&rm->fv_list);
6639 INIT_LIST_HEAD(&rm->rg_list);
6641 /* Get bitmap of field vectors (profiles) that are compatible with the
6642 * rule request; only these will be searched in the subsequent call to
6645 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
6647 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
6651 /* Group match words into recipes using preferred recipe grouping
6654 status = ice_create_recipe_group(hw, rm, lkup_exts);
6658 /* For certain tunnel types it is necessary to use a metadata ID flag to
6659 * differentiate different tunnel types. A separate recipe needs to be
6660 * used for the metadata.
6662 if (ice_tun_type_match_word(rinfo->tun_type, &mask) &&
6663 rm->n_grp_count > 1)
6664 match_tun_mask = mask;
6666 /* set the recipe priority if specified */
6667 rm->priority = (u8)rinfo->priority;
6669 /* Find offsets from the field vector. Pick the first one for all the
6672 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
6676 /* An empty FV list means to use all the profiles returned in the
6679 if (LIST_EMPTY(&rm->fv_list)) {
6682 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++)
6683 if (ice_is_bit_set(fv_bitmap, j)) {
6684 struct ice_sw_fv_list_entry *fvl;
6686 fvl = (struct ice_sw_fv_list_entry *)
6687 ice_malloc(hw, sizeof(*fvl));
6691 fvl->profile_id = j;
6692 LIST_ADD(&fvl->list_entry, &rm->fv_list);
6696 /* get bitmap of all profiles the recipe will be associated with */
6697 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
6698 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
6700 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
6701 ice_set_bit((u16)fvit->profile_id, profiles);
6704 /* Create any special protocol/offset pairs, such as looking at tunnel
6705 * bits by extracting metadata
6707 status = ice_add_special_words(rinfo, lkup_exts);
6709 goto err_free_lkup_exts;
6711 /* Look for a recipe which matches our requested fv / mask list */
6712 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
6713 if (*rid < ICE_MAX_NUM_RECIPES)
6714 /* Success if found a recipe that match the existing criteria */
6717 rm->tun_type = rinfo->tun_type;
6718 /* Recipe we need does not exist, add a recipe */
6719 status = ice_add_sw_recipe(hw, rm, match_tun_mask, profiles);
6723 /* Associate all the recipes created with all the profiles in the
6724 * common field vector.
6726 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
6728 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
6731 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
6732 (u8 *)r_bitmap, NULL);
/* Merge our new recipe bits into the FW's current mapping under
 * the change lock before writing it back.
 */
6736 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
6737 ICE_MAX_NUM_RECIPES);
6738 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
6742 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
6745 ice_release_change_lock(hw);
6750 /* Update profile to recipe bitmap array */
6751 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
6752 ICE_MAX_NUM_RECIPES);
6754 /* Update recipe to profile bitmap array */
6755 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
6756 if (ice_is_bit_set(r_bitmap, j))
6757 ice_set_bit((u16)fvit->profile_id,
6758 recipe_to_profile[j]);
6761 *rid = rm->root_rid;
/* Preserve the lookup extraction data on the new root recipe so later
 * ice_find_recp() calls can match against it.
 */
6762 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
6763 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
/* Cleanup: tear down the temporary group and FV lists. */
6765 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
6766 ice_recp_grp_entry, l_entry) {
6767 LIST_DEL(&r_entry->l_entry);
6768 ice_free(hw, r_entry);
6771 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
6773 LIST_DEL(&fvit->list_entry);
6778 ice_free(hw, rm->root_buf);
6783 ice_free(hw, lkup_exts);
6789 * ice_find_dummy_packet - find dummy packet by tunnel type
6791 * @lkups: lookup elements or match criteria for the advanced recipe, one
6792 * structure per protocol header
6793 * @lkups_cnt: number of protocols
6794 * @tun_type: tunnel type from the match criteria
6795 * @pkt: dummy packet to fill according to filter match criteria
6796 * @pkt_len: packet length of dummy packet
6797 * @offsets: pointer to receive the pointer to the offsets for the packet
6800 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6801 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
6803 const struct ice_dummy_pkt_offsets **offsets)
6805 bool tcp = false, udp = false, ipv6 = false, vlan = false;
/* Scan the match criteria once to learn which protocol headers the
 * caller matches on; these flags select the dummy packet template
 * further below.  NOTE(review): the flag-setting statements of each
 * branch are elided from this excerpt -- confirm against full source.
 */
6809 for (i = 0; i < lkups_cnt; i++) {
6810 if (lkups[i].type == ICE_UDP_ILOS)
6812 else if (lkups[i].type == ICE_TCP_IL)
6814 else if (lkups[i].type == ICE_IPV6_OFOS)
6816 else if (lkups[i].type == ICE_VLAN_OFOS)
6818 else if (lkups[i].type == ICE_IPV4_OFOS &&
6819 lkups[i].h_u.ipv4_hdr.protocol ==
6820 ICE_IPV4_NVGRE_PROTO_ID &&
6821 lkups[i].m_u.ipv4_hdr.protocol ==
6824 else if (lkups[i].type == ICE_PPPOE &&
6825 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
6826 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
6827 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
6830 else if (lkups[i].type == ICE_ETYPE_OL &&
6831 lkups[i].h_u.ethertype.ethtype_id ==
6832 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
6833 lkups[i].m_u.ethertype.ethtype_id ==
6836 else if (lkups[i].type == ICE_IPV4_IL &&
6837 lkups[i].h_u.ipv4_hdr.protocol ==
6839 lkups[i].m_u.ipv4_hdr.protocol ==
/* Fully-specified tunnel/protocol types are handled first; each match
 * returns the corresponding fixed template, its length, and its
 * per-header offset table.
 */
6844 if (tun_type == ICE_SW_TUN_IPV4_ESP) {
6845 *pkt = dummy_ipv4_esp_pkt;
6846 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
6847 *offsets = dummy_ipv4_esp_packet_offsets;
6851 if (tun_type == ICE_SW_TUN_IPV6_ESP) {
6852 *pkt = dummy_ipv6_esp_pkt;
6853 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
6854 *offsets = dummy_ipv6_esp_packet_offsets;
6858 if (tun_type == ICE_SW_TUN_IPV4_AH) {
6859 *pkt = dummy_ipv4_ah_pkt;
6860 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
6861 *offsets = dummy_ipv4_ah_packet_offsets;
6865 if (tun_type == ICE_SW_TUN_IPV6_AH) {
6866 *pkt = dummy_ipv6_ah_pkt;
6867 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
6868 *offsets = dummy_ipv6_ah_packet_offsets;
6872 if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
6873 *pkt = dummy_ipv4_nat_pkt;
6874 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
6875 *offsets = dummy_ipv4_nat_packet_offsets;
6879 if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
6880 *pkt = dummy_ipv6_nat_pkt;
6881 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
6882 *offsets = dummy_ipv6_nat_packet_offsets;
6886 if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
6887 *pkt = dummy_ipv4_l2tpv3_pkt;
6888 *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
6889 *offsets = dummy_ipv4_l2tpv3_packet_offsets;
6893 if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
6894 *pkt = dummy_ipv6_l2tpv3_pkt;
6895 *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
6896 *offsets = dummy_ipv6_l2tpv3_packet_offsets;
6900 if (tun_type == ICE_SW_TUN_GTP) {
6901 *pkt = dummy_udp_gtp_packet;
6902 *pkt_len = sizeof(dummy_udp_gtp_packet);
6903 *offsets = dummy_udp_gtp_packet_offsets;
/* PPPoE: a generic PPPOE/PPPOE_PAY rule picks the IPv4 or IPv6
 * payload template based on the ipv6 flag detected above.
 */
6907 if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
6908 *pkt = dummy_pppoe_ipv6_packet;
6909 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
6910 *offsets = dummy_pppoe_packet_offsets;
6912 } else if (tun_type == ICE_SW_TUN_PPPOE ||
6913 tun_type == ICE_SW_TUN_PPPOE_PAY) {
6914 *pkt = dummy_pppoe_ipv4_packet;
6915 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
6916 *offsets = dummy_pppoe_packet_offsets;
6920 if (tun_type == ICE_SW_TUN_PPPOE_IPV4) {
6921 *pkt = dummy_pppoe_ipv4_packet;
6922 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
6923 *offsets = dummy_pppoe_packet_ipv4_offsets;
6927 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_TCP) {
6928 *pkt = dummy_pppoe_ipv4_tcp_packet;
6929 *pkt_len = sizeof(dummy_pppoe_ipv4_tcp_packet);
6930 *offsets = dummy_pppoe_ipv4_tcp_packet_offsets;
6934 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_UDP) {
6935 *pkt = dummy_pppoe_ipv4_udp_packet;
6936 *pkt_len = sizeof(dummy_pppoe_ipv4_udp_packet);
6937 *offsets = dummy_pppoe_ipv4_udp_packet_offsets;
6941 if (tun_type == ICE_SW_TUN_PPPOE_IPV6) {
6942 *pkt = dummy_pppoe_ipv6_packet;
6943 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
6944 *offsets = dummy_pppoe_packet_ipv6_offsets;
6948 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_TCP) {
6949 *pkt = dummy_pppoe_ipv6_tcp_packet;
6950 *pkt_len = sizeof(dummy_pppoe_ipv6_tcp_packet);
6951 *offsets = dummy_pppoe_packet_ipv6_tcp_offsets;
6955 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_UDP) {
6956 *pkt = dummy_pppoe_ipv6_udp_packet;
6957 *pkt_len = sizeof(dummy_pppoe_ipv6_udp_packet);
6958 *offsets = dummy_pppoe_packet_ipv6_udp_offsets;
/* Plain (non-tunneled) IPv4/IPv6 TCP and UDP rule types. */
6962 if (tun_type == ICE_SW_IPV4_TCP) {
6963 *pkt = dummy_tcp_packet;
6964 *pkt_len = sizeof(dummy_tcp_packet);
6965 *offsets = dummy_tcp_packet_offsets;
6969 if (tun_type == ICE_SW_IPV4_UDP) {
6970 *pkt = dummy_udp_packet;
6971 *pkt_len = sizeof(dummy_udp_packet);
6972 *offsets = dummy_udp_packet_offsets;
6976 if (tun_type == ICE_SW_IPV6_TCP) {
6977 *pkt = dummy_tcp_ipv6_packet;
6978 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
6979 *offsets = dummy_tcp_ipv6_packet_offsets;
6983 if (tun_type == ICE_SW_IPV6_UDP) {
6984 *pkt = dummy_udp_ipv6_packet;
6985 *pkt_len = sizeof(dummy_udp_ipv6_packet);
6986 *offsets = dummy_udp_ipv6_packet_offsets;
6990 if (tun_type == ICE_ALL_TUNNELS) {
6991 *pkt = dummy_gre_udp_packet;
6992 *pkt_len = sizeof(dummy_gre_udp_packet);
6993 *offsets = dummy_gre_udp_packet_offsets;
/* NVGRE (explicitly requested or inferred from the IPv4 protocol
 * field, IP proto 0x2F): pick TCP or UDP inner template.
 */
6997 if (tun_type == ICE_SW_TUN_NVGRE || gre) {
6999 *pkt = dummy_gre_tcp_packet;
7000 *pkt_len = sizeof(dummy_gre_tcp_packet);
7001 *offsets = dummy_gre_tcp_packet_offsets;
7005 *pkt = dummy_gre_udp_packet;
7006 *pkt_len = sizeof(dummy_gre_udp_packet);
7007 *offsets = dummy_gre_udp_packet_offsets;
/* UDP-based tunnels (VXLAN/GENEVE/VXLAN-GPE, VLAN variants): the
 * inner L4 template is chosen by the tcp flag.
 */
7011 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
7012 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
7013 tun_type == ICE_SW_TUN_GENEVE_VLAN ||
7014 tun_type == ICE_SW_TUN_VXLAN_VLAN) {
7016 *pkt = dummy_udp_tun_tcp_packet;
7017 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
7018 *offsets = dummy_udp_tun_tcp_packet_offsets;
7022 *pkt = dummy_udp_tun_udp_packet;
7023 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
7024 *offsets = dummy_udp_tun_udp_packet_offsets;
/* Fallback selection from the detected udp/tcp/ipv6/vlan flags when
 * no explicit tunnel type matched above.
 */
7030 *pkt = dummy_vlan_udp_packet;
7031 *pkt_len = sizeof(dummy_vlan_udp_packet);
7032 *offsets = dummy_vlan_udp_packet_offsets;
7035 *pkt = dummy_udp_packet;
7036 *pkt_len = sizeof(dummy_udp_packet);
7037 *offsets = dummy_udp_packet_offsets;
7039 } else if (udp && ipv6) {
7041 *pkt = dummy_vlan_udp_ipv6_packet;
7042 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
7043 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
7046 *pkt = dummy_udp_ipv6_packet;
7047 *pkt_len = sizeof(dummy_udp_ipv6_packet);
7048 *offsets = dummy_udp_ipv6_packet_offsets;
/* NOTE(review): "(tcp && ipv6) || ipv6" is logically just "ipv6";
 * the tcp clause is redundant and could be simplified.
 */
7050 } else if ((tcp && ipv6) || ipv6) {
7052 *pkt = dummy_vlan_tcp_ipv6_packet;
7053 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
7054 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
7057 *pkt = dummy_tcp_ipv6_packet;
7058 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
7059 *offsets = dummy_tcp_ipv6_packet_offsets;
/* Default: IPv4 TCP template, with or without a VLAN tag. */
7064 *pkt = dummy_vlan_tcp_packet;
7065 *pkt_len = sizeof(dummy_vlan_tcp_packet);
7066 *offsets = dummy_vlan_tcp_packet_offsets;
7068 *pkt = dummy_tcp_packet;
7069 *pkt_len = sizeof(dummy_tcp_packet);
7070 *offsets = dummy_tcp_packet_offsets;
7075 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
7077 * @lkups: lookup elements or match criteria for the advanced recipe, one
7078 * structure per protocol header
7079 * @lkups_cnt: number of protocols
7080 * @s_rule: stores rule information from the match criteria
7081 * @dummy_pkt: dummy packet to fill according to filter match criteria
7082 * @pkt_len: packet length of dummy packet
7083 * @offsets: offset info for the dummy packet
7085 static enum ice_status
7086 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7087 struct ice_aqc_sw_rules_elem *s_rule,
7088 const u8 *dummy_pkt, u16 pkt_len,
7089 const struct ice_dummy_pkt_offsets *offsets)
7094 /* Start with a packet with a pre-defined/dummy content. Then, fill
7095 * in the header values to be looked up or matched.
7097 pkt = s_rule->pdata.lkup_tx_rx.hdr;
7099 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
7101 for (i = 0; i < lkups_cnt; i++) {
7102 enum ice_protocol_type type;
7103 u16 offset = 0, len = 0, j;
7106 /* find the start of this layer; it should be found since this
7107 * was already checked when search for the dummy packet
7109 type = lkups[i].type;
7110 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
7111 if (type == offsets[j].type) {
7112 offset = offsets[j].offset;
7117 /* this should never happen in a correct calling sequence */
7119 return ICE_ERR_PARAM;
/* Map the protocol type to the byte length of its header struct so
 * we know how many 16-bit words to merge below.
 */
7121 switch (lkups[i].type) {
7124 len = sizeof(struct ice_ether_hdr);
7127 len = sizeof(struct ice_ethtype_hdr);
7130 len = sizeof(struct ice_vlan_hdr);
7134 len = sizeof(struct ice_ipv4_hdr);
7138 len = sizeof(struct ice_ipv6_hdr);
7143 len = sizeof(struct ice_l4_hdr);
7146 len = sizeof(struct ice_sctp_hdr);
7149 len = sizeof(struct ice_nvgre);
7154 len = sizeof(struct ice_udp_tnl_hdr);
7158 len = sizeof(struct ice_udp_gtp_hdr);
7161 len = sizeof(struct ice_pppoe_hdr);
7164 len = sizeof(struct ice_esp_hdr);
7167 len = sizeof(struct ice_nat_t_hdr);
7170 len = sizeof(struct ice_ah_hdr);
7173 len = sizeof(struct ice_l2tpv3_sess_hdr);
/* Unsupported protocol type for an advanced rule. */
7176 return ICE_ERR_PARAM;
7179 /* the length should be a word multiple */
7180 if (len % ICE_BYTES_PER_WORD)
7183 /* We have the offset to the header start, the length, the
7184 * caller's header values and mask. Use this information to
7185 * copy the data into the dummy packet appropriately based on
7186 * the mask. Note that we need to only write the bits as
7187 * indicated by the mask to make sure we don't improperly write
7188 * over any significant packet data.
/* Merge word-by-word: keep template bits where mask is 0, take the
 * caller's header value bits where mask is 1.
 */
7190 for (j = 0; j < len / sizeof(u16); j++)
7191 if (((u16 *)&lkups[i].m_u)[j])
7192 ((u16 *)(pkt + offset))[j] =
7193 (((u16 *)(pkt + offset))[j] &
7194 ~((u16 *)&lkups[i].m_u)[j]) |
7195 (((u16 *)&lkups[i].h_u)[j] &
7196 ((u16 *)&lkups[i].m_u)[j]);
7199 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
7205 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
7206 * @hw: pointer to the hardware structure
7207 * @tun_type: tunnel type
7208 * @pkt: dummy packet to fill in
7209 * @offsets: offset info for the dummy packet
7211 static enum ice_status
7212 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
7213 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
/* Resolve the currently-open tunnel UDP port for the requested
 * tunnel type; types without a configurable port need no patching.
 */
7218 case ICE_SW_TUN_AND_NON_TUN:
7219 case ICE_SW_TUN_VXLAN_GPE:
7220 case ICE_SW_TUN_VXLAN:
7221 case ICE_SW_TUN_VXLAN_VLAN:
7222 case ICE_SW_TUN_UDP:
7223 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
7227 case ICE_SW_TUN_GENEVE:
7228 case ICE_SW_TUN_GENEVE_VLAN:
7229 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
7234 /* Nothing needs to be done for this tunnel type */
7238 /* Find the outer UDP protocol header and insert the port number */
7239 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
7240 if (offsets[i].type == ICE_UDP_OF) {
7241 struct ice_l4_hdr *hdr;
7244 offset = offsets[i].offset;
/* Patch the destination port in place; network byte order. */
7245 hdr = (struct ice_l4_hdr *)&pkt[offset];
7246 hdr->dst_port = CPU_TO_BE16(open_port);
7256 * ice_find_adv_rule_entry - Search a rule entry
7257 * @hw: pointer to the hardware structure
7258 * @lkups: lookup elements or match criteria for the advanced recipe, one
7259 * structure per protocol header
7260 * @lkups_cnt: number of protocols
7261 * @recp_id: recipe ID for which we are finding the rule
7262 * @rinfo: other information regarding the rule e.g. priority and action info
7264 * Helper function to search for a given advance rule entry
7265 * Returns pointer to entry storing the rule if found
7267 static struct ice_adv_fltr_mgmt_list_entry *
7268 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7269 u16 lkups_cnt, u16 recp_id,
7270 struct ice_adv_rule_info *rinfo)
7272 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7273 struct ice_switch_info *sw = hw->switch_info;
/* Walk this recipe's filter list; a rule matches only when every
 * lookup element compares equal and the flag/tunnel info agrees.
 */
7276 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
7277 ice_adv_fltr_mgmt_list_entry, list_entry) {
7278 bool lkups_matched = true;
7280 if (lkups_cnt != list_itr->lkups_cnt)
7282 for (i = 0; i < list_itr->lkups_cnt; i++)
7283 if (memcmp(&list_itr->lkups[i], &lkups[i],
7285 lkups_matched = false;
7288 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
7289 rinfo->tun_type == list_itr->rule_info.tun_type &&
7297 * ice_adv_add_update_vsi_list
7298 * @hw: pointer to the hardware structure
7299 * @m_entry: pointer to current adv filter management list entry
7300 * @cur_fltr: filter information from the book keeping entry
7301 * @new_fltr: filter information with the new VSI to be added
7303 * Call AQ command to add or update previously created VSI list with new VSI.
7305 * Helper function to do book keeping associated with adding filter information
7306 * The algorithm to do the booking keeping is described below :
7307 * When a VSI needs to subscribe to a given advanced filter
7308 * if only one VSI has been added till now
7309 * Allocate a new VSI list and add two VSIs
7310 * to this list using switch rule command
7311 * Update the previously created switch rule with the
7312 * newly created VSI list ID
7313 * if a VSI list was previously created
7314 * Add the new VSI to the previously created VSI list set
7315 * using the update switch rule command
7317 static enum ice_status
7318 ice_adv_add_update_vsi_list(struct ice_hw *hw,
7319 struct ice_adv_fltr_mgmt_list_entry *m_entry,
7320 struct ice_adv_rule_info *cur_fltr,
7321 struct ice_adv_rule_info *new_fltr)
7323 enum ice_status status;
7324 u16 vsi_list_id = 0;
/* Queue/queue-group/drop actions cannot be merged into a VSI list. */
7326 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
7327 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
7328 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
7329 return ICE_ERR_NOT_IMPL;
7331 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
7332 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
7333 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
7334 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
7335 return ICE_ERR_NOT_IMPL;
7337 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
7338 /* Only one entry existed in the mapping and it was not already
7339 * a part of a VSI list. So, create a VSI list with the old and
7342 struct ice_fltr_info tmp_fltr;
7343 u16 vsi_handle_arr[2];
7345 /* A rule already exists with the new VSI being added */
7346 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
7347 new_fltr->sw_act.fwd_id.hw_vsi_id)
7348 return ICE_ERR_ALREADY_EXISTS;
7350 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
7351 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
7352 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
7358 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
7359 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
7360 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
7361 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
7362 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
7363 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
7365 /* Update the previous switch rule of "forward to VSI" to
7368 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Book-keep the new list id/action on the existing filter entry. */
7372 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
7373 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
7374 m_entry->vsi_list_info =
7375 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
7378 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
7380 if (!m_entry->vsi_list_info)
7383 /* A rule already exists with the new VSI being added */
7384 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
7387 /* Update the previously created VSI list set with
7388 * the new VSI ID passed in
7390 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
7392 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
7394 ice_aqc_opc_update_sw_rules,
7396 /* update VSI list mapping info with new VSI ID */
7398 ice_set_bit(vsi_handle,
7399 m_entry->vsi_list_info->vsi_map);
7402 m_entry->vsi_count++;
7407 * ice_add_adv_rule - helper function to create an advanced switch rule
7408 * @hw: pointer to the hardware structure
7409 * @lkups: information on the words that needs to be looked up. All words
7410 * together makes one recipe
7411 * @lkups_cnt: num of entries in the lkups array
7412 * @rinfo: other information related to the rule that needs to be programmed
7413 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
7414 * ignored is case of error.
7416 * This function can program only 1 rule at a time. The lkups is used to
7417 * describe the all the words that forms the "lookup" portion of the recipe.
7418 * These words can span multiple protocols. Callers to this function need to
7419 * pass in a list of protocol headers with lookup information along and mask
7420 * that determines which words are valid from the given protocol header.
7421 * rinfo describes other information related to this rule such as forwarding
7422 * IDs, priority of this rule, etc.
7425 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7426 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
7427 struct ice_rule_query_data *added_entry)
7429 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
7430 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
7431 const struct ice_dummy_pkt_offsets *pkt_offsets;
7432 struct ice_aqc_sw_rules_elem *s_rule = NULL;
7433 struct LIST_HEAD_TYPE *rule_head;
7434 struct ice_switch_info *sw;
7435 enum ice_status status;
7436 const u8 *pkt = NULL;
7442 /* Initialize profile to result index bitmap */
7443 if (!hw->switch_info->prof_res_bm_init) {
7444 hw->switch_info->prof_res_bm_init = 1;
7445 ice_init_prof_result_bm(hw);
/* Profile rules match on the tunnel profile alone and may carry no
 * lookups; all other rules require at least one.
 */
7448 prof_rule = ice_is_prof_rule(rinfo->tun_type);
7449 if (!prof_rule && !lkups_cnt)
7450 return ICE_ERR_PARAM;
7452 /* get # of words we need to match */
7454 for (i = 0; i < lkups_cnt; i++) {
7457 ptr = (u16 *)&lkups[i].m_u;
7458 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
/* NOTE(review): the second check below subsumes the first
 * (word_cnt > ICE_MAX_CHAIN_WORDS is tested twice); with the elided
 * branch structure unknown, confirm whether one of them is dead code.
 */
7464 if (word_cnt > ICE_MAX_CHAIN_WORDS)
7465 return ICE_ERR_PARAM;
7467 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
7468 return ICE_ERR_PARAM;
7471 /* make sure that we can locate a dummy packet */
7472 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
7475 status = ICE_ERR_PARAM;
7476 goto err_ice_add_adv_rule;
/* Only these four forwarding actions are supported for adv rules. */
7479 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
7480 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
7481 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
7482 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
7485 vsi_handle = rinfo->sw_act.vsi_handle;
7486 if (!ice_is_vsi_valid(hw, vsi_handle))
7487 return ICE_ERR_PARAM;
7489 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
7490 rinfo->sw_act.fwd_id.hw_vsi_id =
7491 ice_get_hw_vsi_num(hw, vsi_handle);
7492 if (rinfo->sw_act.flag & ICE_FLTR_TX)
7493 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
7495 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
7498 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
7500 /* we have to add VSI to VSI_LIST and increment vsi_count.
7501 * Also Update VSI list so that we can change forwarding rule
7502 * if the rule already exists, we will check if it exists with
7503 * same vsi_id, if not then add it to the VSI list if it already
7504 * exists if not then create a VSI list and add the existing VSI
7505 * ID and the new VSI ID to the list
7506 * We will add that VSI to the list
7508 status = ice_adv_add_update_vsi_list(hw, m_entry,
7509 &m_entry->rule_info,
7512 added_entry->rid = rid;
7513 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
7514 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* No existing rule matched: build a new switch rule buffer sized for
 * the fixed header plus the dummy packet payload.
 */
7518 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
7519 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
7521 return ICE_ERR_NO_MEMORY;
7522 act |= ICE_SINGLE_ACT_LAN_ENABLE;
7523 switch (rinfo->sw_act.fltr_act) {
7524 case ICE_FWD_TO_VSI:
7525 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
7526 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
7527 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
7530 act |= ICE_SINGLE_ACT_TO_Q;
7531 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
7532 ICE_SINGLE_ACT_Q_INDEX_M;
7534 case ICE_FWD_TO_QGRP:
7535 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
7536 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
7537 act |= ICE_SINGLE_ACT_TO_Q;
7538 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
7539 ICE_SINGLE_ACT_Q_INDEX_M;
7540 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
7541 ICE_SINGLE_ACT_Q_REGION_M;
7543 case ICE_DROP_PACKET:
7544 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
7545 ICE_SINGLE_ACT_VALID_BIT;
7548 status = ICE_ERR_CFG;
7549 goto err_ice_add_adv_rule;
7552 /* set the rule LOOKUP type based on caller specified 'RX'
7553 * instead of hardcoding it to be either LOOKUP_TX/RX
7555 * for 'RX' set the source to be the port number
7556 * for 'TX' set the source to be the source HW VSI number (determined
7560 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
7561 s_rule->pdata.lkup_tx_rx.src =
7562 CPU_TO_LE16(hw->port_info->lport);
7564 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
7565 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
7568 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
7569 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
7571 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
7572 pkt_len, pkt_offsets);
7574 goto err_ice_add_adv_rule;
/* For real tunnels, patch the open tunnel UDP port into the header. */
7576 if (rinfo->tun_type != ICE_NON_TUN &&
7577 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
7578 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
7579 s_rule->pdata.lkup_tx_rx.hdr,
7582 goto err_ice_add_adv_rule;
7585 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
7586 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
7589 goto err_ice_add_adv_rule;
/* Rule programmed in HW; create the book-keeping entry. */
7590 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
7591 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
7593 status = ICE_ERR_NO_MEMORY;
7594 goto err_ice_add_adv_rule;
7597 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
7598 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
7599 ICE_NONDMA_TO_NONDMA);
7600 if (!adv_fltr->lkups && !prof_rule) {
7601 status = ICE_ERR_NO_MEMORY;
7602 goto err_ice_add_adv_rule;
7605 adv_fltr->lkups_cnt = lkups_cnt;
7606 adv_fltr->rule_info = *rinfo;
7607 adv_fltr->rule_info.fltr_rule_id =
7608 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
7609 sw = hw->switch_info;
7610 sw->recp_list[rid].adv_rule = true;
7611 rule_head = &sw->recp_list[rid].filt_rules;
7613 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
7614 adv_fltr->vsi_count = 1;
7616 /* Add rule entry to book keeping list */
7617 LIST_ADD(&adv_fltr->list_entry, rule_head);
7619 added_entry->rid = rid;
7620 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
7621 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* Shared exit: on error release the partially-built filter entry;
 * the AQ buffer is always freed.
 */
7623 err_ice_add_adv_rule:
7624 if (status && adv_fltr) {
7625 ice_free(hw, adv_fltr->lkups);
7626 ice_free(hw, adv_fltr);
7629 ice_free(hw, s_rule);
7635 * ice_adv_rem_update_vsi_list
7636 * @hw: pointer to the hardware structure
7637 * @vsi_handle: VSI handle of the VSI to remove
7638 * @fm_list: filter management entry for which the VSI list management needs to
7641 static enum ice_status
7642 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
7643 struct ice_adv_fltr_mgmt_list_entry *fm_list)
7645 struct ice_vsi_list_map_info *vsi_list_info;
7646 enum ice_sw_lkup_type lkup_type;
7647 enum ice_status status;
/* Only rules that currently forward to a VSI list can shrink it. */
7650 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
7651 fm_list->vsi_count == 0)
7652 return ICE_ERR_PARAM;
7654 /* A rule with the VSI being removed does not exist */
7655 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
7656 return ICE_ERR_DOES_NOT_EXIST;
7658 lkup_type = ICE_SW_LKUP_LAST;
7659 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
7660 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
7661 ice_aqc_opc_update_sw_rules,
7666 fm_list->vsi_count--;
7667 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
7668 vsi_list_info = fm_list->vsi_list_info;
/* One subscriber left: demote the rule back from "forward to VSI
 * list" to a plain "forward to VSI" and tear the list down.
 */
7669 if (fm_list->vsi_count == 1) {
7670 struct ice_fltr_info tmp_fltr;
7673 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
7675 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
7676 return ICE_ERR_OUT_OF_RANGE;
7678 /* Make sure VSI list is empty before removing it below */
7679 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
7681 ice_aqc_opc_update_sw_rules,
7686 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
7687 tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
7688 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
7689 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
7690 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
7691 tmp_fltr.fwd_id.hw_vsi_id =
7692 ice_get_hw_vsi_num(hw, rem_vsi_handle);
7693 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
7694 ice_get_hw_vsi_num(hw, rem_vsi_handle);
7695 fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
7697 /* Update the previous switch rule of "MAC forward to VSI" to
7698 * "MAC fwd to VSI list"
7700 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
7702 ice_debug(hw, ICE_DBG_SW,
7703 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
7704 tmp_fltr.fwd_id.hw_vsi_id, status);
7707 fm_list->vsi_list_info->ref_cnt--;
7709 /* Remove the VSI list since it is no longer used */
7710 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
7712 ice_debug(hw, ICE_DBG_SW,
7713 "Failed to remove VSI list %d, error %d\n",
7714 vsi_list_id, status);
7718 LIST_DEL(&vsi_list_info->list_entry);
7719 ice_free(hw, vsi_list_info);
7720 fm_list->vsi_list_info = NULL;
7727 * ice_rem_adv_rule - removes existing advanced switch rule
7728 * @hw: pointer to the hardware structure
7729 * @lkups: information on the words that needs to be looked up. All words
7730 * together makes one recipe
7731 * @lkups_cnt: num of entries in the lkups array
7732 * @rinfo: Its the pointer to the rule information for the rule
7734 * This function can be used to remove 1 rule at a time. The lkups is
7735 * used to describe all the words that forms the "lookup" portion of the
7736 * rule. These words can span multiple protocols. Callers to this function
7737 * need to pass in a list of protocol headers with lookup information along
7738 * and mask that determines which words are valid from the given protocol
7739 * header. rinfo describes other information related to this rule such as
7740 * forwarding IDs, priority of this rule, etc.
7743 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7744 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
7746 struct ice_adv_fltr_mgmt_list_entry *list_elem;
7747 struct ice_prot_lkup_ext lkup_exts;
7748 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
7749 enum ice_status status = ICE_SUCCESS;
7750 bool remove_rule = false;
7751 u16 i, rid, vsi_handle;
/* Rebuild the protocol/offset extraction words from the lookups so
 * the owning recipe can be located.
 */
7753 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
7754 for (i = 0; i < lkups_cnt; i++) {
7757 if (lkups[i].type >= ICE_PROTOCOL_LAST)
7760 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
7765 /* Create any special protocol/offset pairs, such as looking at tunnel
7766 * bits by extracting metadata
7768 status = ice_add_special_words(rinfo, &lkup_exts);
7772 rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
7773 /* If did not find a recipe that match the existing criteria */
7774 if (rid == ICE_MAX_NUM_RECIPES)
7775 return ICE_ERR_PARAM;
7777 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
7778 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
7779 /* the rule is already removed */
/* Decide under the lock whether the HW rule itself must go, or only
 * this VSI's membership in the rule's VSI list.
 * NOTE(review): the vsi_count > 1 branch and the else branch appear
 * identical in this excerpt; the differing statements are elided --
 * verify against the full source before changing.
 */
7782 ice_acquire_lock(rule_lock);
7783 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
7785 } else if (list_elem->vsi_count > 1) {
7786 remove_rule = false;
7787 vsi_handle = rinfo->sw_act.vsi_handle;
7788 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
7790 vsi_handle = rinfo->sw_act.vsi_handle;
7791 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
7793 ice_release_lock(rule_lock);
7796 if (list_elem->vsi_count == 0)
7799 ice_release_lock(rule_lock);
7801 struct ice_aqc_sw_rules_elem *s_rule;
7804 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
7806 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
7809 return ICE_ERR_NO_MEMORY;
7810 s_rule->pdata.lkup_tx_rx.act = 0;
7811 s_rule->pdata.lkup_tx_rx.index =
7812 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
7813 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
7814 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
7816 ice_aqc_opc_remove_sw_rules, NULL);
/* DOES_NOT_EXIST is tolerated: the HW rule is gone either way, so
 * drop the book-keeping entry too.
 */
7817 if (status == ICE_SUCCESS || status == ICE_ERR_DOES_NOT_EXIST) {
7818 struct ice_switch_info *sw = hw->switch_info;
7820 ice_acquire_lock(rule_lock);
7821 LIST_DEL(&list_elem->list_entry);
7822 ice_free(hw, list_elem->lkups);
7823 ice_free(hw, list_elem);
7824 ice_release_lock(rule_lock);
7825 if (LIST_EMPTY(&sw->recp_list[rid].filt_rules))
7826 sw->recp_list[rid].adv_rule = false;
7828 ice_free(hw, s_rule);
7834 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
7835 * @hw: pointer to the hardware structure
7836 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
7838 * This function is used to remove 1 rule at a time. The removal is based on
7839 * the remove_entry parameter. This function will remove rule for a given
7840 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
7843 ice_rem_adv_rule_by_id(struct ice_hw *hw,
7844 struct ice_rule_query_data *remove_entry)
7846 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7847 struct LIST_HEAD_TYPE *list_head;
7848 struct ice_adv_rule_info rinfo;
7849 struct ice_switch_info *sw;
7851 sw = hw->switch_info;
/* The recipe must have been created before any of its rules can be
 * looked up by ID.
 */
7852 if (!sw->recp_list[remove_entry->rid].recp_created)
7853 return ICE_ERR_PARAM;
7854 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
7855 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
7857 if (list_itr->rule_info.fltr_rule_id ==
7858 remove_entry->rule_id) {
/* Copy rule info so the caller's VSI handle can be substituted
 * without mutating the stored entry, then delegate the removal.
 */
7859 rinfo = list_itr->rule_info;
7860 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
7861 return ice_rem_adv_rule(hw, list_itr->lkups,
7862 list_itr->lkups_cnt, &rinfo);
7865 /* either list is empty or unable to find rule */
7866 return ICE_ERR_DOES_NOT_EXIST;
7870 * ice_rem_adv_for_vsi - removes existing advanced switch rules for a
7872 * @hw: pointer to the hardware structure
7873 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
7875 * This function is used to remove all the rules for a given VSI and as soon
7876 * as removing a rule fails, it will return immediately with the error code,
7877 * else it will return ICE_SUCCESS
7879 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
7881 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7882 struct ice_vsi_list_map_info *map_info;
7883 struct LIST_HEAD_TYPE *list_head;
7884 struct ice_adv_rule_info rinfo;
7885 struct ice_switch_info *sw;
7886 enum ice_status status;
7887 u16 vsi_list_id = 0;
7890 sw = hw->switch_info;
/* Visit every created recipe that holds advanced rules and remove
 * the rules this VSI subscribes to.
 */
7891 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
7892 if (!sw->recp_list[rid].recp_created)
7894 if (!sw->recp_list[rid].adv_rule)
7896 list_head = &sw->recp_list[rid].filt_rules;
7898 LIST_FOR_EACH_ENTRY(list_itr, list_head,
7899 ice_adv_fltr_mgmt_list_entry, list_entry) {
7900 map_info = ice_find_vsi_list_entry(&sw->recp_list[rid],
7905 rinfo = list_itr->rule_info;
7906 rinfo.sw_act.vsi_handle = vsi_handle;
7907 status = ice_rem_adv_rule(hw, list_itr->lkups,
7908 list_itr->lkups_cnt, &rinfo);
7918 * ice_replay_fltr - Replay all the filters stored by a specific list head
7919 * @hw: pointer to the hardware structure
7920 * @list_head: list for which filters needs to be replayed
7921 * @recp_id: Recipe ID for which rules need to be replayed
7923 static enum ice_status
7924 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
7926 struct ice_fltr_mgmt_list_entry *itr;
7927 enum ice_status status = ICE_SUCCESS;
7928 struct ice_sw_recipe *recp_list;
7929 u8 lport = hw->port_info->lport;
7930 struct LIST_HEAD_TYPE l_head;
7932 if (LIST_EMPTY(list_head))
7935 recp_list = &hw->switch_info->recp_list[recp_id];
7936 /* Move entries from the given list_head to a temporary l_head so that
7937 * they can be replayed. Otherwise when trying to re-add the same
7938 * filter, the function will return already exists
7940 LIST_REPLACE_INIT(list_head, &l_head);
7942 /* Mark the given list_head empty by reinitializing it so filters
7943 * could be added again by *handler
7945 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
7947 struct ice_fltr_list_entry f_entry;
7949 f_entry.fltr_info = itr->fltr_info;
/* Single-VSI, non-VLAN filters replay directly. */
7950 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
7951 status = ice_add_rule_internal(hw, recp_list, lport,
7953 if (status != ICE_SUCCESS)
7958 /* Add a filter per VSI separately */
7963 ice_find_first_bit(itr->vsi_list_info->vsi_map,
7965 if (!ice_is_vsi_valid(hw, vsi_handle))
/* Consume each set bit so every member VSI is replayed once. */
7968 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
7969 f_entry.fltr_info.vsi_handle = vsi_handle;
7970 f_entry.fltr_info.fwd_id.hw_vsi_id =
7971 ice_get_hw_vsi_num(hw, vsi_handle);
7972 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
7973 if (recp_id == ICE_SW_LKUP_VLAN)
7974 status = ice_add_vlan_internal(hw, recp_list,
7977 status = ice_add_rule_internal(hw, recp_list,
7980 if (status != ICE_SUCCESS)
7985 /* Clear the filter management list */
7986 ice_rem_sw_rule_info(hw, &l_head);
7991 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
7992 * @hw: pointer to the hardware structure
7994 * NOTE: This function does not clean up partially added filters on error.
7995 * It is up to caller of the function to issue a reset or fail early.
7997 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
7999 struct ice_switch_info *sw = hw->switch_info;
8000 enum ice_status status = ICE_SUCCESS;
8003 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8004 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
8006 status = ice_replay_fltr(hw, i, head);
8007 if (status != ICE_SUCCESS)
8014 * ice_replay_vsi_fltr - Replay filters for requested VSI
8015 * @hw: pointer to the hardware structure
8016 * @pi: pointer to port information structure
8017 * @sw: pointer to switch info struct for which function replays filters
8018 * @vsi_handle: driver VSI handle
8019 * @recp_id: Recipe ID for which rules need to be replayed
8020 * @list_head: list for which filters need to be replayed
8022 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
8023 * It is required to pass valid VSI handle.
8025 static enum ice_status
8026 ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
8027 struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
8028 struct LIST_HEAD_TYPE *list_head)
8030 struct ice_fltr_mgmt_list_entry *itr;
8031 enum ice_status status = ICE_SUCCESS;
8032 struct ice_sw_recipe *recp_list;
8035 if (LIST_EMPTY(list_head))
8037 recp_list = &sw->recp_list[recp_id];
8038 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
8040 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
8042 struct ice_fltr_list_entry f_entry;
8044 f_entry.fltr_info = itr->fltr_info;
8045 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
8046 itr->fltr_info.vsi_handle == vsi_handle) {
8047 /* update the src in case it is VSI num */
8048 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
8049 f_entry.fltr_info.src = hw_vsi_id;
8050 status = ice_add_rule_internal(hw, recp_list,
8053 if (status != ICE_SUCCESS)
8057 if (!itr->vsi_list_info ||
8058 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
8060 /* Clearing it so that the logic can add it back */
8061 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
8062 f_entry.fltr_info.vsi_handle = vsi_handle;
8063 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
8064 /* update the src in case it is VSI num */
8065 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
8066 f_entry.fltr_info.src = hw_vsi_id;
8067 if (recp_id == ICE_SW_LKUP_VLAN)
8068 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
8070 status = ice_add_rule_internal(hw, recp_list,
8073 if (status != ICE_SUCCESS)
8081 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
8082 * @hw: pointer to the hardware structure
8083 * @vsi_handle: driver VSI handle
8084 * @list_head: list for which filters need to be replayed
8086 * Replay the advanced rule for the given VSI.
8088 static enum ice_status
8089 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
8090 struct LIST_HEAD_TYPE *list_head)
8092 struct ice_rule_query_data added_entry = { 0 };
8093 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
8094 enum ice_status status = ICE_SUCCESS;
8096 if (LIST_EMPTY(list_head))
8098 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
8100 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
8101 u16 lk_cnt = adv_fltr->lkups_cnt;
8103 if (vsi_handle != rinfo->sw_act.vsi_handle)
8105 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
8114 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
8115 * @hw: pointer to the hardware structure
8116 * @pi: pointer to port information structure
8117 * @vsi_handle: driver VSI handle
8119 * Replays filters for requested VSI via vsi_handle.
8122 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
8125 struct ice_switch_info *sw = hw->switch_info;
8126 enum ice_status status;
8129 /* Update the recipes that were created */
8130 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8131 struct LIST_HEAD_TYPE *head;
8133 head = &sw->recp_list[i].filt_replay_rules;
8134 if (!sw->recp_list[i].adv_rule)
8135 status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
8138 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
8139 if (status != ICE_SUCCESS)
8147 * ice_rm_all_sw_replay_rule - helper function to delete filter replay rules
8148 * @hw: pointer to the HW struct
8149 * @sw: pointer to switch info struct for which function removes filters
8151 * Deletes the filter replay rules for given switch
8153 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
8160 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8161 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
8162 struct LIST_HEAD_TYPE *l_head;
8164 l_head = &sw->recp_list[i].filt_replay_rules;
8165 if (!sw->recp_list[i].adv_rule)
8166 ice_rem_sw_rule_info(hw, l_head);
8168 ice_rem_adv_rule_info(hw, l_head);
8174 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
8175 * @hw: pointer to the HW struct
8177 * Deletes the filter replay rules.
8179 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
8181 ice_rm_sw_replay_rule_info(hw, hw->switch_info);